code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import copy
import json
import logging
import os
import sys
from io import open
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
class PretrainedConfig(object):
    """Base class for all model configurations.

    Configuration attributes live directly in ``self.__dict__``; this base
    class provides JSON (de)serialization plus save/load helpers shared by
    the concrete ``*Config`` subclasses below.
    """

    # Shortcut-name -> archive-URL map; empty here, overridden by subclasses.
    pretrained_config_archive_map = {}

    def __init__(self, **kwargs):
        # Attributes are assigned by subclasses / `from_dict`; nothing to do here.
        pass

    def save_pretrained(self, save_directory):
        """Save this configuration as ``save_directory``/config.json so it can
        be re-loaded with :func:`~transformers.PretrainedConfig.from_pretrained`.
        """
        # Explicit check instead of `assert`, which is stripped under `python -O`.
        if not os.path.isdir(save_directory):
            raise AssertionError(
                "Saving path should be a directory where the model and configuration can be saved")
        # If we save using the predefined names, we can load using `from_pretrained`
        output_config_file = os.path.join(save_directory, CONFIG_NAME)
        self.to_json_file(output_config_file)
        logger.info("Configuration saved in {}".format(output_config_file))

    @classmethod
    def from_pretrained(cls, pretrained_path, **kwargs):
        """Load a configuration from the JSON file at ``pretrained_path``,
        then override any attribute with the given keyword arguments.
        """
        # `os.path.join(pretrained_path)` with a single argument was a no-op;
        # use the path directly.
        config = cls.from_json_file(pretrained_path)
        # Update config with kwargs if needed
        for key, value in kwargs.items():
            setattr(config, key, value)
        logger.info("Model config %s", config)
        return config

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `Config` from a Python dictionary of parameters."""
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            setattr(config, key, value)
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a config from a JSON file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __eq__(self, other):
        # Guard against non-config operands (e.g. ints have no `__dict__`,
        # which previously raised AttributeError). Returning NotImplemented
        # lets Python fall back to the other operand / identity comparison.
        if not isinstance(other, PretrainedConfig):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __repr__(self):
        # `to_json_string` already returns a str; no extra str() needed.
        return self.to_json_string()

    def to_dict(self):
        """Serializes this instance to a Python dictionary (deep copy)."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"

    def to_json_file(self, json_file_path):
        """Save this instance to a JSON file."""
        with open(json_file_path, "w", encoding='utf-8') as writer:
            writer.write(self.to_json_string())
class BertConfig(PretrainedConfig):
    r"""
    :class:`~transformers.BertConfig` is the configuration class to store the
    configuration of a `BertModel`.

    Arguments:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
            `BertModel`, or the path to a JSON config file whose keys are
            loaded as attributes.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
    """

    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(BertConfig, self).__init__(**kwargs)
        # The dead Python-2 `unicode` compatibility branch was removed: on
        # Python 3 it could never trigger and referenced an undefined name.
        if isinstance(vocab_size_or_config_json_file, str):
            # A str first argument is a path to a JSON config file; every
            # key/value in the file becomes an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                setattr(self, key, value)
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
class DistillBertConfig(PretrainedConfig):
    r"""Configuration for a distilled BERT model.

    Arguments:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
            `BertModel`, or the path to a JSON config file whose keys are
            loaded as attributes.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
        sequence_classif_dropout_prob: Dropout used by the sequence
            classification head.
    """

    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 hidden_size=768,
                 num_hidden_layers=6,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 sequence_classif_dropout_prob=0.2,
                 **kwargs):
        super(DistillBertConfig, self).__init__(**kwargs)
        # The dead Python-2 `unicode` compatibility branch was removed: on
        # Python 3 it could never trigger and referenced an undefined name.
        if isinstance(vocab_size_or_config_json_file, str):
            # A str first argument is a path to a JSON config file; every
            # key/value in the file becomes an attribute.
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                setattr(self, key, value)
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
            self.sequence_classif_dropout_prob = sequence_classif_dropout_prob
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
                             " or the path to a pretrained model config file (str)")
class ALBertConfig(PretrainedConfig):
    r"""Constructs AlbertConfig.

    Args:
        vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`,
            or the path to a JSON config file whose keys are loaded as attributes.
        embedding_size: Size of the (factorized) token embeddings.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the Transformer encoder.
        num_hidden_groups: Number of groups for the hidden layers; parameters in
            the same group are shared.
        num_attention_heads: Number of attention heads for each attention layer in
            the Transformer encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the Transformer encoder.
        inner_group_num: int, number of inner repetitions of attention and ffn.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `BertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
    """

    def __init__(self,
                 vocab_size_or_config_json_file=21128,
                 embedding_size=128,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_hidden_groups=1,
                 num_attention_heads=64,
                 intermediate_size=16384,
                 inner_group_num=1,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02,
                 layer_norm_eps=1e-12,
                 **kwargs):
        super(ALBertConfig, self).__init__(**kwargs)
        # A str (or legacy Python-2 unicode) first argument is treated as a
        # path to a JSON config file; every key in the file becomes an
        # attribute. On Python 3 the `unicode` operand is never evaluated
        # because the version check short-circuits first.
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
                                                               and isinstance(vocab_size_or_config_json_file, unicode)):
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # An int first argument is the vocabulary size; all other
            # hyper-parameters come from the keyword defaults above.
            self.vocab_size = vocab_size_or_config_json_file
            self.embedding_size = embedding_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_hidden_groups = num_hidden_groups
            self.num_attention_heads = num_attention_heads
            self.inner_group_num = inner_group_num
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
            self.layer_norm_eps = layer_norm_eps
        else:
            raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)") | finetune/configuration_bert.py | import copy
import json
import logging
import os
import sys
from io import open
logger = logging.getLogger(__name__)
CONFIG_NAME = "config.json"
class PretrainedConfig(object):
pretrained_config_archive_map = {}
def __init__(self, **kwargs):
pass
def save_pretrained(self, save_directory):
""" Save a configuration object to the directory `save_directory`, so that it
can be re-loaded using the :func:`~transformers.PretrainedConfig.from_pretrained` class method.
"""
assert os.path.isdir(
save_directory), "Saving path should be a directory where the model and configuration can be saved"
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file)
logger.info("Configuration saved in {}".format(output_config_file))
@classmethod
def from_pretrained(cls, pretrained_path, **kwargs):
json_file = os.path.join(pretrained_path)
# Load config
config = cls.from_json_file(json_file)
# Update config with kwargs if needed
for key, value in kwargs.items():
setattr(config, key, value)
logger.info("Model config %s", config)
return config
@classmethod
def from_dict(cls, json_object):
"""Constructs a `Config` from a Python dictionary of parameters."""
config = cls(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
setattr(config, key, value)
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
class BertConfig(PretrainedConfig):
r"""
:class:`~transformers.BertConfig` is the configuration class to store the configuration of a
`BertModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
def __init__(self,
vocab_size_or_config_json_file=21128,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(BertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
class DistillBertConfig(PretrainedConfig):
r"""
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
def __init__(self,
vocab_size_or_config_json_file=21128,
hidden_size=768,
num_hidden_layers=6,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
initializer_range=0.02,
layer_norm_eps=1e-12,
sequence_classif_dropout_prob=0.2,
**kwargs):
super(DistillBertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.sequence_classif_dropout_prob = sequence_classif_dropout_prob
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
class ALBertConfig(PretrainedConfig):
r"""Constructs AlbertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_hidden_groups: Number of group for the hidden layers, parameters in
the same group are shared.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
inner_group_num: int, number of inner repetition of attention and ffn.
down_scale_factor: float, the scale to apply
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob: The dropout probabilitiy for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The sttdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
def __init__(self,
vocab_size_or_config_json_file=21128,
embedding_size=128,
hidden_size=768,
num_hidden_layers=12,
num_hidden_groups=1,
num_attention_heads=64,
intermediate_size=16384,
inner_group_num=1,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
**kwargs):
super(ALBertConfig, self).__init__(**kwargs)
if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
and isinstance(vocab_size_or_config_json_file, unicode)):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.embedding_size = embedding_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_hidden_groups = num_hidden_groups
self.num_attention_heads = num_attention_heads
self.inner_group_num = inner_group_num
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)") | 0.688573 | 0.183301 |
from enum import unique
from django.db import transaction
from .models import Teacher, Student, Assignment, AssignmentCompleted, Classroom
from rest_framework import serializers
class TeacherSerializer(serializers.ModelSerializer):
    """Serializer for Teacher accounts.

    `password` is write-only (rendered as a password input); `type` is
    read-only and comes from the model.
    """
    # Expose the hyperlinked detail URL under the `id` key instead of the pk.
    id = serializers.HyperlinkedRelatedField(
        view_name="teacher-detail", read_only=True)

    class Meta:
        model = Teacher
        fields = ["id", "username", "password",
                  "profile_pic", "first_name", "last_name", "type"]
        extra_kwargs = {
            "password": {
                "write_only": True,
                "style": {"input_type": "password"}
            },
            "type": {"read_only": True}
        }

    def create(self, validated_data):
        """ Create and return a new user """
        # create_user is used (rather than create) so the password is hashed.
        # NOTE(review): `profile_pic` is accepted by the serializer but not
        # forwarded here, so it is dropped on creation -- confirm intent.
        user = Teacher.objects.create_user(
            username=validated_data["username"],
            first_name=validated_data["first_name"],
            last_name=validated_data["last_name"],
            password=validated_data["password"]
        )
        return user
class StudentSerializer(serializers.ModelSerializer):
    """Serializer for Student accounts.

    `password` is write-only (rendered as a password input); `type` is
    read-only and comes from the model.
    """

    class Meta:
        model = Student
        fields = ["id", "username", "password",
                  "first_name", "last_name", "profile_pic", "type"]
        extra_kwargs = {
            "password": {"write_only": True, "style": {"input_type": "password"}},
            "type": {"read_only": True}
        }

    def create(self, validated_data):
        """ Create and return a new user """
        # create_user is used (rather than create) so the password is hashed.
        # NOTE(review): `profile_pic` is accepted by the serializer but not
        # forwarded here, so it is dropped on creation -- confirm intent.
        user = Student.objects.create_user(
            username=validated_data["username"],
            first_name=validated_data["first_name"],
            last_name=validated_data["last_name"],
            password=validated_data["password"]
        )
        return user
class AssignmentSerializer(serializers.ModelSerializer):
    """Serializer for Assignment.

    All model fields are exposed; `owner_teacher` is read-only (set
    server-side, presumably from the requesting user -- verify in the view).
    """

    class Meta:
        model = Assignment
        fields = "__all__"
        extra_kwargs = {"owner_teacher": {"read_only": True}}
class AssignmentCompletedSerializer(serializers.ModelSerializer):
    """Serializer for AssignmentCompleted (a student's submission).

    All model fields are exposed; `owner_student` is read-only (set
    server-side, presumably from the requesting user -- verify in the view).
    """

    class Meta:
        model = AssignmentCompleted
        fields = "__all__"
        extra_kwargs = {
            "owner_student": {"read_only": True}
        }
class ClassroomSerializer(serializers.ModelSerializer):
    """Serializer for Classroom.

    All model fields are exposed; `owner_teacher` is read-only (set
    server-side, presumably from the requesting user -- verify in the view).
    """

    class Meta:
        model = Classroom
        fields = "__all__"
        extra_kwargs = {
            "owner_teacher": {"read_only": True}
        } | assignments/serializers.py | from enum import unique
from django.db import transaction
from .models import Teacher, Student, Assignment, AssignmentCompleted, Classroom
from rest_framework import serializers
class TeacherSerializer(serializers.ModelSerializer):
id = serializers.HyperlinkedRelatedField(
view_name="teacher-detail", read_only=True)
class Meta:
model = Teacher
fields = ["id", "username", "password",
"profile_pic", "first_name", "last_name", "type"]
extra_kwargs = {
"password": {
"write_only": True,
"style": {"input_type": "password"}
},
"type": {"read_only": True}
}
def create(self, validated_data):
""" Create and return a new user """
user = Teacher.objects.create_user(
username=validated_data["username"],
first_name=validated_data["first_name"],
last_name=validated_data["last_name"],
password=validated_data["password"]
)
return user
class StudentSerializer(serializers.ModelSerializer):
class Meta:
model = Student
fields = ["id", "username", "password",
"first_name", "last_name", "profile_pic", "type"]
extra_kwargs = {
"password": {"write_only": True, "style": {"input_type": "password"}},
"type": {"read_only": True}
}
def create(self, validated_data):
""" Create and return a new user """
user = Student.objects.create_user(
username=validated_data["username"],
first_name=validated_data["first_name"],
last_name=validated_data["last_name"],
password=validated_data["password"]
)
return user
class AssignmentSerializer(serializers.ModelSerializer):
class Meta:
model = Assignment
fields = "__all__"
extra_kwargs = {"owner_teacher": {"read_only": True}}
class AssignmentCompletedSerializer(serializers.ModelSerializer):
class Meta:
model = AssignmentCompleted
fields = "__all__"
extra_kwargs = {
"owner_student": {"read_only": True}
}
class ClassroomSerializer(serializers.ModelSerializer):
class Meta:
model = Classroom
fields = "__all__"
extra_kwargs = {
"owner_teacher": {"read_only": True}
} | 0.722429 | 0.19112 |
import subprocess
import os
from db_handler import DBHandler, DataHandler
from defs import *
import util_functions
from file_scanner import FileScanner
import sys
import argparse
from csv import DictReader
def main():
    """Entry point: scan the whole machine, or only the packages listed in a
    user-supplied CSV, for vulnerabilities.

    Exits with status 0 when the user aborts the scan.
    """
    try:
        data_handler: DataHandler = DBHandler()
        checker: Checker = Checker(data_handler)
        # A missing path argument means "scan everything installed".
        input_path = get_csv_path()
        print("Welcome to Vulnerability Checker")
        if input_path is None:
            print("Scanning the all packages on machine for vulnerabilities...\n")
            checker.scan_machine()
        else:
            print("Scanning the given packages for vulnerabilities...\n")
            checker.scan_user_list(input_path)
    # Both interrupts mean the same thing (user aborted); the two previously
    # duplicated handlers are merged into one clause.
    except (User_Exit_Interrupt, KeyboardInterrupt):
        print("\n\nUser stopped the scan. Exiting program...")
        sys.exit(0)
def get_csv_path():
    """Parse the command line and return the optional CSV path (None if absent)."""
    parser = argparse.ArgumentParser(
        description='Vulnerability Checker is a python app that scans installed packages on Linux machines for vulnerabilities. The app is compatible for Debian Linux distributions as well as Red Hat Linux and utilizes DPKG and RPM package managers (respectively)')
    # Single optional positional argument: the CSV file to scan.
    parser.add_argument(
        'Path',
        metavar='path',
        type=str,
        nargs='?',
        help='Path to csv with package list to scan. CSV Format: < name >,< version >,< architecture >')
    return parser.parse_args().Path
class Checker():
    """Scans installed packages (via dpkg or rpm) for vulnerable files.

    For each library it collects per-file vulnerability entries and
    suspicious text lines with a ``FileScanner`` and persists them through
    the injected ``DataHandler``.
    """

    def __init__(self, data_handler: DataHandler):
        # Select the package-manager command map for this distro (dpkg vs rpm).
        self.PackageOrganized = DPKG_MAP if util_functions.is_debian() else RPM_MAP
        self.file_scanner: FileScanner = FileScanner(
            VUL_FUNC_PATH, STRINGS_PATH)
        self.data_handler: DataHandler = data_handler
        self.data_handler.create_tables()
        # Per-library accumulators, reset for every library in scan_library_list.
        self.vul_entry_list = []
        self.strings_entry_list = []

    def scan_machine(self):
        """Scan every package installed on this machine."""
        # NOTE(review): the rpm branch passes the *function object* (no call
        # parentheses); scan_library_list compensates by calling non-list
        # arguments. Looks like a missing `()` -- confirm before changing.
        libs = util_functions.get_installed_libraries_debian(
        ) if self.PackageOrganized["manager"] == "dpkg" else util_functions.get_installed_libraries_rpm
        self.scan_library_list(libs)

    def scan_user_list(self, csv_path):
        """Scan only the packages listed in the CSV at `csv_path`.

        Expected CSV columns (no header row): name, version, architecture.
        """
        with open(csv_path, 'r') as read_obj:
            dict_reader = DictReader(read_obj, fieldnames=[
                'name', 'version', 'architecture'])
            # Materialize the reader into a list of row dicts.
            list_of_dict = list(dict_reader)
            self.scan_library_list(list_of_dict)

    def scan_library_list(self, libs: list):
        """Scan each library in `libs`, flushing collected entries to the DB
        after every library that produced at least one vulnerability entry."""
        # `libs` may be a callable (see scan_machine) -- call it to get the list.
        if not isinstance(libs, list):
            libs = libs()
        for lib in libs:
            # Reset the accumulators so each library's entries are inserted
            # independently (scan_library appends into them).
            self.vul_entry_list = []
            self.strings_entry_list = []
            self.scan_library(lib)
            if len(self.vul_entry_list) > 0:
                self.data_handler.insert(
                    self.vul_entry_list, self.strings_entry_list)

    def get_library_version(self, name):
        """ gets the library version
        Args:
            name (str): library name
        Returns:
            str : version of the library
        """
        flag = self.PackageOrganized["version"]
        manager = self.PackageOrganized["manager"]
        # NOTE(review): shell=True with an interpolated package name is a
        # shell-injection risk if `name` ever comes from untrusted input.
        version = subprocess.run(f"{manager} {flag} {name} | grep 'Version'",
                                 shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')
        # Keep only the text after "Version:" and strip surrounding whitespace.
        version = ((version.split(":"))[1]).strip(' \t\n\r')
        return version

    def get_paths_files_in_library(self, name):
        """gets all files from a library
        Args:
            name (str): the name of the library
        Returns:
            list: list of paths (directories and missing paths filtered out)
        """
        manager = self.PackageOrganized["manager"]
        flag = self.PackageOrganized["listOfFiles"]
        # NOTE(review): same shell=True injection concern as get_library_version.
        output = subprocess.run(f"{manager} {flag} {name}", shell=True,
                                stdout=subprocess.PIPE).stdout.decode('utf-8')
        path_list = output.split("\n")
        # Keep only regular files that actually exist on disk.
        path_list_filtered = list(
            filter(lambda x: os.path.isfile(x), path_list))
        return path_list_filtered

    def scan_library(self, library):
        """does checkPath for each file in a library.
        Args:
            library: library name (str), or a CSV row dict when the dpkg
                path extracts its 'name' field.
        """
        manager = self.PackageOrganized["manager"]
        # NOTE(review): only the dpkg branch unwraps CSV row dicts; an rpm
        # scan fed by scan_user_list would pass a dict straight into
        # get_library_version -- likely a latent bug, confirm.
        if manager == "dpkg":
            library = library['name']
        version = self.get_library_version(library)
        # Skip libraries the DB already knows at this exact version.
        if self.data_handler.should_scan_library(version, library, manager):
            print(
                f"scanning library {library}, version {version}, package manager = {manager}")
            path_list = self.get_paths_files_in_library(library)
            for path in path_list:
                self.checkPath(path, library, version, manager)
        else:
            print(
                f"skipping library {library}, version {version}, package manager = {manager}")

    def exists_in_package(self, path: str):
        # True if this path was already recorded for the current library
        # (avoids re-scanning symlink targets etc. within one package).
        for entry in self.vul_entry_list:
            if entry[FULL_PATH] == path:
                return True
        return False

    def checkPath(self, path: str, package_name: str, version: str, package_manager: str):
        """Scan one file and accumulate results.

        Appends an entry dict for the vulnerability table to
        ``self.vul_entry_list`` and, for TEXT files, per-line rows (with
        suspected secret keys) to ``self.strings_entry_list``. Symbolic
        links are followed recursively.
        """
        if not self.exists_in_package(path) and self.data_handler.should_check(path, version, package_name, package_manager):
            path = path.strip(' \t\n\r')
            if len(path) > 0:
                entry_dict = {}
                file_info, file_type = util_functions.get_file_type(path)
                if file_type == "TEXT" or file_type == "ELF":
                    if file_type == "TEXT":
                        # Text files also yield the matching lines themselves.
                        entry_dict, lines = self.file_scanner.txtFileChecker(
                            path)
                    else:
                        # ELF binaries yield only the entry dict.
                        entry_dict = self.file_scanner.scan_binary(path)
                    if entry_dict:
                        self.fill_entry_dict(
                            entry_dict, path, package_name, version, package_manager, file_info)
                        self.vul_entry_list.append(entry_dict)
                        # `lines` is only bound in the TEXT branch above.
                        if file_type == "TEXT":
                            line_list = []
                            for line in lines:
                                line_list.append({PACKAGE: entry_dict[PACKAGE],
                                                  PACKAGE_MANAGER: entry_dict[PACKAGE_MANAGER],
                                                  VERSION: entry_dict[VERSION],
                                                  FULL_PATH: entry_dict[FULL_PATH],
                                                  LINE: line})
                            if len(line_list) > 0:
                                self.strings_entry_list.extend(line_list)
                elif (file_type == "LINK"):
                    # Follow the symlink and scan its target instead.
                    pointed_to = util_functions.get_path_symb_link(path)
                    self.checkPath(pointed_to, package_name,
                                   version, package_manager)

    def fill_entry_dict(self, entry_dict: dict, path: str, package_name: str, version: str, package_manager: str, file_info: str):
        """Populate the common DB columns (package info, path parts, `file`
        output and content hashes) on `entry_dict` in place."""
        entry_dict[PACKAGE_MANAGER] = package_manager
        entry_dict[PACKAGE] = package_name
        entry_dict[VERSION] = version
        entry_dict[FULL_PATH] = path
        path_file = os.path.dirname(path)
        # Note: DIRECTORY stores only the basename of the parent directory,
        # not the full directory path.
        entry_dict[DIRECTORY] = os.path.basename(path_file)
        entry_dict[FILE_NAME] = os.path.split(path)[-1]
        entry_dict[FILE_OUTPUT] = file_info
        hash_dict = self.file_scanner.generate_hashes(path)
        entry_dict[SHA1] = hash_dict[SHA1]
        entry_dict[SHA256] = hash_dict[SHA256]
        entry_dict[MD5] = hash_dict[MD5]
        entry_dict[B2] = hash_dict[B2]
if __name__ == '__main__':
main() | src/checker.py | import subprocess
import os
from db_handler import DBHandler, DataHandler
from defs import *
import util_functions
from file_scanner import FileScanner
import sys
import argparse
from csv import DictReader
def main():
try:
data_handler: DataHandler = DBHandler()
checker: Checker = Checker(data_handler)
# Create the parser
input_path = get_csv_path()
print("Welcome to Vulnerability Checker")
if input_path is None:
print("Scanning the all packages on machine for vulnerabilities...\n")
checker.scan_machine()
else:
print("Scanning the given packages for vulnerabilities...\n")
checker.scan_user_list(input_path)
except User_Exit_Interrupt:
print("\n\nUser stopped the scan. Exiting program...")
sys.exit(0)
except KeyboardInterrupt:
print("\n\nUser stopped the scan. Exiting program...")
sys.exit(0)
def get_csv_path():
# Create the parser
my_parser = argparse.ArgumentParser(
description='Vulnerability Checker is a python app that scans installed packages on Linux machines for vulnerabilities. The app is compatible for Debian Linux distributions as well as Red Hat Linux and utilizes DPKG and RPM package managers (respectively)')
my_parser.add_argument('Path',
metavar='path',
type=str,
help='Path to csv with package list to scan. CSV Format: < name >,< version >,< architecture >',
nargs='?')
args = my_parser.parse_args()
return args.Path
class Checker():
def __init__(self, data_handler: DataHandler):
self.PackageOrganized = DPKG_MAP if util_functions.is_debian() else RPM_MAP
self.file_scanner: FileScanner = FileScanner(
VUL_FUNC_PATH, STRINGS_PATH)
self.data_handler: DataHandler = data_handler
self.data_handler.create_tables()
self.vul_entry_list = []
self.strings_entry_list = []
def scan_machine(self):
libs = util_functions.get_installed_libraries_debian(
) if self.PackageOrganized["manager"] == "dpkg" else util_functions.get_installed_libraries_rpm
self.scan_library_list(libs)
def scan_user_list(self, csv_path):
with open(csv_path, 'r') as read_obj:
dict_reader = DictReader(read_obj, fieldnames=[
'name', 'version', 'architecture'])
list_of_dict = list(dict_reader)
# print list of dict i.e. rows
self.scan_library_list(list_of_dict)
def scan_library_list(self, libs: list):
if not isinstance(libs, list):
libs = libs()
for lib in libs:
# scan library returns list of vulnerabiliy entries and string entries
self.vul_entry_list = []
self.strings_entry_list = []
self.scan_library(lib)
if len(self.vul_entry_list) > 0:
self.data_handler.insert(
self.vul_entry_list, self.strings_entry_list)
def get_library_version(self, name):
""" gets the library version
Args:
name (str): library name
Returns:
str : version of the library
"""
flag = self.PackageOrganized["version"]
manager = self.PackageOrganized["manager"]
version = subprocess.run(f"{manager} {flag} {name} | grep 'Version'",
shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')
version = ((version.split(":"))[1]).strip(' \t\n\r')
return version
def get_paths_files_in_library(self, name):
"""gets all files from a library
Args:
name (str): the name of the library
Returns:
list: list of paths
"""
manager = self.PackageOrganized["manager"]
flag = self.PackageOrganized["listOfFiles"]
output = subprocess.run(f"{manager} {flag} {name}", shell=True,
stdout=subprocess.PIPE).stdout.decode('utf-8')
path_list = output.split("\n")
path_list_filtered = list(
filter(lambda x: os.path.isfile(x), path_list))
return path_list_filtered
def scan_library(self, library):
"""does checkPath for each file in a library.
Args:
library (str): str that represent library name
Returns:
vul (list):
string (list):
"""
manager = self.PackageOrganized["manager"]
if manager == "dpkg":
library = library['name']
version = self.get_library_version(library)
if self.data_handler.should_scan_library(version, library, manager):
print(
f"scanning library {library}, version {version}, package manager = {manager}")
path_list = self.get_paths_files_in_library(library)
for path in path_list:
self.checkPath(path, library, version, manager)
else:
print(
f"skipping library {library}, version {version}, package manager = {manager}")
def exists_in_package(self, path: str):
for entry in self.vul_entry_list:
if entry[FULL_PATH] == path:
return True
return False
"""
row_dictionary: row- list of strings that represents row in csv to add
"""
def checkPath(self, path: str, package_name: str, version: str, package_manager: str):
"""receives path and package info. Returns entry for vul table and lines if relevant
Returns:
dictionary: all the information of the file for the DB
list: list of lines that has secert keys
"""
if not self.exists_in_package(path) and self.data_handler.should_check(path, version, package_name, package_manager):
path = path.strip(' \t\n\r')
if len(path) > 0:
entry_dict = {}
file_info, file_type = util_functions.get_file_type(path)
if file_type == "TEXT" or file_type == "ELF":
if file_type == "TEXT":
entry_dict, lines = self.file_scanner.txtFileChecker(
path)
# path + strings output + keys
else:
# ELF
entry_dict = self.file_scanner.scan_binary(path)
if entry_dict:
self.fill_entry_dict(
entry_dict, path, package_name, version, package_manager, file_info)
self.vul_entry_list.append(entry_dict)
if file_type == "TEXT":
line_list = []
for line in lines:
line_list.append({PACKAGE: entry_dict[PACKAGE],
PACKAGE_MANAGER: entry_dict[PACKAGE_MANAGER],
VERSION: entry_dict[VERSION],
FULL_PATH: entry_dict[FULL_PATH],
LINE: line})
if len(line_list) > 0:
self.strings_entry_list.extend(line_list)
elif (file_type == "LINK"):
pointed_to = util_functions.get_path_symb_link(path)
self.checkPath(pointed_to, package_name,
version, package_manager)
def fill_entry_dict(self, entry_dict: dict, path: str, package_name: str, version: str, package_manager: str, file_info: str):
entry_dict[PACKAGE_MANAGER] = package_manager
entry_dict[PACKAGE] = package_name
entry_dict[VERSION] = version
entry_dict[FULL_PATH] = path
path_file = os.path.dirname(path)
entry_dict[DIRECTORY] = os.path.basename(path_file)
entry_dict[FILE_NAME] = os.path.split(path)[-1]
entry_dict[FILE_OUTPUT] = file_info
hash_dict = self.file_scanner.generate_hashes(path)
entry_dict[SHA1] = hash_dict[SHA1]
entry_dict[SHA256] = hash_dict[SHA256]
entry_dict[MD5] = hash_dict[MD5]
entry_dict[B2] = hash_dict[B2]
if __name__ == '__main__':
main() | 0.476823 | 0.128443 |
import itertools
import uuid
import functools
from django.contrib.gis import geos
from django.contrib.gis.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, get_language
from django.contrib.sites.models import Site
from services.meta import TranslatableModel
from . import utils
def default_authentication_token():
return str(uuid.uuid4())
class ContentFetchingMixin(object):
def page_data(self, language='en'):
return None
def content(self, language='en'):
return None
def metadata(self, language='en'):
return None
class __ContentFetchingMixin(object):
def page_data(self, language='en'):
system_language = get_language()
if system_language:
language = system_language[:2]
page = utils.fetch_content(self.uri, language)
return page
def content(self, language='en'):
def sort_inherited(p):
if 'important' in p and p['important']:
if 'inherited' not in p or not p['inherited']:
return -1
else:
return 0
if 'inherited' in p and p['inherited']:
if 'position_hierarchy' in p and p['position_hierarchy'] == 'U':
return 1
else:
return 3
return 2
page = self.page_data(language)['content']
for p in page:
p['inherited'] = False
parents = [x.page_data(language) for x in self.parents]
parents = [x['content'] for x in parents if 'content' in x]
parents = [list(filter(lambda x: 'inherited' in x and x['inherited'], (p or []))) for p in parents]
parents.append(page)
page = functools.reduce(lambda a, b: (a or []) + (b or []), parents, [])
page = sorted(page, key=sort_inherited)
for i, k in enumerate(page):
k['index'] = i
return page
def metadata(self, language='en'):
import functools
parents = [x.page_data(language) for x in self.parents]
parents = [x['metadata']['banners'] for x in parents if 'metadata' in x and 'banners' in x['metadata']]
metadata = self.page_data(language)['metadata']
metadata['banners'] = functools.reduce(lambda a, b: (a or []) + (b or []), parents, []) + (
metadata['banners'] if 'banners' in metadata else []
)
return metadata
class GeographicRegion(TranslatableModel, models.Model, ContentFetchingMixin):
"""Common model to represent levels 1, 2, 3"""
__translatable__ = {
"title": lambda l: models.CharField(
_("Title in {LANGUAGE_NAME}".format(**l)),
max_length=256,
default='',
blank=True,
),
}
level = models.IntegerField(
choices=[
(1, _('Country')),
(2, _('Region')),
(3, _('City')),
]
)
geom = models.MultiPolygonField(srid=4326,blank=True,
null=True,)
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
name = models.CharField(max_length=256, default='')
slug = models.CharField(max_length=100, default='')
code = models.CharField(max_length=16, blank=True)
hidden = models.BooleanField(default=False)
languages_available = models.CharField(
max_length=300,
blank=True,
help_text=_('Comma separated values of languages available in this region')
)
restrict_access_to = models.TextField(
null=True,
blank=True,
help_text=_('Comma separated values of code of siblings visible from this region')
)
site = models.ForeignKey(Site, related_name='+', null=True, blank=True)
objects = models.GeoManager()
def __str__(self):
return "%s %s" % (self.get_level_display(), self.name)
@property
def centroid(self):
return self.geom.centroid
@property
def depth(self):
return len(list(self.parents))
@property
def parents(self):
me = self
while me.parent:
me = me.parent
yield me
@property
def important_information(self):
pages = [{
"id": p.page.id,
"slug": p.page.slug,
"code": p.page.slug,
"title": p.page.title,
"name": p.page.title,
"hidden": False,
"metadata": {"page_title": p.page.title,},
"content": [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(),
"metadata": {},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
}] + [{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": z,
"inherited": False,
} for z, sp in enumerate(p.page.get_sub_sections())
]
} for i, p in enumerate(self.pages_with_order.filter(page__important=True).order_by('index'))]
return pages
@property
def full_slug(self):
return "--".join(reversed([self.slug] + [p.slug for p in self.parents]))
@property
def uri(self):
return "/".join(reversed([self.slug] + [p.slug for p in self.parents if p.level != 2])) + '/'
def get_all_languages(self):
return set([])
def get_sections(self, language='en', environment='production'):
pages = [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(language),
"metadata": {
"page_title": p.page.title
},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
} for i, p in enumerate(
self.pages_with_order.filter(page__important=False, page__banner=False, page__status=environment).order_by(
'index'))]
page_like_objects = [
[
{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": i,
"inherited": False,
}
for i, sp in enumerate(p.page.get_sub_sections(language))
]
for p in self.pages_with_order.filter(page__important=False, page__banner=False, page__status=environment)
]
return pages + list(itertools.chain.from_iterable(page_like_objects))
def get_sub_pages(self, environment='production'):
pages = [{
"id": p.page.id,
"slug": p.page.slug,
"code": p.page.slug,
"title": p.page.title,
"name": p.page.title,
"hidden": False,
"metadata": {"page_title": p.page.title,},
"content": [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(),
"metadata": {},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
}] + [{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": z,
"inherited": False,
} for z, sp in enumerate(p.page.get_sub_sections())
]
} for i, p in enumerate(
self.pages_with_order.filter(page__important=True, page__banner=False, page__status=environment).order_by(
'index'))]
return pages
def metadata(self, language='en', environment='production'):
banners = self.pages_with_order.filter(page__banner=True, page__status=environment)
return {
"banners": [p.page.html() for p in banners],
"page_title": self.title
}
def get_all_children(self):
return GeographicRegion.objects.filter(Q(parent=self) | Q(parent__parent=self) | Q(id=self.id))
class Meta:
ordering = ['level', 'name']
class ImportantInformation(TranslatableModel, models.Model, ContentFetchingMixin):
"""
Model to reflect the content available in the platform that may or may not be tied to a geographic location
"""
__translatable__ = {
"title": lambda l: models.CharField(
_("Title in {LANGUAGE_NAME}".format(**l)),
max_length=256,
default='',
blank=True,
),
}
region = models.ForeignKey(GeographicRegion, related_name='+', null=True, blank=True)
name = models.CharField(max_length=256, blank=True)
slug = models.CharField(max_length=100, blank=True, null=True)
code = models.CharField(max_length=16, blank=True, help_text=_('Used to sort the important information'))
icon = models.CharField(max_length=256, blank=True, null=True)
hidden = models.BooleanField(default=False)
@property
def parents(self):
if self.region:
yield self.region
for p in self.region.parents:
if p.level != 2:
yield p
@property
def full_slug(self):
return "--".join(reversed([self.slug] + [p.slug for p in self.parents]))
@property
def uri(self):
return "/".join(reversed([self.slug] + [p.slug for p in self.parents])) + '/'
def content(self, language='en'):
return []
class IPGeoLocationManager(models.Manager):
def find_by_ip(self, ip):
import textwrap
import ipaddress
if ':' in ip:
# v6
separator = ':'
ip_type = 'v6'
ip = ipaddress.IPv6Address(ip).exploded
bits = "".join(["{0:08b}".format(int(x, 16)) for x in ip.split(separator)])
else:
separator = '.'
ip_type = 'v4'
ip = ipaddress.IPv4Address(ip).exploded
bits = "".join(["{0:08b}".format(int(x)) for x in ip.split(separator)])
all_networks = []
for a in range(0, len(bits)):
if ip_type == 'v4':
ip_network = [str(int(b, 2)) for b in textwrap.wrap(bits[0:a].ljust(32, '0'), 8)]
else:
ip_network = [str(int(b, 2)) for b in textwrap.wrap(bits[0:a].ljust(128, '0'), 16)]
all_networks.append("{}/{}".format(separator.join(ip_network), str(a)))
all_networks = reversed(all_networks)
qs = self.get_queryset().filter(network__in=all_networks, type=ip_type)
if not qs:
return None
if qs:
return qs[0]
def find_region_by_ip(self, ip):
network = self.find_by_ip(ip)
if not network:
return None
point = geos.Point(float(network.longitude), float(network.latitude), srid=4326)
regions = GeographicRegion.objects.filter(geom__contains=point)
if not regions:
return None
regions = sorted(regions, key=lambda r: r.level, reverse=True)
return regions[0]
class IPGeoLocation(models.Model):
"""
Map of IP addresses and country. Does not need to be that accurate.
Source: http://dev.maxmind.com/geoip/geoip2/geolite2/
Data from CSV
network,geoname_id,registered_country_geoname_id,represented_country_geoname_id,is_anonymous_proxy,is_satellite_provider,postal_code,latitude,longitude
"""
network = models.CharField(max_length=50, blank=True, null=True, db_index=True)
geoname_id = models.IntegerField(default=0)
registered_country_geoname_id = models.IntegerField(default=0)
represented_country_geoname_id = models.IntegerField(default=0)
is_anonymous_proxy = models.NullBooleanField(default=False)
is_satellite_provider = models.NullBooleanField(default=False)
postal_code = models.CharField(max_length=50, blank=True, null=True)
latitude = models.DecimalField(max_digits=17, decimal_places=14, default=0, null=True)
longitude = models.DecimalField(max_digits=17, decimal_places=14, default=0, null=True)
type = models.CharField(max_length=2, choices=(('v4', 'IPV4'), ('v6', 'IPV6'),), default='v4')
objects = IPGeoLocationManager()
class ContentRate(models.Model):
region = models.ForeignKey(GeographicRegion, null=True, blank=True)
content_index = models.IntegerField(null=True, blank=True)
content_slug = models.CharField(max_length=250, null=True, blank=True)
thumbs_up = models.PositiveIntegerField(default=0)
thumbs_down = models.PositiveIntegerField(default=0)
class Meta:
unique_together = ('region', 'content_index', 'content_slug') | regions/models.py | import itertools
import uuid
import functools
from django.contrib.gis import geos
from django.contrib.gis.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _, get_language
from django.contrib.sites.models import Site
from services.meta import TranslatableModel
from . import utils
def default_authentication_token():
return str(uuid.uuid4())
class ContentFetchingMixin(object):
def page_data(self, language='en'):
return None
def content(self, language='en'):
return None
def metadata(self, language='en'):
return None
class __ContentFetchingMixin(object):
def page_data(self, language='en'):
system_language = get_language()
if system_language:
language = system_language[:2]
page = utils.fetch_content(self.uri, language)
return page
def content(self, language='en'):
def sort_inherited(p):
if 'important' in p and p['important']:
if 'inherited' not in p or not p['inherited']:
return -1
else:
return 0
if 'inherited' in p and p['inherited']:
if 'position_hierarchy' in p and p['position_hierarchy'] == 'U':
return 1
else:
return 3
return 2
page = self.page_data(language)['content']
for p in page:
p['inherited'] = False
parents = [x.page_data(language) for x in self.parents]
parents = [x['content'] for x in parents if 'content' in x]
parents = [list(filter(lambda x: 'inherited' in x and x['inherited'], (p or []))) for p in parents]
parents.append(page)
page = functools.reduce(lambda a, b: (a or []) + (b or []), parents, [])
page = sorted(page, key=sort_inherited)
for i, k in enumerate(page):
k['index'] = i
return page
def metadata(self, language='en'):
import functools
parents = [x.page_data(language) for x in self.parents]
parents = [x['metadata']['banners'] for x in parents if 'metadata' in x and 'banners' in x['metadata']]
metadata = self.page_data(language)['metadata']
metadata['banners'] = functools.reduce(lambda a, b: (a or []) + (b or []), parents, []) + (
metadata['banners'] if 'banners' in metadata else []
)
return metadata
class GeographicRegion(TranslatableModel, models.Model, ContentFetchingMixin):
"""Common model to represent levels 1, 2, 3"""
__translatable__ = {
"title": lambda l: models.CharField(
_("Title in {LANGUAGE_NAME}".format(**l)),
max_length=256,
default='',
blank=True,
),
}
level = models.IntegerField(
choices=[
(1, _('Country')),
(2, _('Region')),
(3, _('City')),
]
)
geom = models.MultiPolygonField(srid=4326,blank=True,
null=True,)
parent = models.ForeignKey('self', related_name='children', null=True, blank=True)
name = models.CharField(max_length=256, default='')
slug = models.CharField(max_length=100, default='')
code = models.CharField(max_length=16, blank=True)
hidden = models.BooleanField(default=False)
languages_available = models.CharField(
max_length=300,
blank=True,
help_text=_('Comma separated values of languages available in this region')
)
restrict_access_to = models.TextField(
null=True,
blank=True,
help_text=_('Comma separated values of code of siblings visible from this region')
)
site = models.ForeignKey(Site, related_name='+', null=True, blank=True)
objects = models.GeoManager()
def __str__(self):
return "%s %s" % (self.get_level_display(), self.name)
@property
def centroid(self):
return self.geom.centroid
@property
def depth(self):
return len(list(self.parents))
@property
def parents(self):
me = self
while me.parent:
me = me.parent
yield me
@property
def important_information(self):
pages = [{
"id": p.page.id,
"slug": p.page.slug,
"code": p.page.slug,
"title": p.page.title,
"name": p.page.title,
"hidden": False,
"metadata": {"page_title": p.page.title,},
"content": [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(),
"metadata": {},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
}] + [{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": z,
"inherited": False,
} for z, sp in enumerate(p.page.get_sub_sections())
]
} for i, p in enumerate(self.pages_with_order.filter(page__important=True).order_by('index'))]
return pages
@property
def full_slug(self):
return "--".join(reversed([self.slug] + [p.slug for p in self.parents]))
@property
def uri(self):
return "/".join(reversed([self.slug] + [p.slug for p in self.parents if p.level != 2])) + '/'
def get_all_languages(self):
return set([])
def get_sections(self, language='en', environment='production'):
pages = [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(language),
"metadata": {
"page_title": p.page.title
},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
} for i, p in enumerate(
self.pages_with_order.filter(page__important=False, page__banner=False, page__status=environment).order_by(
'index'))]
page_like_objects = [
[
{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": i,
"inherited": False,
}
for i, sp in enumerate(p.page.get_sub_sections(language))
]
for p in self.pages_with_order.filter(page__important=False, page__banner=False, page__status=environment)
]
return pages + list(itertools.chain.from_iterable(page_like_objects))
def get_sub_pages(self, environment='production'):
pages = [{
"id": p.page.id,
"slug": p.page.slug,
"code": p.page.slug,
"title": p.page.title,
"name": p.page.title,
"hidden": False,
"metadata": {"page_title": p.page.title,},
"content": [{
"vector_icon": p.page.icon,
"hide_from_toc": p.page.pop_up,
"section": p.page.html(),
"metadata": {},
"title": p.page.title,
"important": False,
"anchor_name": p.page.slug,
"index": i,
"inherited": False,
}] + [{
"vector_icon": "",
"hide_from_toc": True,
"section": sp['html'],
"metadata": {
"page_title": sp['title']
},
"title": sp['title'],
"important": False,
"anchor_name": sp['slug'],
"index": z,
"inherited": False,
} for z, sp in enumerate(p.page.get_sub_sections())
]
} for i, p in enumerate(
self.pages_with_order.filter(page__important=True, page__banner=False, page__status=environment).order_by(
'index'))]
return pages
def metadata(self, language='en', environment='production'):
banners = self.pages_with_order.filter(page__banner=True, page__status=environment)
return {
"banners": [p.page.html() for p in banners],
"page_title": self.title
}
def get_all_children(self):
return GeographicRegion.objects.filter(Q(parent=self) | Q(parent__parent=self) | Q(id=self.id))
class Meta:
ordering = ['level', 'name']
class ImportantInformation(TranslatableModel, models.Model, ContentFetchingMixin):
"""
Model to reflect the content available in the platform that may or may not be tied to a geographic location
"""
__translatable__ = {
"title": lambda l: models.CharField(
_("Title in {LANGUAGE_NAME}".format(**l)),
max_length=256,
default='',
blank=True,
),
}
region = models.ForeignKey(GeographicRegion, related_name='+', null=True, blank=True)
name = models.CharField(max_length=256, blank=True)
slug = models.CharField(max_length=100, blank=True, null=True)
code = models.CharField(max_length=16, blank=True, help_text=_('Used to sort the important information'))
icon = models.CharField(max_length=256, blank=True, null=True)
hidden = models.BooleanField(default=False)
@property
def parents(self):
if self.region:
yield self.region
for p in self.region.parents:
if p.level != 2:
yield p
@property
def full_slug(self):
return "--".join(reversed([self.slug] + [p.slug for p in self.parents]))
@property
def uri(self):
return "/".join(reversed([self.slug] + [p.slug for p in self.parents])) + '/'
def content(self, language='en'):
return []
class IPGeoLocationManager(models.Manager):
def find_by_ip(self, ip):
import textwrap
import ipaddress
if ':' in ip:
# v6
separator = ':'
ip_type = 'v6'
ip = ipaddress.IPv6Address(ip).exploded
bits = "".join(["{0:08b}".format(int(x, 16)) for x in ip.split(separator)])
else:
separator = '.'
ip_type = 'v4'
ip = ipaddress.IPv4Address(ip).exploded
bits = "".join(["{0:08b}".format(int(x)) for x in ip.split(separator)])
all_networks = []
for a in range(0, len(bits)):
if ip_type == 'v4':
ip_network = [str(int(b, 2)) for b in textwrap.wrap(bits[0:a].ljust(32, '0'), 8)]
else:
ip_network = [str(int(b, 2)) for b in textwrap.wrap(bits[0:a].ljust(128, '0'), 16)]
all_networks.append("{}/{}".format(separator.join(ip_network), str(a)))
all_networks = reversed(all_networks)
qs = self.get_queryset().filter(network__in=all_networks, type=ip_type)
if not qs:
return None
if qs:
return qs[0]
def find_region_by_ip(self, ip):
network = self.find_by_ip(ip)
if not network:
return None
point = geos.Point(float(network.longitude), float(network.latitude), srid=4326)
regions = GeographicRegion.objects.filter(geom__contains=point)
if not regions:
return None
regions = sorted(regions, key=lambda r: r.level, reverse=True)
return regions[0]
class IPGeoLocation(models.Model):
"""
Map of IP addresses and country. Does not need to be that accurate.
Source: http://dev.maxmind.com/geoip/geoip2/geolite2/
Data from CSV
network,geoname_id,registered_country_geoname_id,represented_country_geoname_id,is_anonymous_proxy,is_satellite_provider,postal_code,latitude,longitude
"""
network = models.CharField(max_length=50, blank=True, null=True, db_index=True)
geoname_id = models.IntegerField(default=0)
registered_country_geoname_id = models.IntegerField(default=0)
represented_country_geoname_id = models.IntegerField(default=0)
is_anonymous_proxy = models.NullBooleanField(default=False)
is_satellite_provider = models.NullBooleanField(default=False)
postal_code = models.CharField(max_length=50, blank=True, null=True)
latitude = models.DecimalField(max_digits=17, decimal_places=14, default=0, null=True)
longitude = models.DecimalField(max_digits=17, decimal_places=14, default=0, null=True)
type = models.CharField(max_length=2, choices=(('v4', 'IPV4'), ('v6', 'IPV6'),), default='v4')
objects = IPGeoLocationManager()
class ContentRate(models.Model):
region = models.ForeignKey(GeographicRegion, null=True, blank=True)
content_index = models.IntegerField(null=True, blank=True)
content_slug = models.CharField(max_length=250, null=True, blank=True)
thumbs_up = models.PositiveIntegerField(default=0)
thumbs_down = models.PositiveIntegerField(default=0)
class Meta:
unique_together = ('region', 'content_index', 'content_slug') | 0.461017 | 0.228759 |
import ROOT
import itertools
import Analysis
import AnalysisHelpers as AH
import Constants
#======================================================================
class WZAnalysis(Analysis.Analysis):
"""Analysis searching for the pair production of WZ with both boson decaying to leptons"""
def __init__(self, store):
    """Initialise the WZ analysis on top of the generic Analysis base class."""
    super(WZAnalysis, self).__init__(store)
def initialize(self):
    """Book every histogram used by this analysis as an instance attribute."""
    # (attribute name, standard-histogram key) pairs; booking through one
    # loop keeps the list easy to scan and extend.  Order is preserved, so
    # the resulting attributes are identical to booking them one by one.
    bookings = [
        ("invMass",      "invMass"),
        ("WtMass",       "WtMass"),
        ("hist_leptn",   "lep_n"),
        ("hist_leptpt",  "lep_pt"),
        ("hist_lepteta", "lep_eta"),
        ("hist_leptE",   "lep_E"),
        ("hist_leptphi", "lep_phi"),
        ("hist_leptch",  "lep_charge"),
        ("hist_leptID",  "lep_type"),
        ("hist_leptptc", "lep_ptconerel30"),
        ("hist_leptetc", "lep_etconerel20"),
        ("hist_lepz0",   "lep_z0"),
        ("hist_lepd0",   "lep_d0"),
        ("hist_etmiss",  "etmiss"),
        ("hist_vxp_z",   "vxp_z"),
        ("hist_pvxp_n",  "pvxp_n"),
    ]
    for attribute, key in bookings:
        setattr(self, attribute, self.addStandardHistogram(key))
def ZWindow(self, lep1, lep2):
    """Absolute distance of the dilepton invariant mass from the nominal Z mass."""
    pair_mass = (lep1.tlv() + lep2.tlv()).M()
    return abs(pair_mass - Constants.Z_Mass)
def TestWZCandidate(self, candidate):
    """Quality measure of a WZ candidate: Z-window distance of its Z-pair leptons.

    Smaller is better; only the first two leptons (the Z candidate) matter.
    """
    z_lep1 = candidate[0]
    z_lep2 = candidate[1]
    return self.ZWindow(z_lep1, z_lep2)
def WZCandidate(self, leptons):
    """Return the (zLep1, zLep2, wLep) permutation whose leading pair forms the
    best Z candidate, or None when no valid pairing exists.

    A valid Z pair is opposite-charge (non-positive charge product) and
    same-flavour; "best" means smallest distance to the nominal Z mass.
    """
    def _valid_z_pair(lep_a, lep_b):
        opposite_charge = lep_a.charge() * lep_b.charge() <= 0
        same_flavour = abs(lep_a.pdgId()) == abs(lep_b.pdgId())
        return opposite_charge and same_flavour

    candidates = [p for p in itertools.permutations(leptons, 3)
                  if _valid_z_pair(p[0], p[1])]
    if not candidates:
        return None
    # min() returns the first permutation attaining the smallest Z-window
    # distance, which matches the original strict-'<' replacement search.
    return min(candidates, key=self.TestWZCandidate)
def analyze(self):
    """Select 3-lepton WZ events and fill the booked histograms.

    Returns True when the event passes every cut and was histogrammed,
    False otherwise.
    """
    # retrieving objects; data events carry unit weight
    eventinfo = self.Store.getEventInfo()
    weight = eventinfo.scalefactor() * eventinfo.eventWeight() if not self.getIsData() else 1

    # apply standard event based selection
    if not AH.StandardEventCuts(eventinfo):
        return False
    self.countEvent("EventCuts", weight)

    # Lepton Requirements: exactly three good leptons, sorted by pt
    goodLeptons = AH.selectAndSortContainer(self.Store.getLeptons(), AH.isGoodLepton, lambda p: p.pt())
    if len(goodLeptons) != 3:
        return False
    self.countEvent("3 high pt Leptons", weight)

    # find candidate for WZ system
    candidate = self.WZCandidate(goodLeptons)
    if candidate is None:
        return False
    z1Lepton, z2Lepton, wLepton = candidate
    etmiss = self.Store.getEtMiss()

    # test candidate for WZ system
    # NOTE(review): ZWindow() is non-negative, so with the -999 placeholder the
    # condition below can never hold and every candidate event is rejected here
    # until a real cut value is chosen.
    # TO DO: Find a good value for this cut
    if not self.ZWindow(z1Lepton, z2Lepton) < -999:
        return False
    # TO DO: Find a good value for this cut
    if not AH.WTransverseMass(wLepton, etmiss) > -999:
        return False

    # histograms for missing et
    self.hist_etmiss.Fill(etmiss.et(), weight)

    # vertex histograms
    self.hist_vxp_z.Fill(eventinfo.primaryVertexPosition(), weight)
    self.hist_pvxp_n.Fill(eventinfo.numberOfVertices(), weight)

    # WZ system histograms
    self.invMass.Fill((z1Lepton.tlv() + z2Lepton.tlv()).M(), weight)
    self.WtMass.Fill(AH.WTransverseMass(wLepton, etmiss), weight)

    # lepton histograms: plain loops instead of list comprehensions built
    # purely for their side effects (the original discarded the lists)
    self.hist_leptn.Fill(len(goodLeptons), weight)
    for lep in goodLeptons:
        self.hist_leptpt.Fill(lep.pt(), weight)
        self.hist_lepteta.Fill(lep.eta(), weight)
        self.hist_leptE.Fill(lep.e(), weight)
        self.hist_leptphi.Fill(lep.phi(), weight)
        self.hist_leptch.Fill(lep.charge(), weight)
        self.hist_leptID.Fill(lep.pdgId(), weight)
        self.hist_leptptc.Fill(lep.isoptconerel30(), weight)
        self.hist_leptetc.Fill(lep.isoetconerel20(), weight)
        self.hist_lepz0.Fill(lep.z0(), weight)
        self.hist_lepd0.Fill(lep.d0(), weight)
    return True
def finalize(self):
    """Hook called once after the event loop; nothing to finalize here."""
    # The original line was fused with dataset-dump separator residue
    # ("pass | <repo path> | import ROOT"), which is not valid Python;
    # restore the intended empty method body.
    pass
import itertools
import Analysis
import AnalysisHelpers as AH
import Constants
#======================================================================
class WZAnalysis(Analysis.Analysis):
"""Analysis searching for the pair production of WZ with both boson decaying to leptons"""
def __init__(self, store):
    """Forward construction of the WZ analysis to the Analysis base class."""
    super(WZAnalysis, self).__init__(store)
def initialize(self):
    """Book all histograms for the WZ selection as instance attributes."""
    # Attribute name paired with its standard-histogram key; iterating the
    # list reproduces the original one-assignment-per-line booking exactly.
    histogram_table = (
        ("invMass",      "invMass"),
        ("WtMass",       "WtMass"),
        ("hist_leptn",   "lep_n"),
        ("hist_leptpt",  "lep_pt"),
        ("hist_lepteta", "lep_eta"),
        ("hist_leptE",   "lep_E"),
        ("hist_leptphi", "lep_phi"),
        ("hist_leptch",  "lep_charge"),
        ("hist_leptID",  "lep_type"),
        ("hist_leptptc", "lep_ptconerel30"),
        ("hist_leptetc", "lep_etconerel20"),
        ("hist_lepz0",   "lep_z0"),
        ("hist_lepd0",   "lep_d0"),
        ("hist_etmiss",  "etmiss"),
        ("hist_vxp_z",   "vxp_z"),
        ("hist_pvxp_n",  "pvxp_n"),
    )
    for attr_name, hist_key in histogram_table:
        setattr(self, attr_name, self.addStandardHistogram(hist_key))
def ZWindow(self, lep1, lep2):
return abs((lep1.tlv()+lep2.tlv()).M() - Constants.Z_Mass)
def TestWZCandidate(self, candidate):
return self.ZWindow(candidate[0], candidate[1])
def WZCandidate(self, leptons):
def isValidCandidate(lep1, lep2):
if lep1.charge()*lep2.charge() > 0: return False
if abs(lep1.pdgId()) != abs(lep2.pdgId()): return False
return True
bestCandidate = None
for p in itertools.permutations(leptons, 3):
if not isValidCandidate(p[0], p[1]): continue
if bestCandidate is None:
bestCandidate = p
if self.TestWZCandidate(p) < self.TestWZCandidate(bestCandidate):
bestCandidate = p
return bestCandidate
def analyze(self):
# retrieving objects
eventinfo = self.Store.getEventInfo()
weight = eventinfo.scalefactor()*eventinfo.eventWeight() if not self.getIsData() else 1
# apply standard event based selection
if not AH.StandardEventCuts(eventinfo): return False
self.countEvent("EventCuts", weight)
# Lepton Requirements
goodLeptons = AH.selectAndSortContainer(self.Store.getLeptons(), AH.isGoodLepton, lambda p: p.pt())
if not (len(goodLeptons) == 3): return False
self.countEvent("3 high pt Leptons", weight)
# find candidate for WZ system
candidate = self.WZCandidate(goodLeptons)
if candidate is None: return False;
z1Lepton = candidate[0]
z2Lepton = candidate[1]
wLepton = candidate[2]
etmiss = self.Store.getEtMiss()
# test candidate for WZ system
if not self.ZWindow(z1Lepton, z2Lepton) < -999: return False;# TO DO: Find a good value for this cut
if not AH.WTransverseMass(wLepton, etmiss) > -999: return False;# TO DO: Find a good value for this cut
# histograms for missing et
self.hist_etmiss.Fill(etmiss.et(),weight)
# vertex histograms
self.hist_vxp_z.Fill(eventinfo.primaryVertexPosition(), weight)
self.hist_pvxp_n.Fill(eventinfo.numberOfVertices(), weight)
# WZ system histograms
self.invMass.Fill((z1Lepton.tlv() + z2Lepton.tlv()).M(), weight)
self.WtMass.Fill(AH.WTransverseMass(wLepton, etmiss), weight)
# lepton histograms
self.hist_leptn.Fill(len(goodLeptons), weight)
[self.hist_leptpt.Fill(lep.pt(), weight) for lep in goodLeptons]
[self.hist_lepteta.Fill(lep.eta(), weight) for lep in goodLeptons]
[self.hist_leptE.Fill(lep.e(), weight) for lep in goodLeptons]
[self.hist_leptphi.Fill(lep.phi(), weight) for lep in goodLeptons]
[self.hist_leptch.Fill(lep.charge(), weight) for lep in goodLeptons]
[self.hist_leptID.Fill(lep.pdgId(), weight) for lep in goodLeptons]
[self.hist_leptptc.Fill(lep.isoptconerel30(), weight) for lep in goodLeptons]
[self.hist_leptetc.Fill(lep.isoetconerel20(), weight) for lep in goodLeptons]
[self.hist_lepz0.Fill(lep.z0(), weight) for lep in goodLeptons]
[self.hist_lepd0.Fill(lep.d0(), weight) for lep in goodLeptons]
return True
def finalize(self):
pass | 0.458349 | 0.17259 |
import torch, os, time, random, generator, discri, classify, utils
import numpy as np
import torch.nn as nn
import torchvision.utils as tvls
import torch.nn.functional as F
from utils import log_sum_exp, save_tensor_images
from torch.autograd import Variable
import torch.optim as optim
import torch.autograd as autograd
import statistics
import torch.distributions as tdist
device = "cuda"
num_classes = 1000
save_img_dir = './res_all' # all attack imgs
os.makedirs(save_img_dir, exist_ok=True)
success_dir = './res_success'
os.makedirs(success_dir, exist_ok=True)
def reparameterize(mu, logvar):
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def dist_inversion_multi_targets(G, D, T, E, iden, itr, lr=2e-2, momentum=0.9, lamda=100, iter_times=1500, clip_range=1, improved=False, num_seeds=5):
iden = iden.view(-1).long().cuda()
criterion = nn.CrossEntropyLoss().cuda()
bs = iden.shape[0]
G.eval()
D.eval()
for model_idx in range(len(T)):
T[model_idx][0].eval()
E.eval()
no = torch.zeros(bs) # index for saving all success attack images
tf = time.time()
#NOTE
mu = Variable(torch.zeros(bs, 100), requires_grad=True)
log_var = Variable(torch.ones(bs, 100), requires_grad=True)
params = [mu, log_var]
solver = optim.Adam(params, lr=lr)
# scheduler = torch.optim.lr_scheduler.StepLR(solver, 1800, gamma=0.1)
for i in range(iter_times):
z = reparameterize(mu, log_var)
fake = G(z)
if improved == True:
_, label = D(fake)
else:
label = D(fake)
#get the ouput of all targets
out=[]
for model_idx in range(len(T)):
target_model, model_weight = T[model_idx]
current_out = target_model(fake)[-1]
out.append([current_out, model_weight])
for p in params:
if p.grad is not None:
p.grad.data.zero_()
if improved:
Prior_Loss = torch.mean(F.softplus(log_sum_exp(label))) - torch.mean(log_sum_exp(label))
else:
Prior_Loss = - label.mean()
Iden_Loss = []
for t_model_out, model_weight in out:
current_model_loss = model_weight * criterion(t_model_out.float(), iden)
Iden_Loss.append(current_model_loss)
Iden_Loss = sum(Iden_Loss)
Total_Loss = Prior_Loss + lamda * Iden_Loss
Total_Loss.backward()
solver.step()
z = torch.clamp(z.detach(), -clip_range, clip_range).float()
Prior_Loss_val = Prior_Loss.item()
Iden_Loss_val = Iden_Loss.item()
if (i+1) % 1 == 0:
fake_img = G(z.detach())
eval_prob = E(utils.low2high(fake_img))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs
print("Iteration:{}\tPrior Loss:{:.2f}\tIden Loss:{:.2f}\tAttack Acc:{:.2f}".format(i+1, Prior_Loss_val, Iden_Loss_val, acc))
interval = time.time() - tf
print("Time:{:.2f}".format(interval))
res = []
res5 = []
seed_acc = torch.zeros((bs, 5))
for random_seed in range(num_seeds):
tf = time.time()
z = reparameterize(mu, log_var)
fake = G(z)
score = T(fake)[-1]
eval_prob = E(utils.low2high(fake))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
cnt, cnt5 = 0, 0
for i in range(bs):
gt = iden[i].item()
sample = fake[i]
save_tensor_images(sample.detach(), os.path.join(save_img_dir, "attack_iden_{}_{}.png".format(gt+1, random_seed)))
if eval_iden[i].item() == gt:
seed_acc[i, random_seed] = 1
cnt += 1
best_img = G(z)[i]
save_tensor_images(best_img.detach(), os.path.join(success_dir, "{}_attack_iden_{}_{}.png".format(itr, gt+1, int(no[i]))))
no[i] += 1
_, top5_idx = torch.topk(eval_prob[i], 5)
if gt in top5_idx:
cnt5 += 1
interval = time.time() - tf
print("Time:{:.2f}\tSeed:{}\tAcc:{:.2f}\t".format(interval, random_seed, cnt * 1.0 / bs))
res.append(cnt * 1.0 / bs)
res5.append(cnt5 * 1.0 / bs)
torch.cuda.empty_cache()
acc, acc_5 = statistics.mean(res), statistics.mean(res5)
acc_var = statistics.variance(res)
acc_var5 = statistics.variance(res5)
print("Acc:{:.2f}\tAcc_5:{:.2f}\tAcc_var:{:.4f}\tAcc_var5:{:.4f}".format(acc, acc_5, acc_var, acc_var5))
return acc, acc_5, acc_var, acc_var5
def inversion_multi_targets(G, D, T, E, iden, itr, lr=2e-2, momentum=0.9, lamda=100, iter_times=1500, clip_range=1, improved=False, num_seeds=5):
iden = iden.view(-1).long().cuda()
criterion = nn.CrossEntropyLoss().cuda()
bs = iden.shape[0]
G.eval()
D.eval()
for model_idx in range(len(T)):
T[model_idx][0].eval()
E.eval()
flag = torch.zeros(bs)
no = torch.zeros(bs) # index for saving all success attack images
res = []
res5 = []
seed_acc = torch.zeros((bs, 5))
for random_seed in range(num_seeds):
tf = time.time()
r_idx = random_seed
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
z = torch.randn(bs, 100).cuda().float()
z.requires_grad = True
v = torch.zeros(bs, 100).cuda().float()
for i in range(iter_times):
fake = G(z)
if improved == True:
_, label = D(fake)
else:
label = D(fake)
#get the ouput of all targets
out=[]
for model_idx in range(len(T)):
target_model, model_weight = T[model_idx]
current_out = target_model(fake)[-1]
out.append([current_out, model_weight])
if z.grad is not None:
z.grad.data.zero_()
if improved:
Prior_Loss = torch.mean(F.softplus(log_sum_exp(label))) - torch.mean(log_sum_exp(label))
else:
Prior_Loss = - label.mean()
Iden_Loss = []
for t_model_out, model_weight in out:
current_model_loss = model_weight * criterion(t_model_out.float(), iden)
Iden_Loss.append(current_model_loss)
Iden_Loss = sum(Iden_Loss)
Total_Loss = Prior_Loss + lamda * Iden_Loss
Total_Loss.backward()
v_prev = v.clone()
gradient = z.grad.data
v = momentum * v - lr * gradient
z = z + ( - momentum * v_prev + (1 + momentum) * v)
z = torch.clamp(z.detach(), -clip_range, clip_range).float()
z.requires_grad = True
Prior_Loss_val = Prior_Loss.item()
Iden_Loss_val = Iden_Loss.item()
if (i+1) % 300 == 0:
fake_img = G(z.detach())
eval_prob = E(utils.low2high(fake_img))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs
print("Iteration:{}\tPrior Loss:{:.2f}\tIden Loss:{:.2f}\tAttack Acc:{:.2f}".format(i+1, Prior_Loss_val, Iden_Loss_val, acc))
fake = G(z)
score = T(fake)[-1]
eval_prob = E(utils.low2high(fake))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
cnt, cnt5 = 0, 0
for i in range(bs):
gt = iden[i].item()
sample = G(z)[i]
# save_tensor_images(sample.detach(), os.path.join(save_img_dir, "attack_iden_{}_{}.png".format(gt+1, r_idx)))
if eval_iden[i].item() == gt:
seed_acc[i, r_idx] = 1
cnt += 1
flag[i] = 1
best_img = G(z)[i]
# save_tensor_images(best_img.detach(), os.path.join(success_dir, "{}_attack_iden_{}_{}.png".format(itr, iden[0]+i+1, int(no[i]))))
no[i] += 1
_, top5_idx = torch.topk(eval_prob[i], 5)
if gt in top5_idx:
cnt5 += 1
interval = time.time() - tf
print("Time:{:.2f}\tAcc:{:.2f}\t".format(interval, cnt * 1.0 / bs))
res.append(cnt * 1.0 / bs)
res5.append(cnt5 * 1.0 / bs)
torch.cuda.empty_cache()
acc, acc_5 = statistics.mean(res), statistics.mean(res5)
acc_var = statistics.variance(res)
acc_var5 = statistics.variance(res5)
print("Acc:{:.2f}\tAcc_5:{:.2f}\tAcc_var:{:.4f}\tAcc_var5:{:.4f}".format(acc, acc_5, acc_var, acc_var5))
print("seeds variance:", seed_var)
return acc, acc_5, acc_var, acc_var5 | multi-target/attack_multi_targets.py | import torch, os, time, random, generator, discri, classify, utils
import numpy as np
import torch.nn as nn
import torchvision.utils as tvls
import torch.nn.functional as F
from utils import log_sum_exp, save_tensor_images
from torch.autograd import Variable
import torch.optim as optim
import torch.autograd as autograd
import statistics
import torch.distributions as tdist
device = "cuda"
num_classes = 1000
save_img_dir = './res_all' # all attack imgs
os.makedirs(save_img_dir, exist_ok=True)
success_dir = './res_success'
os.makedirs(success_dir, exist_ok=True)
def reparameterize(mu, logvar):
"""
Reparameterization trick to sample from N(mu, var) from
N(0,1).
:param mu: (Tensor) Mean of the latent Gaussian [B x D]
:param logvar: (Tensor) Standard deviation of the latent Gaussian [B x D]
:return: (Tensor) [B x D]
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps * std + mu
def dist_inversion_multi_targets(G, D, T, E, iden, itr, lr=2e-2, momentum=0.9, lamda=100, iter_times=1500, clip_range=1, improved=False, num_seeds=5):
iden = iden.view(-1).long().cuda()
criterion = nn.CrossEntropyLoss().cuda()
bs = iden.shape[0]
G.eval()
D.eval()
for model_idx in range(len(T)):
T[model_idx][0].eval()
E.eval()
no = torch.zeros(bs) # index for saving all success attack images
tf = time.time()
#NOTE
mu = Variable(torch.zeros(bs, 100), requires_grad=True)
log_var = Variable(torch.ones(bs, 100), requires_grad=True)
params = [mu, log_var]
solver = optim.Adam(params, lr=lr)
# scheduler = torch.optim.lr_scheduler.StepLR(solver, 1800, gamma=0.1)
for i in range(iter_times):
z = reparameterize(mu, log_var)
fake = G(z)
if improved == True:
_, label = D(fake)
else:
label = D(fake)
#get the ouput of all targets
out=[]
for model_idx in range(len(T)):
target_model, model_weight = T[model_idx]
current_out = target_model(fake)[-1]
out.append([current_out, model_weight])
for p in params:
if p.grad is not None:
p.grad.data.zero_()
if improved:
Prior_Loss = torch.mean(F.softplus(log_sum_exp(label))) - torch.mean(log_sum_exp(label))
else:
Prior_Loss = - label.mean()
Iden_Loss = []
for t_model_out, model_weight in out:
current_model_loss = model_weight * criterion(t_model_out.float(), iden)
Iden_Loss.append(current_model_loss)
Iden_Loss = sum(Iden_Loss)
Total_Loss = Prior_Loss + lamda * Iden_Loss
Total_Loss.backward()
solver.step()
z = torch.clamp(z.detach(), -clip_range, clip_range).float()
Prior_Loss_val = Prior_Loss.item()
Iden_Loss_val = Iden_Loss.item()
if (i+1) % 1 == 0:
fake_img = G(z.detach())
eval_prob = E(utils.low2high(fake_img))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs
print("Iteration:{}\tPrior Loss:{:.2f}\tIden Loss:{:.2f}\tAttack Acc:{:.2f}".format(i+1, Prior_Loss_val, Iden_Loss_val, acc))
interval = time.time() - tf
print("Time:{:.2f}".format(interval))
res = []
res5 = []
seed_acc = torch.zeros((bs, 5))
for random_seed in range(num_seeds):
tf = time.time()
z = reparameterize(mu, log_var)
fake = G(z)
score = T(fake)[-1]
eval_prob = E(utils.low2high(fake))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
cnt, cnt5 = 0, 0
for i in range(bs):
gt = iden[i].item()
sample = fake[i]
save_tensor_images(sample.detach(), os.path.join(save_img_dir, "attack_iden_{}_{}.png".format(gt+1, random_seed)))
if eval_iden[i].item() == gt:
seed_acc[i, random_seed] = 1
cnt += 1
best_img = G(z)[i]
save_tensor_images(best_img.detach(), os.path.join(success_dir, "{}_attack_iden_{}_{}.png".format(itr, gt+1, int(no[i]))))
no[i] += 1
_, top5_idx = torch.topk(eval_prob[i], 5)
if gt in top5_idx:
cnt5 += 1
interval = time.time() - tf
print("Time:{:.2f}\tSeed:{}\tAcc:{:.2f}\t".format(interval, random_seed, cnt * 1.0 / bs))
res.append(cnt * 1.0 / bs)
res5.append(cnt5 * 1.0 / bs)
torch.cuda.empty_cache()
acc, acc_5 = statistics.mean(res), statistics.mean(res5)
acc_var = statistics.variance(res)
acc_var5 = statistics.variance(res5)
print("Acc:{:.2f}\tAcc_5:{:.2f}\tAcc_var:{:.4f}\tAcc_var5:{:.4f}".format(acc, acc_5, acc_var, acc_var5))
return acc, acc_5, acc_var, acc_var5
def inversion_multi_targets(G, D, T, E, iden, itr, lr=2e-2, momentum=0.9, lamda=100, iter_times=1500, clip_range=1, improved=False, num_seeds=5):
iden = iden.view(-1).long().cuda()
criterion = nn.CrossEntropyLoss().cuda()
bs = iden.shape[0]
G.eval()
D.eval()
for model_idx in range(len(T)):
T[model_idx][0].eval()
E.eval()
flag = torch.zeros(bs)
no = torch.zeros(bs) # index for saving all success attack images
res = []
res5 = []
seed_acc = torch.zeros((bs, 5))
for random_seed in range(num_seeds):
tf = time.time()
r_idx = random_seed
torch.manual_seed(random_seed)
torch.cuda.manual_seed(random_seed)
np.random.seed(random_seed)
random.seed(random_seed)
z = torch.randn(bs, 100).cuda().float()
z.requires_grad = True
v = torch.zeros(bs, 100).cuda().float()
for i in range(iter_times):
fake = G(z)
if improved == True:
_, label = D(fake)
else:
label = D(fake)
#get the ouput of all targets
out=[]
for model_idx in range(len(T)):
target_model, model_weight = T[model_idx]
current_out = target_model(fake)[-1]
out.append([current_out, model_weight])
if z.grad is not None:
z.grad.data.zero_()
if improved:
Prior_Loss = torch.mean(F.softplus(log_sum_exp(label))) - torch.mean(log_sum_exp(label))
else:
Prior_Loss = - label.mean()
Iden_Loss = []
for t_model_out, model_weight in out:
current_model_loss = model_weight * criterion(t_model_out.float(), iden)
Iden_Loss.append(current_model_loss)
Iden_Loss = sum(Iden_Loss)
Total_Loss = Prior_Loss + lamda * Iden_Loss
Total_Loss.backward()
v_prev = v.clone()
gradient = z.grad.data
v = momentum * v - lr * gradient
z = z + ( - momentum * v_prev + (1 + momentum) * v)
z = torch.clamp(z.detach(), -clip_range, clip_range).float()
z.requires_grad = True
Prior_Loss_val = Prior_Loss.item()
Iden_Loss_val = Iden_Loss.item()
if (i+1) % 300 == 0:
fake_img = G(z.detach())
eval_prob = E(utils.low2high(fake_img))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
acc = iden.eq(eval_iden.long()).sum().item() * 1.0 / bs
print("Iteration:{}\tPrior Loss:{:.2f}\tIden Loss:{:.2f}\tAttack Acc:{:.2f}".format(i+1, Prior_Loss_val, Iden_Loss_val, acc))
fake = G(z)
score = T(fake)[-1]
eval_prob = E(utils.low2high(fake))[-1]
eval_iden = torch.argmax(eval_prob, dim=1).view(-1)
cnt, cnt5 = 0, 0
for i in range(bs):
gt = iden[i].item()
sample = G(z)[i]
# save_tensor_images(sample.detach(), os.path.join(save_img_dir, "attack_iden_{}_{}.png".format(gt+1, r_idx)))
if eval_iden[i].item() == gt:
seed_acc[i, r_idx] = 1
cnt += 1
flag[i] = 1
best_img = G(z)[i]
# save_tensor_images(best_img.detach(), os.path.join(success_dir, "{}_attack_iden_{}_{}.png".format(itr, iden[0]+i+1, int(no[i]))))
no[i] += 1
_, top5_idx = torch.topk(eval_prob[i], 5)
if gt in top5_idx:
cnt5 += 1
interval = time.time() - tf
print("Time:{:.2f}\tAcc:{:.2f}\t".format(interval, cnt * 1.0 / bs))
res.append(cnt * 1.0 / bs)
res5.append(cnt5 * 1.0 / bs)
torch.cuda.empty_cache()
acc, acc_5 = statistics.mean(res), statistics.mean(res5)
acc_var = statistics.variance(res)
acc_var5 = statistics.variance(res5)
print("Acc:{:.2f}\tAcc_5:{:.2f}\tAcc_var:{:.4f}\tAcc_var5:{:.4f}".format(acc, acc_5, acc_var, acc_var5))
print("seeds variance:", seed_var)
return acc, acc_5, acc_var, acc_var5 | 0.672439 | 0.444203 |
from PYB11Generator import *
from SolidSPHHydroBase import *
from RestartMethods import *
@PYB11template() # Override the fact SolidSPHHydroBase is templated
@PYB11template_dict({"Dimension" : "Dim<2>"})
@PYB11module("SpheralSPH")
class SolidSPHHydroBaseRZ(SolidSPHHydroBase):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def pyinit(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
dataBase = "DataBase<%(Dimension)s>&",
Q = "ArtificialViscosity<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
WPi = "const TableKernel<%(Dimension)s>&",
WGrad = "const TableKernel<%(Dimension)s>&",
filter = "const double",
cfl = "const double",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
gradhCorrection = "const bool",
XSPH = "const bool",
correctVelocityGradient = "const bool",
sumMassDensityOverAllNodeLists = "const bool",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
epsTensile = "const double",
nTensile = "const double",
damageRelieveRubble = "const bool",
negativePressureInDamage = "const bool",
strengthInDamage = "const bool",
xmin = "const Vector&",
xmax = "const Vector&"):
"SolidSPHHydroBaseRZ constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
def initializeProblemStartup(dataBase = "DataBase<%(Dimension)s>&"):
"Tasks we do once on problem startup."
return "void"
@PYB11virtual
def registerState(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"Register the state Hydro expects to use and evolve."
return "void"
@PYB11virtual
def preStepInitialize(self,
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Optional hook to be called at the beginning of a time step."
return "void"
@PYB11virtual
@PYB11const
def evaluateDerivatives(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""Evaluate the derivatives for the principle hydro
mass density, velocity, and specific thermal energy."""
return "void"
@PYB11virtual
def applyGhostBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Apply boundary conditions to the physics specific fields."
return "void"
@PYB11virtual
def enforceBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Enforce boundary conditions for the physics specific fields."
return "void"
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(RestartMethods, SolidSPHHydroBaseRZ) | src/Pybind11Wraps/SPH/SolidSPHHydroBaseRZ.py | from PYB11Generator import *
from SolidSPHHydroBase import *
from RestartMethods import *
@PYB11template() # Override the fact SolidSPHHydroBase is templated
@PYB11template_dict({"Dimension" : "Dim<2>"})
@PYB11module("SpheralSPH")
class SolidSPHHydroBaseRZ(SolidSPHHydroBase):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def pyinit(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
dataBase = "DataBase<%(Dimension)s>&",
Q = "ArtificialViscosity<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
WPi = "const TableKernel<%(Dimension)s>&",
WGrad = "const TableKernel<%(Dimension)s>&",
filter = "const double",
cfl = "const double",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
gradhCorrection = "const bool",
XSPH = "const bool",
correctVelocityGradient = "const bool",
sumMassDensityOverAllNodeLists = "const bool",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
epsTensile = "const double",
nTensile = "const double",
damageRelieveRubble = "const bool",
negativePressureInDamage = "const bool",
strengthInDamage = "const bool",
xmin = "const Vector&",
xmax = "const Vector&"):
"SolidSPHHydroBaseRZ constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
def initializeProblemStartup(dataBase = "DataBase<%(Dimension)s>&"):
"Tasks we do once on problem startup."
return "void"
@PYB11virtual
def registerState(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"Register the state Hydro expects to use and evolve."
return "void"
@PYB11virtual
def preStepInitialize(self,
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Optional hook to be called at the beginning of a time step."
return "void"
@PYB11virtual
@PYB11const
def evaluateDerivatives(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""Evaluate the derivatives for the principle hydro
mass density, velocity, and specific thermal energy."""
return "void"
@PYB11virtual
def applyGhostBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Apply boundary conditions to the physics specific fields."
return "void"
@PYB11virtual
def enforceBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Enforce boundary conditions for the physics specific fields."
return "void"
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(RestartMethods, SolidSPHHydroBaseRZ) | 0.770724 | 0.376136 |
import time
import json
import os
# Application imports
from ism.core.base_action import BaseAction
class ActionIoFileOutbound(BaseAction):
"""Scan the messages table in the control DB for
outbound messages and create an outbound file if any found.
MSG Format:
CREATE TABLE messages (
message_id INTEGER NOT NULL PRIMARY KEY, -- Record ID in recipient messages table
sender TEXT NOT NULL, -- Return address of sender
sender_id INTEGER NOT NULL, -- Record ID in sender messages table
action TEXT NOT NULL, -- Name of the action that handles this message
payload TEXT, -- Json body of msg payload
sent TEXT NOT NULL, -- Timestamp msg sent by sender
received TEXT NOT NULL DEFAULT (strftime('%s', 'now')), -- Timestamp ism loaded message into database
direction TEXT NOT NULL DEFAULT 'inbound', -- In or outbound message
processed BOOLEAN NOT NULL DEFAULT '0' -- Has the message been processed
);
File Name Format:
<recipient>_<sender_id>.json
"""
def execute(self):
if self.active():
# Get the directory paths from the properties
try:
outbound = str(self.properties['comms']['file'].get('outbound'))
smp = self.properties['comms']['file']['semaphore_extension']
msg = self.properties['comms']['file']['message_extension']
except KeyError as e:
self.logger.error(f'Failed to read [comms][file] entries from properties. KeyError ({e})')
raise
# Query the messages table for outbound messages that aren't 'processed'
sql = self.dao.prepare_parameterised_statement(
f'SELECT message_id, recipient, sender, sender_id, action, payload '
f'FROM messages WHERE processed = ? AND direction = ?'
)
results = self.dao.execute_sql_query(
sql,
(
0,
'outbound'
)
)
if not results:
return
# Create the message files in the outbound directory
for record in results:
# Create a dict of the values
send_time = int(time.time())
message = {
"message_id": record[0],
"recipient": record[1],
"sender": record[2],
"sender_id": record[3],
"action": record[4],
"payload": json.dumps(record[5]),
"sent": send_time
}
# Create the file
with open(f'{outbound}{os.path.sep}{record[1]}_{record[3]}{msg}', 'w') as file:
file.write(json.dumps(message))
# Create the semaphore
with open(f'{outbound}{os.path.sep}{record[1]}_{record[3]}{smp}', 'w') as file:
file.write('')
# Mark the message as processed and update the sent field with timestamp of epoch seconds
sql = self.dao.prepare_parameterised_statement(
'UPDATE messages SET sent = ?, processed = ?'
)
self.dao.execute_sql_statement(
sql,
(
send_time,
1
)
) | ism_comms/file/actions/action_io_file_outbound.py | import time
import json
import os
# Application imports
from ism.core.base_action import BaseAction
class ActionIoFileOutbound(BaseAction):
"""Scan the messages table in the control DB for
outbound messages and create an outbound file if any found.
MSG Format:
CREATE TABLE messages (
message_id INTEGER NOT NULL PRIMARY KEY, -- Record ID in recipient messages table
sender TEXT NOT NULL, -- Return address of sender
sender_id INTEGER NOT NULL, -- Record ID in sender messages table
action TEXT NOT NULL, -- Name of the action that handles this message
payload TEXT, -- Json body of msg payload
sent TEXT NOT NULL, -- Timestamp msg sent by sender
received TEXT NOT NULL DEFAULT (strftime('%s', 'now')), -- Timestamp ism loaded message into database
direction TEXT NOT NULL DEFAULT 'inbound', -- In or outbound message
processed BOOLEAN NOT NULL DEFAULT '0' -- Has the message been processed
);
File Name Format:
<recipient>_<sender_id>.json
"""
def execute(self):
if self.active():
# Get the directory paths from the properties
try:
outbound = str(self.properties['comms']['file'].get('outbound'))
smp = self.properties['comms']['file']['semaphore_extension']
msg = self.properties['comms']['file']['message_extension']
except KeyError as e:
self.logger.error(f'Failed to read [comms][file] entries from properties. KeyError ({e})')
raise
# Query the messages table for outbound messages that aren't 'processed'
sql = self.dao.prepare_parameterised_statement(
f'SELECT message_id, recipient, sender, sender_id, action, payload '
f'FROM messages WHERE processed = ? AND direction = ?'
)
results = self.dao.execute_sql_query(
sql,
(
0,
'outbound'
)
)
if not results:
return
# Create the message files in the outbound directory
for record in results:
# Create a dict of the values
send_time = int(time.time())
message = {
"message_id": record[0],
"recipient": record[1],
"sender": record[2],
"sender_id": record[3],
"action": record[4],
"payload": json.dumps(record[5]),
"sent": send_time
}
# Create the file
with open(f'{outbound}{os.path.sep}{record[1]}_{record[3]}{msg}', 'w') as file:
file.write(json.dumps(message))
# Create the semaphore
with open(f'{outbound}{os.path.sep}{record[1]}_{record[3]}{smp}', 'w') as file:
file.write('')
# Mark the message as processed and update the sent field with timestamp of epoch seconds
sql = self.dao.prepare_parameterised_statement(
'UPDATE messages SET sent = ?, processed = ?'
)
self.dao.execute_sql_statement(
sql,
(
send_time,
1
)
) | 0.448426 | 0.09709 |
#### Libraries
# Standard library
import cPickle
import gzip
# Third-party libraries
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor import shared_randomstreams
from theano.tensor.signal import downsample
# Activation functions for neurons
def linear(z):
    """Identity activation: return the input unchanged."""
    return z
def ReLU(z):
    """Rectified linear unit: elementwise max(0, z) of the symbolic input."""
    return T.maximum(0.0, z)
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh
#### Constants
GPU = True
if GPU:
print "Trying to run under a GPU. If this is not desired, then modify " + \
"network3.py\nto set the GPU flag to False."
try:
theano.config.device = 'gpu'
except:
pass # it's already set
theano.config.floatX = 'float32'
else:
print "Running with a CPU. If this is not desired, then the modify " + \
"network3.py to set\nthe GPU flag to True."
#### Load the MNIST data
def load_data_shared(filename="../../data/mnist.pkl.gz"):
    """Load the pickled, gzipped MNIST data set.

    Returns a list ``[training_data, validation_data, test_data]`` where
    each entry is a pair of Theano shared variables ``(x, y)`` holding the
    inputs and the (int32-cast) labels for that split.
    """
    # `with` guarantees the gzip handle is closed even if unpickling raises;
    # the original explicit close() leaked the handle on error.
    with gzip.open(filename, 'rb') as f:
        training_data, validation_data, test_data = cPickle.load(f)

    def shared(data):
        """Place the data into shared variables.  This allows Theano to copy
        the data to the GPU, if one is available.
        """
        shared_x = theano.shared(
            np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        # Labels are stored as floatX for GPU transfer, then cast back to
        # int32 for use as class indices.
        shared_y = theano.shared(
            np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        return shared_x, T.cast(shared_y, "int32")
    return [shared(training_data), shared(validation_data), shared(test_data)]
#### Main class used to construct and train networks
class Network(object):
    """A feed-forward network assembled from a list of layer objects and
    trained by mini-batch stochastic gradient descent on a Theano graph.

    Example (layer list: e.g. conv -> fully-connected -> softmax):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          poolsize=(2, 2)),
            FullyConnectedLayer(n_in=20*12*12, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)],
            mini_batch_size)
    """
    def __init__(self, layers, mini_batch_size):
        """Takes a list of `layers`, describing the network architecture, and
        a value for the `mini_batch_size` to be used during training
        by stochastic gradient descent.
        """
        self.layers = layers
        self.mini_batch_size = mini_batch_size
        # Flatten every layer's trainable parameters so T.grad can
        # differentiate the cost w.r.t. all of them at once.
        self.params = [param for layer in self.layers for param in layer.params]
        self.x = T.matrix("x")
        self.y = T.ivector("y")
        # Wire the layers into one symbolic graph: each layer consumes the
        # previous layer's plain and dropout outputs; the first layer gets
        # the raw input on both paths.
        init_layer = self.layers[0]
        init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
        for j in xrange(1, len(self.layers)):
            prev_layer, layer = self.layers[j - 1], self.layers[j]
            layer.set_inpt(
                prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
        self.output = self.layers[-1].output
        self.output_dropout = self.layers[-1].output_dropout
    def SGD(self, training_data, epochs, mini_batch_size, eta,
            validation_data, test_data, lmbda=0.0):
        """Train the network using mini-batch stochastic gradient descent.

        `training_data`, `validation_data` and `test_data` are (x, y) pairs
        of Theano shared variables as returned by `load_data_shared`.
        `mini_batch_size` should equal the value the network was built with,
        because the compiled functions slice with `self.mini_batch_size`.
        `eta` is the learning rate and `lmbda` the L2 regularization weight.
        """
        training_x, training_y = training_data
        validation_x, validation_y = validation_data
        test_x, test_y = test_data
        # compute number of minibatches for training, validation and testing
        # (// keeps the counts integral under Python 3 semantics too; plain
        # `/` would silently yield floats there and break xrange/range)
        num_training_batches = size(training_data) // mini_batch_size
        num_validation_batches = size(validation_data) // mini_batch_size
        num_test_batches = size(test_data) // mini_batch_size
        # define the (regularized) cost function, symbolic gradients, and updates
        l2_norm_squared = sum([(layer.w ** 2).sum() for layer in self.layers])
        cost = self.layers[-1].cost(self) + \
            0.5 * lmbda * l2_norm_squared / num_training_batches
        grads = T.grad(cost, self.params)
        # Plain SGD: each parameter steps along its negative gradient.
        updates = [(param, param - eta * grad)
                   for param, grad in zip(self.params, grads)]
        # define functions to train a mini-batch, and to compute the
        # accuracy in validation and test mini-batches.
        i = T.lscalar()  # mini-batch index
        train_mb = theano.function(
            [i], cost, updates=updates,
            givens={
                self.x:
                training_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                training_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        validate_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                validation_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                validation_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        test_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                test_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                test_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        self.test_mb_predictions = theano.function(
            [i], self.layers[-1].y_out,
            givens={
                self.x:
                test_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        # Do the actual training
        best_validation_accuracy = 0.0
        # Pre-initialize so the summary prints below cannot hit a NameError
        # when epochs == 0 or the test accuracy was never computed.
        best_iteration = -1
        test_accuracy = None
        for epoch in xrange(epochs):
            for minibatch_index in xrange(num_training_batches):
                iteration = num_training_batches * epoch + minibatch_index
                if iteration % 1000 == 0:
                    print("Training mini-batch number {0}".format(iteration))
                cost_ij = train_mb(minibatch_index)
                # Evaluate once per full pass over the training data.
                if (iteration + 1) % num_training_batches == 0:
                    validation_accuracy = np.mean(
                        [validate_mb_accuracy(j) for j in xrange(num_validation_batches)])
                    print("Epoch {0}: validation accuracy {1:.2%}".format(
                        epoch, validation_accuracy))
                    if validation_accuracy >= best_validation_accuracy:
                        print("This is the best validation accuracy to date.")
                        best_validation_accuracy = validation_accuracy
                        best_iteration = iteration
                        if test_data:
                            test_accuracy = np.mean(
                                [test_mb_accuracy(j) for j in xrange(num_test_batches)])
                            print('The corresponding test accuracy is {0:.2%}'.format(
                                test_accuracy))
        print("Finished training network.")
        print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
            best_validation_accuracy, best_iteration))
        if test_accuracy is not None:
            print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))
#### Define layer types
class ConvPoolLayer(object):
    """Used to create a combination of a convolutional and a max-pooling
    layer. A more sophisticated implementation would separate the
    two, but for our purposes we'll always use them together, and it
    simplifies the code, so it makes sense to combine them.
    """
    def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.
        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.
        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.
        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        # n_out is one neuron's fan-out after pooling; weights are drawn from
        # N(0, 1/n_out) to keep the initial activations in a reasonable range.
        n_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0 / n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the symbolic convolve -> max-pool -> activation graph.

        `inpt_dropout` is accepted for interface compatibility with the other
        layer types but is ignored: this layer applies no dropout.
        """
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        # NOTE(review): theano.tensor.signal.downsample was deprecated in later
        # Theano releases in favour of signal.pool — verify the installed
        # Theano version still provides max_pool_2d here.
        pooled_out = downsample.max_pool_2d(
            input=conv_out, ds=self.poolsize, ignore_border=True)
        # One bias per filter, broadcast over batch and spatial dimensions.
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers
class FullyConnectedLayer(object):
    """A fully-connected (dense) layer with a configurable activation
    function and dropout probability.
    """
    def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        """`n_in`/`n_out` are the input and output dimensions; `p_dropout`
        is the probability of dropping a unit during training.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        # Weights are drawn from N(0, 1/n_out) to avoid saturating the
        # activation at the start of training; biases from a unit normal.
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0 / n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the inference path (`output`, input scaled by 1 - p_dropout
        to compensate for training-time dropout) and the training path
        (`output_dropout`, with dropout actually applied).
        """
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = self.activation_fn(
            (1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
        # Predicted class: index of the strongest activation per example.
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = self.activation_fn(
            T.dot(self.inpt_dropout, self.w) + self.b)
    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))
class SoftmaxLayer(object):
    """An output layer producing softmax class probabilities, with optional
    dropout on its input.
    """
    def __init__(self, n_in, n_out, p_dropout=0.0):
        """`n_in` is the input dimension, `n_out` the number of classes;
        `p_dropout` is the dropout probability for the input.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.p_dropout = p_dropout
        # Initialize weights and biases
        # Zero initialization: unlike the hidden layers, no random
        # symmetry-breaking is used for the softmax parameters here.
        self.w = theano.shared(
            np.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.zeros((n_out,), dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the inference path (`output`, input scaled by 1 - p_dropout)
        and the training path (`output_dropout`, with dropout applied).
        """
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
        # Predicted class: index of the highest probability per example.
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
    def cost(self, net):
        "Return the log-likelihood cost."
        # Negative mean log-probability of the correct class, computed on
        # the dropout (training) path.
        return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])
    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))
#### Miscellanea
def size(data):
    """Return the number of examples in the dataset `data`.

    `data` is an (x, y) pair whose first element is a shared variable; the
    example count is the length of its first axis.
    """
    shared_x = data[0]
    return shared_x.get_value(borrow=True).shape[0]
def dropout_layer(layer, p_dropout):
srng = shared_randomstreams.RandomStreams(
np.random.RandomState(0).randint(999999))
mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
return layer * T.cast(mask, theano.config.floatX) | src/network3.py | #### Libraries
# Standard library
import cPickle
import gzip
# Third-party libraries
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv
from theano.tensor.nnet import softmax
from theano.tensor import shared_randomstreams
from theano.tensor.signal import downsample
# Activation functions for neurons
def linear(z):
    """Identity activation: returns its argument unchanged."""
    return z
def ReLU(z): return T.maximum(0.0, z)  # rectified linear unit: elementwise max(0, z)
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh
#### Constants
GPU = True  # flip to False to force CPU execution
if GPU:
    # Python 2 print statement: this module targets Python 2 / old Theano.
    print "Trying to run under a GPU. If this is not desired, then modify " + \
        "network3.py\nto set the GPU flag to False."
    try:
        theano.config.device = 'gpu'
    except:
        # NOTE(review): bare except deliberately swallows the error Theano
        # raises when the device has already been set (e.g. via THEANO_FLAGS).
        pass # it's already set
    # float32 is required for GPU execution in this Theano generation.
    theano.config.floatX = 'float32'
else:
    print "Running with a CPU. If this is not desired, then the modify " + \
        "network3.py to set\nthe GPU flag to True."
#### Load the MNIST data
def load_data_shared(filename="../../data/mnist.pkl.gz"):
    """Load the pickled MNIST data and wrap each split in Theano shared
    variables, so Theano can copy the data to the GPU if one is available.

    Returns a list [(train_x, train_y), (valid_x, valid_y), (test_x, test_y)]
    where each x is a floatX shared variable and each y is cast to int32.
    """
    # `with` guarantees the file is closed even if unpickling raises
    # (the original explicit close() was skipped on error).
    with gzip.open(filename, 'rb') as f:
        training_data, validation_data, test_data = cPickle.load(f)
    def shared(data):
        """Place the data into shared variables. This allows Theano to copy
        the data to the GPU, if one is available.
        """
        shared_x = theano.shared(
            np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
        shared_y = theano.shared(
            np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
        # Labels are stored as floatX (GPU-friendly) but used as indices,
        # hence the cast back to int32.
        return shared_x, T.cast(shared_y, "int32")
    return [shared(training_data), shared(validation_data), shared(test_data)]
#### Main class used to construct and train networks
class Network(object):
    """A feed-forward network assembled from a list of layer objects and
    trained by mini-batch stochastic gradient descent on a Theano graph.

    Example (layer list: e.g. conv -> fully-connected -> softmax):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5),
                          poolsize=(2, 2)),
            FullyConnectedLayer(n_in=20*12*12, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)],
            mini_batch_size)
    """
    def __init__(self, layers, mini_batch_size):
        """Takes a list of `layers`, describing the network architecture, and
        a value for the `mini_batch_size` to be used during training
        by stochastic gradient descent.
        """
        self.layers = layers
        self.mini_batch_size = mini_batch_size
        # Flatten every layer's trainable parameters so T.grad can
        # differentiate the cost w.r.t. all of them at once.
        self.params = [param for layer in self.layers for param in layer.params]
        self.x = T.matrix("x")
        self.y = T.ivector("y")
        # Wire the layers into one symbolic graph: each layer consumes the
        # previous layer's plain and dropout outputs; the first layer gets
        # the raw input on both paths.
        init_layer = self.layers[0]
        init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
        for j in xrange(1, len(self.layers)):
            prev_layer, layer = self.layers[j - 1], self.layers[j]
            layer.set_inpt(
                prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
        self.output = self.layers[-1].output
        self.output_dropout = self.layers[-1].output_dropout
    def SGD(self, training_data, epochs, mini_batch_size, eta,
            validation_data, test_data, lmbda=0.0):
        """Train the network using mini-batch stochastic gradient descent.

        `training_data`, `validation_data` and `test_data` are (x, y) pairs
        of Theano shared variables as returned by `load_data_shared`.
        `mini_batch_size` should equal the value the network was built with,
        because the compiled functions slice with `self.mini_batch_size`.
        `eta` is the learning rate and `lmbda` the L2 regularization weight.
        """
        training_x, training_y = training_data
        validation_x, validation_y = validation_data
        test_x, test_y = test_data
        # compute number of minibatches for training, validation and testing
        # (// keeps the counts integral under Python 3 semantics too; plain
        # `/` would silently yield floats there and break xrange/range)
        num_training_batches = size(training_data) // mini_batch_size
        num_validation_batches = size(validation_data) // mini_batch_size
        num_test_batches = size(test_data) // mini_batch_size
        # define the (regularized) cost function, symbolic gradients, and updates
        l2_norm_squared = sum([(layer.w ** 2).sum() for layer in self.layers])
        cost = self.layers[-1].cost(self) + \
            0.5 * lmbda * l2_norm_squared / num_training_batches
        grads = T.grad(cost, self.params)
        # Plain SGD: each parameter steps along its negative gradient.
        updates = [(param, param - eta * grad)
                   for param, grad in zip(self.params, grads)]
        # define functions to train a mini-batch, and to compute the
        # accuracy in validation and test mini-batches.
        i = T.lscalar()  # mini-batch index
        train_mb = theano.function(
            [i], cost, updates=updates,
            givens={
                self.x:
                training_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                training_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        validate_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                validation_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                validation_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        test_mb_accuracy = theano.function(
            [i], self.layers[-1].accuracy(self.y),
            givens={
                self.x:
                test_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size],
                self.y:
                test_y[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        self.test_mb_predictions = theano.function(
            [i], self.layers[-1].y_out,
            givens={
                self.x:
                test_x[i * self.mini_batch_size: (i + 1) * self.mini_batch_size]
            })
        # Do the actual training
        best_validation_accuracy = 0.0
        # Pre-initialize so the summary prints below cannot hit a NameError
        # when epochs == 0 or the test accuracy was never computed.
        best_iteration = -1
        test_accuracy = None
        for epoch in xrange(epochs):
            for minibatch_index in xrange(num_training_batches):
                iteration = num_training_batches * epoch + minibatch_index
                if iteration % 1000 == 0:
                    print("Training mini-batch number {0}".format(iteration))
                cost_ij = train_mb(minibatch_index)
                # Evaluate once per full pass over the training data.
                if (iteration + 1) % num_training_batches == 0:
                    validation_accuracy = np.mean(
                        [validate_mb_accuracy(j) for j in xrange(num_validation_batches)])
                    print("Epoch {0}: validation accuracy {1:.2%}".format(
                        epoch, validation_accuracy))
                    if validation_accuracy >= best_validation_accuracy:
                        print("This is the best validation accuracy to date.")
                        best_validation_accuracy = validation_accuracy
                        best_iteration = iteration
                        if test_data:
                            test_accuracy = np.mean(
                                [test_mb_accuracy(j) for j in xrange(num_test_batches)])
                            print('The corresponding test accuracy is {0:.2%}'.format(
                                test_accuracy))
        print("Finished training network.")
        print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
            best_validation_accuracy, best_iteration))
        if test_accuracy is not None:
            print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))
#### Define layer types
class ConvPoolLayer(object):
    """Used to create a combination of a convolutional and a max-pooling
    layer. A more sophisticated implementation would separate the
    two, but for our purposes we'll always use them together, and it
    simplifies the code, so it makes sense to combine them.
    """
    def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
                 activation_fn=sigmoid):
        """`filter_shape` is a tuple of length 4, whose entries are the number
        of filters, the number of input feature maps, the filter height, and the
        filter width.
        `image_shape` is a tuple of length 4, whose entries are the
        mini-batch size, the number of input feature maps, the image
        height, and the image width.
        `poolsize` is a tuple of length 2, whose entries are the y and
        x pooling sizes.
        """
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.poolsize = poolsize
        self.activation_fn = activation_fn
        # initialize weights and biases
        # n_out is one neuron's fan-out after pooling; weights are drawn from
        # N(0, 1/n_out) to keep the initial activations in a reasonable range.
        n_out = (filter_shape[0] * np.prod(filter_shape[2:]) / np.prod(poolsize))
        self.w = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=np.sqrt(1.0 / n_out), size=filter_shape),
                dtype=theano.config.floatX),
            borrow=True)
        self.b = theano.shared(
            np.asarray(
                np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
                dtype=theano.config.floatX),
            borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the symbolic convolve -> max-pool -> activation graph.

        `inpt_dropout` is accepted for interface compatibility with the other
        layer types but is ignored: this layer applies no dropout.
        """
        self.inpt = inpt.reshape(self.image_shape)
        conv_out = conv.conv2d(
            input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
            image_shape=self.image_shape)
        # NOTE(review): theano.tensor.signal.downsample was deprecated in later
        # Theano releases in favour of signal.pool — verify the installed
        # Theano version still provides max_pool_2d here.
        pooled_out = downsample.max_pool_2d(
            input=conv_out, ds=self.poolsize, ignore_border=True)
        # One bias per filter, broadcast over batch and spatial dimensions.
        self.output = self.activation_fn(
            pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.output_dropout = self.output # no dropout in the convolutional layers
class FullyConnectedLayer(object):
    """A fully-connected (dense) layer with a configurable activation
    function and dropout probability.
    """
    def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
        """`n_in`/`n_out` are the input and output dimensions; `p_dropout`
        is the probability of dropping a unit during training.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.activation_fn = activation_fn
        self.p_dropout = p_dropout
        # Initialize weights and biases
        # Weights are drawn from N(0, 1/n_out) to avoid saturating the
        # activation at the start of training; biases from a unit normal.
        self.w = theano.shared(
            np.asarray(
                np.random.normal(
                    loc=0.0, scale=np.sqrt(1.0 / n_out), size=(n_in, n_out)),
                dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
                       dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the inference path (`output`, input scaled by 1 - p_dropout
        to compensate for training-time dropout) and the training path
        (`output_dropout`, with dropout actually applied).
        """
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = self.activation_fn(
            (1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
        # Predicted class: index of the strongest activation per example.
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = self.activation_fn(
            T.dot(self.inpt_dropout, self.w) + self.b)
    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))
class SoftmaxLayer(object):
    """An output layer producing softmax class probabilities, with optional
    dropout on its input.
    """
    def __init__(self, n_in, n_out, p_dropout=0.0):
        """`n_in` is the input dimension, `n_out` the number of classes;
        `p_dropout` is the dropout probability for the input.
        """
        self.n_in = n_in
        self.n_out = n_out
        self.p_dropout = p_dropout
        # Initialize weights and biases
        # Zero initialization: unlike the hidden layers, no random
        # symmetry-breaking is used for the softmax parameters here.
        self.w = theano.shared(
            np.zeros((n_in, n_out), dtype=theano.config.floatX),
            name='w', borrow=True)
        self.b = theano.shared(
            np.zeros((n_out,), dtype=theano.config.floatX),
            name='b', borrow=True)
        self.params = [self.w, self.b]
    def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        """Build the inference path (`output`, input scaled by 1 - p_dropout)
        and the training path (`output_dropout`, with dropout applied).
        """
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
        # Predicted class: index of the highest probability per example.
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
    def cost(self, net):
        "Return the log-likelihood cost."
        # Negative mean log-probability of the correct class, computed on
        # the dropout (training) path.
        return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])
    def accuracy(self, y):
        "Return the accuracy for the mini-batch."
        return T.mean(T.eq(y, self.y_out))
#### Miscellanea
def size(data):
    """Return the number of examples in the dataset `data`.

    `data` is an (x, y) pair whose first element is a shared variable; the
    example count is the length of its first axis.
    """
    shared_x = data[0]
    return shared_x.get_value(borrow=True).shape[0]
def dropout_layer(layer, p_dropout):
srng = shared_randomstreams.RandomStreams(
np.random.RandomState(0).randint(999999))
mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
return layer * T.cast(mask, theano.config.floatX) | 0.769427 | 0.444384 |
import unittest
import units.area.square_miles
class TestSquareMilesMethods(unittest.TestCase):
    """Check square-mile conversions against precomputed reference values."""

    def _assert_conversions(self, convert, cases):
        # Each case pairs the expected result with the square-mile input;
        # comparisons use 1 decimal place, matching the reference data.
        for expected, square_miles in cases:
            self.assertAlmostEqual(expected, convert(square_miles), places=1)

    def test_convert_known_square_miles_to_square_kilometres(self):
        self._assert_conversions(
            units.area.square_miles.to_square_kilometres,
            [(7.76996, 3.0), (2.33099, 0.9), (258.999, 100.0)])

    def test_convert_known_square_miles_to_square_metres(self):
        self._assert_conversions(
            units.area.square_miles.to_square_metres,
            [(23309.893, 0.009), (25899.88, 0.010), (1553.99287, 0.0006)])

    def test_convert_known_square_miles_to_square_yards(self):
        self._assert_conversions(
            units.area.square_miles.to_square_yards,
            [(30976.0, 0.01), (3810.048, 0.00123), (278784.0, 0.09)])

    def test_convert_known_square_miles_to_square_feet(self):
        self._assert_conversions(
            units.area.square_miles.to_square_feet,
            [(278784.0, 0.01), (139392.0, 0.005), (2230272.0, 0.08)])

    def test_convert_known_square_miles_to_square_inches(self):
        self._assert_conversions(
            units.area.square_miles.to_square_inches,
            [(401448.96, 0.0001), (3934199.808, 0.00098), (28101.4272, 0.000007)])

    def test_convert_known_square_miles_to_hectares(self):
        self._assert_conversions(
            units.area.square_miles.to_hectares,
            [(25899.881103, 100.0), (595.697, 2.3), (233.099, 0.9)])

    def test_convert_known_square_miles_to_acres(self):
        self._assert_conversions(
            units.area.square_miles.to_acres,
            [(1280.0, 2.0), (64.0, 0.1), (2944.0, 4.6)])
if __name__ == '__main__':
unittest.main() | tests/test_area_square_miles.py |
import unittest
import units.area.square_miles
class TestSquareMilesMethods(unittest.TestCase):
    """Check square-mile conversions against precomputed reference values."""

    def _assert_conversions(self, convert, cases):
        # Each case pairs the expected result with the square-mile input;
        # comparisons use 1 decimal place, matching the reference data.
        for expected, square_miles in cases:
            self.assertAlmostEqual(expected, convert(square_miles), places=1)

    def test_convert_known_square_miles_to_square_kilometres(self):
        self._assert_conversions(
            units.area.square_miles.to_square_kilometres,
            [(7.76996, 3.0), (2.33099, 0.9), (258.999, 100.0)])

    def test_convert_known_square_miles_to_square_metres(self):
        self._assert_conversions(
            units.area.square_miles.to_square_metres,
            [(23309.893, 0.009), (25899.88, 0.010), (1553.99287, 0.0006)])

    def test_convert_known_square_miles_to_square_yards(self):
        self._assert_conversions(
            units.area.square_miles.to_square_yards,
            [(30976.0, 0.01), (3810.048, 0.00123), (278784.0, 0.09)])

    def test_convert_known_square_miles_to_square_feet(self):
        self._assert_conversions(
            units.area.square_miles.to_square_feet,
            [(278784.0, 0.01), (139392.0, 0.005), (2230272.0, 0.08)])

    def test_convert_known_square_miles_to_square_inches(self):
        self._assert_conversions(
            units.area.square_miles.to_square_inches,
            [(401448.96, 0.0001), (3934199.808, 0.00098), (28101.4272, 0.000007)])

    def test_convert_known_square_miles_to_hectares(self):
        self._assert_conversions(
            units.area.square_miles.to_hectares,
            [(25899.881103, 100.0), (595.697, 2.3), (233.099, 0.9)])

    def test_convert_known_square_miles_to_acres(self):
        self._assert_conversions(
            units.area.square_miles.to_acres,
            [(1280.0, 2.0), (64.0, 0.1), (2944.0, 4.6)])
if __name__ == '__main__':
unittest.main() | 0.712332 | 0.835215 |
from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
import torch
from torch.utils.data import DataLoader
from monai.engines.utils import CommonKeys as Keys
from monai.engines.utils import IterationEvents, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.networks.utils import eval_mode
from monai.transforms import Transform
from monai.utils import ensure_tuple, exact_version, optional_import
if TYPE_CHECKING:
from ignite.engine import Engine
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric")
__all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"]
class Evaluator(Workflow):
    """
    Base class for all kinds of evaluators, inherits from Workflow.
    Args:
        device: an object representing the device on which to run.
        val_data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader.
        epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function to parse image and label for current iteration.
        iteration_update: the callable function for every iteration, expect to accept `engine`
            and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
        post_transform: execute additional transformation for the model output data.
            Typically, several Tensor based transforms composed by `Compose`.
        key_val_metric: compute metric when every iteration completed, and save average value to
            engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
            checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
            CheckpointHandler, StatsHandler, SegmentationSaver, etc.
        amp: whether to enable auto-mixed-precision evaluation, default is False.
    """
    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        post_transform: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        # An evaluator is a single-epoch Workflow: max_epochs is pinned to 1
        # and run() below rewrites the epoch counters for each validation.
        super().__init__(
            device=device,
            max_epochs=1,
            data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_metric=key_val_metric,
            additional_metrics=additional_metrics,
            handlers=val_handlers,
            amp=amp,
        )
    def run(self, global_epoch: int = 1) -> None:
        """
        Execute validation/evaluation based on Ignite Engine.
        Args:
            global_epoch: the overall epoch if during a training. evaluator engine can get it from trainer.
        """
        # init env value for current validation process
        # epoch is set to global_epoch - 1 — presumably the engine advances
        # it to global_epoch when the single validation epoch starts; confirm
        # against the Workflow/Ignite engine implementation.
        self.state.max_epochs = global_epoch
        self.state.epoch = global_epoch - 1
        self.state.iteration = 0
        super().run()
    def get_validation_stats(self) -> Dict[str, float]:
        """Return the best recorded key-metric value and the epoch at which it occurred."""
        return {"best_validation_metric": self.state.best_metric, "best_validation_epoch": self.state.best_metric_epoch}
class SupervisedEvaluator(Evaluator):
    """
    Supervised evaluation workflow for a single network, built on top of
    Evaluator and Workflow.  Each iteration prepares a batch, runs the model
    forward in eval mode (optionally under AMP), and stores image, label and
    prediction in ``engine.state.output``.

    Args:
        device: the device on which computation runs.
        val_data_loader: iterable (typically a torch.DataLoader) feeding the engine.
        network: the model whose forward pass is evaluated.
        epoch_length: iterations per epoch; defaults to ``len(val_data_loader)``.
        non_blocking: if True and the copy is between CPU and GPU, it may be
            asynchronous with respect to the host; otherwise no effect.
        prepare_batch: callable extracting image and label from a batch.
        iteration_update: optional per-iteration callable accepting ``engine``
            and ``batchdata``; defaults to ``self._iteration()``.
        inferer: forward-pass strategy (e.g. sliding window); defaults to
            ``SimpleInferer`` when not provided.
        key_val_metric: main metric, averaged into ``engine.state.metrics`` at
            epoch end and used to decide which checkpoint to keep.
        additional_metrics: further Ignite metrics to attach to the engine.
        post_transform: extra transform(s) applied to the model output,
            typically Tensor-based transforms composed by ``Compose``.
        val_handlers: Ignite event handlers; each must provide ``attach``.
        amp: enable auto-mixed-precision evaluation; default False.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        network: torch.nn.Module,
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        post_transform: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            val_handlers=val_handlers,
            amp=amp,
        )
        self.network = network
        # Fall back to a plain forward pass when no inferer is supplied.
        if inferer is None:
            inferer = SimpleInferer()
        self.inferer = inferer

    def _register_additional_events(self):
        # Expose the extra iteration-level events (e.g. FORWARD_COMPLETED)
        # so handlers can hook into the middle of an iteration.
        super()._register_additional_events()
        self.register_events(*IterationEvents)

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Run the forward pass for one validation iteration.

        The returned dictionary (also stored in ``engine.state.output``)
        contains:
            - IMAGE: input Tensor, already moved to the device.
            - LABEL: corresponding label Tensor, already moved to the device.
            - PRED: the model's prediction.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually a dictionary
                or tuple of Tensor data.

        Raises:
            ValueError: When ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        batch = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # A 2-element batch is (inputs, targets); longer batches also carry
        # extra positional/keyword arguments for the inferer.
        args: Tuple = ()
        kwargs: Dict = {}
        if len(batch) == 2:
            inputs, targets = batch
        else:
            inputs, targets, args, kwargs = batch
        output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
        engine.state.output = output
        # Forward computation only — the network is temporarily in eval mode
        # and no gradients are needed for evaluation.
        with eval_mode(self.network):
            if self.amp:
                with torch.cuda.amp.autocast():
                    output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
            else:
                output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        return output
class EnsembleEvaluator(Evaluator):
"""
Ensemble evaluation for multiple models, inherits from evaluator and Workflow.
It accepts a list of models for inference and outputs a list of predictions for further operations.
Args:
device: an object representing the device on which to run.
val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader.
epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.
networks: use the networks to run model forward in order.
pred_keys: the keys to store every prediction data.
the length must exactly match the number of networks.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function to parse image and label for current iteration.
iteration_update: the callable function for every iteration, expect to accept `engine`
and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
post_transform: execute additional transformation for the model output data.
Typically, several Tensor based transforms composed by `Compose`.
key_val_metric: compute metric when every iteration completed, and save average value to
engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
checkpoint into files.
additional_metrics: more Ignite metrics that also attach to Ignite Engine.
val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
CheckpointHandler, StatsHandler, SegmentationSaver, etc.
amp: whether to enable auto-mixed-precision evaluation, default is False.
"""
def __init__(
self,
device: torch.device,
val_data_loader: Union[Iterable, DataLoader],
networks: Sequence[torch.nn.Module],
pred_keys: Sequence[str],
epoch_length: Optional[int] = None,
non_blocking: bool = False,
prepare_batch: Callable = default_prepare_batch,
iteration_update: Optional[Callable] = None,
inferer: Optional[Inferer] = None,
post_transform: Optional[Transform] = None,
key_val_metric: Optional[Dict[str, Metric]] = None,
additional_metrics: Optional[Dict[str, Metric]] = None,
val_handlers: Optional[Sequence] = None,
amp: bool = False,
) -> None:
super().__init__(
device=device,
val_data_loader=val_data_loader,
epoch_length=epoch_length,
non_blocking=non_blocking,
prepare_batch=prepare_batch,
iteration_update=iteration_update,
post_transform=post_transform,
key_val_metric=key_val_metric,
additional_metrics=additional_metrics,
val_handlers=val_handlers,
amp=amp,
)
self.networks = ensure_tuple(networks)
self.pred_keys = ensure_tuple(pred_keys)
self.inferer = SimpleInferer() if inferer is None else inferer
def _register_additional_events(self):
super()._register_additional_events()
self.register_events(*IterationEvents)
def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
Return below items in a dictionary:
- IMAGE: image Tensor data for model input, already moved to device.
- LABEL: label Tensor data corresponding to the image, already moved to device.
- pred_keys[0]: prediction result of network 0.
- pred_keys[1]: prediction result of network 1.
- ... ...
- pred_keys[N]: prediction result of network N.
Args:
engine: Ignite Engine, it can be a trainer, validator or evaluator.
batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.
Raises:
ValueError: When ``batchdata`` is None.
"""
if batchdata is None:
raise ValueError("Must provide batch data for current iteration.")
batch = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
if len(batch) == 2:
inputs, targets = batch
args: Tuple = ()
kwargs: Dict = {}
else:
inputs, targets, args, kwargs = batch
# put iteration outputs into engine.state
engine.state.output = output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
for idx, network in enumerate(self.networks):
with eval_mode(network):
if self.amp:
with torch.cuda.amp.autocast():
output.update({self.pred_keys[idx]: self.inferer(inputs, network, *args, **kwargs)})
else:
output.update({self.pred_keys[idx]: self.inferer(inputs, network, *args, **kwargs)})
engine.fire_event(IterationEvents.FORWARD_COMPLETED)
return output | monai/engines/evaluator.py |
from typing import TYPE_CHECKING, Callable, Dict, Iterable, Optional, Sequence, Tuple, Union
import torch
from torch.utils.data import DataLoader
from monai.engines.utils import CommonKeys as Keys
from monai.engines.utils import IterationEvents, default_prepare_batch
from monai.engines.workflow import Workflow
from monai.inferers import Inferer, SimpleInferer
from monai.networks.utils import eval_mode
from monai.transforms import Transform
from monai.utils import ensure_tuple, exact_version, optional_import
if TYPE_CHECKING:
from ignite.engine import Engine
from ignite.metrics import Metric
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
Metric, _ = optional_import("ignite.metrics", "0.4.4", exact_version, "Metric")
__all__ = ["Evaluator", "SupervisedEvaluator", "EnsembleEvaluator"]
class Evaluator(Workflow):
    """
    Common base for every evaluation engine, built on top of ``Workflow``.

    An evaluator is a one-epoch workflow: ``max_epochs`` is pinned to 1 and the
    supplied loader, metrics and handlers are forwarded to the parent class
    under its own argument names.

    Args:
        device: an object representing the device on which to run.
        val_data_loader: the engine uses this loader to run; must be Iterable or torch.DataLoader.
        epoch_length: number of iterations for one epoch, defaults to ``len(val_data_loader)``.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur
            asynchronously with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function to parse image and label for the current iteration.
        iteration_update: callable invoked on every iteration, expected to accept ``engine``
            and ``batchdata`` as inputs. If not provided, ``self._iteration()`` is used instead.
        post_transform: additional transformation for the model output data.
            Typically, several Tensor based transforms composed by ``Compose``.
        key_val_metric: the main metric; computed each iteration, averaged into
            ``engine.state.metrics`` at epoch end, and used to compare/save checkpoints.
        additional_metrics: more Ignite metrics that also attach to the engine.
        val_handlers: sequence of Ignite Event-Handlers, each providing an ``attach``
            function, e.g. CheckpointHandler, StatsHandler, SegmentationSaver.
        amp: whether to enable auto-mixed-precision evaluation, default is False.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        post_transform: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        # An evaluation run is always exactly one pass over the data.
        super().__init__(
            device=device,
            max_epochs=1,
            data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_metric=key_val_metric,
            additional_metrics=additional_metrics,
            handlers=val_handlers,
            amp=amp,
        )

    def run(self, global_epoch: int = 1) -> None:
        """
        Execute one round of validation/evaluation based on the Ignite Engine.

        Args:
            global_epoch: the overall epoch if during a training; an evaluator
                engine can get it from the trainer.
        """
        # Rewind engine state so this call performs exactly one epoch,
        # reported under the trainer's current epoch number.
        self.state.max_epochs = global_epoch
        self.state.epoch = global_epoch - 1
        self.state.iteration = 0
        super().run()

    def get_validation_stats(self) -> Dict[str, float]:
        """Return the best key-metric value seen so far and the epoch that produced it."""
        stats = {
            "best_validation_metric": self.state.best_metric,
            "best_validation_epoch": self.state.best_metric_epoch,
        }
        return stats
class SupervisedEvaluator(Evaluator):
    """
    Standard supervised evaluation over image/label pairs, inheriting from
    ``Evaluator`` and ``Workflow``.

    Args:
        device: an object representing the device on which to run.
        val_data_loader: the engine uses this loader to run; must be Iterable, typically torch.DataLoader.
        network: the network used for the forward pass.
        epoch_length: number of iterations for one epoch, defaults to ``len(val_data_loader)``.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur
            asynchronously with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function to parse image and label for the current iteration.
        iteration_update: callable invoked on every iteration, expected to accept ``engine``
            and ``batchdata`` as inputs. If not provided, ``self._iteration()`` is used instead.
        inferer: inference method executing the model forward pass, e.g. SlidingWindow.
            Defaults to ``SimpleInferer`` when None.
        post_transform: additional transformation for the model output data.
            Typically, several Tensor based transforms composed by ``Compose``.
        key_val_metric: the main metric; computed each iteration, averaged into
            ``engine.state.metrics`` at epoch end, and used to compare/save checkpoints.
        additional_metrics: more Ignite metrics that also attach to the engine.
        val_handlers: sequence of Ignite Event-Handlers, each providing an ``attach``
            function, e.g. CheckpointHandler, StatsHandler, SegmentationSaver.
        amp: whether to enable auto-mixed-precision evaluation, default is False.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        network: torch.nn.Module,
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        post_transform: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            val_handlers=val_handlers,
            amp=amp,
        )
        self.network = network
        self.inferer = SimpleInferer() if inferer is None else inferer

    def _register_additional_events(self):
        # Expose fine-grained iteration events (e.g. FORWARD_COMPLETED) to handlers.
        super()._register_additional_events()
        self.register_events(*IterationEvents)

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Run the supervised-evaluation logic for a single Ignite iteration.

        The returned dictionary contains:
            - IMAGE: image Tensor for model input, already on the target device.
            - LABEL: label Tensor matching the image, already on the target device.
            - PRED: the model's prediction.

        Args:
            engine: Ignite Engine; may be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually a dictionary or tuple of Tensors.

        Raises:
            ValueError: When ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        batch = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # prepare_batch may return (inputs, targets) or (inputs, targets, args, kwargs).
        args: Tuple = ()
        kwargs: Dict = {}
        if len(batch) == 2:
            inputs, targets = batch
        else:
            inputs, targets, args, kwargs = batch
        # Publish the iteration output on engine.state so handlers can see it.
        output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
        engine.state.output = output
        # Forward pass in eval mode (no grad, eval-mode normalization layers).
        with eval_mode(self.network):
            if self.amp:
                with torch.cuda.amp.autocast():
                    output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
            else:
                output[Keys.PRED] = self.inferer(inputs, self.network, *args, **kwargs)
        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        return output
class EnsembleEvaluator(Evaluator):
    """
    Ensemble evaluation for multiple models, inherits from evaluator and Workflow.
    It accepts a list of models for inference and outputs a list of predictions for further operations.

    Args:
        device: an object representing the device on which to run.
        val_data_loader: Ignite engine use data_loader to run, must be Iterable, typically be torch.DataLoader.
        epoch_length: number of iterations for one epoch, default to `len(val_data_loader)`.
        networks: use the networks to run model forward in order.
        pred_keys: the keys to store every prediction data.
            the length must exactly match the number of networks.
        non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
            with respect to the host. For other cases, this argument has no effect.
        prepare_batch: function to parse image and label for current iteration.
        iteration_update: the callable function for every iteration, expect to accept `engine`
            and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.
        inferer: inference method that execute model forward on input data, like: SlidingWindow, etc.
        post_transform: execute additional transformation for the model output data.
            Typically, several Tensor based transforms composed by `Compose`.
        key_val_metric: compute metric when every iteration completed, and save average value to
            engine.state.metrics when epoch completed. key_val_metric is the main metric to compare and save the
            checkpoint into files.
        additional_metrics: more Ignite metrics that also attach to Ignite Engine.
        val_handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:
            CheckpointHandler, StatsHandler, SegmentationSaver, etc.
        amp: whether to enable auto-mixed-precision evaluation, default is False.

    Raises:
        ValueError: when the length of ``pred_keys`` does not match the number of ``networks``.
    """

    def __init__(
        self,
        device: torch.device,
        val_data_loader: Union[Iterable, DataLoader],
        networks: Sequence[torch.nn.Module],
        pred_keys: Sequence[str],
        epoch_length: Optional[int] = None,
        non_blocking: bool = False,
        prepare_batch: Callable = default_prepare_batch,
        iteration_update: Optional[Callable] = None,
        inferer: Optional[Inferer] = None,
        post_transform: Optional[Transform] = None,
        key_val_metric: Optional[Dict[str, Metric]] = None,
        additional_metrics: Optional[Dict[str, Metric]] = None,
        val_handlers: Optional[Sequence] = None,
        amp: bool = False,
    ) -> None:
        super().__init__(
            device=device,
            val_data_loader=val_data_loader,
            epoch_length=epoch_length,
            non_blocking=non_blocking,
            prepare_batch=prepare_batch,
            iteration_update=iteration_update,
            post_transform=post_transform,
            key_val_metric=key_val_metric,
            additional_metrics=additional_metrics,
            val_handlers=val_handlers,
            amp=amp,
        )
        self.networks = ensure_tuple(networks)
        self.pred_keys = ensure_tuple(pred_keys)
        # Every network needs its own output key; validating eagerly here is far
        # clearer than the IndexError (or silently dropped prediction) a mismatch
        # would otherwise cause deep inside `_iteration`.
        if len(self.networks) != len(self.pred_keys):
            raise ValueError("the length of `pred_keys` must exactly match the number of `networks`.")
        self.inferer = SimpleInferer() if inferer is None else inferer

    def _register_additional_events(self):
        # Expose fine-grained iteration events (e.g. FORWARD_COMPLETED) to handlers.
        super()._register_additional_events()
        self.register_events(*IterationEvents)

    def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        callback function for the Supervised Evaluation processing logic of 1 iteration in Ignite Engine.
        Return below items in a dictionary:
            - IMAGE: image Tensor data for model input, already moved to device.
            - LABEL: label Tensor data corresponding to the image, already moved to device.
            - pred_keys[0]: prediction result of network 0.
            - pred_keys[1]: prediction result of network 1.
            - ... ...
            - pred_keys[N]: prediction result of network N.

        Args:
            engine: Ignite Engine, it can be a trainer, validator or evaluator.
            batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.

        Raises:
            ValueError: When ``batchdata`` is None.
        """
        if batchdata is None:
            raise ValueError("Must provide batch data for current iteration.")
        batch = self.prepare_batch(batchdata, engine.state.device, engine.non_blocking)
        # prepare_batch may return (inputs, targets) or (inputs, targets, args, kwargs).
        if len(batch) == 2:
            inputs, targets = batch
            args: Tuple = ()
            kwargs: Dict = {}
        else:
            inputs, targets, args, kwargs = batch
        # put iteration outputs into engine.state so handlers can see them
        engine.state.output = output = {Keys.IMAGE: inputs, Keys.LABEL: targets}
        # run every ensemble member in eval mode, storing each prediction under its key
        for idx, network in enumerate(self.networks):
            with eval_mode(network):
                if self.amp:
                    with torch.cuda.amp.autocast():
                        output[self.pred_keys[idx]] = self.inferer(inputs, network, *args, **kwargs)
                else:
                    output[self.pred_keys[idx]] = self.inferer(inputs, network, *args, **kwargs)
        engine.fire_event(IterationEvents.FORWARD_COMPLETED)
        return output
import sys
import subprocess
import datetime
import os
import time
import numpy as np
sys.path.insert(0, 'imports')
from imports import ffmpeg_installer
from imports import gce_utils
CODEC_TO_USE = 'libx264'
PARAMETERS_BUCKET = 'livepeer-qoe-renditions-params'
SOURCES_BUCKET = 'livepeer-qoe-sources'
RENDITIONS_BUCKET = 'livepeer-crf-renditions'
ENTITY_NAME = 'features_input_QoE'
def download_video_from_url(video_url, duration, local_file, extension):
"""
Downloads a video from a given url to an HLS manifest
"""
local_folder = os.path.dirname(local_file)
if not os.path.exists(local_folder):
os.makedirs(local_folder)
print('Downloading {} to {}'.format(video_url, local_file))
seek_time = str(datetime.timedelta(seconds=int(duration)/2))
end_time = str(datetime.timedelta(seconds=(int(duration)/2)+10))
print(seek_time)
ffmpeg_command = ['ffmpeg -y -i {} -ss {} -to {}'.format(video_url, seek_time, end_time),
'-vcodec copy',
'-acodec copy',
'-f {} {}'.format(extension, local_file)]
ffmpeg = subprocess.Popen(' '.join(ffmpeg_command),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
out, err = ffmpeg.communicate()
print(' '.join(ffmpeg_command), out, err)
if not os.path.exists(local_file):
print('Unable to download {}'.format(local_file))
return False
return True
def compute_metrics(asset, renditions):
'''
Function that instantiates the VideoAssetProcessor class with a list
of metrics to be computed.
The feature_list argument is left void as every descriptor of each
temporal metric is potentially used for model training
'''
from imports.video_asset_processor import VideoAssetProcessor
start_time = time.time()
source_asset = asset
max_samples = 30
renditions_list = renditions
metrics_list = ['temporal_ssim',
'temporal_psnr',
'temporal_dct',
'temporal_gaussian_mse',
'temporal_gaussian_difference',
'temporal_threshold_gaussian_difference',
]
print('Computing asset: {}, max samples used: {}'.format(asset, max_samples))
asset_processor = VideoAssetProcessor(source_asset,
renditions_list,
metrics_list,
False,
max_samples,
features_list=None)
metrics_df, _, _ = asset_processor.process()
for _, row in metrics_df.iterrows():
line = row.to_dict()
for column in metrics_df.columns:
if 'series' in column:
line[column] = np.array2string(np.around(line[column], decimals=5))
gce_utils.add_asset_input('{}/{}'.format(row['title'], row['attack']), line, ENTITY_NAME)
elapsed_time = time.time() - start_time
print('Computation time:', elapsed_time)
def dataset_generator_qoe_http(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object, containing the name
of the source asset
Returns:
The response text, or any set of values that can be turned into a
Response object using `make_response`
"""
request_json = request.get_json(silent=True)
request_args = request.args
if request_json and 'name' in request_json:
source_name = request_json['name']
elif request_args and 'name' in request_args:
source_name = request_args['name']
# Create the folder for the source asset
source_folder = '/tmp/{}'.format(os.path.dirname(source_name))
if not os.path.exists(source_folder):
os.makedirs(source_folder)
# Get the file that has been uploaded to GCS
asset_path = {'path': '{}{}'.format(source_folder,
source_name.replace(os.path.dirname(source_name), ''))}
renditions_paths = []
# Check if the source is not already in the path
if not os.path.exists(asset_path['path']):
gce_utils.download_to_local(SOURCES_BUCKET, asset_path['path'], source_name)
#Bring the attacks to be processed locally
resolutions = [1080, 720, 480, 384, 288, 144]
crfs = [45, 40, 32, 25, 21, 18, 14]
# Create a comprehension list with all the possible attacks
rendition_list = ['{}_{}'.format(resolution, crf)
for resolution in resolutions
for crf in crfs
]
for rendition in rendition_list:
remote_file = '{}/{}'.format(rendition, source_name)
rendition_folder = '/tmp/{}'.format(rendition)
local_path = '{}/{}'.format(rendition_folder, source_name)
try:
gce_utils.download_to_local(RENDITIONS_BUCKET,
local_path,
remote_file)
renditions_paths.append({'path': local_path})
except Exception as err:
print('Unable to download {}/{}: {}'.format(rendition, source_name, err))
if len(renditions_paths) > 0:
print('Processing the following renditions: {}'.format(renditions_paths))
compute_metrics(asset_path, renditions_paths)
else:
print('Empty renditions list. No renditions to process')
# Cleanup
if os.path.exists(asset_path['path']):
os.remove(asset_path['path'])
for rendition in rendition_list:
rendition_folder = '/tmp/{}'.format(rendition)
local_path = '{}/{}'.format(rendition_folder, source_name)
if os.path.exists(local_path):
os.remove(local_path)
return 'Process completed: {}'.format(asset_path['path'])
def trigger_renditions_bucket_event(data, context):
"""Background Cloud Function to be triggered by Cloud Storage.
This function retrieves a source video and triggers
the generation of renditions by means of an http asynchronous
call to the create_renditions_http function
Args:
data (dict): The Cloud Functions event payload.
context (google.cloud.functions.Context): Metadata of triggering event.
Returns:
None, the renditions cloud function are triggered asynchronously
"""
name = data['name']
# Create the folder for the renditions
params_folder = '/tmp/{}'.format(os.path.dirname(name))
if not os.path.exists(params_folder):
os.makedirs(params_folder)
resolutions = [1080, 720, 480, 384, 288, 144]
crfs = [45, 40, 32, 25, 21, 18, 14]
for resolution in resolutions:
for crf in crfs:
local_file = '{}/{}-{}-{}.json'.format(params_folder.replace(os.path.dirname(name), ''),
name,
resolution,
crf)
remote_file = '{}/{}-{}.json'.format(name,
resolution,
crf)
file_output = open(local_file, "w")
file_output.close()
gce_utils.upload_blob(PARAMETERS_BUCKET, local_file, remote_file)
return 'Renditions triggered for {}'.format(name)
def create_renditions_bucket_event(data, context):
"""
HTTP Cloud Function to generate video assets. Triggered by files
deposited in PARAMETERS_BUCKET
Args:
data: The triggering object, containing name, resolution and quantization parameter
Returns:
The status message if successful
"""
source_name = os.path.dirname(data['name'])
params_name = data['name'].replace(source_name, '')
resolution = params_name.split('-')[0][1:]
crf_value = params_name.split('-')[1].replace('.json', '')
print('Processing source: {} at resolution {}'.format(source_name, resolution))
# Locate the ffmpeg binary
ffmpeg_installer.install()
# Create the folder for the source asset
source_folder = '/tmp/source'
# Create the folder for the renditions
renditions_folder = '/tmp/renditions'
if not os.path.exists(renditions_folder):
os.makedirs(renditions_folder)
# Get the file that has been uploaded to GCS
asset_path = {'path': '{}/{}'.format(source_folder, source_name)}
# Check if the source is not already in the path
if not os.path.exists(asset_path['path']):
print('Retrieving video from {}'.format(asset_path['path']))
gce_utils.download_to_local(SOURCES_BUCKET, asset_path['path'], source_name)
print('Processing resolution', resolution)
# Create folder for each rendition
bucket_path = '{}_{}/{}'.format(resolution, crf_value, source_name)
print('Bucket path:', bucket_path)
if not gce_utils.check_blob(RENDITIONS_BUCKET, bucket_path):
crf_path = '{}/{}_{}/{}'.format(renditions_folder,
resolution,
crf_value,
os.path.dirname(source_name))
if not os.path.exists(crf_path):
print('Creating rendition folder:', crf_path)
os.makedirs(crf_path)
# Generate renditions with ffmpeg
renditions_worker(asset_path['path'],
source_folder,
CODEC_TO_USE,
resolution,
crf_value,
renditions_folder)
#compute_metrics(asset_path, renditions_paths)
# Upload renditions to GCE storage bucket
local_path = '{}/{}_{}/{}'.format(renditions_folder, resolution, crf_value, source_name)
bucket_path = '{}_{}/{}'.format(resolution, crf_value, source_name)
gce_utils.upload_blob(RENDITIONS_BUCKET, local_path, bucket_path)
os.remove(local_path)
return 'FINISHED Processing source: {} at resolution {}'.format(source_name, resolution)
def renditions_worker(full_input_file, source_folder, codec, resolution, crf_value, output_folder):
"""
Executes ffmepg command via PIPE
"""
#Formats ffmpeg command to be executed in parallel for each Quantization parameter value
print('processing {}'.format(full_input_file))
source_name = full_input_file.replace('{}/'.format(source_folder), '')
output_name = '"{}/{}_{}/{}"'.format(output_folder, resolution, crf_value, source_name)
ffmpeg_command = ['ffmpeg', '-y', '-i', '"{}"'.format(full_input_file),
'-an',
'-c:v', codec,
'-copyts',
'-vsync 0',
'-copytb 1',
'-enc_time_base -1',
'-crf {}'.format(crf_value),
'-vf scale=-2:{}'.format(resolution),
output_name
]
ffmpeg = subprocess.Popen(' '.join(ffmpeg_command),
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True)
out, err = ffmpeg.communicate()
print(' '.join(ffmpeg_command), out, err)
def create_source_http(request):
"""HTTP Cloud Function.
Args:
request (flask.Request): The request object.
<http://flask.pocoo.org/docs/1.0/api/#flask.Request>
Returns:
The status message if successful
"""
request_json = request.get_json(silent=True)
request_args = request.args
if request_json:
playlist_url = request_json['playlist_url']
video_id = request_json['video_id']
extension = request_json['extension']
duration = request_json['duration']
elif request_args:
playlist_url = request_args['playlist_url']
video_id = request_args['video_id']
extension = request_args['extension']
duration = request_args['duration']
else:
return 'Unable to read request'
print(playlist_url, video_id, extension)
ffmpeg_installer.install()
local_file = '/tmp/{}.{}'.format(video_id, extension)
destination_blob_name = '{}.{}'.format(video_id, extension)
if not gce_utils.check_blob(SOURCES_BUCKET, destination_blob_name):
if download_video_from_url(playlist_url, duration, local_file, extension):
gce_utils.upload_blob(SOURCES_BUCKET, local_file, destination_blob_name)
else:
print('Video already uploaded, skipping')
return 'FINISHED Processing source: {}'.format(video_id) | feature_engineering/cloud_functions/qoe_dataset_generator_http/main.py | import sys
import subprocess
import datetime
import os
import time
import numpy as np
sys.path.insert(0, 'imports')
from imports import ffmpeg_installer
from imports import gce_utils
CODEC_TO_USE = 'libx264'
PARAMETERS_BUCKET = 'livepeer-qoe-renditions-params'
SOURCES_BUCKET = 'livepeer-qoe-sources'
RENDITIONS_BUCKET = 'livepeer-crf-renditions'
ENTITY_NAME = 'features_input_QoE'
def download_video_from_url(video_url, duration, local_file, extension):
    """
    Download a ten-second excerpt from the middle of a remote video.

    Args:
        video_url: URL of the source (e.g. an HLS manifest) handed to ffmpeg.
        duration: total duration of the source in seconds (numeric or numeric string).
        local_file: destination path for the downloaded excerpt.
        extension: container format forwarded to ffmpeg's ``-f`` flag.

    Returns:
        True if ``local_file`` exists after the ffmpeg run, False otherwise.
    """
    local_folder = os.path.dirname(local_file)
    os.makedirs(local_folder, exist_ok=True)
    print('Downloading {} to {}'.format(video_url, local_file))
    # Grab a 10 second window starting at the midpoint of the asset.
    seek_time = str(datetime.timedelta(seconds=int(duration)/2))
    end_time = str(datetime.timedelta(seconds=(int(duration)/2)+10))
    print(seek_time)
    # Pass the arguments as a list with the default shell=False: video_url
    # arrives from an HTTP request, so interpolating it into a shell string
    # (the previous implementation) was a command-injection vector.
    ffmpeg_command = ['ffmpeg', '-y', '-i', video_url,
                      '-ss', seek_time, '-to', end_time,
                      '-vcodec', 'copy',
                      '-acodec', 'copy',
                      '-f', extension, local_file]
    result = subprocess.run(ffmpeg_command,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    print(' '.join(ffmpeg_command), result.stdout, result.stderr)
    if not os.path.exists(local_file):
        print('Unable to download {}'.format(local_file))
        return False
    return True
def compute_metrics(asset, renditions):
    '''
    Run the VideoAssetProcessor over one source asset and its renditions,
    then persist every row of the resulting metrics dataframe to the
    datastore entity ENTITY_NAME.

    The feature_list argument is left void as every descriptor of each
    temporal metric is potentially used for model training.
    '''
    # Imported lazily so the cloud function cold-starts without this dependency.
    from imports.video_asset_processor import VideoAssetProcessor
    start_time = time.time()
    max_samples = 30
    metrics_list = ['temporal_ssim',
                    'temporal_psnr',
                    'temporal_dct',
                    'temporal_gaussian_mse',
                    'temporal_gaussian_difference',
                    'temporal_threshold_gaussian_difference',
                    ]
    print('Computing asset: {}, max samples used: {}'.format(asset, max_samples))
    processor = VideoAssetProcessor(asset,
                                    renditions,
                                    metrics_list,
                                    False,
                                    max_samples,
                                    features_list=None)
    metrics_df, _, _ = processor.process()
    # Time-series columns are rounded and stringified before storage.
    series_columns = [col for col in metrics_df.columns if 'series' in col]
    for _, row in metrics_df.iterrows():
        entry = row.to_dict()
        for col in series_columns:
            entry[col] = np.array2string(np.around(entry[col], decimals=5))
        gce_utils.add_asset_input('{}/{}'.format(row['title'], row['attack']), entry, ENTITY_NAME)
    print('Computation time:', time.time() - start_time)
def dataset_generator_qoe_http(request):
    """HTTP Cloud Function.

    Downloads a source asset plus every available CRF rendition of it from
    GCS, computes quality metrics for the lot, then removes the local copies.

    Args:
        request (flask.Request): The request object, containing the name
            of the source asset (JSON body or query string, key ``name``)
    Returns:
        The response text, or any set of values that can be turned into a
        Response object using `make_response`
    """
    request_json = request.get_json(silent=True)
    request_args = request.args
    if request_json and 'name' in request_json:
        source_name = request_json['name']
    elif request_args and 'name' in request_args:
        source_name = request_args['name']
    else:
        # Previously source_name was simply left unbound here, so the function
        # crashed with a NameError a few lines below; fail fast instead.
        return 'Unable to read request'
    # Create the folder for the source asset
    source_folder = '/tmp/{}'.format(os.path.dirname(source_name))
    os.makedirs(source_folder, exist_ok=True)
    # Local path mirroring the object that was uploaded to GCS
    asset_path = {'path': '{}{}'.format(source_folder,
                                        source_name.replace(os.path.dirname(source_name), ''))}
    renditions_paths = []
    # Download the source only if a previous invocation has not cached it
    if not os.path.exists(asset_path['path']):
        gce_utils.download_to_local(SOURCES_BUCKET, asset_path['path'], source_name)
    # Bring the attacks to be processed locally
    resolutions = [1080, 720, 480, 384, 288, 144]
    crfs = [45, 40, 32, 25, 21, 18, 14]
    # Create a comprehension list with all the possible attacks
    rendition_list = ['{}_{}'.format(resolution, crf)
                      for resolution in resolutions
                      for crf in crfs
                      ]
    for rendition in rendition_list:
        remote_file = '{}/{}'.format(rendition, source_name)
        rendition_folder = '/tmp/{}'.format(rendition)
        local_path = '{}/{}'.format(rendition_folder, source_name)
        try:
            gce_utils.download_to_local(RENDITIONS_BUCKET,
                                        local_path,
                                        remote_file)
            renditions_paths.append({'path': local_path})
        except Exception as err:
            # Best effort: a missing rendition should not abort the whole run.
            print('Unable to download {}/{}: {}'.format(rendition, source_name, err))
    if renditions_paths:
        print('Processing the following renditions: {}'.format(renditions_paths))
        compute_metrics(asset_path, renditions_paths)
    else:
        print('Empty renditions list. No renditions to process')
    # Cleanup the /tmp scratch space (shared across warm invocations)
    if os.path.exists(asset_path['path']):
        os.remove(asset_path['path'])
    for rendition in rendition_list:
        rendition_folder = '/tmp/{}'.format(rendition)
        local_path = '{}/{}'.format(rendition_folder, source_name)
        if os.path.exists(local_path):
            os.remove(local_path)
    return 'Process completed: {}'.format(asset_path['path'])
def trigger_renditions_bucket_event(data, context):
    """Background Cloud Function to be triggered by Cloud Storage.

    For every (resolution, CRF) combination, drops an empty marker .json file
    into PARAMETERS_BUCKET; each upload in turn triggers
    create_renditions_bucket_event for that combination.

    Args:
        data (dict): The Cloud Functions event payload (``name`` is the uploaded object).
        context (google.cloud.functions.Context): Metadata of triggering event.
    Returns:
        None, the renditions cloud function are triggered asynchronously
    """
    name = data['name']
    # Create the folder for the marker files
    params_folder = '/tmp/{}'.format(os.path.dirname(name))
    os.makedirs(params_folder, exist_ok=True)
    resolutions = [1080, 720, 480, 384, 288, 144]
    crfs = [45, 40, 32, 25, 21, 18, 14]
    for resolution in resolutions:
        for crf in crfs:
            # NOTE(review): for a top-level object (empty dirname) the replace()
            # is a no-op and this yields '/tmp/{name}-{res}-{crf}.json'; for
            # nested names it produces '/tmp/...' with the dirname stripped —
            # looks intentional but verify against the uploader's naming scheme.
            local_file = '{}/{}-{}-{}.json'.format(params_folder.replace(os.path.dirname(name), ''),
                                                   name,
                                                   resolution,
                                                   crf)
            remote_file = '{}/{}-{}.json'.format(name,
                                                 resolution,
                                                 crf)
            # Only the object's existence matters downstream, so just touch it.
            # (Context manager replaces the previous unclosed-on-error open/close pair.)
            with open(local_file, "w"):
                pass
            gce_utils.upload_blob(PARAMETERS_BUCKET, local_file, remote_file)
    return 'Renditions triggered for {}'.format(name)
def create_renditions_bucket_event(data, context):
    """
    HTTP Cloud Function to generate video assets. Triggered by files
    deposited in PARAMETERS_BUCKET
    Args:
        data: The triggering object, containing name, resolution and quantization parameter
    Returns:
        The status message if successful
    """
    # data['name'] is expected to look like '<source_name>/<resolution>-<crf>.json',
    # as written by trigger_renditions_bucket_event.
    source_name = os.path.dirname(data['name'])
    # params_name keeps the leading '/' (e.g. '/1080-45.json'); the [1:] slice
    # below drops it from the resolution field.
    params_name = data['name'].replace(source_name, '')
    resolution = params_name.split('-')[0][1:]
    crf_value = params_name.split('-')[1].replace('.json', '')
    print('Processing source: {} at resolution {}'.format(source_name, resolution))
    # Locate the ffmpeg binary
    ffmpeg_installer.install()
    # Create the folder for the source asset
    # NOTE(review): unlike renditions_folder below, this directory is never
    # created here — presumably download_to_local creates parents; confirm.
    source_folder = '/tmp/source'
    # Create the folder for the renditions
    renditions_folder = '/tmp/renditions'
    if not os.path.exists(renditions_folder):
        os.makedirs(renditions_folder)
    # Get the file that has been uploaded to GCS
    asset_path = {'path': '{}/{}'.format(source_folder, source_name)}
    # Check if the source is not already in the path (may be warm from a
    # previous invocation of this function instance)
    if not os.path.exists(asset_path['path']):
        print('Retrieving video from {}'.format(asset_path['path']))
        gce_utils.download_to_local(SOURCES_BUCKET, asset_path['path'], source_name)
    print('Processing resolution', resolution)
    # Skip work entirely if this rendition already exists in the bucket
    bucket_path = '{}_{}/{}'.format(resolution, crf_value, source_name)
    print('Bucket path:', bucket_path)
    if not gce_utils.check_blob(RENDITIONS_BUCKET, bucket_path):
        # Create folder for this rendition's output
        crf_path = '{}/{}_{}/{}'.format(renditions_folder,
                                        resolution,
                                        crf_value,
                                        os.path.dirname(source_name))
        if not os.path.exists(crf_path):
            print('Creating rendition folder:', crf_path)
            os.makedirs(crf_path)
        # Generate renditions with ffmpeg
        renditions_worker(asset_path['path'],
                          source_folder,
                          CODEC_TO_USE,
                          resolution,
                          crf_value,
                          renditions_folder)
        #compute_metrics(asset_path, renditions_paths)
        # Upload renditions to GCE storage bucket, then drop the local copy
        local_path = '{}/{}_{}/{}'.format(renditions_folder, resolution, crf_value, source_name)
        bucket_path = '{}_{}/{}'.format(resolution, crf_value, source_name)
        gce_utils.upload_blob(RENDITIONS_BUCKET, local_path, bucket_path)
        os.remove(local_path)
    return 'FINISHED Processing source: {} at resolution {}'.format(source_name, resolution)
def renditions_worker(full_input_file, source_folder, codec, resolution, crf_value, output_folder):
    """
    Encode one rendition of the source by running ffmpeg through a shell pipe.

    The command is assembled as a single shell string (shell=True), which is
    why the input and output paths are wrapped in literal double quotes.
    """
    print('processing {}'.format(full_input_file))
    # Path of the source relative to its folder; reused to name the output.
    relative_source = full_input_file.replace('{}/'.format(source_folder), '')
    rendition_path = '"{}/{}_{}/{}"'.format(output_folder, resolution, crf_value, relative_source)
    command_parts = [
        'ffmpeg', '-y', '-i', '"{}"'.format(full_input_file),
        '-an',
        '-c:v', codec,
        '-copyts',
        '-vsync 0',
        '-copytb 1',
        '-enc_time_base -1',
        '-crf {}'.format(crf_value),
        '-vf scale=-2:{}'.format(resolution),
        rendition_path,
    ]
    shell_command = ' '.join(command_parts)
    process = subprocess.Popen(shell_command,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               shell=True)
    stdout_data, stderr_data = process.communicate()
    print(shell_command, stdout_data, stderr_data)
def create_source_http(request):
    """HTTP Cloud Function.
    Args:
        request (flask.Request): The request object.
        <http://flask.pocoo.org/docs/1.0/api/#flask.Request>
    Returns:
        The status message if successful
    """
    request_json = request.get_json(silent=True)
    request_args = request.args
    # Accept parameters from either the JSON body or the query string; the
    # two branches previously duplicated the same four lookups.
    params = request_json if request_json else request_args
    if not params:
        return 'Unable to read request'
    playlist_url = params['playlist_url']
    video_id = params['video_id']
    extension = params['extension']
    duration = params['duration']
    print(playlist_url, video_id, extension)
    # Locate the ffmpeg binary
    ffmpeg_installer.install()
    local_file = '/tmp/{}.{}'.format(video_id, extension)
    destination_blob_name = '{}.{}'.format(video_id, extension)
    # Skip the download if the source was already uploaded to the bucket.
    if not gce_utils.check_blob(SOURCES_BUCKET, destination_blob_name):
        if download_video_from_url(playlist_url, duration, local_file, extension):
            gce_utils.upload_blob(SOURCES_BUCKET, local_file, destination_blob_name)
    else:
        print('Video already uploaded, skipping')
    # FIX: the return statement had dataset-extraction residue fused onto it
    # ('| 0.582254 | 0.126947'), corrupting the returned value.
    return 'FINISHED Processing source: {}'.format(video_id)
# 0.582254 | 0.126947
"""Contains Loss Scale Gradient Tape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import backprop
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.util import nest
# TODO(reedwm): Expose this. Currently it doesn't work with DistributionStrategy
class LossScaleGradientTape(backprop.GradientTape):
  """A gradient tape that scales losses and unscales resulting gradients.

  Operates as a normal gradient tape, but takes in a
  `tf.mixed_precision.experimental.LossScale` object. Losses are scaled up by
  some amount before the gradients are calculated and the resulting gradients
  are scaled down by the same amount.

  This has no net mathematical effect, but can be used to prevent vanishing
  gradients, for example in the case of mixed precision training.

  If a DynamicLossScale object is used and non-finite gradients are encountered,
  the loss scale will be updated and the gradients recomputed until either
  finite gradients are encountered or the loss scale becomes 1.

  This class should *not* be used with a LossScaleOptimizer, as both classes
  update the LossScale object. Use a non-loss scaling optimizer instead.

  Usage:
  ```
  opt = tf.keras.optimizers.SGD(1.0)
  model_loss_scale = tf.mixed_precision.experimental.DynamicLossScale()
  for step in training_steps:
    with LossScaleGradientTape(model_loss_scale) as tape:
      logits = ...  # Run model and get logits
      loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                     labels=labels)
      loss = tf.reduce_mean(loss)
    vars = tape.watched_variables()
    grads = tape.gradient(loss, vars)
    opt.apply_gradients(zip(grads, vars))
  ```

  WARNING: Computing second-order (or higher) gradients with a
  `LossScaleGradientTape` does not yet work properly when a
  `tf.distribute.Strategy` is used. Computing second-order gradients will return
  None instead of the gradient tensors. This only occurs when you nest multiple
  gradient tapes under each other; if you do not nest them, this issue will not
  occur.
  """

  def __init__(self,
               loss_scale,
               persistent=False,
               watch_accessed_variables=True):
    """Creates a new LossScaleGradientTape.

    Args:
      loss_scale: `tf.mixed_precision.experimental.LossScale` object that
        manages what quantity to scale by. This is typically either a
        FixedLossScale object with a constant scalar or a
        `tf.mixed_precision.experimental.DynamicLossScale` object that will
        adjust the scalar appropriately if any non-finite gradients are
        encountered.
      persistent: Boolean controlling whether a persistent gradient tape is
        created. False by default, which means at most one call can be made to
        the gradient() method on this object.
      watch_accessed_variables: Boolean controlling whether the tape will
        automatically `watch` any (trainable) variables accessed while the tape
        is active. Defaults to True meaning gradients can be requested from any
        result computed in the tape derived from reading a trainable `Variable`.
        If False users must explicitly `watch` any `Variable`s they want to
        request gradients from.
    """
    if not isinstance(loss_scale, loss_scale_module.LossScale):
      raise ValueError("`loss_scale` must be an instance of LossScale.")
    # Always make the underlying tape persistent: with a DynamicLossScale the
    # gradients may have to be recomputed several times inside gradient(), so
    # the tape must survive more than one internal gradient call.
    super(LossScaleGradientTape, self).__init__(True,
                                                watch_accessed_variables)
    # The caller's notion of persistence; honoured in gradient() by freeing
    # the tape after the first call when False.
    self._outer_persistent = persistent
    self._loss_scale = loss_scale

  def gradient(self,
               target,
               sources,
               output_gradients=None,
               unconnected_gradients=UnconnectedGradients.NONE):
    """Computes the gradient using operations recorded in context of this tape.

    Uses the `LossScale` object provided in the constructor to scale `target`
    and then to unscale the resulting gradients.

    Args:
      target: a list or nested structure of Tensors or Variables to be
        differentiated.
      sources: a list or nested structure of Tensors or Variables. `target`
        will be differentiated against elements in `sources`.
      output_gradients: a list of gradients, one for each element of target.
        Defaults to None.
      unconnected_gradients: a value which can either hold 'none' or 'zero' and
        alters the value which will be returned if the target and sources are
        unconnected. The possible values and effects are detailed in
        'UnconnectedGradients' and it defaults to 'none'.

    Returns:
      a list or nested structure of Tensors (or IndexedSlices, or None),
      one for each element in `sources`. Returned structure is the same as
      the structure of `sources`. If non-finite gradients are encountered
      after dynamic scaling, the loss scale will be updated and the gradients
      recomputed until either finite gradients are encountered or the loss
      scale becomes 1.

    Raises:
      RuntimeError: if called inside the context of the tape, or if called more
        than once on a non-persistent tape.
      ValueError: if the target is a variable or if unconnected gradients is
        called with an unknown value.
    """
    # A freed tape (non-persistent and already used) cannot be reused.
    if self._tape is None:  # pylint: disable=access-member-before-definition
      raise RuntimeError("GradientTape.gradient can only be called once on "
                         "non-persistent tapes.")
    if distribution_strategy_context.in_cross_replica_context():
      raise ValueError("LossScaleGradientTape.gradient() must be called in a "
                       "replica context.")
    # Note: DistributionStrategy does not support running a while loop in a
    # replica context. So, we call `_compute_gradients_until_finite` in a
    # cross-replica context.
    replica_context = distribution_strategy_context.get_replica_context()
    grads = replica_context.merge_call(
        _compute_gradients_until_finite,
        args=(self, self._loss_scale, target, sources, output_gradients,
              unconnected_gradients))
    if not self._outer_persistent:
      self._tape = None  # free up resources if a persistent tape was not needed
    return grads
def _compute_gradients_until_finite(
    distribution, loss_scale_gradient_tapes, loss_scale, target, sources,
    output_gradients, unconnected_gradients):
  """Compute gradients and update the loss scale until the gradients are finite.

  This must be called in a cross-replica context.

  This is a function instead of a method of LossScaleGradientTape, as the
  `self` parameter would be meaningless. There is one LossScaleGradientTape per
  replica, but this function is called once total (not per replica), so there
  cannot be a singular `self` parameter.

  Args:
    distribution: The distribution strategy in effect.
    loss_scale_gradient_tapes: A PerReplica value of LossScaleGradientTapes.
      Contains the LossScaleGradientTape of each replica.
    loss_scale: The loss scale to use to scale the loss and unscale the
      gradient.
    target: a list or nested structure of Tensors or Variables to be
      differentiated.
    sources: a list or nested structure of Tensors or Variables. `target` will
      be differentiated against elements in `sources`.
    output_gradients: Passed to GradientTape.gradient
    unconnected_gradients: Pass to GradientTape.gradient.

  Returns:
    The gradients of `target` with respect to `sources`.
  """
  # Autograph cannot convert this function, so we must use an explicit
  # tf.while_loop.
  # TODO(b/143572314): Fix Autograph so that it can convert this function, then
  # replace the tf.while_loop with a Python while loop.

  def cond(grads, ready_to_update):
    """The condition of the while loop."""
    del grads
    # Equivalent to: `not ready_to_update and loss_scale() > 1`
    return math_ops.logical_and(math_ops.logical_not(ready_to_update),
                                math_ops.greater(loss_scale(), 1))

  def body(grads, ready_to_update):
    """The body of the while loop."""
    del grads, ready_to_update

    def replica_fn(gradient_tape, target, sources, output_gradients):
      """Scales the loss, computes the gradients, and unscales the gradients."""
      loss_scale_val = loss_scale()
      with gradient_tape:  # re-enter gradient tape so it sees the loss scaling
        scaled_target = nest.map_structure(lambda t: t * loss_scale_val, target)
      old_grads = super(LossScaleGradientTape, gradient_tape).gradient(
          scaled_target, sources, output_gradients, unconnected_gradients)
      inv_loss_scale = 1.0 / loss_scale_val
      grads = nest.map_structure(lambda g: inv_loss_scale * g, old_grads)
      return grads

    # Switch to a replica-context to compute gradients once per replica.
    grads = distribution.experimental_run_v2(
        replica_fn, args=(loss_scale_gradient_tapes, target, sources,
                          output_gradients))
    # Check for non-finite gradients possibly resulting from scaling
    _, ready_to_update = loss_scale.update(grads)
    return grads, ready_to_update

  # Dummy value for initial_grads. The first iteration of the loop will
  # overwrite `grads` to the actual gradients.
  initial_grads = sources
  initial_ready_to_update = False
  # FIX: the final return statement had dataset-extraction residue fused onto
  # it; the swallowed module docstring of the following file copy is restored
  # below the function.
  grads, _ = control_flow_ops.while_loop(
      cond, body, [initial_grads, initial_ready_to_update])
  return grads
# tensorflow/python/training/experimental/loss_scaling_gradient_tape.py
"""Contains Loss Scale Gradient Tape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import backprop
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.unconnected_gradients import UnconnectedGradients
from tensorflow.python.training.experimental import loss_scale as loss_scale_module
from tensorflow.python.util import nest
# TODO(reedwm): Expose this. Currently it doesn't work with DistributionStrategy
class LossScaleGradientTape(backprop.GradientTape):
  """A gradient tape that scales losses and unscales resulting gradients.

  Operates as a normal gradient tape, but takes in a
  `tf.mixed_precision.experimental.LossScale` object. Losses are scaled up by
  some amount before the gradients are calculated and the resulting gradients
  are scaled down by the same amount.

  This has no net mathematical effect, but can be used to prevent vanishing
  gradients, for example in the case of mixed precision training.

  If a DynamicLossScale object is used and non-finite gradients are encountered,
  the loss scale will be updated and the gradients recomputed until either
  finite gradients are encountered or the loss scale becomes 1.

  This class should *not* be used with a LossScaleOptimizer, as both classes
  update the LossScale object. Use a non-loss scaling optimizer instead.

  Usage:
  ```
  opt = tf.keras.optimizers.SGD(1.0)
  model_loss_scale = tf.mixed_precision.experimental.DynamicLossScale()
  for step in training_steps:
    with LossScaleGradientTape(model_loss_scale) as tape:
      logits = ...  # Run model and get logits
      loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                     labels=labels)
      loss = tf.reduce_mean(loss)
    vars = tape.watched_variables()
    grads = tape.gradient(loss, vars)
    opt.apply_gradients(zip(grads, vars))
  ```

  WARNING: Computing second-order (or higher) gradients with a
  `LossScaleGradientTape` does not yet work properly when a
  `tf.distribute.Strategy` is used. Computing second-order gradients will return
  None instead of the gradient tensors. This only occurs when you nest multiple
  gradient tapes under each other; if you do not nest them, this issue will not
  occur.
  """

  def __init__(self,
               loss_scale,
               persistent=False,
               watch_accessed_variables=True):
    """Creates a new LossScaleGradientTape.

    Args:
      loss_scale: `tf.mixed_precision.experimental.LossScale` object that
        manages what quantity to scale by. This is typically either a
        FixedLossScale object with a constant scalar or a
        `tf.mixed_precision.experimental.DynamicLossScale` object that will
        adjust the scalar appropriately if any non-finite gradients are
        encountered.
      persistent: Boolean controlling whether a persistent gradient tape is
        created. False by default, which means at most one call can be made to
        the gradient() method on this object.
      watch_accessed_variables: Boolean controlling whether the tape will
        automatically `watch` any (trainable) variables accessed while the tape
        is active. Defaults to True meaning gradients can be requested from any
        result computed in the tape derived from reading a trainable `Variable`.
        If False users must explicitly `watch` any `Variable`s they want to
        request gradients from.
    """
    if not isinstance(loss_scale, loss_scale_module.LossScale):
      raise ValueError("`loss_scale` must be an instance of LossScale.")
    # Always make the underlying tape persistent: with a DynamicLossScale the
    # gradients may have to be recomputed several times inside gradient(), so
    # the tape must survive more than one internal gradient call.
    super(LossScaleGradientTape, self).__init__(True,
                                                watch_accessed_variables)
    # The caller's notion of persistence; honoured in gradient() by freeing
    # the tape after the first call when False.
    self._outer_persistent = persistent
    self._loss_scale = loss_scale

  def gradient(self,
               target,
               sources,
               output_gradients=None,
               unconnected_gradients=UnconnectedGradients.NONE):
    """Computes the gradient using operations recorded in context of this tape.

    Uses the `LossScale` object provided in the constructor to scale `target`
    and then to unscale the resulting gradients.

    Args:
      target: a list or nested structure of Tensors or Variables to be
        differentiated.
      sources: a list or nested structure of Tensors or Variables. `target`
        will be differentiated against elements in `sources`.
      output_gradients: a list of gradients, one for each element of target.
        Defaults to None.
      unconnected_gradients: a value which can either hold 'none' or 'zero' and
        alters the value which will be returned if the target and sources are
        unconnected. The possible values and effects are detailed in
        'UnconnectedGradients' and it defaults to 'none'.

    Returns:
      a list or nested structure of Tensors (or IndexedSlices, or None),
      one for each element in `sources`. Returned structure is the same as
      the structure of `sources`. If non-finite gradients are encountered
      after dynamic scaling, the loss scale will be updated and the gradients
      recomputed until either finite gradients are encountered or the loss
      scale becomes 1.

    Raises:
      RuntimeError: if called inside the context of the tape, or if called more
        than once on a non-persistent tape.
      ValueError: if the target is a variable or if unconnected gradients is
        called with an unknown value.
    """
    # A freed tape (non-persistent and already used) cannot be reused.
    if self._tape is None:  # pylint: disable=access-member-before-definition
      raise RuntimeError("GradientTape.gradient can only be called once on "
                         "non-persistent tapes.")
    if distribution_strategy_context.in_cross_replica_context():
      raise ValueError("LossScaleGradientTape.gradient() must be called in a "
                       "replica context.")
    # Note: DistributionStrategy does not support running a while loop in a
    # replica context. So, we call `_compute_gradients_until_finite` in a
    # cross-replica context.
    replica_context = distribution_strategy_context.get_replica_context()
    grads = replica_context.merge_call(
        _compute_gradients_until_finite,
        args=(self, self._loss_scale, target, sources, output_gradients,
              unconnected_gradients))
    if not self._outer_persistent:
      self._tape = None  # free up resources if a persistent tape was not needed
    return grads
def _compute_gradients_until_finite(
    distribution, loss_scale_gradient_tapes, loss_scale, target, sources,
    output_gradients, unconnected_gradients):
  """Compute gradients and update the loss scale until the gradients are finite.

  This must be called in a cross-replica context.

  This is a function instead of a method of LossScaleGradientTape, as the
  `self` parameter would be meaningless. There is one LossScaleGradientTape per
  replica, but this function is called once total (not per replica), so there
  cannot be a singular `self` parameter.

  Args:
    distribution: The distribution strategy in effect.
    loss_scale_gradient_tapes: A PerReplica value of LossScaleGradientTapes.
      Contains the LossScaleGradientTape of each replica.
    loss_scale: The loss scale to use to scale the loss and unscale the
      gradient.
    target: a list or nested structure of Tensors or Variables to be
      differentiated.
    sources: a list or nested structure of Tensors or Variables. `target` will
      be differentiated against elements in `sources`.
    output_gradients: Passed to GradientTape.gradient
    unconnected_gradients: Pass to GradientTape.gradient.

  Returns:
    The gradients of `target` with respect to `sources`.
  """
  # Autograph cannot convert this function, so we must use an explicit
  # tf.while_loop.
  # TODO(b/143572314): Fix Autograph so that it can convert this function, then
  # replace the tf.while_loop with a Python while loop.

  def cond(grads, ready_to_update):
    """The condition of the while loop."""
    del grads
    # Equivalent to: `not ready_to_update and loss_scale() > 1`
    return math_ops.logical_and(math_ops.logical_not(ready_to_update),
                                math_ops.greater(loss_scale(), 1))

  def body(grads, ready_to_update):
    """The body of the while loop."""
    del grads, ready_to_update

    def replica_fn(gradient_tape, target, sources, output_gradients):
      """Scales the loss, computes the gradients, and unscales the gradients."""
      loss_scale_val = loss_scale()
      with gradient_tape:  # re-enter gradient tape so it sees the loss scaling
        scaled_target = nest.map_structure(lambda t: t * loss_scale_val, target)
      old_grads = super(LossScaleGradientTape, gradient_tape).gradient(
          scaled_target, sources, output_gradients, unconnected_gradients)
      inv_loss_scale = 1.0 / loss_scale_val
      grads = nest.map_structure(lambda g: inv_loss_scale * g, old_grads)
      return grads

    # Switch to a replica-context to compute gradients once per replica.
    grads = distribution.experimental_run_v2(
        replica_fn, args=(loss_scale_gradient_tapes, target, sources,
                          output_gradients))
    # Check for non-finite gradients possibly resulting from scaling
    _, ready_to_update = loss_scale.update(grads)
    return grads, ready_to_update

  # Dummy value for initial_grads. The first iteration of the loop will
  # overwrite `grads` to the actual gradients.
  initial_grads = sources
  initial_ready_to_update = False
  # FIX: the final return statement had dataset-extraction residue fused onto
  # it ('| 0.903342 | 0.763484'); preserved below as a comment.
  grads, _ = control_flow_ops.while_loop(
      cond, body, [initial_grads, initial_ready_to_update])
  return grads
# 0.903342 | 0.763484
from collections import OrderedDict
from devito.ir.iet import (Iteration, List, IterationTree, FindSections, FindSymbols,
FindNodes, Section, Expression)
from devito.symbolics import Macro
from devito.tools import flatten, ReducerMap
from devito.types import Array, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree',
'compose_nodes', 'derive_parameters', 'find_affine_trees']
def retrieve_iteration_tree(node, mode='normal'):
    """
    Return all :class:`Iteration` sub-trees within an IET.

    Examples
    --------
    Given the Iteration tree:

    .. code-block:: c

        Iteration i
          expr0
          Iteration j
            Iteration k
              expr1
          Iteration p
            expr2

    Return the list: ::

        [(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]

    Parameters
    ----------
    node : Node
        The searched Iteration/Expression tree.
    mode : str, optional
        - ``normal``
        - ``superset``: Iteration trees that are subset of larger iteration
          trees are dropped.
    """
    assert mode in ('normal', 'superset')
    trees = [IterationTree(i) for i in FindSections().visit(node) if i]
    if mode == 'normal':
        return trees
    # 'superset' mode: keep only the maximal trees
    maximal = [t for t in trees
               if not any(set(t).issubset(set(u)) for u in trees if u != t)]
    return IterationTree(maximal)
def filter_iterations(tree, key=lambda i: i, stop=lambda: False):
    """
    Return the items of ``tree`` (an iterable of :class:`Iteration`s) for
    which ``key(iteration)`` is True.

    ``stop`` controls early termination. It may be a callable implementing a
    custom stop criterium (checked after each item), or one of two keywords:

        * 'any': stop as soon as at least one item has been collected;
        * 'asap': stop once at least one item has been collected and every
          item failing ``key`` has been encountered.

    Specifying a ``stop`` criterium is useful when searching for the first
    Iteration in an Iteration/Expression tree that does not honour a given
    property.
    """
    assert callable(stop) or stop in ['any', 'asap']
    items = list(tree)
    matching = []
    rejected = []
    if stop == 'any':
        should_stop = lambda: len(matching) > 0
    elif stop == 'asap':
        failing = [i for i in items if not key(i)]
        should_stop = lambda: len(matching) > 0 and len(rejected) == len(failing)
    else:
        should_stop = stop
    for item in items:
        (matching if key(item) else rejected).append(item)
        if should_stop():
            break
    return matching
def compose_nodes(nodes, retrieve=False):
    """
    Build an IET by nesting ``nodes``.

    If ``retrieve`` is True, also return the list of intermediate nested
    nodes, outermost last-built first reversed to outermost-first order.
    """
    remaining = list(nodes)
    nested = []
    if isinstance(remaining[0], Iteration):
        # Nest the Iterations innermost-out, rebuilding each around the
        # previously composed body.
        body = remaining.pop(-1)
        while remaining:
            outer = remaining.pop(-1)
            body = outer._rebuild(body, **outer.args_frozen)
            nested.append(body)
    else:
        # Nothing to compose
        flat = flatten(remaining)
        body = List(body=flat) if len(flat) > 1 else flat[0]
    if retrieve is True:
        return body, list(reversed(nested))
    return body
def derive_parameters(nodes, drop_locals=False):
    """
    Derive all input parameters (function call arguments) from an IET
    by collecting all symbols not defined in the tree itself.
    """
    # Symbolic functions first, then any free symbol that does not merely
    # shadow a function base name
    functions = FindSymbols('symbolics').visit(nodes)
    taken = {f.name for f in functions}
    candidates = functions + [s for s in FindSymbols('free-symbols').visit(nodes)
                              if s.name not in taken]
    # Discard anything defined within the tree itself
    defined = {s.name for s in FindSymbols('defines').visit(nodes)}
    parameters = [c for c in candidates if c.name not in defined]
    # Globally-visible objects need not be passed in
    parameters = [p for p in parameters if not isinstance(p, Macro)]
    if drop_locals:
        # Locally-allocated Arrays and Objects are not parameters either
        parameters = [p for p in parameters
                      if not (isinstance(p, Array) and (p._mem_heap or p._mem_stack))]
        parameters = [p for p in parameters if not isinstance(p, LocalObject)]
    return parameters
def find_affine_trees(iet):
    """
    Find affine trees. A tree is affine when all of the array accesses are
    constant/affine functions of the Iteration variables and the Iteration
    bounds are fixed (but possibly symbolic).

    Parameters
    ----------
    iet : `Node`
        The searched tree

    Returns
    -------
    OrderedDict
        Maps each Section to the list of affine trees found within it.
        (The previous docstring claimed a list was returned; the code has
        always returned an OrderedDict.)
    """
    affine = OrderedDict()
    # Only trees rooted at a time Iteration are considered
    roots = [i for i in FindNodes(Iteration).visit(iet) if i.dim.is_Time]
    for root in roots:
        sections = FindNodes(Section).visit(root)
        for section in sections:
            for tree in retrieve_iteration_tree(section):
                if not all(i.is_Affine for i in tree):
                    # Non-affine array accesses not supported
                    break
                exprs = [i.expr for i in FindNodes(Expression).visit(tree.root)]
                grid = ReducerMap([('', i.grid) for i in exprs if i.grid]).unique('')
                writeto_dimensions = tuple(i.dim.root for i in tree)
                if grid.dimensions == writeto_dimensions:
                    affine.setdefault(section, []).append(tree)
                else:
                    break
    # FIX: the return statement had dataset-extraction residue fused onto it;
    # the swallowed first import of the following file copy is restored below.
    return affine
# devito/ir/iet/utils.py
from collections import OrderedDict
from devito.ir.iet import (Iteration, List, IterationTree, FindSections, FindSymbols,
FindNodes, Section, Expression)
from devito.symbolics import Macro
from devito.tools import flatten, ReducerMap
from devito.types import Array, LocalObject
__all__ = ['filter_iterations', 'retrieve_iteration_tree',
'compose_nodes', 'derive_parameters', 'find_affine_trees']
def retrieve_iteration_tree(node, mode='normal'):
    """
    Return all :class:`Iteration` sub-trees within an IET.

    Examples
    --------
    Given the Iteration tree:

    .. code-block:: c

        Iteration i
          expr0
          Iteration j
            Iteration k
              expr1
          Iteration p
            expr2

    Return the list: ::

        [(Iteration i, Iteration j, Iteration k), (Iteration i, Iteration p)]

    Parameters
    ----------
    node : Node
        The searched Iteration/Expression tree.
    mode : str, optional
        - ``normal``
        - ``superset``: Iteration trees that are subset of larger iteration
          trees are dropped.
    """
    assert mode in ('normal', 'superset')
    trees = [IterationTree(i) for i in FindSections().visit(node) if i]
    if mode == 'normal':
        return trees
    # 'superset' mode: keep only the maximal trees
    maximal = [t for t in trees
               if not any(set(t).issubset(set(u)) for u in trees if u != t)]
    return IterationTree(maximal)
def filter_iterations(tree, key=lambda i: i, stop=lambda: False):
    """
    Return the items of ``tree`` (an iterable of :class:`Iteration`s) for
    which ``key(iteration)`` is True.

    ``stop`` controls early termination. It may be a callable implementing a
    custom stop criterium (checked after each item), or one of two keywords:

        * 'any': stop as soon as at least one item has been collected;
        * 'asap': stop once at least one item has been collected and every
          item failing ``key`` has been encountered.

    Specifying a ``stop`` criterium is useful when searching for the first
    Iteration in an Iteration/Expression tree that does not honour a given
    property.
    """
    assert callable(stop) or stop in ['any', 'asap']
    items = list(tree)
    matching = []
    rejected = []
    if stop == 'any':
        should_stop = lambda: len(matching) > 0
    elif stop == 'asap':
        failing = [i for i in items if not key(i)]
        should_stop = lambda: len(matching) > 0 and len(rejected) == len(failing)
    else:
        should_stop = stop
    for item in items:
        (matching if key(item) else rejected).append(item)
        if should_stop():
            break
    return matching
def compose_nodes(nodes, retrieve=False):
    """
    Build an IET by nesting ``nodes``.

    If ``retrieve`` is True, also return the list of intermediate nested
    nodes, in outermost-first order.
    """
    remaining = list(nodes)
    nested = []
    if isinstance(remaining[0], Iteration):
        # Nest the Iterations innermost-out, rebuilding each around the
        # previously composed body.
        body = remaining.pop(-1)
        while remaining:
            outer = remaining.pop(-1)
            body = outer._rebuild(body, **outer.args_frozen)
            nested.append(body)
    else:
        # Nothing to compose
        flat = flatten(remaining)
        body = List(body=flat) if len(flat) > 1 else flat[0]
    if retrieve is True:
        return body, list(reversed(nested))
    return body
def derive_parameters(nodes, drop_locals=False):
    """
    Derive all input parameters (function call arguments) from an IET
    by collecting all symbols not defined in the tree itself.
    """
    # Symbolic functions first, then any free symbol that does not merely
    # shadow a function base name
    functions = FindSymbols('symbolics').visit(nodes)
    taken = {f.name for f in functions}
    candidates = functions + [s for s in FindSymbols('free-symbols').visit(nodes)
                              if s.name not in taken]
    # Discard anything defined within the tree itself
    defined = {s.name for s in FindSymbols('defines').visit(nodes)}
    parameters = [c for c in candidates if c.name not in defined]
    # Globally-visible objects need not be passed in
    parameters = [p for p in parameters if not isinstance(p, Macro)]
    if drop_locals:
        # Locally-allocated Arrays and Objects are not parameters either
        parameters = [p for p in parameters
                      if not (isinstance(p, Array) and (p._mem_heap or p._mem_stack))]
        parameters = [p for p in parameters if not isinstance(p, LocalObject)]
    return parameters
def find_affine_trees(iet):
    """
    Find affine trees. A tree is affine when all of the array accesses are
    constant/affine functions of the Iteration variables and the Iteration
    bounds are fixed (but possibly symbolic).

    Parameters
    ----------
    iet : `Node`
        The searched tree

    Returns
    -------
    OrderedDict
        Maps each Section to the list of affine trees found within it.
        (The previous docstring claimed a list was returned; the code has
        always returned an OrderedDict.)
    """
    affine = OrderedDict()
    # Only trees rooted at a time Iteration are considered
    roots = [i for i in FindNodes(Iteration).visit(iet) if i.dim.is_Time]
    for root in roots:
        sections = FindNodes(Section).visit(root)
        for section in sections:
            for tree in retrieve_iteration_tree(section):
                if not all(i.is_Affine for i in tree):
                    # Non-affine array accesses not supported
                    break
                exprs = [i.expr for i in FindNodes(Expression).visit(tree.root)]
                grid = ReducerMap([('', i.grid) for i in exprs if i.grid]).unique('')
                writeto_dimensions = tuple(i.dim.root for i in tree)
                if grid.dimensions == writeto_dimensions:
                    affine.setdefault(section, []).append(tree)
                else:
                    break
    # FIX: the return statement had dataset-extraction residue fused onto it
    # ('| 0.91331 | 0.592814'); preserved below as a comment.
    return affine
# 0.91331 | 0.592814
import csv
import os
def csv_iterator(filename):
    """Yield (row_number, row_cells) pairs for each row of the CSV file
    ``filename``.

    BUG FIX: the ``filename`` argument was previously ignored and
    'tests/test.csv' was opened unconditionally.
    """
    with open(filename, 'r', newline='', encoding='utf-8') as fin:
        yield from enumerate(csv.reader(fin))
def is_line_empty(line):
    ''' Is the line a comment, or consists solely of empty cells? '''
    if not line:
        return True
    first_cell = line[0].strip()
    if first_cell.startswith("#"):
        # A '#' in the first cell marks the whole row as a comment
        return True
    return all(not cell.strip() for cell in line)
def parse_csv(filename, syntax_errors):
    """
    Parse ``filename`` into Stanzas, appending any syntax errors to the
    caller-supplied ``syntax_errors`` list.

    BUG FIX: ``syntax_errors`` was previously rebound to a fresh list at the
    top of the function, disconnecting it from the caller's list, which
    therefore never saw any errors.

    Returns:
        The list of parsed Stanza objects (previously nothing was returned).
    """
    stanzas = []
    parse_mode = "TABLE"
    basename = os.path.basename(filename)
    for rownum, tokens in csv_iterator(filename):
        if is_line_empty(tokens):
            continue
        assert(len(tokens) > 0)
        symbol_name = tokens[0].strip()
        # NOTE(review): parse_mode never takes any value other than "TABLE"
        # in the visible code -- presumably a placeholder for future modes.
        if symbol_name.lower() == "table":
            parse_mode = "TABLE"
        if symbol_name:  # it's a new stanza
            new_stanza = Stanza(basename, rownum, parse_mode, tokens, syntax_errors)
            stanzas.append(new_stanza)
            continue
        # otherwise it belongs to a previous stanza
        if not stanzas:
            # oops, there's a continuation line but it's not continuing anything
            # NOTE(review): this records the full path while add_line records
            # the basename -- confirm which one consumers expect.
            syntax_errors.append({"filename": filename,
                                  "row": rownum,
                                  "message": "This line should belong to a previous symbol, but no symbol precedes it."})
            continue
        stanzas[-1].add_line(basename, rownum, tokens, syntax_errors)
    return stanzas
class TableCompiler:
    ''' Compiles a Stanza into a Table '''
    # NOTE(review): stub -- no compilation logic exists yet in this file.
    def __init__(self, stanza, symbol_table):
        # stanza: the Stanza to compile; symbol_table: table of known
        # symbols. Both are currently unused.
        pass
class Stanza:
    ''' A Stanza is a series of spreadsheet rows that are interpreted as a unit '''

    def __init__(self, filename, rownum, parse_mode, tokens, syntax_errors):
        """Start a new stanza from its header row.

        tokens[0] is the stanza's symbol name; the remaining non-empty cells
        become column headers, keyed by their 1-based column index.
        filename, rownum and syntax_errors are accepted for interface symmetry
        with add_line but are currently unused here.
        """
        self.parse_mode = parse_mode
        assert(len(tokens) > 0)
        self.symbol_name = tokens[0].strip()
        # BUG FIX: self.lines was previously initialised twice.
        self.lines = []
        # Map of 1-based column index -> header text (empty header cells are
        # skipped, so the mapping may have gaps).
        self.colheaders = {}
        for colnum, token in enumerate(tokens[1:]):
            if not token.strip():
                continue
            self.colheaders[colnum+1] = token.strip()

    def add_line(self, filename, rownum, tokens, syntax_errors):
        """Attach a continuation row (empty first cell) to this stanza.

        Cells under a known column header are collected as
        (filename, rownum, colnum, header, value) tuples; cells outside any
        header are reported into syntax_errors.
        """
        line = []
        assert(len(tokens) > 0)
        assert(tokens[0].strip() == '')
        for colnum, token in enumerate(tokens[1:]):
            if not token.strip():
                continue
            if colnum+1 not in self.colheaders:
                syntax_errors.append({"filename": filename,
                                      "row": rownum,
                                      "col": colnum,
                                      "message": "Cell does not belong to a column header."})
                continue
            colheader = self.colheaders[colnum+1]
            # BUG FIX: removed a leftover debug print() of every parsed cell.
            line.append((filename, rownum, colnum+1, colheader, token.strip()))
        self.lines.append(line)
# Module-level smoke run: parse the test spreadsheet and report any errors.
# NOTE(review): this executes on import -- presumably debug scaffolding;
# consider guarding with `if __name__ == '__main__':`.
errors = []
parse_csv("tests/test.csv", errors)
# FIX: print(errors) had dataset-extraction residue fused onto it; the
# swallowed first import of the following file copy is restored below.
print(errors)
# grable/spreadsheets.py
import csv
import os
def csv_iterator(filename):
    """Yield ``(rownum, tokens)`` pairs for each row of ``filename``.

    Row numbers are zero-based; ``tokens`` is the list of cell strings
    produced by ``csv.reader``.
    """
    # BUG FIX: the path was hard-coded to 'tests/test.csv' and the
    # ``filename`` argument was silently ignored.
    with open(filename, 'r', newline='', encoding='utf-8') as fin:
        yield from enumerate(csv.reader(fin))
def is_line_empty(line):
    """Return True if ``line`` is a comment row or contains only blank cells."""
    if not line:
        return True
    first_cell = line[0].strip()
    if first_cell.startswith("#"):
        # Comment rows are marked by '#' in the first cell.
        return True
    return all(not cell.strip() for cell in line)
def parse_csv(filename, syntax_errors):
    """Parse a spreadsheet CSV file into a list of Stanza objects.

    Args:
        filename: Path of the CSV file to parse.
        syntax_errors: Caller-supplied list; error dicts (with "filename",
            "row" and "message" keys) are appended to it.

    Returns:
        The list of parsed Stanza objects.
    """
    # BUG FIX: the original rebound ``syntax_errors`` to a fresh local list,
    # so the caller's list never received any errors.  It also built
    # ``stanzas`` and then dropped it; the list is now returned.
    stanzas = []
    parse_mode = "TABLE"
    basename = os.path.basename(filename)
    for rownum, tokens in csv_iterator(filename):
        if is_line_empty(tokens):
            continue
        assert len(tokens) > 0
        symbol_name = tokens[0].strip()
        if symbol_name.lower() == "table":
            parse_mode = "TABLE"
        if symbol_name:  # it's a new stanza
            stanzas.append(Stanza(basename, rownum, parse_mode, tokens, syntax_errors))
            continue
        # otherwise it belongs to a previous stanza
        if not stanzas:
            # oops, there's a continuation line but it's not continuing anything
            syntax_errors.append({
                "filename": filename,
                "row": rownum,
                "message": "This line should belong to a previous symbol, but no symbol precedes it.",
            })
            continue
        stanzas[-1].add_line(basename, rownum, tokens, syntax_errors)
    return stanzas
class TableCompiler:
    """Compiles a Stanza into a Table.

    Currently a stub: the constructor accepts its inputs but performs
    no work yet.
    """

    def __init__(self, stanza, symbol_table):
        # Placeholder until table compilation is implemented.
        pass
class Stanza:
    """A Stanza is a series of spreadsheet rows that are interpreted as a unit.

    The first row names the symbol (column 0) and supplies the column
    headers; subsequent rows (added via :meth:`add_line`) must have an
    empty first cell and have their cells matched to those headers.
    """

    def __init__(self, filename, rownum, parse_mode, tokens, syntax_errors):
        # NOTE: the original initialized ``self.lines`` twice; the duplicate
        # assignment has been removed.
        self.parse_mode = parse_mode
        assert len(tokens) > 0
        self.symbol_name = tokens[0].strip()
        self.lines = []
        # Map of 1-based column index (matching the CSV layout) -> header text.
        self.colheaders = {}
        for colnum, token in enumerate(tokens[1:]):
            if not token.strip():
                continue
            self.colheaders[colnum + 1] = token.strip()

    def add_line(self, filename, rownum, tokens, syntax_errors):
        """Attach a continuation row to this stanza.

        Cells without a matching column header are reported through
        ``syntax_errors``; the rest are recorded as
        ``(filename, rownum, colnum, header, value)`` tuples.
        """
        line = []
        assert len(tokens) > 0
        assert tokens[0].strip() == ''
        for colnum, token in enumerate(tokens[1:]):
            if not token.strip():
                continue
            if colnum + 1 not in self.colheaders:
                syntax_errors.append({"filename": filename,
                                      "row": rownum,
                                      # report the 1-based column, consistent
                                      # with the tuples appended below
                                      # (original reported the 0-based index)
                                      "col": colnum + 1,
                                      "message": "Cell does not belong to a column header."})
                continue
            colheader = self.colheaders[colnum + 1]
            line.append((filename, rownum, colnum + 1, colheader, token.strip()))
            # stray debug print() removed
        self.lines.append(line)
# Ad-hoc smoke run: parse the sample spreadsheet, collecting syntax errors.
# NOTE(review): executes at import time; consider a __main__ guard.
errors = []
parse_csv("tests/test.csv", errors)
print(errors)
import inspect
from robot.utils import normalize, unic
def LibraryScope(libcode, library):
    """Factory returning the scope handler matching the library's declared scope."""
    declared = _get_scope(libcode)
    if declared == 'global':
        return GlobalScope(library)
    # Anything other than 'global'/'testsuite' falls back to test-case scope.
    return TestSuiteScope(library) if declared == 'testsuite' else TestCaseScope(library)
def _get_scope(libcode):
if inspect.ismodule(libcode):
return 'global'
scope = getattr(libcode, 'ROBOT_LIBRARY_SCOPE', '')
return normalize(unic(scope), ignore='_')
class GlobalScope(object):
    """Scope handler for libraries with GLOBAL scope.

    A single library instance lives for the whole run; listeners are
    (un)registered at suite boundaries and test boundaries are no-ops.
    """
    is_global = True
    def __init__(self, library):
        # Bind the library's listener hooks once; subclasses reuse these
        # bound callables.
        self._register_listeners = library.register_listeners
        self._unregister_listeners = library.unregister_listeners
    def start_suite(self):
        self._register_listeners()
    def end_suite(self):
        self._unregister_listeners()
    def start_test(self):
        # Global libraries keep their instance across tests.
        pass
    def end_test(self):
        pass
    def __str__(self):
        return 'global'
class TestSuiteScope(GlobalScope):
    """Scope handler for TEST SUITE scoped libraries.

    A fresh library instance is taken into use for every suite; the
    previous instance is cached on suite start and restored on suite end.
    """

    @property
    def is_global(self):
        # Read-only override of GlobalScope.is_global.  The original also
        # assigned a class attribute ``is_global = False`` which this
        # property (defined later in the class body) immediately shadowed;
        # the dead assignment has been removed.
        return False

    def __init__(self, library):
        GlobalScope.__init__(self, library)
        self._reset_instance = library.reset_instance
        # Stack of previous library instances, restored in end_suite().
        self._instance_cache = []

    def start_suite(self):
        prev = self._reset_instance()
        self._instance_cache.append(prev)
        self._register_listeners()

    def end_suite(self):
        self._unregister_listeners(close=True)
        prev = self._instance_cache.pop()
        self._reset_instance(prev)

    def __str__(self):
        return 'test suite'
class TestCaseScope(TestSuiteScope):
    """Scope handler for TEST CASE scoped libraries.

    On top of per-suite handling, a fresh library instance is taken into
    use for every test; the suite-level instance (and its listeners) is
    restored when the test ends.
    """
    # NOTE: extraction residue fused onto the final line of this class in
    # the source dump has been removed — it made the module unparsable.

    def start_test(self):
        # Detach the current (suite-level) instance's listeners before
        # swapping in a fresh instance for the test.
        self._unregister_listeners()
        prev = self._reset_instance()
        self._instance_cache.append(prev)
        self._register_listeners()

    def end_test(self):
        self._unregister_listeners(close=True)
        prev = self._instance_cache.pop()
        self._reset_instance(prev)
        self._register_listeners()

    def __str__(self):
        return 'test case'
import inspect
from robot.utils import normalize, unic
def LibraryScope(libcode, library):
    """Factory returning the scope handler matching the library's declared scope."""
    declared = _get_scope(libcode)
    if declared == 'global':
        return GlobalScope(library)
    # Anything other than 'global'/'testsuite' falls back to test-case scope.
    return TestSuiteScope(library) if declared == 'testsuite' else TestCaseScope(library)
def _get_scope(libcode):
if inspect.ismodule(libcode):
return 'global'
scope = getattr(libcode, 'ROBOT_LIBRARY_SCOPE', '')
return normalize(unic(scope), ignore='_')
class GlobalScope(object):
    """Scope handler for libraries with GLOBAL scope.

    A single library instance lives for the whole run; listeners are
    (un)registered at suite boundaries and test boundaries are no-ops.
    """
    is_global = True
    def __init__(self, library):
        # Bind the library's listener hooks once; subclasses reuse these
        # bound callables.
        self._register_listeners = library.register_listeners
        self._unregister_listeners = library.unregister_listeners
    def start_suite(self):
        self._register_listeners()
    def end_suite(self):
        self._unregister_listeners()
    def start_test(self):
        # Global libraries keep their instance across tests.
        pass
    def end_test(self):
        pass
    def __str__(self):
        return 'global'
class TestSuiteScope(GlobalScope):
    """Scope handler for TEST SUITE scoped libraries.

    A fresh library instance is taken into use for every suite; the
    previous instance is cached on suite start and restored on suite end.
    """

    @property
    def is_global(self):
        # Read-only override of GlobalScope.is_global.  The original also
        # assigned a class attribute ``is_global = False`` which this
        # property (defined later in the class body) immediately shadowed;
        # the dead assignment has been removed.
        return False

    def __init__(self, library):
        GlobalScope.__init__(self, library)
        self._reset_instance = library.reset_instance
        # Stack of previous library instances, restored in end_suite().
        self._instance_cache = []

    def start_suite(self):
        prev = self._reset_instance()
        self._instance_cache.append(prev)
        self._register_listeners()

    def end_suite(self):
        self._unregister_listeners(close=True)
        prev = self._instance_cache.pop()
        self._reset_instance(prev)

    def __str__(self):
        return 'test suite'
class TestCaseScope(TestSuiteScope):
    """Scope handler for TEST CASE scoped libraries.

    On top of per-suite handling, a fresh library instance is taken into
    use for every test; the suite-level instance (and its listeners) is
    restored when the test ends.
    """
    # NOTE: extraction residue fused onto the final line of this class in
    # the source dump has been removed — it made the module unparsable.

    def start_test(self):
        # Detach the current (suite-level) instance's listeners before
        # swapping in a fresh instance for the test.
        self._unregister_listeners()
        prev = self._reset_instance()
        self._instance_cache.append(prev)
        self._register_listeners()

    def end_test(self):
        self._unregister_listeners(close=True)
        prev = self._instance_cache.pop()
        self._reset_instance(prev)
        self._register_listeners()

    def __str__(self):
        return 'test case'
import logging
import re
from streamlink.compat import html_unescape, unquote
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents, validate
from streamlink.stream import HLSStream, HTTPStream, RTMPStream
from streamlink.utils import parse_json
log = logging.getLogger(__name__)
class OK_live(Plugin):
    '''Plugin for ok.ru'''
    # JSON player options embedded in the page's ``data-options`` attribute.
    _data_re = re.compile(r'''data-options=(?P<q>["'])(?P<data>{[^"']+})(?P=q)''')
    _url_re = re.compile(r'''https?://(?:www\.)?ok\.ru/''')
    # Metadata JSON: a (possibly empty) list of progressive-HTTP videos plus
    # optional HLS / DASH / RTMP manifest URLs.  ``None`` is accepted so a
    # non-matching payload does not raise.
    _metadata_schema = validate.Schema(
        validate.transform(parse_json),
        validate.any({
            'videos': validate.any(
                [],
                [
                    {
                        'name': validate.text,
                        'url': validate.text,
                    }
                ]
            ),
            validate.optional('hlsManifestUrl'): validate.text,
            validate.optional('hlsMasterPlaylistUrl'): validate.text,
            validate.optional('liveDashManifestUrl'): validate.text,
            validate.optional('rtmpUrl'): validate.text,
        }, None)
    )
    # Page schema: extract the data-options JSON, HTML-unescape it, parse it,
    # and pull ``flashvars``, which carries either inline 'metadata' or a
    # URL-quoted 'metadataUrl' to fetch the metadata from.
    _data_schema = validate.Schema(
        validate.all(
            validate.transform(_data_re.search),
            validate.get('data'),
            validate.transform(html_unescape),
            validate.transform(parse_json),
            validate.get('flashvars'),
            validate.any(
                {
                    'metadata': _metadata_schema
                },
                {
                    'metadataUrl': validate.transform(unquote)
                },
                None
            )
        )
    )
    # Maps ok.ru quality labels to pixel heights used for stream weighting.
    QUALITY_WEIGHTS = {
        'full': 1080,
        '1080': 1080,
        'hd': 720,
        '720': 720,
        'sd': 480,
        '480': 480,
        '360': 360,
        'low': 360,
        'lowest': 240,
        'mobile': 144,
    }
    @classmethod
    def can_handle_url(cls, url):
        # A truthy match object serves as the boolean answer.
        return cls._url_re.match(url)
    @classmethod
    def stream_weight(cls, key):
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'ok_live'
        return Plugin.stream_weight(key)
    def _get_streams(self):
        """Yield (name, stream) pairs: HLS variants, progressive HTTP, RTMP."""
        log.debug('Version 2018-07-01')
        log.info('This is a custom plugin. '
                 'For support visit https://github.com/back-to/plugins')
        headers = {
            'User-Agent': useragents.FIREFOX,
            'Referer': self.url
        }
        http.headers.update(headers)
        data = http.get(self.url, schema=self._data_schema)
        metadata = data.get('metadata')
        metadata_url = data.get('metadataUrl')
        # A metadataUrl, when present, overrides any inline metadata.
        if metadata_url:
            metadata = http.post(metadata_url, schema=self._metadata_schema)
        if metadata:
            list_hls = [
                metadata.get('hlsManifestUrl'),
                metadata.get('hlsMasterPlaylistUrl'),
            ]
            for hls_url in list_hls:
                if hls_url is not None:
                    for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
                        yield s
            if metadata.get('videos'):
                for http_stream in metadata['videos']:
                    http_name = http_stream['name']
                    http_url = http_stream['url']
                    try:
                        # Normalize quality labels (e.g. 'hd' -> '720p').
                        http_name = '{0}p'.format(self.QUALITY_WEIGHTS[http_name])
                    except KeyError:
                        # Unknown label: keep the original name as-is.
                        pass
                    yield http_name, HTTPStream(self.session, http_url)
            if metadata.get('rtmpUrl'):
                yield 'live', RTMPStream(self.session, params={'rtmp': metadata['rtmpUrl']})
__plugin__ = OK_live
import logging
import re
from streamlink.compat import html_unescape, unquote
from streamlink.plugin import Plugin
from streamlink.plugin.api import http, useragents, validate
from streamlink.stream import HLSStream, HTTPStream, RTMPStream
from streamlink.utils import parse_json
log = logging.getLogger(__name__)
class OK_live(Plugin):
    '''Plugin for ok.ru'''
    # JSON player options embedded in the page's ``data-options`` attribute.
    _data_re = re.compile(r'''data-options=(?P<q>["'])(?P<data>{[^"']+})(?P=q)''')
    _url_re = re.compile(r'''https?://(?:www\.)?ok\.ru/''')
    # Metadata JSON: a (possibly empty) list of progressive-HTTP videos plus
    # optional HLS / DASH / RTMP manifest URLs.  ``None`` is accepted so a
    # non-matching payload does not raise.
    _metadata_schema = validate.Schema(
        validate.transform(parse_json),
        validate.any({
            'videos': validate.any(
                [],
                [
                    {
                        'name': validate.text,
                        'url': validate.text,
                    }
                ]
            ),
            validate.optional('hlsManifestUrl'): validate.text,
            validate.optional('hlsMasterPlaylistUrl'): validate.text,
            validate.optional('liveDashManifestUrl'): validate.text,
            validate.optional('rtmpUrl'): validate.text,
        }, None)
    )
    # Page schema: extract the data-options JSON, HTML-unescape it, parse it,
    # and pull ``flashvars``, which carries either inline 'metadata' or a
    # URL-quoted 'metadataUrl' to fetch the metadata from.
    _data_schema = validate.Schema(
        validate.all(
            validate.transform(_data_re.search),
            validate.get('data'),
            validate.transform(html_unescape),
            validate.transform(parse_json),
            validate.get('flashvars'),
            validate.any(
                {
                    'metadata': _metadata_schema
                },
                {
                    'metadataUrl': validate.transform(unquote)
                },
                None
            )
        )
    )
    # Maps ok.ru quality labels to pixel heights used for stream weighting.
    QUALITY_WEIGHTS = {
        'full': 1080,
        '1080': 1080,
        'hd': 720,
        '720': 720,
        'sd': 480,
        '480': 480,
        '360': 360,
        'low': 360,
        'lowest': 240,
        'mobile': 144,
    }
    @classmethod
    def can_handle_url(cls, url):
        # A truthy match object serves as the boolean answer.
        return cls._url_re.match(url)
    @classmethod
    def stream_weight(cls, key):
        weight = cls.QUALITY_WEIGHTS.get(key)
        if weight:
            return weight, 'ok_live'
        return Plugin.stream_weight(key)
    def _get_streams(self):
        """Yield (name, stream) pairs: HLS variants, progressive HTTP, RTMP."""
        log.debug('Version 2018-07-01')
        log.info('This is a custom plugin. '
                 'For support visit https://github.com/back-to/plugins')
        headers = {
            'User-Agent': useragents.FIREFOX,
            'Referer': self.url
        }
        http.headers.update(headers)
        data = http.get(self.url, schema=self._data_schema)
        metadata = data.get('metadata')
        metadata_url = data.get('metadataUrl')
        # A metadataUrl, when present, overrides any inline metadata.
        if metadata_url:
            metadata = http.post(metadata_url, schema=self._metadata_schema)
        if metadata:
            list_hls = [
                metadata.get('hlsManifestUrl'),
                metadata.get('hlsMasterPlaylistUrl'),
            ]
            for hls_url in list_hls:
                if hls_url is not None:
                    for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():
                        yield s
            if metadata.get('videos'):
                for http_stream in metadata['videos']:
                    http_name = http_stream['name']
                    http_url = http_stream['url']
                    try:
                        # Normalize quality labels (e.g. 'hd' -> '720p').
                        http_name = '{0}p'.format(self.QUALITY_WEIGHTS[http_name])
                    except KeyError:
                        # Unknown label: keep the original name as-is.
                        pass
                    yield http_name, HTTPStream(self.session, http_url)
            if metadata.get('rtmpUrl'):
                yield 'live', RTMPStream(self.session, params={'rtmp': metadata['rtmpUrl']})
__plugin__ = OK_live
import shutil
import logging
from typing import Optional
from packit.actions import ActionName
from packit.config.common_package_config import CommonPackageConfig
from packit.distgit import DistGit
import packit.upstream
logger = logging.getLogger(__name__)
class ChangelogHelper:
    """Helpers for updating spec-file changelogs (dist-git sync and SRPM builds)."""

    def __init__(
        self,
        upstream: "packit.upstream.Upstream",
        downstream: Optional[DistGit] = None,
        package_config: Optional[CommonPackageConfig] = None,
    ) -> None:
        self.up = upstream  # upstream project wrapper (always required)
        self.dg = downstream  # dist-git wrapper; needed only by update_dist_git()
        self.package_config = package_config

    @property
    def entry_from_action(self) -> Optional[str]:
        """
        Runs changelog-entry action if present and returns string that can be
        used as a changelog entry.

        Returns:
            Changelog entry or `None` if action is not present.
        """
        messages = self.up.get_output_from_action(ActionName.changelog_entry)
        if not messages:
            return None
        return "\n".join(map(lambda line: line.rstrip(), messages))

    def update_dist_git(self, full_version: str, upstream_tag: str) -> None:
        """
        Update the spec-file in dist-git by setting the 'Version' tag and
        adding a new entry in the %changelog section.

        If downstream spec file has the %autochangelog macro then
        preserve it and do not write a comment to the %changelog section.

        Args:
            full_version: Version to be set in the spec-file.
            upstream_tag: The commit messages after last tag and before this tag are used
                to update the changelog in the spec-file.
        """
        # Changelog text precedence: explicit changelog-entry action output,
        # then the upstream release description (when configured), otherwise
        # the commit messages between the previous tag and ``upstream_tag``.
        comment = self.entry_from_action or (
            self.up.local_project.git_project.get_release(
                tag_name=upstream_tag, name=full_version
            ).body
            if self.package_config.copy_upstream_release_description
            else self.up.get_commit_messages(
                after=self.up.get_last_tag(upstream_tag), before=upstream_tag
            )
        )
        try:
            self.dg.set_specfile_content(
                self.up.specfile,
                full_version,
                comment=None if self.dg.specfile.has_autochangelog() else comment,
            )
        except FileNotFoundError as ex:
            # no downstream spec file: this is either a mistake or
            # there is no spec file in dist-git yet, hence warning
            logger.warning(
                f"Unable to find a spec file in downstream: {ex}, copying the one from upstream."
            )
            shutil.copy2(
                self.up.absolute_specfile_path,
                self.dg.get_absolute_specfile_path(),
            )

    def _get_release_for_source_git(
        self, current_commit: str, bump_version: bool, release_suffix: Optional[str]
    ) -> Optional[str]:
        """Compute the Release value to set for a source-git build.

        Returns ``<old release>.<release_suffix>`` when a suffix is given,
        ``None`` (leave Release untouched) when not bumping, otherwise the
        incremented release number with a ``.g<commit>`` suffix.
        """
        old_release = self.up.specfile.get_release_number()
        if release_suffix:
            return f"{old_release}.{release_suffix}"
        if not bump_version:
            return None
        try:
            old_release_int = int(old_release)
            new_release = str(old_release_int + 1)
        except ValueError:
            # Release is not a plain integer; keep it unchanged and only
            # append the commit suffix below.
            new_release = str(old_release)
        return f"{new_release}.g{current_commit}"

    def prepare_upstream_using_source_git(
        self, bump_version: bool, release_suffix: Optional[str]
    ) -> None:
        """
        Updates changelog when creating SRPM within source-git repository.
        """
        current_commit = self.up.local_project.commit_hexsha
        release_to_update = self._get_release_for_source_git(
            current_commit, bump_version, release_suffix
        )
        msg = self.entry_from_action
        if not msg and bump_version:
            msg = f"- Downstream changes ({current_commit})"
        self.up.specfile.set_spec_version(
            release=release_to_update, changelog_entry=msg
        )

    def prepare_upstream_locally(
        self,
        version: str,
        commit: str,
        bump_version: bool,
        release_suffix: Optional[str],
    ) -> None:
        """
        Updates changelog when creating SRPM within upstream repository.

        Args:
            version: Version to be set in the spec-file.
            commit: Commit to be set in the changelog.
            bump_version: Specifies whether version should be changed in the spec-file.
            release_suffix: Specifies local release suffix. `None` represents default suffix.
        """
        last_tag = self.up.get_last_tag()
        msg = self.entry_from_action
        if not msg and last_tag and bump_version:
            msg = self.up.get_commit_messages(after=last_tag)
        if not msg and bump_version:
            # no describe, no tag - just a boilerplate message w/ commit hash
            # or, there were no changes b/w HEAD and last_tag, which implies last_tag == HEAD
            msg = f"- Development snapshot ({commit})"
        release = self.up.get_spec_release(
            bump_version=bump_version,
            release_suffix=release_suffix,
        )
        logger.debug(f"Setting Release in spec to {release!r}.")
        # instead of changing version, we change Release field
        # upstream projects should take care of versions
        self.up.specfile.set_spec_version(
            version=version,
            release=release,
            changelog_entry=msg,
        )
import shutil
import logging
from typing import Optional
from packit.actions import ActionName
from packit.config.common_package_config import CommonPackageConfig
from packit.distgit import DistGit
import packit.upstream
logger = logging.getLogger(__name__)
class ChangelogHelper:
    """Helpers for updating spec-file changelogs (dist-git sync and SRPM builds)."""

    def __init__(
        self,
        upstream: "packit.upstream.Upstream",
        downstream: Optional[DistGit] = None,
        package_config: Optional[CommonPackageConfig] = None,
    ) -> None:
        self.up = upstream  # upstream project wrapper (always required)
        self.dg = downstream  # dist-git wrapper; needed only by update_dist_git()
        self.package_config = package_config

    @property
    def entry_from_action(self) -> Optional[str]:
        """
        Runs changelog-entry action if present and returns string that can be
        used as a changelog entry.

        Returns:
            Changelog entry or `None` if action is not present.
        """
        messages = self.up.get_output_from_action(ActionName.changelog_entry)
        if not messages:
            return None
        return "\n".join(map(lambda line: line.rstrip(), messages))

    def update_dist_git(self, full_version: str, upstream_tag: str) -> None:
        """
        Update the spec-file in dist-git by setting the 'Version' tag and
        adding a new entry in the %changelog section.

        If downstream spec file has the %autochangelog macro then
        preserve it and do not write a comment to the %changelog section.

        Args:
            full_version: Version to be set in the spec-file.
            upstream_tag: The commit messages after last tag and before this tag are used
                to update the changelog in the spec-file.
        """
        # Changelog text precedence: explicit changelog-entry action output,
        # then the upstream release description (when configured), otherwise
        # the commit messages between the previous tag and ``upstream_tag``.
        comment = self.entry_from_action or (
            self.up.local_project.git_project.get_release(
                tag_name=upstream_tag, name=full_version
            ).body
            if self.package_config.copy_upstream_release_description
            else self.up.get_commit_messages(
                after=self.up.get_last_tag(upstream_tag), before=upstream_tag
            )
        )
        try:
            self.dg.set_specfile_content(
                self.up.specfile,
                full_version,
                comment=None if self.dg.specfile.has_autochangelog() else comment,
            )
        except FileNotFoundError as ex:
            # no downstream spec file: this is either a mistake or
            # there is no spec file in dist-git yet, hence warning
            logger.warning(
                f"Unable to find a spec file in downstream: {ex}, copying the one from upstream."
            )
            shutil.copy2(
                self.up.absolute_specfile_path,
                self.dg.get_absolute_specfile_path(),
            )

    def _get_release_for_source_git(
        self, current_commit: str, bump_version: bool, release_suffix: Optional[str]
    ) -> Optional[str]:
        """Compute the Release value to set for a source-git build.

        Returns ``<old release>.<release_suffix>`` when a suffix is given,
        ``None`` (leave Release untouched) when not bumping, otherwise the
        incremented release number with a ``.g<commit>`` suffix.
        """
        old_release = self.up.specfile.get_release_number()
        if release_suffix:
            return f"{old_release}.{release_suffix}"
        if not bump_version:
            return None
        try:
            old_release_int = int(old_release)
            new_release = str(old_release_int + 1)
        except ValueError:
            # Release is not a plain integer; keep it unchanged and only
            # append the commit suffix below.
            new_release = str(old_release)
        return f"{new_release}.g{current_commit}"

    def prepare_upstream_using_source_git(
        self, bump_version: bool, release_suffix: Optional[str]
    ) -> None:
        """
        Updates changelog when creating SRPM within source-git repository.
        """
        current_commit = self.up.local_project.commit_hexsha
        release_to_update = self._get_release_for_source_git(
            current_commit, bump_version, release_suffix
        )
        msg = self.entry_from_action
        if not msg and bump_version:
            msg = f"- Downstream changes ({current_commit})"
        self.up.specfile.set_spec_version(
            release=release_to_update, changelog_entry=msg
        )

    def prepare_upstream_locally(
        self,
        version: str,
        commit: str,
        bump_version: bool,
        release_suffix: Optional[str],
    ) -> None:
        """
        Updates changelog when creating SRPM within upstream repository.

        Args:
            version: Version to be set in the spec-file.
            commit: Commit to be set in the changelog.
            bump_version: Specifies whether version should be changed in the spec-file.
            release_suffix: Specifies local release suffix. `None` represents default suffix.
        """
        last_tag = self.up.get_last_tag()
        msg = self.entry_from_action
        if not msg and last_tag and bump_version:
            msg = self.up.get_commit_messages(after=last_tag)
        if not msg and bump_version:
            # no describe, no tag - just a boilerplate message w/ commit hash
            # or, there were no changes b/w HEAD and last_tag, which implies last_tag == HEAD
            msg = f"- Development snapshot ({commit})"
        release = self.up.get_spec_release(
            bump_version=bump_version,
            release_suffix=release_suffix,
        )
        logger.debug(f"Setting Release in spec to {release!r}.")
        # instead of changing version, we change Release field
        # upstream projects should take care of versions
        self.up.specfile.set_spec_version(
            version=version,
            release=release,
            changelog_entry=msg,
        )
import os
import pytest
import spack.directives
import spack.fetch_strategy
import spack.repo
from spack.paths import mock_packages_path
from spack.spec import Spec
from spack.util.naming import mod_to_class
from spack.version import VersionChecksumError
@pytest.mark.usefixtures('config', 'mock_packages')
class TestPackage(object):
    """Basic sanity checks for loading, naming and importing mock packages."""
    def test_load_package(self):
        spack.repo.get('mpich')
    def test_package_name(self):
        pkg = spack.repo.get('mpich')
        assert pkg.name == 'mpich'
    def test_package_filename(self):
        repo = spack.repo.Repo(mock_packages_path)
        filename = repo.filename_for_package_name('mpich')
        assert filename == os.path.join(
            mock_packages_path,
            'packages',
            'mpich',
            'package.py'
        )
    def test_nonexisting_package_filename(self):
        # filename_for_package_name still answers for packages that don't exist.
        repo = spack.repo.Repo(mock_packages_path)
        filename = repo.filename_for_package_name('some-nonexisting-package')
        assert filename == os.path.join(
            mock_packages_path,
            'packages',
            'some-nonexisting-package',
            'package.py'
        )
    def test_package_class_names(self):
        # Module-name to class-name conversion rules, including the
        # leading-underscore fallback for names starting with a digit.
        assert 'Mpich' == mod_to_class('mpich')
        assert 'PmgrCollective' == mod_to_class('pmgr_collective')
        assert 'PmgrCollective' == mod_to_class('pmgr-collective')
        assert 'Pmgrcollective' == mod_to_class('PmgrCollective')
        assert '_3db' == mod_to_class('3db')
    # Below tests target direct imports of spack packages from the
    # spack.pkg namespace
    def test_import_package(self):
        import spack.pkg.builtin.mock.mpich  # type: ignore[import] # noqa
    def test_import_package_as(self):
        import spack.pkg.builtin.mock  # noqa
        import spack.pkg.builtin.mock as m  # noqa
        import spack.pkg.builtin.mock.mpich as mp  # noqa
        from spack.pkg.builtin import mock  # noqa
    def test_inheritance_of_diretives(self):
        # NOTE(review): "diretives" is a typo for "directives"; renaming
        # would change the test id, so it is kept as-is here.
        p = spack.repo.get('simple-inheritance')
        # Check dictionaries that should have been filled by directives
        assert len(p.dependencies) == 3
        assert 'cmake' in p.dependencies
        assert 'openblas' in p.dependencies
        assert 'mpi' in p.dependencies
        assert len(p.provided) == 2
        # Check that Spec instantiation behaves as we expect
        s = Spec('simple-inheritance')
        s.concretize()
        assert '^cmake' in s
        assert '^openblas' in s
        assert '+openblas' in s
        assert 'mpi' in s
        s = Spec('simple-inheritance~openblas')
        s.concretize()
        assert '^cmake' in s
        assert '^openblas' not in s
        assert '~openblas' in s
        assert 'mpi' in s
    @pytest.mark.regression('11844')
    def test_inheritance_of_patches(self):
        s = Spec('patch-inheritance')
        # Will error if inheritor package cannot find inherited patch files
        s.concretize()
    def test_dependency_extensions(self):
        s = Spec('extension2')
        s.concretize()
        deps = set(x.name for x in s.package.dependency_activations())
        assert deps == set(['extension1'])
    def test_import_class_from_package(self):
        from spack.pkg.builtin.mock.mpich import Mpich  # noqa
    def test_import_module_from_package(self):
        from spack.pkg.builtin.mock import mpich  # noqa
    def test_import_namespace_container_modules(self):
        # Every intermediate namespace package must be importable in all
        # the usual import forms.
        import spack.pkg  # noqa
        import spack.pkg as p  # noqa
        import spack.pkg.builtin  # noqa
        import spack.pkg.builtin as b  # noqa
        import spack.pkg.builtin.mock  # noqa
        import spack.pkg.builtin.mock as m  # noqa
        from spack import pkg  # noqa
        from spack.pkg import builtin  # noqa
        from spack.pkg.builtin import mock  # noqa
@pytest.mark.regression('2737')
def test_urls_for_versions(mock_packages, config):
    """Version directive without a 'url' argument should use default url."""
    for spec_str in ('url_override@0.9.0', 'url_override@1.0.0'):
        s = Spec(spec_str).concretized()
        url = s.package.url_for_version('0.9.0')
        assert url == 'http://www.anothersite.org/uo-0.9.0.tgz'
        url = s.package.url_for_version('1.0.0')
        assert url == 'http://www.doesnotexist.org/url_override-1.0.0.tar.gz'
        url = s.package.url_for_version('0.8.1')
        assert url == 'http://www.doesnotexist.org/url_override-0.8.1.tar.gz'
def test_url_for_version_with_no_urls(mock_packages, config):
    """url_for_version must raise NoURLError for a package without URLs."""
    # NOTE(review): relies on ``spack.package`` being importable through the
    # ``spack.repo`` import chain — there is no direct import above; confirm.
    pkg = spack.repo.get('git-test')
    with pytest.raises(spack.package.NoURLError):
        pkg.url_for_version('1.0')
    with pytest.raises(spack.package.NoURLError):
        pkg.url_for_version('1.1')
def test_url_for_version_with_only_overrides(mock_packages, config):
    """Per-version URL overrides are used directly and extrapolated from."""
    spec = Spec('url-only-override')
    spec.concretize()
    pkg = spack.repo.get(spec)
    # these exist and should just take the URL provided in the package
    assert pkg.url_for_version('1.0.0') == 'http://a.example.com/url_override-1.0.0.tar.gz'
    assert pkg.url_for_version('0.9.0') == 'http://b.example.com/url_override-0.9.0.tar.gz'
    assert pkg.url_for_version('0.8.1') == 'http://c.example.com/url_override-0.8.1.tar.gz'
    # these don't exist but should still work, even if there are only overrides
    assert pkg.url_for_version('1.0.5') == 'http://a.example.com/url_override-1.0.5.tar.gz'
    assert pkg.url_for_version('0.9.5') == 'http://b.example.com/url_override-0.9.5.tar.gz'
    assert pkg.url_for_version('0.8.5') == 'http://c.example.com/url_override-0.8.5.tar.gz'
    assert pkg.url_for_version('0.7.0') == 'http://c.example.com/url_override-0.7.0.tar.gz'
def test_url_for_version_with_only_overrides_with_gaps(mock_packages, config):
    """Extrapolation also works when overrides leave gaps between versions."""
    spec = Spec('url-only-override-with-gaps')
    spec.concretize()
    pkg = spack.repo.get(spec)
    # same as for url-only-override -- these are specific
    assert pkg.url_for_version('1.0.0') == 'http://a.example.com/url_override-1.0.0.tar.gz'
    assert pkg.url_for_version('0.9.0') == 'http://b.example.com/url_override-0.9.0.tar.gz'
    assert pkg.url_for_version('0.8.1') == 'http://c.example.com/url_override-0.8.1.tar.gz'
    # these don't have specific URLs, but should still work by extrapolation
    assert pkg.url_for_version('1.0.5') == 'http://a.example.com/url_override-1.0.5.tar.gz'
    assert pkg.url_for_version('0.9.5') == 'http://b.example.com/url_override-0.9.5.tar.gz'
    assert pkg.url_for_version('0.8.5') == 'http://c.example.com/url_override-0.8.5.tar.gz'
    assert pkg.url_for_version('0.7.0') == 'http://c.example.com/url_override-0.7.0.tar.gz'
def test_git_top_level(mock_packages, config):
    """Ensure that top-level git attribute can be used as a default."""
    pkg = spack.repo.get('git-top-level')
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
def test_svn_top_level(mock_packages, config):
    """Ensure that top-level svn attribute can be used as a default."""
    pkg = spack.repo.get('svn-top-level')
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
    assert isinstance(fetcher, spack.fetch_strategy.SvnFetchStrategy)
    assert fetcher.url == 'https://example.com/some/svn/repo'
def test_hg_top_level(mock_packages, config):
    """Ensure that top-level hg attribute can be used as a default."""
    pkg = spack.repo.get('hg-top-level')
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
    assert isinstance(fetcher, spack.fetch_strategy.HgFetchStrategy)
    assert fetcher.url == 'https://example.com/some/hg/repo'
def test_no_extrapolate_without_url(mock_packages, config):
    """Verify that we can't extrapolate versions for non-URL packages."""
    pkg = spack.repo.get('git-top-level')
    # presumably 1.1 is not a declared version of this mock package, so
    # inference must fail — confirm against the mock repo
    with pytest.raises(spack.fetch_strategy.ExtrapolationError):
        spack.fetch_strategy.for_package_version(pkg, '1.1')
def test_two_vcs_fetchers_top_level(mock_packages, config):
    """Verify conflict when two VCS strategies are specified together."""
    pkg = spack.repo.get('git-url-svn-top-level')
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.0')
    pkg = spack.repo.get('git-svn-top-level')
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.0')
def test_git_url_top_level_url_versions(mock_packages, config):
    """Test URL fetch strategy inference when url is specified with git."""
    pkg = spack.repo.get('git-url-top-level')
    # leading 62 zeros of sha256 hash
    leading_zeros = '0' * 62
    fetcher = spack.fetch_strategy.for_package_version(pkg, '2.0')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.url == 'https://example.com/some/tarball-2.0.tar.gz'
    assert fetcher.digest == leading_zeros + '20'
    fetcher = spack.fetch_strategy.for_package_version(pkg, '2.1')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.url == 'https://example.com/some/tarball-2.1.tar.gz'
    assert fetcher.digest == leading_zeros + '21'
    fetcher = spack.fetch_strategy.for_package_version(pkg, '2.2')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.url == 'https://www.example.com/foo2.2.tar.gz'
    assert fetcher.digest == leading_zeros + '22'
    fetcher = spack.fetch_strategy.for_package_version(pkg, '2.3')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.url == 'https://www.example.com/foo2.3.tar.gz'
    assert fetcher.digest == leading_zeros + '23'
def test_git_url_top_level_git_versions(mock_packages, config):
    """Test git fetch strategy inference when url is specified with git."""
    pkg = spack.repo.get('git-url-top-level')
    # 3.0: tag only
    fetcher = spack.fetch_strategy.for_package_version(pkg, '3.0')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag == 'v3.0'
    assert fetcher.commit is None
    assert fetcher.branch is None
    # 3.1: tag plus commit
    fetcher = spack.fetch_strategy.for_package_version(pkg, '3.1')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag == 'v3.1'
    assert fetcher.commit == 'abc31'
    assert fetcher.branch is None
    # 3.2: branch only
    fetcher = spack.fetch_strategy.for_package_version(pkg, '3.2')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag is None
    assert fetcher.commit is None
    assert fetcher.branch == 'releases/v3.2'
    # 3.3: commit plus branch
    fetcher = spack.fetch_strategy.for_package_version(pkg, '3.3')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag is None
    assert fetcher.commit == 'abc33'
    assert fetcher.branch == 'releases/v3.3'
    # 3.4: commit only
    fetcher = spack.fetch_strategy.for_package_version(pkg, '3.4')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag is None
    assert fetcher.commit == 'abc34'
    assert fetcher.branch is None
    # 'submodules': no ref at all
    fetcher = spack.fetch_strategy.for_package_version(pkg, 'submodules')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag is None
    assert fetcher.commit is None
    assert fetcher.branch is None
    # 'develop': branch only
    fetcher = spack.fetch_strategy.for_package_version(pkg, 'develop')
    assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
    assert fetcher.url == 'https://example.com/some/git/repo'
    assert fetcher.tag is None
    assert fetcher.commit is None
    assert fetcher.branch == 'develop'
def test_git_url_top_level_conflicts(mock_packages, config):
    """Test git fetch strategy inference when url is specified with git."""
    pkg = spack.repo.get('git-url-top-level')
    # every 1.x version must raise FetcherConflict during inference
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.0')
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.1')
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.2')
    with pytest.raises(spack.fetch_strategy.FetcherConflict):
        spack.fetch_strategy.for_package_version(pkg, '1.3')
def test_rpath_args(mutable_database):
    """Test a package's rpath_args property."""
    rec = mutable_database.get_record('mpich')
    rpath_args = rec.spec.package.rpath_args
    assert '-rpath' in rpath_args
    assert 'mpich' in rpath_args


def test_bundle_version_checksum(mock_directive_bundle,
                                 clear_directive_functions):
    """Test raising exception on a version checksum with a bundle package."""
    with pytest.raises(VersionChecksumError, match="Checksums not allowed"):
        version = spack.directives.version('1.0', checksum='1badpkg')
        version(mock_directive_bundle)


def test_bundle_patch_directive(mock_directive_bundle,
                                clear_directive_functions):
    """Test raising exception on a patch directive with a bundle package."""
    with pytest.raises(spack.directives.UnsupportedPackageDirective,
                       match="Patches are not allowed"):
        patch = spack.directives.patch('mock/patch.txt')
        patch(mock_directive_bundle)


def test_fetch_options(mock_packages, config):
    """Test fetch options inference."""
    pkg = spack.repo.get('fetch-options')
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.digest == '00000000000000000000000000000010'
    assert fetcher.extra_options == {'timeout': 42, 'cookie': 'foobar'}
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.1')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.digest == '00000000000000000000000000000011'
    assert fetcher.extra_options == {'timeout': 65}
    fetcher = spack.fetch_strategy.for_package_version(pkg, '1.2')
    assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
    assert fetcher.digest == '00000000000000000000000000000012'
    assert fetcher.extra_options == {'cookie': 'baz'}


def test_has_test_method_fails(capsys):
    """has_test_method exits (SystemExit) when the argument is not a class."""
    # NOTE: extraction residue fused onto the final assert line in the
    # source dump has been removed — it made the module unparsable.
    with pytest.raises(SystemExit):
        spack.package.has_test_method('printing-package')
    # the error text is written to stderr
    captured = capsys.readouterr()[1]
    assert 'is not a class' in captured
import os
import pytest
import spack.directives
import spack.fetch_strategy
import spack.repo
from spack.paths import mock_packages_path
from spack.spec import Spec
from spack.util.naming import mod_to_class
from spack.version import VersionChecksumError
@pytest.mark.usefixtures('config', 'mock_packages')
class TestPackage(object):
def test_load_package(self):
spack.repo.get('mpich')
def test_package_name(self):
pkg = spack.repo.get('mpich')
assert pkg.name == 'mpich'
def test_package_filename(self):
repo = spack.repo.Repo(mock_packages_path)
filename = repo.filename_for_package_name('mpich')
assert filename == os.path.join(
mock_packages_path,
'packages',
'mpich',
'package.py'
)
def test_nonexisting_package_filename(self):
repo = spack.repo.Repo(mock_packages_path)
filename = repo.filename_for_package_name('some-nonexisting-package')
assert filename == os.path.join(
mock_packages_path,
'packages',
'some-nonexisting-package',
'package.py'
)
def test_package_class_names(self):
assert 'Mpich' == mod_to_class('mpich')
assert 'PmgrCollective' == mod_to_class('pmgr_collective')
assert 'PmgrCollective' == mod_to_class('pmgr-collective')
assert 'Pmgrcollective' == mod_to_class('PmgrCollective')
assert '_3db' == mod_to_class('3db')
# Below tests target direct imports of spack packages from the
# spack.pkg namespace
def test_import_package(self):
import spack.pkg.builtin.mock.mpich # type: ignore[import] # noqa
def test_import_package_as(self):
import spack.pkg.builtin.mock # noqa
import spack.pkg.builtin.mock as m # noqa
import spack.pkg.builtin.mock.mpich as mp # noqa
from spack.pkg.builtin import mock # noqa
def test_inheritance_of_diretives(self):
p = spack.repo.get('simple-inheritance')
# Check dictionaries that should have been filled by directives
assert len(p.dependencies) == 3
assert 'cmake' in p.dependencies
assert 'openblas' in p.dependencies
assert 'mpi' in p.dependencies
assert len(p.provided) == 2
# Check that Spec instantiation behaves as we expect
s = Spec('simple-inheritance')
s.concretize()
assert '^cmake' in s
assert '^openblas' in s
assert '+openblas' in s
assert 'mpi' in s
s = Spec('simple-inheritance~openblas')
s.concretize()
assert '^cmake' in s
assert '^openblas' not in s
assert '~openblas' in s
assert 'mpi' in s
@pytest.mark.regression('11844')
def test_inheritance_of_patches(self):
s = Spec('patch-inheritance')
# Will error if inheritor package cannot find inherited patch files
s.concretize()
def test_dependency_extensions(self):
s = Spec('extension2')
s.concretize()
deps = set(x.name for x in s.package.dependency_activations())
assert deps == set(['extension1'])
def test_import_class_from_package(self):
from spack.pkg.builtin.mock.mpich import Mpich # noqa
def test_import_module_from_package(self):
from spack.pkg.builtin.mock import mpich # noqa
def test_import_namespace_container_modules(self):
import spack.pkg # noqa
import spack.pkg as p # noqa
import spack.pkg.builtin # noqa
import spack.pkg.builtin as b # noqa
import spack.pkg.builtin.mock # noqa
import spack.pkg.builtin.mock as m # noqa
from spack import pkg # noqa
from spack.pkg import builtin # noqa
from spack.pkg.builtin import mock # noqa
@pytest.mark.regression('2737')
def test_urls_for_versions(mock_packages, config):
"""Version directive without a 'url' argument should use default url."""
for spec_str in ('url_override@0.9.0', 'url_override@1.0.0'):
s = Spec(spec_str).concretized()
url = s.package.url_for_version('0.9.0')
assert url == 'http://www.anothersite.org/uo-0.9.0.tgz'
url = s.package.url_for_version('1.0.0')
assert url == 'http://www.doesnotexist.org/url_override-1.0.0.tar.gz'
url = s.package.url_for_version('0.8.1')
assert url == 'http://www.doesnotexist.org/url_override-0.8.1.tar.gz'
def test_url_for_version_with_no_urls(mock_packages, config):
pkg = spack.repo.get('git-test')
with pytest.raises(spack.package.NoURLError):
pkg.url_for_version('1.0')
with pytest.raises(spack.package.NoURLError):
pkg.url_for_version('1.1')
def test_url_for_version_with_only_overrides(mock_packages, config):
spec = Spec('url-only-override')
spec.concretize()
pkg = spack.repo.get(spec)
# these exist and should just take the URL provided in the package
assert pkg.url_for_version('1.0.0') == 'http://a.example.com/url_override-1.0.0.tar.gz'
assert pkg.url_for_version('0.9.0') == 'http://b.example.com/url_override-0.9.0.tar.gz'
assert pkg.url_for_version('0.8.1') == 'http://c.example.com/url_override-0.8.1.tar.gz'
# these don't exist but should still work, even if there are only overrides
assert pkg.url_for_version('1.0.5') == 'http://a.example.com/url_override-1.0.5.tar.gz'
assert pkg.url_for_version('0.9.5') == 'http://b.example.com/url_override-0.9.5.tar.gz'
assert pkg.url_for_version('0.8.5') == 'http://c.example.com/url_override-0.8.5.tar.gz'
assert pkg.url_for_version('0.7.0') == 'http://c.example.com/url_override-0.7.0.tar.gz'
def test_url_for_version_with_only_overrides_with_gaps(mock_packages, config):
spec = Spec('url-only-override-with-gaps')
spec.concretize()
pkg = spack.repo.get(spec)
# same as for url-only-override -- these are specific
assert pkg.url_for_version('1.0.0') == 'http://a.example.com/url_override-1.0.0.tar.gz'
assert pkg.url_for_version('0.9.0') == 'http://b.example.com/url_override-0.9.0.tar.gz'
assert pkg.url_for_version('0.8.1') == 'http://c.example.com/url_override-0.8.1.tar.gz'
# these don't have specific URLs, but should still work by extrapolation
assert pkg.url_for_version('1.0.5') == 'http://a.example.com/url_override-1.0.5.tar.gz'
assert pkg.url_for_version('0.9.5') == 'http://b.example.com/url_override-0.9.5.tar.gz'
assert pkg.url_for_version('0.8.5') == 'http://c.example.com/url_override-0.8.5.tar.gz'
assert pkg.url_for_version('0.7.0') == 'http://c.example.com/url_override-0.7.0.tar.gz'
def test_git_top_level(mock_packages, config):
"""Ensure that top-level git attribute can be used as a default."""
pkg = spack.repo.get('git-top-level')
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
def test_svn_top_level(mock_packages, config):
"""Ensure that top-level svn attribute can be used as a default."""
pkg = spack.repo.get('svn-top-level')
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
assert isinstance(fetcher, spack.fetch_strategy.SvnFetchStrategy)
assert fetcher.url == 'https://example.com/some/svn/repo'
def test_hg_top_level(mock_packages, config):
"""Ensure that top-level hg attribute can be used as a default."""
pkg = spack.repo.get('hg-top-level')
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
assert isinstance(fetcher, spack.fetch_strategy.HgFetchStrategy)
assert fetcher.url == 'https://example.com/some/hg/repo'
def test_no_extrapolate_without_url(mock_packages, config):
"""Verify that we can't extrapolate versions for non-URL packages."""
pkg = spack.repo.get('git-top-level')
with pytest.raises(spack.fetch_strategy.ExtrapolationError):
spack.fetch_strategy.for_package_version(pkg, '1.1')
def test_two_vcs_fetchers_top_level(mock_packages, config):
"""Verify conflict when two VCS strategies are specified together."""
pkg = spack.repo.get('git-url-svn-top-level')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.0')
pkg = spack.repo.get('git-svn-top-level')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.0')
def test_git_url_top_level_url_versions(mock_packages, config):
"""Test URL fetch strategy inference when url is specified with git."""
pkg = spack.repo.get('git-url-top-level')
# leading 62 zeros of sha256 hash
leading_zeros = '0' * 62
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.0')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://example.com/some/tarball-2.0.tar.gz'
assert fetcher.digest == leading_zeros + '20'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.1')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://example.com/some/tarball-2.1.tar.gz'
assert fetcher.digest == leading_zeros + '21'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.2')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://www.example.com/foo2.2.tar.gz'
assert fetcher.digest == leading_zeros + '22'
fetcher = spack.fetch_strategy.for_package_version(pkg, '2.3')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.url == 'https://www.example.com/foo2.3.tar.gz'
assert fetcher.digest == leading_zeros + '23'
def test_git_url_top_level_git_versions(mock_packages, config):
"""Test git fetch strategy inference when url is specified with git."""
pkg = spack.repo.get('git-url-top-level')
fetcher = spack.fetch_strategy.for_package_version(pkg, '3.0')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag == 'v3.0'
assert fetcher.commit is None
assert fetcher.branch is None
fetcher = spack.fetch_strategy.for_package_version(pkg, '3.1')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag == 'v3.1'
assert fetcher.commit == 'abc31'
assert fetcher.branch is None
fetcher = spack.fetch_strategy.for_package_version(pkg, '3.2')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag is None
assert fetcher.commit is None
assert fetcher.branch == 'releases/v3.2'
fetcher = spack.fetch_strategy.for_package_version(pkg, '3.3')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag is None
assert fetcher.commit == 'abc33'
assert fetcher.branch == 'releases/v3.3'
fetcher = spack.fetch_strategy.for_package_version(pkg, '3.4')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag is None
assert fetcher.commit == 'abc34'
assert fetcher.branch is None
fetcher = spack.fetch_strategy.for_package_version(pkg, 'submodules')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag is None
assert fetcher.commit is None
assert fetcher.branch is None
fetcher = spack.fetch_strategy.for_package_version(pkg, 'develop')
assert isinstance(fetcher, spack.fetch_strategy.GitFetchStrategy)
assert fetcher.url == 'https://example.com/some/git/repo'
assert fetcher.tag is None
assert fetcher.commit is None
assert fetcher.branch == 'develop'
def test_git_url_top_level_conflicts(mock_packages, config):
"""Test git fetch strategy inference when url is specified with git."""
pkg = spack.repo.get('git-url-top-level')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.0')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.1')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.2')
with pytest.raises(spack.fetch_strategy.FetcherConflict):
spack.fetch_strategy.for_package_version(pkg, '1.3')
def test_rpath_args(mutable_database):
"""Test a package's rpath_args property."""
rec = mutable_database.get_record('mpich')
rpath_args = rec.spec.package.rpath_args
assert '-rpath' in rpath_args
assert 'mpich' in rpath_args
def test_bundle_version_checksum(mock_directive_bundle,
clear_directive_functions):
"""Test raising exception on a version checksum with a bundle package."""
with pytest.raises(VersionChecksumError, match="Checksums not allowed"):
version = spack.directives.version('1.0', checksum='1badpkg')
version(mock_directive_bundle)
def test_bundle_patch_directive(mock_directive_bundle,
clear_directive_functions):
"""Test raising exception on a patch directive with a bundle package."""
with pytest.raises(spack.directives.UnsupportedPackageDirective,
match="Patches are not allowed"):
patch = spack.directives.patch('mock/patch.txt')
patch(mock_directive_bundle)
def test_fetch_options(mock_packages, config):
"""Test fetch options inference."""
pkg = spack.repo.get('fetch-options')
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.0')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.digest == '00000000000000000000000000000010'
assert fetcher.extra_options == {'timeout': 42, 'cookie': 'foobar'}
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.1')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.digest == '00000000000000000000000000000011'
assert fetcher.extra_options == {'timeout': 65}
fetcher = spack.fetch_strategy.for_package_version(pkg, '1.2')
assert isinstance(fetcher, spack.fetch_strategy.URLFetchStrategy)
assert fetcher.digest == '00000000000000000000000000000012'
assert fetcher.extra_options == {'cookie': 'baz'}
def test_has_test_method_fails(capsys):
with pytest.raises(SystemExit):
spack.package.has_test_method('printing-package')
captured = capsys.readouterr()[1]
assert 'is not a class' in captured | 0.476336 | 0.297266 |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from StomperGlobals import *
from direct.distributed import ClockDelta
from direct.showbase.PythonUtil import lerp
import math
from otp.level import DistributedEntity
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import NodePath
from otp.level import BasicEntities
from direct.task import Task
from toontown.toonbase import ToontownGlobals
from toontown.coghq import BattleBlocker
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from direct.distributed.ClockDelta import *
from toontown.golf import BuildGeometry
from direct.gui.DirectGui import *
import random
from direct.showbase import RandomNumGen
import GameSprite3D
from math import pi
import math
import random
import cPickle
from toontown.distributed import DelayDelete
from toontown.toon import ToonHeadFrame
from toontown.battle import BattleParticles
from toontown.battle import MovieUtil
import time
from toontown.toonbase import ToontownTimer
class DistributedGolfGreenGame(BattleBlocker.BattleBlocker):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGolfGreenGame')
def __init__(self, cr):
BattleBlocker.BattleBlocker.__init__(self, cr)
self.blankColor = Vec4(1.0, 1.0, 1.0, 1.0)
self.fullColor = Vec4(0.6, 0.6, 0.6, 1.0)
self.neighborColor = Vec4(0.8, 0.8, 0.8, 1.0)
self.outColor = Vec4(0.0, 0.0, 0.0, 0.0)
self.blackColor = Vec4(0.0, 0.0, 0.0, 1.0)
self.acceptErrorDialog = None
self.doneEvent = 'game Done'
self.sprites = []
self.controlSprite = None
self.standbySprite = None
self.setupFlag = 0
self.colorGridFlag = 0
self.boardIndex = None
self.board = None
self.attackPattern = None
self.tooLowFlag = 0
self.toonPoints = (Point3(3.0, 13.0, 0.0),
Point3(6.0, 13.0, 0.0),
Point3(-3.0, 13.0, 0.0),
Point3(-6.0, 13.0, 0.0))
self.joinedToons = []
self.everJoinedToons = []
self.flagNextLevel = 0
self.wildIndex = 8
self.bombIndex = 7
self.sizeMult = 1.4
self.cellSizeX = 1.0 * self.sizeMult
self.cellSizeZ = self.cellSizeX * 0.8
self.radiusBall = 0.5 * self.cellSizeX
self.gridDimX = 9
self.gridDimZ = 15
self.minX = -1.0 * (self.gridDimX + 0.3751) * 0.5 * self.cellSizeX
self.minZ = -self.gridDimZ * 0.1 * self.cellSizeZ
self.newBallX = 0.0
self.newBallZ = self.minZ + 0.1 * self.sizeMult
self.rangeX = (self.gridDimX + 0.5) * self.cellSizeX
self.rangeZ = self.gridDimZ * self.cellSizeZ
self.maxX = self.minX + self.rangeX
self.maxZ = self.minZ + self.rangeZ
self.sizeX = self.rangeX
self.sizeZ = self.rangeZ
self.isActive = 0
self.boardsLeft = 0
self.timeLeft = 0
self.timeStart = None
self.timeTotal = None
self.timerTask = None
self.timerTaskName = 'golfgreengame timer task'
self.giftId = None
self.holdGiftId = None
self.rollTrack = None
self.zGap = 0.092
self.screenSizeX = base.a2dRight - base.a2dLeft
self.screenSizeZ = base.a2dTop - base.a2dBottom
self.XtoZ = self.screenSizeX / (self.screenSizeZ * (1.0 - self.zGap * 1.0))
self.countTimeOld = None
self.countDownRunning = 0
self.timer = None
self.hasEntered = 0
self.trackClosed = 0
self.running = 0
self.finished = 0
self.__toonTracks = {}
return
def disable(self):
self.unload()
self.clearToonTracks()
BattleBlocker.BattleBlocker.disable(self)
def updateSpritePos(self):
if self.spriteNode.isEmpty():
return
self.spriteNode.setZ(-self.spriteNotchPos * self.cellSizeZ)
if self.controlSprite:
if not self.controlSprite.isActive:
pass
self.colorGridFlag = 1
def lerpSpritePos(self):
if self.spriteNode.isEmpty():
return
x = self.spriteNode.getX()
y = self.spriteNode.getY()
self.rollTrack = Sequence(LerpPosInterval(self.spriteNode, 0.5, Point3(x, y, -self.spriteNotchPos * self.cellSizeZ)))
if self.controlSprite:
if not self.controlSprite.isActive:
pass
self.colorGridFlag = 1
self.rollTrack.start()
if self.soundMove:
self.soundMove.play()
messenger.send('wakeup')
def findLowestSprite(self):
lowest = 100
for sprite in self.sprites:
if sprite.gridPosZ:
if sprite.gridPosZ < lowest:
lowest = sprite.gridPosZ
return lowest
def setup(self):
if not self.setupFlag:
self.setupFlag = 1
else:
return
self.updateSpritePos()
self.spriteNode.setY(self.radiusBall)
thing = self.model.find('**/item_board')
self.block = self.model1.find('**/minnieCircle')
self.colorRed = (1, 0, 0, 1)
self.colorBlue = (0, 0, 1, 1)
self.colorGreen = (0, 1, 0, 1)
self.colorGhostRed = (1, 0, 0, 0.5)
self.colorGhostBlue = (0, 0, 1, 0.5)
self.colorGhostGreen = (0, 1, 0, 0.5)
self.colorWhite = (1, 1, 1, 1)
self.colorBlack = (0, 0, 0, 1.0)
self.colorShadow = (0, 0, 0, 0.5)
self.lastTime = None
self.running = 0
self.massCount = 0
self.foundCount = 0
self.controlOffsetX = 0.0
self.controlOffsetZ = 0.0
self.grid = []
for countX in range(0, self.gridDimX):
newRow = []
for countZ in range(self.gridDimZ):
offset = 0
margin = self.cellSizeX * 0.4375
if countZ % 2 == 0:
offset = self.cellSizeX * 0.5
newCell = [None,
countX * self.cellSizeX + self.minX + offset + margin,
countZ * self.cellSizeZ + self.minZ,
countX,
countZ,
None]
groundCircle = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_hole')
groundCircle.reparentTo(self.spriteNode)
if groundCircle == None:
import pdb
pdb.set_trace()
groundCircle.setTransparency(TransparencyAttrib.MAlpha)
groundCircle.setPos(newCell[1], -self.radiusBall, newCell[2])
groundCircle.setScale(1.2)
groundCircle.setR(90)
groundCircle.setH(-90)
newCell[5] = groundCircle
newCell[5].setColorScale(self.blankColor)
newRow.append(newCell)
self.grid.append(newRow)
self.cogSprite = self.addUnSprite(self.block, posX=0.25, posZ=0.5)
self.cogSprite.setColor(self.colorShadow)
self.cogSprite.nodeObj.hide()
self.standbySprite = self.addUnSprite(self.block, posX=0.0, posZ=-3.0)
self.standbySprite.setColor(self.colorShadow)
self.standbySprite.spriteBase.reparentTo(self.frame)
self.standbySprite.spriteBase.setY(self.radiusBall)
self.standbySprite.nodeObj.hide()
self.boardData = [((1, 0, 0),
(4, 0, 1),
(6, 0, 2),
(1, 1, 0)), ((1, 0, 1),
(4, 0, 1),
(6, 0, 1),
(1, 1, 1)), ((1, 0, 2),
(4, 0, 2),
(6, 0, 2),
(1, 1, 2))]
self.attackPatterns = [(0, 1, 2), (0, 0, 1, 1, 2, 2), (0, 1, 0, 2)]
self.winCounter = 0
self.matchList = []
self.newBallTime = 5.0
self.newBallCountUp = 0.0
self.cogX = 0
self.cogZ = 0
self.aimRadian = 0.0
self.ballLoaded = 0.0
self.countTime = 10
self.countDown = self.countTime
return
def printGrid(self):
printout = ' '
for columnIndex in range(self.gridDimX - 1, -1, -1):
if columnIndex < 10:
printout += '%s ' % columnIndex
else:
printout += '%s ' % columnIndex
print printout
for rowIndex in range(self.gridDimZ - 1, -1, -1):
if rowIndex < 10:
printout = 'row %s ' % rowIndex
else:
printout = 'row %s ' % rowIndex
for columnIndex in range(self.gridDimX - 1, -1, -1):
hasSprite = '_'
if self.grid[columnIndex][rowIndex][0]:
hasSprite = 'X'
if rowIndex < 10:
printout += '%s ' % hasSprite
else:
printout += '%s ' % hasSprite
print printout
count = 0
for sprite in self.sprites:
print 'count %s X %s Z %s Color %s' % (count,
sprite.gridPosX,
sprite.gridPosZ,
sprite.colorType)
count += 1
def pickLevelPattern(self):
self.boardIndex = random.choice(range(0, len(self.boardData)))
self.board = self.boardData[self.boardIndex]
self.attackPattern = self.attackPatterns[self.boardIndex]
self.attackCounter = 0
self.spriteNotchPos = 0
for ball in self.board:
newSprite = self.addSprite(self.block, found=1, color=ball[2])
self.placeIntoGrid(newSprite, ball[0], self.gridDimZ - 1 - ball[1])
self.colorGridFlag = 1
self.updateSpritePos()
def load(self):
BattleParticles.loadParticles()
model = loader.loadModel('phase_5.5/models/gui/package_delivery_panel')
model1 = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
self.invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
self.model = model
self.model1 = model1
self.soundFire = base.loadSfx('phase_6/audio/sfx/Golf_Hit_Ball.mp3')
self.soundLand = base.loadSfx('phase_4/audio/sfx/MG_maze_pickup.mp3')
self.soundBurst = base.loadSfx('phase_5/audio/sfx/Toon_bodyfall_synergy.mp3')
self.soundBomb = base.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.mp3')
self.soundLose = base.loadSfx('phase_11/audio/sfx/LB_capacitor_discharge_3.mp3')
self.soundWin = base.loadSfx('phase_4/audio/sfx/MG_pairing_match_bonus_both.mp3')
self.soundDone = base.loadSfx('phase_3/audio/sfx/GUI_create_toon_back.mp3')
self.soundMove = base.loadSfx('phase_3.5/audio/sfx/SA_shred.mp3')
background = model.find('**/bg')
itemBoard = model.find('**/item_board')
self.focusPoint = self.baseNode.attachNewNode('GolfGreenGameFrame')
self.frame2D = DirectFrame(scale=1.1, relief=DGG.FLAT, frameSize=(-0.1,
0.1,
-0.1,
-0.1), frameColor=(0.737, 0.573, 0.345, 0.3))
gui2 = loader.loadModel('phase_3/models/gui/quit_button')
self.quitButton = DirectButton(parent=self.frame2D, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.95, 1.3, -0.69), image_scale=(0.9, 1.0, 1.0), text=TTLocalizer.BustACogExit, text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0, 0, 0, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=TTLocalizer.DGGGquitButton, text_pos=(0, -0.01), command=self.__leaveGame)
self.quitButton.hide()
self.instructions = DirectFrame(parent=self.frame2D, relief=None, image=DGG.getDefaultDialogGeom(), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.2, 1.0, 1.0), text=TTLocalizer.GolfGreenGameDirections, text_font=ToontownGlobals.getSignFont(), text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.06, text_pos=(-0.5, 0.3), pos=(0.0, 0, -0.0))
self.instructions.hide()
imageCogBall = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_ball_cog')
imageCogBall.setHpr(0, 90, 0)
self.instCogBall = DirectFrame(parent=self.instructions, relief=None, image=imageCogBall, image_color=ToontownGlobals.GlobalDialogColor, image_scale=(0.12, 0.12, 0.12), pos=(0.0, 0, -0.2))
buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
cancelImageList = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
self.doneButton = DirectButton(parent=self.instructions, relief=None, image=cancelImageList, command=self.instructions.hide, pos=(0, 0, -0.4))
self.howToButton = DirectButton(parent=self.frame2D, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.95, 1.3, -0.82), image_scale=(0.9, 1.0, 1.0), text=TTLocalizer.BustACogHowto, text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0, 0, 0, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=TTLocalizer.DGGGhowToButton, text_pos=(0, -0.01), command=self.instructions.show)
self.howToButton.hide()
self.timerLabel = DirectLabel(parent=self.frame2D, relief=None, image=gui2.find('**/QuitBtn_UP'), pos=(0.9, 1.3, -0.42), image_scale=(0.5, 1.0, 1.0), text='Timer', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01))
self.timerLabel.hide()
self.headPanel = loader.loadModel('phase_6/models/golf/headPanel')
self.scoreBoard = DirectFrame(scale=1.0, pos=(0.0, 0, 0.9), relief=DGG.FLAT, parent=aspect2d, frameSize=(-0.35,
0.35,
-0.05,
0.05), frameColor=(0.737, 0.573, 0.345, 0.3))
self.scoreLabel = DirectLabel(parent=self.scoreBoard, relief=None, pos=(0, 0, 0), scale=1.0, text='', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0.0, 0.0, 0.0, 1), text_scale=TTLocalizer.DGGGscoreLabel, text_pos=(0, -0.02))
self.scoreBoard.hide()
self.bonusBoard = DirectFrame(parent=self.frame2D, relief=None, image_pos=(0, 0, 0.0), image_scale=(0.4, 1, 0.4), image_color=(1, 1, 1, 1), pos=(0.0, 1.5, 0.67), scale=1.0, text='You gotsa bonus fool!', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0.0, 0.0, 0.0, 1), text_scale=0.055, text_pos=(0, -0.1), textMayChange=1)
self.bonusBoard.hide()
self.backBoard = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_background')
self.backBoard.setCollideMask(BitMask32.allOff())
self.backBoard.reparentTo(self.frame)
self.backBoard.setScale(0.3, 0.2, 0.25)
self.backBoard.setHpr(0, -90, 0)
self.backBoard.setPos(0, -1.5, 8.0)
self.backBoard.hide()
base.bb = self.backBoard
self.aimbase = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_shooter')
self.aimbase.setHpr(90, 0, 90)
self.aimbase.setScale(0.3, 0.3, 0.15)
self.aimbase.reparentTo(self.frame)
self.aimbase.setPos(0.0, 0.0, self.minZ + 0.1)
self.aimer = self.aimbase.attachNewNode('GolfGreenGameBase')
aimer = self.aimbase.find('**/moving*')
aimer.reparentTo(self.aimer)
aimer.setPos(0.0, 0.0, 0.0)
base.gi = aimer
self.aimbase.hide()
self.toonPanels = {}
return
def addToonHeadPanel(self, toon):
tPanels = ToonHeadFrame.ToonHeadFrame(toon, (0.4, 0.4, 0.4, 0.6), self.headPanel)
tPanels.extraData['text_fg'] = (1.0, 1.0, 1.0, 1.0)
tPanels.extraData['text_shadow'] = (0.0, 0.0, 0.0, 1.0)
tPanels.extraData.show()
tPanels.setScale(0.3, 1, 0.7)
tPanels.head.setPos(0, 10, 0.18)
tPanels.head.setScale(0.47, 0.2, 0.2)
tPanels.tag1.setPos(0.3, 10, 0.18)
tPanels.tag1.setScale(0.1283, 0.055, 0.055)
tPanels.tag2.setPos(0, 10, 0.43)
tPanels.tag2.setScale(0.117, 0.05, 0.05)
tPanels.hide()
self.toonPanels[toon.doId] = tPanels
self.arrangeToonHeadPanels()
def removeToonHeadPanel(self, avId):
if self.toonPanels.has_key(avId):
self.toonPanels[avId].destroy()
del self.toonPanels[avId]
self.arrangeToonHeadPanels()
def arrangeToonHeadPanels(self):
toonPanelsStart = 0.0
whichToon = 0
color = 0
tpDiff = -0.45
for panelKey in self.toonPanels:
panel = self.toonPanels[panelKey]
if self.isActive:
panel.show()
else:
panel.hide()
if whichToon <= 1:
panel.setPos(-1, 0, toonPanelsStart + whichToon * tpDiff)
else:
panel.setPos(1, 0, toonPanelsStart + (whichToon - 2) * tpDiff)
whichToon += 1
def unload(self):
self.cleanupTimer()
for panelKey in self.toonPanels:
self.toonPanels[panelKey].destroy()
self.headPanel.remove()
self.toonPanels = None
self.soundFire = None
self.soundLand = None
self.soundBurst = None
self.soundBomb = None
self.soundLose = None
self.soundWin = None
self.soundDone = None
self.soundMove = None
self.scoreBoard.destroy()
self.instructions.destroy()
self.frame2D.destroy()
self.baseNode.removeNode()
del self.baseNode
if self.acceptErrorDialog:
self.acceptErrorDialog.cleanup()
self.acceptErrorDialog = None
self.stopCountDown()
self.__stop()
self.ignoreAll()
return
def show(self):
self.frame.show()
def hide(self):
self.frame.hide()
def __handleExit(self):
self.__acceptExit()
def __startGame(self):
if not self.setupFlag:
self.setup()
self.quitButton.show()
self.howToButton.show()
self.backBoard.show()
self.aimbase.show()
self.squareNode.show()
self.scoreBoard.show()
self.standbySprite.nodeObj.show()
self.groundFlag.hide()
self.isActive = 1
self.__setCamera()
self.spriteNode.show()
base.setCellsAvailable([base.bottomCells[1], base.bottomCells[2], base.bottomCells[3]], 0)
self.setupFlag = 1
def startBoard(self, board, attackPattern):
if self.finished:
return
self.clearGrid()
self.board = board
self.attackPattern = attackPattern
self.attackCounter = 0
self.spriteNotchPos = 0
self.countDown = self.countTime
self.tooLowFlag = 0
for ball in self.board:
newSprite = self.addSprite(self.block, found=1, color=ball[2])
self.placeIntoGrid(newSprite, ball[0], self.gridDimZ - 1 - ball[1])
self.colorGridFlag = 1
self.tooLowFlag = 0
self.startCountDown()
self.updateSpritePos()
self.killSprite(self.controlSprite)
self.accept('mouse1', self.__handleMouseClick)
self.__run()
def startCountDown(self):
if self.countDownRunning == 0:
taskMgr.add(self.doCountDown, 'GolfGreenGame countdown')
self.countDownRunning = 1
def stopCountDown(self):
taskMgr.remove('GolfGreenGame countdown')
self.countDownRunning = 0
self.countTimeOld = None
return
def doCountDown(self, task):
currentTime = globalClock.getFrameTime()
if self.countTimeOld == None:
self.countTimeOld = currentTime
if currentTime - self.countTimeOld < 1.0:
return task.cont
else:
self.countTimeOld = currentTime
self.countDown -= 1
if self.countDown in [3, 2, 1]:
for sprite in self.sprites:
sprite.warningBump()
elif self.countDown == 0:
self.countDown = self.countTime
self.spriteNotchPos += 1
self.lerpSpritePos()
self.checkForTooLow()
self.timerLabel['text'] = '%s' % self.countDown
return task.cont
return
def checkForTooLow(self):
low = self.findLowestSprite()
if low <= self.spriteNotchPos:
self.doFail()
def doFail(self):
self.tooLowFlag = 1
taskMgr.doMethodLater(1.0, self.failBoard, 'finishing Failure')
for sprite in self.sprites:
sprite.setColorType(4)
self.__stop()
self.ignore('mouse1')
def failBoard(self, task = None):
self.__finishBoard(0)
def __handleWin(self):
self.__handleExit()
def __finishBoard(self, success = 1):
if self.rollTrack:
self.rollTrack.finish()
self.countDown = self.countTime
if success:
if self.soundWin:
self.soundWin.play()
elif self.soundLose:
self.soundLose.play()
self.giftId = None
self.attackPattern = None
self.stopCountDown()
self.clearGrid()
self.spriteNotchPos = 0
self.updateSpritePos()
self.__stop()
self.ignore('mouse1')
if not self.tooLowFlag or 1:
self.sendUpdate('requestBoard', [success])
return
def __acceptExit(self, buttonValue = None):
import pdb
pdb.set_trace()
if hasattr(self, 'frame'):
self.hide()
self.unload()
messenger.send(self.doneEvent)
camera.reparentTo(base.localAvatar)
base.localAvatar.startUpdateSmartCamera()
def __removeGame(self):
self.spriteNode.remove()
self.setupFlag = 0
    def __leaveGame(self):
        """Fully withdraw the local toon from the game.

        Stops all tasks and input, restores the camera and walk state, deletes
        every sprite, hides the game UI, restores the chat cells and tells the
        AI we left.
        """
        taskMgr.remove('GolfGreenGameTask')
        self.stopCountDown()
        taskMgr.remove(self.timerTaskName)
        self.ignore('mouse1')
        camera.reparentTo(base.localAvatar)
        base.localAvatar.startUpdateSmartCamera()
        base.cr.playGame.getPlace().fsm.request('walk')
        for sprite in self.sprites:
            sprite.delete()
        self.sprites = []
        self.spriteNode.hide()
        self.controlSprite = None
        self.running = 0
        self.timerLabel.hide()
        self.quitButton.hide()
        self.howToButton.hide()
        self.backBoard.hide()
        self.aimbase.hide()
        self.squareNode.hide()
        self.groundFlag.show()
        self.instructions.hide()
        self.isActive = 0
        if self.standbySprite:
            self.standbySprite.nodeObj.hide()
        # Re-enable the bottom chat cells that the game UI was covering.
        base.setCellsAvailable([base.bottomCells[1], base.bottomCells[2], base.bottomCells[3]], 1)
        self.sendUpdate('leaveGame', [])
        return
def findGrid(self, x, z, force = 0):
currentClosest = None
currentDist = 10000000
for countX in range(self.gridDimX):
for countZ in range(self.gridDimZ):
testDist = self.testPointDistanceSquare(x, z, self.grid[countX][countZ][1], self.grid[countX][countZ][2])
if self.grid[countX][countZ][0] == None and testDist < currentDist and (force or self.hasNeighbor(countX, countZ) != None):
currentClosest = self.grid[countX][countZ]
self.closestX = countX
self.closestZ = countZ
currentDist = testDist
return currentClosest
def hasNeighbor(self, cellX, cellZ):
gotNeighbor = None
if cellZ % 2 == 0:
if self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = cellZ
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = cellZ
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = cellZ + 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ + 1)):
gotNeighbor = cellZ + 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = cellZ - 1
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ - 1)):
gotNeighbor = cellZ - 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ)):
gotNeighbor = cellZ
elif self.testGridfull(self.getValidGrid(cellX + 1, cellZ)):
gotNeighbor = cellZ
elif self.testGridfull(self.getValidGrid(cellX, cellZ + 1)):
gotNeighbor = cellZ + 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ + 1)):
gotNeighbor = cellZ + 1
elif self.testGridfull(self.getValidGrid(cellX, cellZ - 1)):
gotNeighbor = cellZ - 1
elif self.testGridfull(self.getValidGrid(cellX - 1, cellZ - 1)):
gotNeighbor = cellZ - 1
return gotNeighbor
def clearFloaters(self):
self.grounded = []
self.unknown = []
groundZ = self.gridDimZ - 1
for indexX in range(0, self.gridDimX):
gridCell = self.grid[indexX][groundZ]
if gridCell[0]:
self.grounded.append((indexX, groundZ))
for column in self.grid:
for cell in column:
if cell[0] != None:
cellData = (cell[3], cell[4])
if cellData not in self.grounded:
self.unknown.append(cellData)
lastUnknownCount = 0
while len(self.unknown) != lastUnknownCount:
lastUnknownCount = len(self.unknown)
for cell in self.unknown:
if self.hasGroundedNeighbor(cell[0], cell[1]):
self.unknown.remove(cell)
self.grounded.append(cell)
for entry in self.unknown:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
self.killSprite(sprite)
return
def explodeBombs(self):
didBomb = 0
for column in self.grid:
for cell in column:
if cell[0] != None:
if cell[0].colorType == self.bombIndex:
self.killSprite(cell[0])
didBomb += 1
if didBomb:
self.soundBomb.play()
return
def hasGroundedNeighbor(self, cellX, cellZ):
gotNeighbor = None
if cellZ % 2 == 0:
if (cellX - 1, cellZ) in self.grounded:
gotNeighbor = cellZ
elif (cellX + 1, cellZ) in self.grounded:
gotNeighbor = cellZ
elif (cellX, cellZ + 1) in self.grounded:
gotNeighbor = cellZ + 1
elif (cellX + 1, cellZ + 1) in self.grounded:
gotNeighbor = cellZ + 1
elif (cellX, cellZ - 1) in self.grounded:
gotNeighbor = cellZ - 1
elif (cellX + 1, cellZ - 1) in self.grounded:
gotNeighbor = cellZ - 1
elif (cellX - 1, cellZ) in self.grounded:
gotNeighbor = cellZ
elif (cellX + 1, cellZ) in self.grounded:
gotNeighbor = cellZ
elif (cellX, cellZ + 1) in self.grounded:
gotNeighbor = cellZ + 1
elif (cellX - 1, cellZ + 1) in self.grounded:
gotNeighbor = cellZ + 1
elif (cellX, cellZ - 1) in self.grounded:
gotNeighbor = cellZ - 1
elif (cellX - 1, cellZ - 1) in self.grounded:
gotNeighbor = cellZ - 1
return gotNeighbor
def clearMatchList(self, typeClear = 0):
self.soundBurst.play()
for entry in self.matchList:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
if typeClear == self.wildIndex:
self.questionSprite(sprite)
elif typeClear == 0:
pass
self.killSprite(sprite)
def shakeList(self, neighbors):
for entry in neighbors:
gridEntry = self.grid[entry[0]][entry[1]]
sprite = gridEntry[0]
self.shakeSprite(sprite)
    def createMatchList(self, x, z):
        """Rebuild self.matchList with every connected same-color sprite
        starting from grid cell (x, z)."""
        self.matchList = []
        self.fillMatchList(x, z)
def matchWild(self, x, z, color):
spriteType = self.getColorType(x, z)
if not self.getBreakable(x, z):
return 0
elif spriteType != -1 and spriteType == self.wildIndex:
return 1
elif spriteType != -1 and color == self.wildIndex:
return 1
else:
return 0
def bombNeighbors(self, cellX, cellZ):
self.soundBomb.play()
self.matchList = []
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) != -1:
self.addToMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) != -1:
self.addToMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) != -1:
self.addToMatchList(cellX, cellZ + 1)
if self.getColorType(cellX + 1, cellZ + 1) != -1:
self.addToMatchList(cellX + 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) != -1:
self.addToMatchList(cellX, cellZ - 1)
if self.getColorType(cellX + 1, cellZ - 1) != -1:
self.addToMatchList(cellX + 1, cellZ - 1)
else:
if self.getColorType(cellX - 1, cellZ) != -1:
self.addToMatchList(cellX - 1, cellZ)
if self.getColorType(cellX + 1, cellZ) != -1:
self.addToMatchList(cellX + 1, cellZ)
if self.getColorType(cellX, cellZ + 1) != -1:
self.addToMatchList(cellX, cellZ + 1)
if self.getColorType(cellX - 1, cellZ + 1) != -1:
self.addToMatchList(cellX - 1, cellZ + 1)
if self.getColorType(cellX, cellZ - 1) != -1:
self.addToMatchList(cellX, cellZ - 1)
if self.getColorType(cellX - 1, cellZ - 1) != -1:
self.addToMatchList(cellX - 1, cellZ - 1)
    def addToMatchList(self, posX, posZ):
        """Append (posX, posZ) to the match list if that cell holds a
        breakable sprite (getBreakable returns -1/0 otherwise)."""
        if self.getBreakable(posX, posZ) > 0:
            self.matchList.append((posX, posZ))
def getNeighbors(self, cellX, cellZ):
neighborList = []
if cellZ % 2 == 0:
if self.getColorType(cellX - 1, cellZ) != -1:
neighborList.append((cellX - 1, cellZ))
if self.getColorType(cellX + 1, cellZ) != -1:
neighborList.append((cellX + 1, cellZ))
if self.getColorType(cellX, cellZ + 1) != -1:
neighborList.append((cellX, cellZ + 1))
if self.getColorType(cellX + 1, cellZ + 1) != -1:
neighborList.append((cellX + 1, cellZ + 1))
if self.getColorType(cellX, cellZ - 1) != -1:
neighborList.append((cellX, cellZ - 1))
if self.getColorType(cellX + 1, cellZ - 1) != -1:
neighborList.append((cellX + 1, cellZ - 1))
else:
if self.getColorType(cellX - 1, cellZ) != -1:
neighborList.append((cellX - 1, cellZ))
if self.getColorType(cellX + 1, cellZ) != -1:
neighborList.append((cellX + 1, cellZ))
if self.getColorType(cellX, cellZ + 1) != -1:
neighborList.append((cellX, cellZ + 1))
if self.getColorType(cellX - 1, cellZ + 1) != -1:
neighborList.append((cellX - 1, cellZ + 1))
if self.getColorType(cellX, cellZ - 1) != -1:
neighborList.append((cellX, cellZ - 1))
if self.getColorType(cellX - 1, cellZ - 1) != -1:
neighborList.append((cellX - 1, cellZ - 1))
return neighborList
    def fillMatchList(self, cellX, cellZ):
        """Recursive flood fill: add (cellX, cellZ) to self.matchList and
        recurse into every adjacent cell of the same color (or matching via
        wild). Color 4 cells are added but never expanded.
        """
        if (cellX, cellZ) in self.matchList:
            # Already visited -- recursion terminator.
            return
        self.matchList.append((cellX, cellZ))
        colorType = self.grid[cellX][cellZ][0].colorType
        if colorType == 4:
            return
        # Even rows are shifted half a cell right, so their diagonal
        # neighbours sit at +1 in x; odd rows use -1.
        if cellZ % 2 == 0:
            if self.getColorType(cellX - 1, cellZ) == colorType or self.matchWild(cellX - 1, cellZ, colorType):
                self.fillMatchList(cellX - 1, cellZ)
            if self.getColorType(cellX + 1, cellZ) == colorType or self.matchWild(cellX + 1, cellZ, colorType):
                self.fillMatchList(cellX + 1, cellZ)
            if self.getColorType(cellX, cellZ + 1) == colorType or self.matchWild(cellX, cellZ + 1, colorType):
                self.fillMatchList(cellX, cellZ + 1)
            if self.getColorType(cellX + 1, cellZ + 1) == colorType or self.matchWild(cellX + 1, cellZ + 1, colorType):
                self.fillMatchList(cellX + 1, cellZ + 1)
            if self.getColorType(cellX, cellZ - 1) == colorType or self.matchWild(cellX, cellZ - 1, colorType):
                self.fillMatchList(cellX, cellZ - 1)
            if self.getColorType(cellX + 1, cellZ - 1) == colorType or self.matchWild(cellX + 1, cellZ - 1, colorType):
                self.fillMatchList(cellX + 1, cellZ - 1)
        else:
            if self.getColorType(cellX - 1, cellZ) == colorType or self.matchWild(cellX - 1, cellZ, colorType):
                self.fillMatchList(cellX - 1, cellZ)
            if self.getColorType(cellX + 1, cellZ) == colorType or self.matchWild(cellX + 1, cellZ, colorType):
                self.fillMatchList(cellX + 1, cellZ)
            if self.getColorType(cellX, cellZ + 1) == colorType or self.matchWild(cellX, cellZ + 1, colorType):
                self.fillMatchList(cellX, cellZ + 1)
            if self.getColorType(cellX - 1, cellZ + 1) == colorType or self.matchWild(cellX - 1, cellZ + 1, colorType):
                self.fillMatchList(cellX - 1, cellZ + 1)
            if self.getColorType(cellX, cellZ - 1) == colorType or self.matchWild(cellX, cellZ - 1, colorType):
                self.fillMatchList(cellX, cellZ - 1)
            if self.getColorType(cellX - 1, cellZ - 1) == colorType or self.matchWild(cellX - 1, cellZ - 1, colorType):
                self.fillMatchList(cellX - 1, cellZ - 1)
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
return
def getValidGrid(self, x, z):
if x < 0 or x >= self.gridDimX:
return None
elif z < 0 or z >= self.gridDimZ:
return None
else:
return self.grid[x][z]
return None
def getColorType(self, x, z):
if x < 0 or x >= self.gridDimX:
return -1
elif z < 0 or z >= self.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][z][0].colorType
return
def getBreakable(self, x, z):
if x < 0 or x >= self.gridDimX:
return -1
elif z < 0 or z >= self.gridDimZ:
return -1
elif self.grid[x][z][0] == None:
return -1
else:
return self.grid[x][z][0].breakable
return
    def findGridCog(self):
        """Recompute the center of gravity (mean world position of every
        occupied cell) and move the cog marker sprite there."""
        self.cogX = 0
        self.cogZ = 0
        self.massCount = 0
        for row in self.grid:
            for cell in row:
                if cell[0] != None:
                    # cell[1]/cell[2] are the cell's world x/z coordinates.
                    self.cogX += cell[1]
                    self.cogZ += cell[2]
                    self.massCount += 1
        if self.massCount > 0:
            # NOTE(review): Python 2 division -- cell coordinates are floats,
            # so this is float division; re-check if ever ported to Python 3.
            self.cogX = self.cogX / self.massCount
            self.cogZ = self.cogZ / self.massCount
            self.cogSprite.setX(self.cogX)
            self.cogSprite.setZ(self.cogZ)
        return
    def doOnClearGrid(self):
        """A board was cleared: count the win, reset the grid, flag the next
        level, and exit the game entirely after the 5th win."""
        self.winCounter += 1
        self.clearGrid()
        self.flagNextLevel = 1
        if self.winCounter > 4:
            self.__handleWin()
    def clearGrid(self):
        """Kill every sprite in the grid (and the control sprite) and reset
        each cell's highlight ring to the blank color."""
        for row in self.grid:
            for cell in row:
                if cell[0] != None:
                    self.killSprite(cell[0])
                # cell[5] is the ground-ring model under the cell.
                cell[5].setColorScale(self.blankColor)
        self.killSprite(self.controlSprite)
        return
    def killSprite(self, sprite):
        """Remove a sprite from the game entirely.

        Harvests its gift id, updates the foundation count, detaches it from
        the grid and the live sprite list, plays its death effect and deletes
        it. Safe to call with None. Sets self.hasChanged so the next __run
        pass re-evaluates the board.
        """
        if sprite == None:
            return
        if sprite.giftId != None:
            # Carry the gift over so the next ball can deliver it.
            self.giftId = sprite.giftId
        if sprite.foundation:
            self.foundCount -= 1
        if self.controlSprite == sprite:
            self.controlSprite = None
        if sprite in self.sprites:
            self.sprites.remove(sprite)
        if sprite.gridPosX != None:
            # Vacate the grid cell and blank its highlight ring.
            self.grid[sprite.gridPosX][sprite.gridPosZ][0] = None
            self.grid[sprite.gridPosX][sprite.gridPosZ][5].setColorScale(self.blankColor)
        sprite.deathEffect()
        sprite.delete()
        self.hasChanged = 1
        return
def shakeSprite(self, sprite):
if sprite == None:
return
sprite.shake()
return
    def questionSprite(self, sprite):
        """Spawn a new (color 1) sprite at the given sprite's position and
        play the wild-match effect on it."""
        newSprite = self.addSprite(self.block, found=0, color=1)
        newSprite.setX(sprite.getX())
        newSprite.setZ(sprite.getZ())
        newSprite.wildEffect()
    def colorGrid(self):
        """Repaint every cell's highlight ring to reflect its state:
        occupied (black for color 3, full otherwise), below the notch line
        (out), adjacent to an occupied cell (neighbor), or blank."""
        for row in self.grid:
            for cell in row:
                if cell[0] != None:
                    if cell[0].colorType == 3:
                        cell[5].setColorScale(self.blackColor)
                    else:
                        cell[5].setColorScale(self.fullColor)
                elif cell[4] <= self.spriteNotchPos:
                    # Row has scrolled past the notch line -- dead zone.
                    cell[5].setColorScale(self.outColor)
                elif self.hasNeighbor(cell[3], cell[4]):
                    cell[5].setColorScale(self.neighborColor)
                else:
                    cell[5].setColorScale(self.blankColor)
        return
def findPos(self, x, z):
return (self.grid[x][z][1], self.grid[x][z][2])
    def placeIntoGrid(self, sprite, x, z):
        """Anchor a flying sprite into grid cell (x, z).

        If the cell is taken, recurses diagonally (x + 1, z - 1) to find a
        free cell. On success the sprite is deactivated, snapped to the
        cell's world position, and the board is re-evaluated.
        """
        if self.grid[x][z][0] == None:
            self.grid[x][z][0] = sprite
            sprite.gridPosX = x
            sprite.gridPosZ = z
            sprite.setActive(0)
            newX, newZ = self.findPos(x, z)
            sprite.setX(newX)
            sprite.setZ(newZ)
            if sprite == self.controlSprite:
                # The player's ball has landed; __run will issue a new one.
                self.controlSprite = None
            self.colorGridFlag = 1
            self.hasChanged = 1
            self.findGridCog()
            self.checkForTooLow()
        else:
            # NOTE(review): unbounded recursion -- walks diagonally until a
            # free cell is found; presumably the grid always has one, but an
            # IndexError is possible if it runs off the edge. Confirm.
            self.placeIntoGrid(sprite, x + 1, z - 1)
        return
def stickInGrid(self, sprite, force = 0):
if sprite.isActive:
gridCell = self.findGrid(sprite.getX(), sprite.getZ(), force)
if gridCell:
colorType = sprite.colorType
sprite.setActive(0)
self.soundLand.play()
self.placeIntoGrid(sprite, gridCell[3], gridCell[4])
if colorType == self.bombIndex:
kapow = MovieUtil.createKapowExplosionTrack(render, sprite.nodeObj.getPos(render))
kapow.start()
self.bombNeighbors(self.closestX, self.closestZ)
allNeighbors = []
for entry in self.matchList:
neighbors = self.getNeighbors(entry[0], entry[1])
for neighbor in neighbors:
if neighbor not in allNeighbors and neighbor not in self.matchList:
allNeighbors.append(neighbor)
self.shakeList(allNeighbors)
self.clearMatchList()
else:
self.createMatchList(self.closestX, self.closestZ)
if len(self.matchList) >= 3:
clearType = 0
self.clearMatchList(colorType)
else:
neighbors = self.getNeighbors(self.closestX, self.closestZ)
self.shakeList(neighbors)
    def addSprite(self, image, size = 3.0, posX = 0, posZ = 0, found = 0, color = None):
        """Create a new GameSprite at (posX, posZ) and register it.

        NOTE(review): `image` is accepted but never used here, and `size` is
        immediately overwritten from self.radiusBall -- both kept for caller
        compatibility. When color is None a random color in 0..2 is chosen.
        """
        spriteBase = self.spriteNode.attachNewNode('sprite base')
        size = self.radiusBall * 2.0
        facing = 1
        if color == None:
            colorChoice = random.choice(range(0, 3))
        else:
            colorChoice = color
        newSprite = GameSprite3D.GameSprite(spriteBase, size, colorChoice, found, facing)
        newSprite.setX(posX)
        newSprite.setZ(posZ)
        self.sprites.append(newSprite)
        if found:
            self.foundCount += 1
        return newSprite
    def addControlSprite(self, x = 0.0, z = 0.0, color = None):
        """Create the player's launchable ball, parent it to the game frame
        at the launcher position, and record it as self.controlSprite."""
        newSprite = self.addSprite(self.block, posX=x, posZ=z, color=color, found=1)
        newSprite.spriteBase.reparentTo(self.frame)
        newSprite.spriteBase.setPos(0.0, 0.7, -1.54)
        self.controlSprite = newSprite
    def addUnSprite(self, image, size = 3.0, posX = 0, posZ = 0):
        """Create a GameSprite that is NOT registered in self.sprites (used
        for markers like the cog and standby sprites).

        NOTE(review): as in addSprite, `image` is unused and `size` is
        overwritten from self.radiusBall; kept for caller compatibility.
        """
        size = self.radiusBall * 2.0
        spriteBase = self.spriteNode.attachNewNode('sprite base')
        newSprite = GameSprite3D.GameSprite(spriteBase, size)
        newSprite.setX(posX)
        newSprite.setZ(posZ)
        return newSprite
    def __handleMouseClick(self):
        """Fire the loaded ball along the current aim direction."""
        if self.ballLoaded == 2:
            pass
        if self.ballLoaded and self.controlSprite:
            # Move the ball from the launcher frame into grid space so it
            # flies relative to the (scrolling) sprite node.
            self.controlSprite.spriteBase.wrtReparentTo(self.spriteNode)
            self.controlSprite.setAccel(14.0, pi * 0.0 - self.aimRadian)
            self.controlSprite.setActive(1)
            self.soundFire.play()
            self.ballLoaded = 0
    def __run(self, cont = 1):
        """Main per-frame game task.

        Updates the aim from the mouse, issues a new control ball from the
        attack pattern when needed, advances every sprite (with wall
        bounces), runs collision tests, and re-evaluates the board whenever
        something changed (floaters, bombs, win check, recoloring).
        """
        if cont and not self.running:
            # First call: self-register as a repeating task.
            taskMgr.add(self.__run, 'GolfGreenGameTask')
            self.running = 1
        if self.lastTime == None:
            self.lastTime = globalClock.getRealTime()
        timeDelta = globalClock.getRealTime() - self.lastTime
        self.lastTime = globalClock.getRealTime()
        self.newBallCountUp += timeDelta
        if base.mouseWatcherNode.hasMouse():
            # Convert mouse position to an aim angle, clamped to keep the
            # shot pointing up-screen.
            inputX = base.mouseWatcherNode.getMouseX()
            inputZ = base.mouseWatcherNode.getMouseY()
            outputZ = inputZ + self.screenSizeZ * (0.5 - self.zGap)
            if outputZ <= 0.0:
                outputZ = 0.0001
            if inputX > 0.0:
                self.aimRadian = -1.0 * pi + math.atan(outputZ / (inputX * self.XtoZ))
            elif inputX < 0.0:
                self.aimRadian = math.atan(outputZ / (inputX * self.XtoZ))
            else:
                self.aimRadian = pi * -0.5
            margin = 0.2
            if self.aimRadian >= -margin:
                self.aimRadian = -margin
            elif self.aimRadian <= margin - pi:
                self.aimRadian = margin - pi
            degrees = self.__toDegrees(self.aimRadian)
            self.aimer.setH(degrees)
        self.wallMaxX = self.maxX - self.radiusBall
        self.wallMinX = self.minX + self.radiusBall
        self.wallMaxZ = self.maxZ - self.radiusBall
        self.wallMinZ = self.minZ + self.radiusBall
        if self.controlSprite and self.controlSprite.nodeObj.isEmpty():
            self.controlSprite = None
        if self.giftId:
            # A gift ball is pending -- load it into the standby slot.
            self.ballLoaded = 2
            self.updateSpritePos()
            self.standbySprite.holdType = self.giftId
            self.standbySprite.setBallType(self.giftId, 1)
            self.standbySprite.face()
            self.giftId = None
        while self.controlSprite == None and self.attackPattern:
            # Issue the next ball from the standby slot and preview the one
            # after it from the attack pattern.
            if self.attackCounter > len(self.attackPattern) - 1:
                self.attackCounter = 0
            print 'Pattern %s Place %s Type %s' % (self.attackPattern, self.attackCounter, self.attackPattern[self.attackCounter])
            if self.standbySprite.holdType != None:
                color = self.standbySprite.holdType
            sprite = self.addControlSprite(self.newBallX, self.newBallZ + self.spriteNotchPos * self.cellSizeZ, color)
            self.ballLoaded = 1
            self.updateSpritePos()
            newColor = self.predictAttackPattern(0)
            self.standbySprite.holdType = newColor
            self.standbySprite.setBallType(newColor, 1)
            self.standbySprite.face()
            self.attackCounter += 1
            self.standbySprite.runColor()
        for sprite in self.sprites:
            if sprite.deleteFlag:
                self.sprites.remove(sprite)
            else:
                sprite.run(timeDelta)
                # Bounce off the side walls; stick when crossing the top.
                if sprite.getX() > self.wallMaxX:
                    sprite.setX(self.wallMaxX)
                    sprite.reflectX()
                if sprite.getX() < self.wallMinX:
                    sprite.setX(self.wallMinX)
                    sprite.reflectX()
                if sprite.getZ() > self.wallMaxZ:
                    self.stickInGrid(sprite, 1)
                if sprite.getZ() < self.wallMinZ:
                    pass
        self.__colTest()
        if self.hasChanged and self.running:
            # Board changed this frame: drop floaters, pop bombs, and test
            # whether any white (color 3) target sprites remain.
            self.clearFloaters()
            self.explodeBombs()
            self.findGridCog()
            spriteCount = 0
            whiteCount = 0
            for row in self.grid:
                for cell in row:
                    if cell[0] != None:
                        self.cogX += cell[1]
                        self.cogZ += cell[2]
                        spriteCount += 1
                        if cell[0].colorType == 3:
                            whiteCount += 1
            if whiteCount == 0:
                self.__finishBoard()
                self.flagNextLevel = 0
                self.killSprite(self.controlSprite)
                self.standbySprite.holdType = None
            self.colorGridFlag = 1
            self.hasChanged = 0
        if self.colorGridFlag:
            self.colorGridFlag = 0
            self.colorGrid()
        return Task.cont
def predictAttackPattern(self, numSteps = 1):
predict = self.attackCounter + numSteps
predict = predict % len(self.attackPattern)
return self.attackPattern[predict]
    def __stop(self):
        """Stop the main per-frame run task."""
        taskMgr.remove('GolfGreenGameTask')
        self.running = 0
    def __testWin(self):
        """Declare a win if the grid is completely empty."""
        gridCount = 0
        for column in self.grid:
            for cell in column:
                if cell[0]:
                    gridCount += 1
        if gridCount == 0:
            self.__handleWin()
    def __toRadians(self, angle):
        """Convert an angle from degrees to radians."""
        return angle * 2.0 * math.pi / 360.0
    def __toDegrees(self, angle):
        """Convert an angle from radians to degrees."""
        return angle * 360.0 / (2.0 * math.pi)
    def __colTest(self):
        """Pairwise collision test over all sprites.

        Collides a pair only when at least one is active (flying), both can
        collide, not both are active, and their 2-D distance is under
        radiusBall * 1.65. `tick` cycles 0..5 but currently gates nothing.
        """
        if not hasattr(self, 'tick'):
            self.tick = 0
        self.tick += 1
        if self.tick > 5:
            self.tick = 0
        sizeSprites = len(self.sprites)
        for movingSpriteIndex in range(len(self.sprites)):
            # Start the inner scan at the outer index so each pair is
            # examined once.
            for testSpriteIndex in range(movingSpriteIndex, len(self.sprites)):
                movingSprite = self.getSprite(movingSpriteIndex)
                testSprite = self.getSprite(testSpriteIndex)
                if testSprite and movingSprite:
                    if movingSpriteIndex != testSpriteIndex and (movingSprite.isActive or testSprite.isActive):
                        if self.testDistance(movingSprite.spriteBase, testSprite.spriteBase) < self.radiusBall * 1.65:
                            if not (movingSprite.isActive and testSprite.isActive):
                                if movingSprite.canCollide and testSprite.canCollide:
                                    self.__collide(movingSprite, testSprite)
        if self.tick == 5:
            pass
def getSprite(self, spriteIndex):
if spriteIndex >= len(self.sprites) or self.sprites[spriteIndex].markedForDeath:
return None
else:
return self.sprites[spriteIndex]
return None
def testDistance(self, nodeA, nodeB):
if nodeA.isEmpty() or nodeB.isEmpty():
return 10000
distX = nodeA.getX() - nodeB.getX()
distZ = nodeA.getZ() - nodeB.getZ()
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
return dist
def testPointDistance(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
if dist == 0:
dist = 1e-10
return dist
def testPointDistanceSquare(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
if distC == 0:
distC = 1e-10
return distC
def angleTwoSprites(self, sprite1, sprite2):
x1 = sprite1.getX()
z1 = sprite1.getZ()
x2 = sprite2.getX()
z2 = sprite2.getZ()
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
def angleTwoPoints(self, x1, z1, x2, z2):
x = x2 - x1
z = z2 - z1
angle = math.atan2(-x, z)
return angle + pi * 0.5
    def __collide(self, move, test):
        """Resolve a collision between two sprites: zero both velocities,
        play their collide reactions, then stick both into the grid (only
        the active one will actually anchor -- stickInGrid checks isActive)."""
        test.velX = 0
        test.velZ = 0
        move.velX = 0
        move.velZ = 0
        test.collide()
        move.collide()
        self.stickInGrid(move)
        self.stickInGrid(test)
    def generateInit(self):
        """Distributed-object hook: delegate to the BattleBlocker base."""
        self.notify.debug('generateInit')
        BattleBlocker.BattleBlocker.generateInit(self)
    def generate(self):
        """Distributed-object hook.

        NOTE(review): calls BasicEntities.DistributedNodePathEntity.generate
        directly rather than the BattleBlocker base -- presumably skipping
        BattleBlocker's own generate on purpose; confirm.
        """
        self.notify.debug('generate')
        BasicEntities.DistributedNodePathEntity.generate(self)
    def announceGenerate(self):
        """Build the static scene for the game: base/frame/sprite nodes, the
        green ground circle, the flag, and the translucent play-field square,
        then call load() for the rest of the assets."""
        self.notify.debug('announceGenerate')
        BattleBlocker.BattleBlocker.announceGenerate(self)
        self.baseNode = self.attachNewNode('GolfGreenGameBase')
        self.frame = self.baseNode.attachNewNode('GolfGreenGameFrame')
        self.spriteNode = self.frame.attachNewNode('GolfGreenGameSpriteNode')
        self.frame.setScale(1.0)
        # The board is laid flat: pitch the frame 90 degrees.
        self.frame.setP(90)
        self.spriteNotchPos = 0
        self.frame.setY(10.0)
        self.frame.setZ(2.0)
        self.spriteNode.setY(0.5)
        self.hasChanged = 0
        self.squareNode = self.frame.attachNewNode('GolfGreenGameBase')
        groundCircle = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_golf_green')
        groundCircle.reparentTo(self.baseNode)
        groundCircle.setScale(0.24)
        self.groundFlag = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_golf_flag')
        self.groundFlag.reparentTo(self.baseNode)
        self.groundFlag.setScale(0.5)
        self.groundFlag.setH(-45)
        self.groundFlag.setPos(3.0, 4.0, 0.0)
        groundSquare = BuildGeometry.addSquareGeom(self.squareNode, self.sizeX, self.sizeZ, color=Vec4(0.4, 0.4, 0.4, 0.5))
        self.centerZ = (self.minZ + self.maxZ) * 0.5
        self.squareNode.setZ((self.minZ + self.maxZ) * 0.5)
        self.squareNode.setP(-90)
        # Ground decals render in a dedicated bin without depth writes so
        # they layer cleanly under the sprites.
        groundCircle.setDepthWrite(False)
        groundCircle.setDepthTest(True)
        groundCircle.setBin('ground', 1)
        groundSquare[0].setDepthWrite(False)
        groundSquare[0].setDepthTest(False)
        groundSquare[0].setBin('ground', 2)
        self.squareNode.hide()
        self.load()
    def initCollisionGeom(self):
        """Create the intangible trigger sphere that invites nearby toons to
        join the game when they walk into it."""
        self.actSphere = CollisionSphere(0, 0, 0, 11.5)
        self.actSphereNode = CollisionNode('gridgame-%s-%s' % (self.level.getLevelId(), self.entId))
        self.actSphereNode.addSolid(self.actSphere)
        self.actSphereNodePath = self.attachNewNode(self.actSphereNode)
        self.actSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
        # Intangible: toons pass through, we only get the enter event.
        self.actSphere.setTangible(0)
        self.enterEvent = 'enter' + self.actSphereNode.getName()
        self.accept(self.enterEvent, self.__handleToonEnter)
    def __handleToonEnter(self, collEntry):
        """Trigger-sphere callback: ask the AI to let the local toon join."""
        self.sendUpdate('requestJoin', [])
    def __setCamera(self):
        """Take over the camera and park it on the game's overhead focus
        point, looking down at the board."""
        camHeight = base.localAvatar.getClampedAvatarHeight()
        heightScaleFactor = camHeight * 0.3333333333
        defLookAt = Point3(0.0, 1.5, camHeight)
        cameraPoint = Point3(0.0, -16.0, 16.0)
        # NOTE(review): stopUpdateSmartCamera is called three times in a row
        # -- presumably defensive against re-entry; confirm whether one call
        # suffices.
        base.localAvatar.stopUpdateSmartCamera()
        base.localAvatar.stopUpdateSmartCamera()
        base.localAvatar.stopUpdateSmartCamera()
        basePos = self.frame.getPos(render)
        modPos = Point3(basePos[0] + 0.0, basePos[1] + 12.0, basePos[2] + 12.0)
        camera.setPos(0, 0, 0)
        camera.setH(0)
        camera.setP(-70)
        camera.reparentTo(self.focusPoint)
        base.camLens.setFov(60, 46.8265)
        self.focusPoint.setPos(0, 12, 27)
        self.focusPoint.setH(180)
def acceptJoin(self, time, timeStamp, avIds):
self.timeStart = timeStamp
timePassed = globalClockDelta.localElapsedTime(self.timeStart)
timeleft = time - timePassed
self.timeTotal = time
if localAvatar.doId in avIds and localAvatar.doId not in self.joinedToons:
self.__startGame()
base.cr.playGame.getPlace().fsm.request('stopped')
self.sendUpdate('requestBoard', [0])
if not self.hasEntered:
self.level.countryClub.showInfoText(TTLocalizer.BustACogInstruction)
self.hasEntered = 1
for avId in self.joinedToons:
if avId not in avIds:
self.joinedToons.remove(avId)
self.removeToonHeadPanel(avId)
toon = base.cr.doId2do.get(avId)
if toon:
toon.startSmooth()
for avId in avIds:
if avId and avId not in self.joinedToons:
if avId not in self.everJoinedToons:
self.everJoinedToons.append(avId)
self.joinedToons.append(avId)
index = self.everJoinedToons.index(avId)
if index > 3:
print 'ERROR! green game has had more than 4 players, we are about to crash\n %s' % self.everJoinedToons
print 'Joining Toon is %s index is %s' % (avId, index)
toon = base.cr.doId2do.get(avId)
selfPos = self.getPos(render)
offset = self.toonPoints[index]
if index > 3:
print 'odd... we should have crashed by now'
standPoint = render.getRelativePoint(self, offset)
if toon:
toon.stopSmooth()
self.addToonHeadPanel(toon)
toon.setAnimState('run', 1.0)
animFunc = Func(toon.setAnimState, 'neutral', 1.0)
track = Sequence(LerpPosInterval(toon, 0.75, standPoint), LerpHprInterval(toon, 0.25, Point3(180, 0, 0)), animFunc, Func(self.clearToonTrack, avId), name=toon.uniqueName('gggEnter'), autoPause=1)
track.delayDelete = DelayDelete.DelayDelete(toon, 'GolfGreenGame.acceptJoin')
self.storeToonTrack(avId, track)
track.start()
    def signalDone(self, success):
        """AI broadcast: the whole game is over -- tear everything down and
        show the success/failure banner."""
        self.finished = 1
        self.soundDone.play()
        self.__leaveGame()
        self.__removeGame()
        self.scoreBoard.hide()
        self.cleanupTimer()
        if success:
            self.level.countryClub.showInfoText(TTLocalizer.BustACogSuccess)
        else:
            self.level.countryClub.showInfoText(TTLocalizer.BustACogFailure)
    def boardCleared(self, avId):
        """AI broadcast: another player cleared a board.

        NOTE(review): fails the local board regardless of which avId cleared
        -- presumably the shared board pool shrank; confirm intent.
        """
        self.doFail()
    def setTimerStart(self, time, timeStamp):
        """AI broadcast: start the on-screen game timer, compensating for
        network latency via the shared clock timestamp. Only creates the
        timer once (self.timer guards re-entry)."""
        if self.timer == None:
            self.timeStart = timeStamp
            timePassed = globalClockDelta.localElapsedTime(self.timeStart)
            timeleft = time - timePassed
            self.timeTotal = time
            self.cleanupTimer()
            self.timer = ToontownTimer.ToontownTimer()
            self.timer.posBelowTopRightCorner()
            self.timer.setTime(timeleft)
            self.timer.countdown(timeleft, self.timerExpired)
        return
def cleanupTimer(self):
if self.timer:
self.timer.stop()
self.timer.destroy()
self.timer = None
return
    def timerExpired(self):
        """Countdown callback: the game timer ran out -- remove the timer."""
        self.cleanupTimer()
    def useTime(self, time = None):
        """Adopt a new remaining-time value and (re)start the once-a-second
        score-label refresh task while the game is active."""
        if time != None:
            self.timeLeft = time
        if self.timerTask != None:
            taskMgr.remove(self.timerTaskName)
        if time != None and time > 0.0 and self.isActive:
            self.timerTask = taskMgr.doMethodLater(1.0, self.gameCountDown, self.timerTaskName)
        self.scoreLabel['text'] = TTLocalizer.GolfGreenGameScoreString % (self.boardsLeft, int(self.timeLeft))
        return
    def gameCountDown(self, task):
        """doMethodLater callback: recompute timeLeft from the shared clock.

        Returns task.done, so it runs once per scheduling by useTime().
        """
        self.timeLeft = self.timeTotal - globalClockDelta.localElapsedTime(self.timeStart)
        return task.done
def scoreData(self, total = 2, closed = 1, scoreList = 'hello world'):
self.boardsLeft = total - closed
for panelIndex in self.toonPanels:
panel = self.toonPanels[panelIndex]
panel.extraData['text'] = TTLocalizer.GolfGreenGamePlayerScore % 0
for entryIndex in range(len(scoreList)):
entry = scoreList[entryIndex]
if self.toonPanels.has_key(entry[0]):
panel = self.toonPanels[entry[0]]
panel.extraData['text'] = TTLocalizer.GolfGreenGamePlayerScore % entry[1]
self.scoreLabel['text'] = TTLocalizer.GolfGreenGameScoreString % self.boardsLeft
    def informGag(self, track, level):
        """AI broadcast: show the bonus-gag banner with the won gag's name
        and inventory icon, auto-hiding after four seconds."""
        self.bonusBoard.show()
        self.bonusBoard['text'] = TTLocalizer.GolfGreenGameBonusGag % TTLocalizer.BattleGlobalAvPropStringsSingular[track][level]
        iconName = ToontownBattleGlobals.AvPropsNew[track][level]
        icon = self.invModel.find('**/%s' % iconName)
        self.bonusBoard['image'] = icon
        self.bonusBoard['image_scale'] = (1.0, 1, 1.0)
        taskMgr.doMethodLater(4.0, self.hideBonusBoard, 'hide bonus')
    def helpOthers(self, avId):
        """AI broadcast: another player earned everyone a fire-ball gift.

        Queues gift id 7 for the local player (unless the helper IS the
        local player) and shows a thank-you banner for four seconds.
        """
        if not avId == localAvatar.doId and self.running:
            self.giftId = 7
        toonName = ''
        toon = base.cr.doId2do[avId]
        if toon:
            toonName = toon.getName()
        self.bonusBoard['text'] = TTLocalizer.GolfGreenGameGotHelp % toonName
        imageBall = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_ball_fire')
        imageBall.setHpr(0, 90, 0)
        self.bonusBoard['image'] = imageBall
        self.bonusBoard['image_scale'] = 0.13
        self.bonusBoard.show()
        taskMgr.doMethodLater(4.0, self.hideBonusBoard, 'hide bonus')
def hideBonusBoard(self, task):
if self.bonusBoard:
if not self.bonusBoard.isEmpty():
self.bonusBoard.hide()
    def storeToonTrack(self, avId, track):
        """Remember a movement interval for avId, first stopping and
        discarding any previous one."""
        self.clearToonTrack(avId)
        self.__toonTracks[avId] = track
    def clearToonTrack(self, avId):
        """Pause and discard the stored movement interval for avId, cleaning
        up its DelayDelete so the toon can be freed."""
        oldTrack = self.__toonTracks.get(avId)
        if oldTrack:
            oldTrack.pause()
        if self.__toonTracks.get(avId):
            DelayDelete.cleanupDelayDeletes(self.__toonTracks[avId])
            del self.__toonTracks[avId]
def clearToonTracks(self):
keyList = []
for key in self.__toonTracks:
keyList.append(key)
for key in keyList:
if self.__toonTracks.has_key(key):
self.clearToonTrack(key) | toontown/coghq/DistributedGolfGreenGame.py | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from StomperGlobals import *
from direct.distributed import ClockDelta
from direct.showbase.PythonUtil import lerp
import math
from otp.level import DistributedEntity
from direct.directnotify import DirectNotifyGlobal
from pandac.PandaModules import NodePath
from otp.level import BasicEntities
from direct.task import Task
from toontown.toonbase import ToontownGlobals
from toontown.coghq import BattleBlocker
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownBattleGlobals
from direct.distributed.ClockDelta import *
from toontown.golf import BuildGeometry
from direct.gui.DirectGui import *
import random
from direct.showbase import RandomNumGen
import GameSprite3D
from math import pi
import math
import random
import cPickle
from toontown.distributed import DelayDelete
from toontown.toon import ToonHeadFrame
from toontown.battle import BattleParticles
from toontown.battle import MovieUtil
import time
from toontown.toonbase import ToontownTimer
class DistributedGolfGreenGame(BattleBlocker.BattleBlocker):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGolfGreenGame')
    def __init__(self, cr):
        """Set up all client-side state for the Bust-A-Cog golf green game.

        Only plain attribute initialization happens here; scene construction
        is deferred to announceGenerate()/setup().
        """
        BattleBlocker.BattleBlocker.__init__(self, cr)
        # Highlight-ring color scales used by colorGrid().
        self.blankColor = Vec4(1.0, 1.0, 1.0, 1.0)
        self.fullColor = Vec4(0.6, 0.6, 0.6, 1.0)
        self.neighborColor = Vec4(0.8, 0.8, 0.8, 1.0)
        self.outColor = Vec4(0.0, 0.0, 0.0, 0.0)
        self.blackColor = Vec4(0.0, 0.0, 0.0, 1.0)
        self.acceptErrorDialog = None
        self.doneEvent = 'game Done'
        # Live sprite bookkeeping.
        self.sprites = []
        self.controlSprite = None
        self.standbySprite = None
        self.setupFlag = 0
        self.colorGridFlag = 0
        self.boardIndex = None
        self.board = None
        self.attackPattern = None
        self.tooLowFlag = 0
        # Stand points around the board for up to four joined toons.
        self.toonPoints = (Point3(3.0, 13.0, 0.0),
         Point3(6.0, 13.0, 0.0),
         Point3(-3.0, 13.0, 0.0),
         Point3(-6.0, 13.0, 0.0))
        self.joinedToons = []
        self.everJoinedToons = []
        self.flagNextLevel = 0
        # Special ball color indices.
        self.wildIndex = 8
        self.bombIndex = 7
        # Grid geometry: cell sizes, dimensions and derived world bounds.
        self.sizeMult = 1.4
        self.cellSizeX = 1.0 * self.sizeMult
        self.cellSizeZ = self.cellSizeX * 0.8
        self.radiusBall = 0.5 * self.cellSizeX
        self.gridDimX = 9
        self.gridDimZ = 15
        self.minX = -1.0 * (self.gridDimX + 0.3751) * 0.5 * self.cellSizeX
        self.minZ = -self.gridDimZ * 0.1 * self.cellSizeZ
        self.newBallX = 0.0
        self.newBallZ = self.minZ + 0.1 * self.sizeMult
        self.rangeX = (self.gridDimX + 0.5) * self.cellSizeX
        self.rangeZ = self.gridDimZ * self.cellSizeZ
        self.maxX = self.minX + self.rangeX
        self.maxZ = self.minZ + self.rangeZ
        self.sizeX = self.rangeX
        self.sizeZ = self.rangeZ
        self.isActive = 0
        # Score / timer state.
        self.boardsLeft = 0
        self.timeLeft = 0
        self.timeStart = None
        self.timeTotal = None
        self.timerTask = None
        self.timerTaskName = 'golfgreengame timer task'
        self.giftId = None
        self.holdGiftId = None
        self.rollTrack = None
        # Screen-space aiming constants.
        self.zGap = 0.092
        self.screenSizeX = base.a2dRight - base.a2dLeft
        self.screenSizeZ = base.a2dTop - base.a2dBottom
        self.XtoZ = self.screenSizeX / (self.screenSizeZ * (1.0 - self.zGap * 1.0))
        self.countTimeOld = None
        self.countDownRunning = 0
        self.timer = None
        self.hasEntered = 0
        self.trackClosed = 0
        self.running = 0
        self.finished = 0
        # avId -> movement interval, managed by store/clearToonTrack(s).
        self.__toonTracks = {}
        return
    def disable(self):
        """Distributed-object hook: unload assets, stop toon intervals and
        chain to the BattleBlocker base."""
        self.unload()
        self.clearToonTracks()
        BattleBlocker.BattleBlocker.disable(self)
    def updateSpritePos(self):
        """Snap the sprite node to the current notch offset (no animation)
        and request a grid recolor."""
        if self.spriteNode.isEmpty():
            return
        self.spriteNode.setZ(-self.spriteNotchPos * self.cellSizeZ)
        # NOTE(review): this conditional is a no-op (body is just `pass`) --
        # likely a remnant of stripped logic; confirm before removing.
        if self.controlSprite:
            if not self.controlSprite.isActive:
                pass
        self.colorGridFlag = 1
    def lerpSpritePos(self):
        """Animate the sprite node down to the current notch offset over half
        a second, playing the move sound and waking the client."""
        if self.spriteNode.isEmpty():
            return
        x = self.spriteNode.getX()
        y = self.spriteNode.getY()
        self.rollTrack = Sequence(LerpPosInterval(self.spriteNode, 0.5, Point3(x, y, -self.spriteNotchPos * self.cellSizeZ)))
        # NOTE(review): no-op conditional, same remnant as updateSpritePos.
        if self.controlSprite:
            if not self.controlSprite.isActive:
                pass
        self.colorGridFlag = 1
        self.rollTrack.start()
        if self.soundMove:
            self.soundMove.play()
        messenger.send('wakeup')
def findLowestSprite(self):
    """Return the smallest non-zero gridPosZ among tracked sprites (100 if none)."""
    lowest = 100
    for candidate in self.sprites:
        row = candidate.gridPosZ
        # A falsy gridPosZ (None or 0) means the sprite is not seated in a counted row.
        if row and row < lowest:
            lowest = row
    return lowest
def setup(self):
    """One-time board construction: grid cells, ground circles, marker sprites,
    level data, and attack patterns. Safe to call repeatedly (guarded by setupFlag).
    """
    if not self.setupFlag:
        self.setupFlag = 1
    else:
        return
    self.updateSpritePos()
    self.spriteNode.setY(self.radiusBall)
    self.block = self.model1.find('**/minnieCircle')
    # Ball / UI color constants (RGBA); "ghost" variants are half transparent.
    self.colorRed = (1, 0, 0, 1)
    self.colorBlue = (0, 0, 1, 1)
    self.colorGreen = (0, 1, 0, 1)
    self.colorGhostRed = (1, 0, 0, 0.5)
    self.colorGhostBlue = (0, 0, 1, 0.5)
    self.colorGhostGreen = (0, 1, 0, 0.5)
    self.colorWhite = (1, 1, 1, 1)
    self.colorBlack = (0, 0, 0, 1.0)
    self.colorShadow = (0, 0, 0, 0.5)
    self.lastTime = None
    self.running = 0
    self.massCount = 0
    self.foundCount = 0
    self.controlOffsetX = 0.0
    self.controlOffsetZ = 0.0
    # Build the staggered grid: each cell is
    # [sprite, worldX, worldZ, gridX, gridZ, groundCircleNode].
    self.grid = []
    for countX in range(0, self.gridDimX):
        newRow = []
        for countZ in range(self.gridDimZ):
            offset = 0
            margin = self.cellSizeX * 0.4375
            if countZ % 2 == 0:
                # Even rows are shifted half a cell to form the hex stagger.
                offset = self.cellSizeX * 0.5
            newCell = [None,
             countX * self.cellSizeX + self.minX + offset + margin,
             countZ * self.cellSizeZ + self.minZ,
             countX,
             countZ,
             None]
            groundCircle = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_hole')
            if groundCircle is None:
                # Replaces a leftover `pdb.set_trace()` debug trap; a missing model
                # is a packaging error and should fail loudly, not hang the client.
                raise RuntimeError('GolfGreenGame: failed to load bust_a_cog_hole model')
            groundCircle.reparentTo(self.spriteNode)
            groundCircle.setTransparency(TransparencyAttrib.MAlpha)
            groundCircle.setPos(newCell[1], -self.radiusBall, newCell[2])
            groundCircle.setScale(1.2)
            groundCircle.setR(90)
            groundCircle.setH(-90)
            newCell[5] = groundCircle
            newCell[5].setColorScale(self.blankColor)
            newRow.append(newCell)
        self.grid.append(newRow)
    # Center-of-gravity marker and the "next ball" standby sprite.
    self.cogSprite = self.addUnSprite(self.block, posX=0.25, posZ=0.5)
    self.cogSprite.setColor(self.colorShadow)
    self.cogSprite.nodeObj.hide()
    self.standbySprite = self.addUnSprite(self.block, posX=0.0, posZ=-3.0)
    self.standbySprite.setColor(self.colorShadow)
    self.standbySprite.spriteBase.reparentTo(self.frame)
    self.standbySprite.spriteBase.setY(self.radiusBall)
    self.standbySprite.nodeObj.hide()
    # Predefined starting layouts: tuples of (gridX, rowFromTop, color).
    self.boardData = [((1, 0, 0),
      (4, 0, 1),
      (6, 0, 2),
      (1, 1, 0)), ((1, 0, 1),
      (4, 0, 1),
      (6, 0, 1),
      (1, 1, 1)), ((1, 0, 2),
      (4, 0, 2),
      (6, 0, 2),
      (1, 1, 2))]
    self.attackPatterns = [(0, 1, 2), (0, 0, 1, 1, 2, 2), (0, 1, 0, 2)]
    self.winCounter = 0
    self.matchList = []
    self.newBallTime = 5.0
    self.newBallCountUp = 0.0
    self.cogX = 0
    self.cogZ = 0
    self.aimRadian = 0.0
    self.ballLoaded = 0.0
    self.countTime = 10
    self.countDown = self.countTime
    return
def printGrid(self):
    """Debug dump: grid occupancy map plus one line per tracked sprite.

    The original tested index width (`< 10`) in several places but both
    branches were identical, so those checks were dead code; collapsed here.
    Uses print() calls, valid in both Python 2 and 3 for a single argument.
    """
    printout = ' '
    for columnIndex in range(self.gridDimX - 1, -1, -1):
        printout += '%s ' % columnIndex
    print(printout)
    for rowIndex in range(self.gridDimZ - 1, -1, -1):
        printout = 'row %s ' % rowIndex
        for columnIndex in range(self.gridDimX - 1, -1, -1):
            hasSprite = '_'
            if self.grid[columnIndex][rowIndex][0]:
                hasSprite = 'X'
            printout += '%s ' % hasSprite
        print(printout)
    for count, sprite in enumerate(self.sprites):
        print('count %s X %s Z %s Color %s' % (count,
         sprite.gridPosX,
         sprite.gridPosZ,
         sprite.colorType))
def pickLevelPattern(self):
    """Pick a random predefined board, seed the grid with its balls, reset the notch."""
    index = random.choice(range(0, len(self.boardData)))
    self.boardIndex = index
    self.board = self.boardData[index]
    self.attackPattern = self.attackPatterns[index]
    self.attackCounter = 0
    self.spriteNotchPos = 0
    for cellX, rowFromTop, ballColor in self.board:
        # Board rows count down from the top, grid rows count up from the bottom.
        seeded = self.addSprite(self.block, found=1, color=ballColor)
        self.placeIntoGrid(seeded, cellX, self.gridDimZ - 1 - rowFromTop)
    self.colorGridFlag = 1
    self.updateSpritePos()
def load(self):
    """Load every model, sound, and 2-D GUI element the game needs.

    Called once; setup() later builds the grid from these assets.
    """
    BattleParticles.loadParticles()
    model = loader.loadModel('phase_5.5/models/gui/package_delivery_panel')
    model1 = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
    self.invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
    self.model = model
    self.model1 = model1
    # Sound effects for firing, landing, matches, bombs, and win/lose feedback.
    self.soundFire = base.loadSfx('phase_6/audio/sfx/Golf_Hit_Ball.mp3')
    self.soundLand = base.loadSfx('phase_4/audio/sfx/MG_maze_pickup.mp3')
    self.soundBurst = base.loadSfx('phase_5/audio/sfx/Toon_bodyfall_synergy.mp3')
    self.soundBomb = base.loadSfx('phase_4/audio/sfx/MG_cannon_fire_alt.mp3')
    self.soundLose = base.loadSfx('phase_11/audio/sfx/LB_capacitor_discharge_3.mp3')
    self.soundWin = base.loadSfx('phase_4/audio/sfx/MG_pairing_match_bonus_both.mp3')
    self.soundDone = base.loadSfx('phase_3/audio/sfx/GUI_create_toon_back.mp3')
    self.soundMove = base.loadSfx('phase_3.5/audio/sfx/SA_shred.mp3')
    background = model.find('**/bg')
    itemBoard = model.find('**/item_board')
    self.focusPoint = self.baseNode.attachNewNode('GolfGreenGameFrame')
    # 2-D overlay frame that parents the quit/how-to buttons and timer label.
    self.frame2D = DirectFrame(scale=1.1, relief=DGG.FLAT, frameSize=(-0.1,
     0.1,
     -0.1,
     -0.1), frameColor=(0.737, 0.573, 0.345, 0.3))
    gui2 = loader.loadModel('phase_3/models/gui/quit_button')
    self.quitButton = DirectButton(parent=self.frame2D, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.95, 1.3, -0.69), image_scale=(0.9, 1.0, 1.0), text=TTLocalizer.BustACogExit, text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0, 0, 0, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=TTLocalizer.DGGGquitButton, text_pos=(0, -0.01), command=self.__leaveGame)
    self.quitButton.hide()
    # Instructions dialog with an example ball and a close button.
    self.instructions = DirectFrame(parent=self.frame2D, relief=None, image=DGG.getDefaultDialogGeom(), image_color=ToontownGlobals.GlobalDialogColor, image_scale=(1.2, 1.0, 1.0), text=TTLocalizer.GolfGreenGameDirections, text_font=ToontownGlobals.getSignFont(), text_align=TextNode.ALeft, text_wordwrap=16, text_scale=0.06, text_pos=(-0.5, 0.3), pos=(0.0, 0, -0.0))
    self.instructions.hide()
    imageCogBall = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_ball_cog')
    imageCogBall.setHpr(0, 90, 0)
    self.instCogBall = DirectFrame(parent=self.instructions, relief=None, image=imageCogBall, image_color=ToontownGlobals.GlobalDialogColor, image_scale=(0.12, 0.12, 0.12), pos=(0.0, 0, -0.2))
    buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
    cancelImageList = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
    self.doneButton = DirectButton(parent=self.instructions, relief=None, image=cancelImageList, command=self.instructions.hide, pos=(0, 0, -0.4))
    self.howToButton = DirectButton(parent=self.frame2D, relief=None, image=(gui2.find('**/QuitBtn_UP'), gui2.find('**/QuitBtn_DN'), gui2.find('**/QuitBtn_RLVR')), pos=(0.95, 1.3, -0.82), image_scale=(0.9, 1.0, 1.0), text=TTLocalizer.BustACogHowto, text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0, 0, 0, 1), text1_fg=(1, 1, 1, 1), text2_fg=(1, 1, 1, 1), text_scale=TTLocalizer.DGGGhowToButton, text_pos=(0, -0.01), command=self.instructions.show)
    self.howToButton.hide()
    self.timerLabel = DirectLabel(parent=self.frame2D, relief=None, image=gui2.find('**/QuitBtn_UP'), pos=(0.9, 1.3, -0.42), image_scale=(0.5, 1.0, 1.0), text='Timer', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text_scale=0.045, text_pos=(0, -0.01))
    self.timerLabel.hide()
    self.headPanel = loader.loadModel('phase_6/models/golf/headPanel')
    self.scoreBoard = DirectFrame(scale=1.0, pos=(0.0, 0, 0.9), relief=DGG.FLAT, parent=aspect2d, frameSize=(-0.35,
     0.35,
     -0.05,
     0.05), frameColor=(0.737, 0.573, 0.345, 0.3))
    self.scoreLabel = DirectLabel(parent=self.scoreBoard, relief=None, pos=(0, 0, 0), scale=1.0, text='', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0.0, 0.0, 0.0, 1), text_scale=TTLocalizer.DGGGscoreLabel, text_pos=(0, -0.02))
    self.scoreBoard.hide()
    # NOTE(review): placeholder bonus text -- presumably replaced at runtime
    # via textMayChange=1; confirm before shipping.
    self.bonusBoard = DirectFrame(parent=self.frame2D, relief=None, image_pos=(0, 0, 0.0), image_scale=(0.4, 1, 0.4), image_color=(1, 1, 1, 1), pos=(0.0, 1.5, 0.67), scale=1.0, text='You gotsa bonus fool!', text_font=ToontownGlobals.getSignFont(), text0_fg=(1, 1, 1, 1), text0_shadow=(0.0, 0.0, 0.0, 1), text_scale=0.055, text_pos=(0, -0.1), textMayChange=1)
    self.bonusBoard.hide()
    # 3-D backdrop and the rotating shooter base.
    self.backBoard = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_background')
    self.backBoard.setCollideMask(BitMask32.allOff())
    self.backBoard.reparentTo(self.frame)
    self.backBoard.setScale(0.3, 0.2, 0.25)
    self.backBoard.setHpr(0, -90, 0)
    self.backBoard.setPos(0, -1.5, 8.0)
    self.backBoard.hide()
    base.bb = self.backBoard
    self.aimbase = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_shooter')
    self.aimbase.setHpr(90, 0, 90)
    self.aimbase.setScale(0.3, 0.3, 0.15)
    self.aimbase.reparentTo(self.frame)
    self.aimbase.setPos(0.0, 0.0, self.minZ + 0.1)
    self.aimer = self.aimbase.attachNewNode('GolfGreenGameBase')
    aimer = self.aimbase.find('**/moving*')
    aimer.reparentTo(self.aimer)
    aimer.setPos(0.0, 0.0, 0.0)
    base.gi = aimer
    self.aimbase.hide()
    self.toonPanels = {}
    return
def addToonHeadPanel(self, toon):
    """Create (hidden) a head panel for `toon`, register it, and re-layout all panels."""
    panel = ToonHeadFrame.ToonHeadFrame(toon, (0.4, 0.4, 0.4, 0.6), self.headPanel)
    panel.extraData['text_fg'] = (1.0, 1.0, 1.0, 1.0)
    panel.extraData['text_shadow'] = (0.0, 0.0, 0.0, 1.0)
    panel.extraData.show()
    panel.setScale(0.3, 1, 0.7)
    # Position/scale the head and the two name tags within the frame.
    panel.head.setPos(0, 10, 0.18)
    panel.head.setScale(0.47, 0.2, 0.2)
    panel.tag1.setPos(0.3, 10, 0.18)
    panel.tag1.setScale(0.1283, 0.055, 0.055)
    panel.tag2.setPos(0, 10, 0.43)
    panel.tag2.setScale(0.117, 0.05, 0.05)
    panel.hide()
    self.toonPanels[toon.doId] = panel
    self.arrangeToonHeadPanels()
def removeToonHeadPanel(self, avId):
    """Destroy and drop the head panel for avId (no-op if absent), then re-layout.

    `dict.has_key()` was removed in Python 3; the `in` operator is
    equivalent and valid in Python 2 as well.
    """
    if avId in self.toonPanels:
        self.toonPanels[avId].destroy()
        del self.toonPanels[avId]
        self.arrangeToonHeadPanels()
def arrangeToonHeadPanels(self):
    """Show panels while the game is active and stack them two per screen side."""
    spacing = -0.45
    for slot, panelKey in enumerate(self.toonPanels):
        panel = self.toonPanels[panelKey]
        if self.isActive:
            panel.show()
        else:
            panel.hide()
        # First two panels go down the left edge, the rest down the right.
        if slot <= 1:
            panel.setPos(-1, 0, slot * spacing)
        else:
            panel.setPos(1, 0, (slot - 2) * spacing)
def unload(self):
    """Release every timer, panel, sound, GUI element, and node the game created."""
    self.cleanupTimer()
    for panel in self.toonPanels.values():
        panel.destroy()
    self.headPanel.remove()
    self.toonPanels = None
    # Drop all sound references so the engine can reclaim them.
    for soundAttr in ('soundFire', 'soundLand', 'soundBurst', 'soundBomb',
                      'soundLose', 'soundWin', 'soundDone', 'soundMove'):
        setattr(self, soundAttr, None)
    self.scoreBoard.destroy()
    self.instructions.destroy()
    self.frame2D.destroy()
    self.baseNode.removeNode()
    del self.baseNode
    if self.acceptErrorDialog:
        self.acceptErrorDialog.cleanup()
        self.acceptErrorDialog = None
    self.stopCountDown()
    self.__stop()
    self.ignoreAll()
def show(self):
    """Reveal the game's 3-D root frame."""
    self.frame.show()
def hide(self):
    """Hide the game's 3-D root frame."""
    self.frame.hide()
def __handleExit(self):
    """Route the generic exit event through the accept-exit teardown path."""
    self.__acceptExit()
def __startGame(self):
    """Build the board on first entry, expose the in-game UI, and mark the game live."""
    if not self.setupFlag:
        self.setup()
    # Bring the in-game widgets on screen.
    self.quitButton.show()
    self.howToButton.show()
    self.backBoard.show()
    self.aimbase.show()
    self.squareNode.show()
    self.scoreBoard.show()
    self.standbySprite.nodeObj.show()
    self.groundFlag.hide()
    self.isActive = 1
    self.__setCamera()
    self.spriteNode.show()
    # Reserve the bottom chat cells while the game owns the screen.
    base.setCellsAvailable([base.bottomCells[1], base.bottomCells[2], base.bottomCells[3]], 0)
    self.setupFlag = 1
def startBoard(self, board, attackPattern):
    """Load a server-provided board layout and begin play (no-op once finished).

    board: iterable of (gridX, rowFromTop, color) tuples.
    attackPattern: sequence of ball colors fed to the shooter.
    """
    if self.finished:
        return
    self.clearGrid()
    self.board = board
    self.attackPattern = attackPattern
    self.attackCounter = 0
    self.spriteNotchPos = 0
    self.countDown = self.countTime
    self.tooLowFlag = 0
    for cellX, rowFromTop, ballColor in self.board:
        seeded = self.addSprite(self.block, found=1, color=ballColor)
        self.placeIntoGrid(seeded, cellX, self.gridDimZ - 1 - rowFromTop)
    self.colorGridFlag = 1
    self.tooLowFlag = 0
    self.startCountDown()
    self.updateSpritePos()
    self.killSprite(self.controlSprite)
    self.accept('mouse1', self.__handleMouseClick)
    self.__run()
def startCountDown(self):
    """Start the once-per-second countdown task (idempotent)."""
    if self.countDownRunning:
        return
    taskMgr.add(self.doCountDown, 'GolfGreenGame countdown')
    self.countDownRunning = 1
def stopCountDown(self):
    """Cancel the countdown task and reset its timing bookkeeping."""
    taskMgr.remove('GolfGreenGame countdown')
    self.countDownRunning = 0
    self.countTimeOld = None
def doCountDown(self, task):
    """Countdown task: tick once per second; warn at 3..1, drop the board at 0.

    Runs every frame but only acts when a full second has elapsed since the
    last tick (tracked in self.countTimeOld).
    """
    currentTime = globalClock.getFrameTime()
    if self.countTimeOld == None:
        self.countTimeOld = currentTime
    if currentTime - self.countTimeOld < 1.0:
        # Less than a second since the last tick -- come back next frame.
        return task.cont
    else:
        self.countTimeOld = currentTime
        self.countDown -= 1
        if self.countDown in [3, 2, 1]:
            # Final-seconds warning: bump every ball.
            for sprite in self.sprites:
                sprite.warningBump()
        elif self.countDown == 0:
            # Timer expired: reset it and push the whole layer down one notch.
            self.countDown = self.countTime
            self.spriteNotchPos += 1
            self.lerpSpritePos()
            self.checkForTooLow()
        self.timerLabel['text'] = '%s' % self.countDown
        return task.cont
    return
def checkForTooLow(self):
    """Fail the board once the deepest sprite has reached the descent notch."""
    if self.findLowestSprite() <= self.spriteNotchPos:
        self.doFail()
def doFail(self):
    """Mark the board as lost: grey out the balls, freeze input, schedule wrap-up."""
    self.tooLowFlag = 1
    for deadSprite in self.sprites:
        deadSprite.setColorType(4)
    taskMgr.doMethodLater(1.0, self.failBoard, 'finishing Failure')
    self.__stop()
    self.ignore('mouse1')
def failBoard(self, task=None):
    """Task callback: close out the current board as a failure."""
    self.__finishBoard(0)
def __handleWin(self):
    """Winning the final board simply exits the game through the normal path."""
    self.__handleExit()
def __finishBoard(self, success=1):
    """Close out the current board, play win/lose feedback, and request the next one.

    success: 1 when the board was cleared, 0 when it was failed.
    """
    if self.rollTrack:
        self.rollTrack.finish()
    self.countDown = self.countTime
    if success:
        if self.soundWin:
            self.soundWin.play()
    elif self.soundLose:
        self.soundLose.play()
    self.giftId = None
    self.attackPattern = None
    self.stopCountDown()
    self.clearGrid()
    self.spriteNotchPos = 0
    self.updateSpritePos()
    self.__stop()
    self.ignore('mouse1')
    # The original guard `if not self.tooLowFlag or 1:` was forced true by
    # the `or 1`, so the request was always sent; keep that explicitly.
    self.sendUpdate('requestBoard', [success])
def __acceptExit(self, buttonValue=None):
    """Leave the game cleanly: hide UI, free resources, notify, restore the camera.

    Removed a leftover `pdb.set_trace()` debugging breakpoint that froze the
    client whenever this exit path ran.
    """
    if hasattr(self, 'frame'):
        self.hide()
        self.unload()
        messenger.send(self.doneEvent)
    camera.reparentTo(base.localAvatar)
    base.localAvatar.startUpdateSmartCamera()
def __removeGame(self):
    """Drop the sprite layer and allow setup() to rebuild the board later."""
    self.spriteNode.remove()
    self.setupFlag = 0
def __leaveGame(self):
    """Abort the game: stop tasks, delete sprites, hide UI, restore avatar control."""
    taskMgr.remove('GolfGreenGameTask')
    self.stopCountDown()
    taskMgr.remove(self.timerTaskName)
    self.ignore('mouse1')
    # Hand the camera and locomotion back to the player.
    camera.reparentTo(base.localAvatar)
    base.localAvatar.startUpdateSmartCamera()
    base.cr.playGame.getPlace().fsm.request('walk')
    for doomed in self.sprites:
        doomed.delete()
    self.sprites = []
    self.spriteNode.hide()
    self.controlSprite = None
    self.running = 0
    # Hide every in-game widget.
    self.timerLabel.hide()
    self.quitButton.hide()
    self.howToButton.hide()
    self.backBoard.hide()
    self.aimbase.hide()
    self.squareNode.hide()
    self.groundFlag.show()
    self.instructions.hide()
    self.isActive = 0
    if self.standbySprite:
        self.standbySprite.nodeObj.hide()
    # Give the bottom chat cells back to the rest of the UI.
    base.setCellsAvailable([base.bottomCells[1], base.bottomCells[2], base.bottomCells[3]], 1)
    self.sendUpdate('leaveGame', [])
def findGrid(self, x, z, force=0):
    """Return the empty grid cell nearest to world point (x, z), or None.

    Unless `force` is set, only cells with an occupied neighbor qualify.
    Records the winning cell's indices in self.closestX / self.closestZ.
    """
    best = None
    bestDist = 10000000
    for ix in range(self.gridDimX):
        for iz in range(self.gridDimZ):
            cell = self.grid[ix][iz]
            if cell[0] is not None:
                continue
            dist = self.testPointDistanceSquare(x, z, cell[1], cell[2])
            if dist < bestDist and (force or self.hasNeighbor(ix, iz) is not None):
                best = cell
                self.closestX = ix
                self.closestZ = iz
                bestDist = dist
    return best
def hasNeighbor(self, cellX, cellZ):
    """Return the row index of an occupied neighbor cell, or None.

    The staggered grid means diagonal neighbors differ by row parity: even
    rows check the +x diagonals, odd rows the -x diagonals (matching
    getNeighbors/bombNeighbors). The original was a single flattened elif
    chain with no odd-row branch reachable; restructured to mirror the
    sibling methods.
    """
    if cellZ % 2 == 0:
        offsets = ((-1, 0), (1, 0), (0, 1), (1, 1), (0, -1), (1, -1))
    else:
        offsets = ((-1, 0), (1, 0), (0, 1), (-1, 1), (0, -1), (-1, -1))
    for dx, dz in offsets:
        if self.testGridfull(self.getValidGrid(cellX + dx, cellZ + dz)):
            return cellZ + dz
    return None
def clearFloaters(self):
    """Kill every sprite not connected (directly or transitively) to the back row.

    Seeds `grounded` with the occupied back-row cells, then repeatedly
    promotes unknown cells that touch a grounded cell until stable; whatever
    remains is floating and gets destroyed.

    Fix: the original removed entries from self.unknown while iterating it,
    which skips elements and relied on extra outer-loop passes to converge;
    iterating a copy removes that hazard.
    """
    self.grounded = []
    self.unknown = []
    backRow = self.gridDimZ - 1
    for indexX in range(0, self.gridDimX):
        if self.grid[indexX][backRow][0]:
            self.grounded.append((indexX, backRow))
    for column in self.grid:
        for cell in column:
            if cell[0] is not None:
                coord = (cell[3], cell[4])
                if coord not in self.grounded:
                    self.unknown.append(coord)
    lastUnknownCount = 0
    while len(self.unknown) != lastUnknownCount:
        lastUnknownCount = len(self.unknown)
        for coord in list(self.unknown):
            if self.hasGroundedNeighbor(coord[0], coord[1]):
                self.unknown.remove(coord)
                self.grounded.append(coord)
    for coord in self.unknown:
        self.killSprite(self.grid[coord[0]][coord[1]][0])
def explodeBombs(self):
    """Destroy every bomb-colored sprite on the grid; play the blast sound if any died."""
    exploded = 0
    for column in self.grid:
        for cell in column:
            occupant = cell[0]
            if occupant is not None and occupant.colorType == self.bombIndex:
                self.killSprite(occupant)
                exploded += 1
    if exploded:
        self.soundBomb.play()
def hasGroundedNeighbor(self, cellX, cellZ):
    """Return the row of a neighbor already in self.grounded, or None.

    Staggered-row aware, like hasNeighbor. The original was a single
    flattened elif chain with no reachable odd-row branch; restructured to
    mirror getNeighbors.
    """
    if cellZ % 2 == 0:
        offsets = ((-1, 0), (1, 0), (0, 1), (1, 1), (0, -1), (1, -1))
    else:
        offsets = ((-1, 0), (1, 0), (0, 1), (-1, 1), (0, -1), (-1, -1))
    for dx, dz in offsets:
        if (cellX + dx, cellZ + dz) in self.grounded:
            return cellZ + dz
    return None
def clearMatchList(self, typeClear=0):
    """Destroy every sprite queued in self.matchList, with the burst sound.

    typeClear == wildIndex additionally spawns the wildcard question effect
    on each sprite before it dies. (The original's `elif typeClear == 0:
    pass` branch was a no-op and is omitted.)
    """
    self.soundBurst.play()
    for cellX, cellZ in self.matchList:
        target = self.grid[cellX][cellZ][0]
        if typeClear == self.wildIndex:
            self.questionSprite(target)
        self.killSprite(target)
def shakeList(self, neighbors):
    """Give each listed grid cell's occupant a shake animation."""
    for cellX, cellZ in neighbors:
        self.shakeSprite(self.grid[cellX][cellZ][0])
def createMatchList(self, x, z):
    """Rebuild self.matchList as the flood fill of same-color cells seeded at (x, z)."""
    self.matchList = []
    self.fillMatchList(x, z)
def matchWild(self, x, z, color):
    """Return 1 when the cell at (x, z) pairs with `color` through a wildcard, else 0.

    A match requires a breakable, occupied cell where either side of the
    comparison is the wildcard color.
    """
    spriteType = self.getColorType(x, z)
    if not self.getBreakable(x, z):
        return 0
    if spriteType == -1:
        return 0
    if spriteType == self.wildIndex or color == self.wildIndex:
        return 1
    return 0
def bombNeighbors(self, cellX, cellZ):
    """Queue the six occupied cells around an exploding bomb into self.matchList.

    Row parity selects the diagonal offsets (staggered grid).
    """
    self.soundBomb.play()
    self.matchList = []
    if cellZ % 2 == 0:
        offsets = ((-1, 0), (1, 0), (0, 1), (1, 1), (0, -1), (1, -1))
    else:
        offsets = ((-1, 0), (1, 0), (0, 1), (-1, 1), (0, -1), (-1, -1))
    for dx, dz in offsets:
        if self.getColorType(cellX + dx, cellZ + dz) != -1:
            self.addToMatchList(cellX + dx, cellZ + dz)
def addToMatchList(self, posX, posZ):
    """Record (posX, posZ) for clearing when its occupant is breakable."""
    breakable = self.getBreakable(posX, posZ)
    if breakable > 0:
        self.matchList.append((posX, posZ))
def getNeighbors(self, cellX, cellZ):
    """Return the coordinates of every occupied neighbor of (cellX, cellZ).

    Row parity selects the diagonal offsets (staggered grid); order matches
    the original hand-written chain.
    """
    if cellZ % 2 == 0:
        offsets = ((-1, 0), (1, 0), (0, 1), (1, 1), (0, -1), (1, -1))
    else:
        offsets = ((-1, 0), (1, 0), (0, 1), (-1, 1), (0, -1), (-1, -1))
    found = []
    for dx, dz in offsets:
        if self.getColorType(cellX + dx, cellZ + dz) != -1:
            found.append((cellX + dx, cellZ + dz))
    return found
def fillMatchList(self, cellX, cellZ):
    """Recursive flood fill: gather connected same-color (or wild-matched) cells.

    Appends to self.matchList, using it as the visited set. Greyed-out
    sprites (colorType 4) join the list but never propagate the fill.
    """
    if (cellX, cellZ) in self.matchList:
        return
    self.matchList.append((cellX, cellZ))
    colorType = self.grid[cellX][cellZ][0].colorType
    if colorType == 4:
        return
    if cellZ % 2 == 0:
        offsets = ((-1, 0), (1, 0), (0, 1), (1, 1), (0, -1), (1, -1))
    else:
        offsets = ((-1, 0), (1, 0), (0, 1), (-1, 1), (0, -1), (-1, -1))
    for dx, dz in offsets:
        nx = cellX + dx
        nz = cellZ + dz
        if self.getColorType(nx, nz) == colorType or self.matchWild(nx, nz, colorType):
            self.fillMatchList(nx, nz)
def testGridfull(self, cell):
if not cell:
return 0
elif cell[0] != None:
return 1
else:
return 0
return
def getValidGrid(self, x, z):
    """Return the grid cell at (x, z), or None when the indices are out of bounds.

    (Removed the unreachable trailing `return None` after the if/else.)
    """
    if 0 <= x < self.gridDimX and 0 <= z < self.gridDimZ:
        return self.grid[x][z]
    return None
def getColorType(self, x, z):
    """Return the colorType of the sprite at (x, z); -1 for out-of-bounds or empty.

    (Removed the unreachable trailing `return` after the exhaustive chain.)
    """
    if x < 0 or x >= self.gridDimX or z < 0 or z >= self.gridDimZ:
        return -1
    occupant = self.grid[x][z][0]
    if occupant is None:
        return -1
    return occupant.colorType
def getBreakable(self, x, z):
    """Return the `breakable` flag of the sprite at (x, z); -1 for out-of-bounds or empty.

    (Removed the unreachable trailing `return` after the exhaustive chain.)
    """
    if x < 0 or x >= self.gridDimX or z < 0 or z >= self.gridDimZ:
        return -1
    occupant = self.grid[x][z][0]
    if occupant is None:
        return -1
    return occupant.breakable
def findGridCog(self):
    """Recompute the occupied cells' center of gravity and move the marker there.

    Leaves cogX/cogZ at 0 (marker untouched) when the grid is empty.
    """
    sumX = 0
    sumZ = 0
    occupied = 0
    for row in self.grid:
        for cell in row:
            if cell[0] is not None:
                sumX += cell[1]
                sumZ += cell[2]
                occupied += 1
    self.massCount = occupied
    if occupied > 0:
        self.cogX = sumX / occupied
        self.cogZ = sumZ / occupied
        self.cogSprite.setX(self.cogX)
        self.cogSprite.setZ(self.cogZ)
    else:
        self.cogX = 0
        self.cogZ = 0
def doOnClearGrid(self):
    """Score a cleared board, reset the grid, and take the win exit after five boards."""
    self.winCounter += 1
    self.clearGrid()
    self.flagNextLevel = 1
    if self.winCounter > 4:
        self.__handleWin()
def clearGrid(self):
    """Destroy every seated sprite (blanking its ground circle) and the control ball."""
    for column in self.grid:
        for cell in column:
            if cell[0] is not None:
                self.killSprite(cell[0])
                cell[5].setColorScale(self.blankColor)
    self.killSprite(self.controlSprite)
def killSprite(self, sprite):
    """Remove `sprite` from all game bookkeeping and play its death effect (None-safe).

    Captures any gift the ball carried, updates the foundation count,
    vacates the sprite's grid cell, and marks the board changed.
    """
    if sprite is None:
        return
    if sprite.giftId is not None:
        # Remember the destroyed ball's gift so the shooter can deliver it.
        self.giftId = sprite.giftId
    if sprite.foundation:
        self.foundCount -= 1
    if self.controlSprite == sprite:
        self.controlSprite = None
    if sprite in self.sprites:
        self.sprites.remove(sprite)
    if sprite.gridPosX is not None:
        cell = self.grid[sprite.gridPosX][sprite.gridPosZ]
        cell[0] = None
        cell[5].setColorScale(self.blankColor)
    sprite.deathEffect()
    sprite.delete()
    self.hasChanged = 1
def shakeSprite(self, sprite):
    """Play the shake animation on `sprite`, ignoring None."""
    if sprite is not None:
        sprite.shake()
def questionSprite(self, sprite):
    """Spawn a wildcard burst effect on top of `sprite`'s current position."""
    effect = self.addSprite(self.block, found=0, color=1)
    effect.setX(sprite.getX())
    effect.setZ(sprite.getZ())
    effect.wildEffect()
def colorGrid(self):
    """Repaint every cell's ground circle to reflect occupancy and reachability.

    Occupied: black for white cogs (colorType 3), full otherwise.
    Empty: out-of-play below the notch, neighbor-highlight next to a ball,
    blank everywhere else.
    """
    for row in self.grid:
        for cell in row:
            occupant = cell[0]
            circle = cell[5]
            if occupant is not None:
                if occupant.colorType == 3:
                    circle.setColorScale(self.blackColor)
                else:
                    circle.setColorScale(self.fullColor)
            elif cell[4] <= self.spriteNotchPos:
                circle.setColorScale(self.outColor)
            elif self.hasNeighbor(cell[3], cell[4]):
                circle.setColorScale(self.neighborColor)
            else:
                circle.setColorScale(self.blankColor)
def findPos(self, x, z):
    """Return the world-space (x, z) coordinates stored in grid cell (x, z)."""
    cell = self.grid[x][z]
    return (cell[1], cell[2])
def placeIntoGrid(self, sprite, x, z):
    """Seat `sprite` at grid cell (x, z); if occupied, retry diagonally at (x+1, z-1).

    Seating deactivates the sprite, snaps it to the cell's world position,
    releases it as the control ball if it was one, and triggers the
    center-of-gravity and lose-condition updates.
    """
    if self.grid[x][z][0] is not None:
        # Cell taken -- cascade to the next diagonal slot.
        self.placeIntoGrid(sprite, x + 1, z - 1)
        return
    self.grid[x][z][0] = sprite
    sprite.gridPosX = x
    sprite.gridPosZ = z
    sprite.setActive(0)
    newX, newZ = self.findPos(x, z)
    sprite.setX(newX)
    sprite.setZ(newZ)
    if sprite == self.controlSprite:
        self.controlSprite = None
    self.colorGridFlag = 1
    self.hasChanged = 1
    self.findGridCog()
    self.checkForTooLow()
def stickInGrid(self, sprite, force = 0):
    """Settle a flying sprite into the nearest valid grid cell and resolve the hit.

    Bombs blast their neighborhood; normal balls clear a flood-fill match of
    3 or more, otherwise just shake the adjacent balls.
    """
    if sprite.isActive:
        gridCell = self.findGrid(sprite.getX(), sprite.getZ(), force)
        if gridCell:
            colorType = sprite.colorType
            sprite.setActive(0)
            self.soundLand.play()
            self.placeIntoGrid(sprite, gridCell[3], gridCell[4])
            if colorType == self.bombIndex:
                # Bomb: big visual, then queue and destroy the neighborhood.
                kapow = MovieUtil.createKapowExplosionTrack(render, sprite.nodeObj.getPos(render))
                kapow.start()
                self.bombNeighbors(self.closestX, self.closestZ)
                # Shake everything bordering the blast that isn't itself dying.
                allNeighbors = []
                for entry in self.matchList:
                    neighbors = self.getNeighbors(entry[0], entry[1])
                    for neighbor in neighbors:
                        if neighbor not in allNeighbors and neighbor not in self.matchList:
                            allNeighbors.append(neighbor)
                self.shakeList(allNeighbors)
                self.clearMatchList()
            else:
                self.createMatchList(self.closestX, self.closestZ)
                if len(self.matchList) >= 3:
                    clearType = 0  # NOTE(review): unused local -- appears vestigial
                    self.clearMatchList(colorType)
                else:
                    neighbors = self.getNeighbors(self.closestX, self.closestZ)
                    self.shakeList(neighbors)
def addSprite(self, image, size=3.0, posX=0, posZ=0, found=0, color=None):
    """Create a GameSprite ball under the sprite layer and start tracking it.

    `size` is accepted for signature compatibility but always overridden by
    the ball diameter; `color` None picks a random basic color (0-2).
    Returns the new sprite.
    """
    spriteBase = self.spriteNode.attachNewNode('sprite base')
    ballDiameter = self.radiusBall * 2.0
    if color is None:
        colorChoice = random.choice(range(0, 3))
    else:
        colorChoice = color
    facing = 1
    newSprite = GameSprite3D.GameSprite(spriteBase, ballDiameter, colorChoice, found, facing)
    newSprite.setX(posX)
    newSprite.setZ(posZ)
    self.sprites.append(newSprite)
    if found:
        self.foundCount += 1
    return newSprite
def addControlSprite(self, x=0.0, z=0.0, color=None):
    """Create the player's shooter ball and park it at the launcher position."""
    shooter = self.addSprite(self.block, posX=x, posZ=z, color=color, found=1)
    shooter.spriteBase.reparentTo(self.frame)
    shooter.spriteBase.setPos(0.0, 0.7, -1.54)
    self.controlSprite = shooter
def addUnSprite(self, image, size=3.0, posX=0, posZ=0):
    """Create an untracked decoration ball (never added to self.sprites)."""
    ballDiameter = self.radiusBall * 2.0
    spriteBase = self.spriteNode.attachNewNode('sprite base')
    decoration = GameSprite3D.GameSprite(spriteBase, ballDiameter)
    decoration.setX(posX)
    decoration.setZ(posZ)
    return decoration
def __handleMouseClick(self):
    """Fire the loaded ball along the current aim direction.

    (Removed the original's dead `if self.ballLoaded == 2: pass` branch.)
    """
    if self.ballLoaded and self.controlSprite:
        self.controlSprite.spriteBase.wrtReparentTo(self.spriteNode)
        self.controlSprite.setAccel(14.0, pi * 0.0 - self.aimRadian)
        self.controlSprite.setActive(1)
        self.soundFire.play()
        self.ballLoaded = 0
def __run(self, cont = 1):
    """Per-frame game task: aim from the mouse, feed balls, integrate physics,
    bounce off walls, resolve collisions, and detect board completion.
    """
    if cont and not self.running:
        # First call: register ourselves as a repeating task.
        taskMgr.add(self.__run, 'GolfGreenGameTask')
        self.running = 1
    if self.lastTime == None:
        self.lastTime = globalClock.getRealTime()
    timeDelta = globalClock.getRealTime() - self.lastTime
    self.lastTime = globalClock.getRealTime()
    self.newBallCountUp += timeDelta
    if base.mouseWatcherNode.hasMouse():
        # Convert mouse position into an aim angle, clamped away from horizontal.
        inputX = base.mouseWatcherNode.getMouseX()
        inputZ = base.mouseWatcherNode.getMouseY()
        outputZ = inputZ + self.screenSizeZ * (0.5 - self.zGap)
        if outputZ <= 0.0:
            outputZ = 0.0001
        if inputX > 0.0:
            self.aimRadian = -1.0 * pi + math.atan(outputZ / (inputX * self.XtoZ))
        elif inputX < 0.0:
            self.aimRadian = math.atan(outputZ / (inputX * self.XtoZ))
        else:
            self.aimRadian = pi * -0.5
        margin = 0.2
        if self.aimRadian >= -margin:
            self.aimRadian = -margin
        elif self.aimRadian <= margin - pi:
            self.aimRadian = margin - pi
        degrees = self.__toDegrees(self.aimRadian)
        self.aimer.setH(degrees)
    # Playfield walls, inset by the ball radius.
    self.wallMaxX = self.maxX - self.radiusBall
    self.wallMinX = self.minX + self.radiusBall
    self.wallMaxZ = self.maxZ - self.radiusBall
    self.wallMinZ = self.minZ + self.radiusBall
    if self.controlSprite and self.controlSprite.nodeObj.isEmpty():
        self.controlSprite = None
    if self.giftId:
        # A gift ball is pending: stage it on the standby sprite.
        self.ballLoaded = 2
        self.updateSpritePos()
        self.standbySprite.holdType = self.giftId
        self.standbySprite.setBallType(self.giftId, 1)
        self.standbySprite.face()
        self.giftId = None
    while self.controlSprite == None and self.attackPattern:
        # Load the next ball from the attack pattern into the shooter.
        if self.attackCounter > len(self.attackPattern) - 1:
            self.attackCounter = 0
        print 'Pattern %s Place %s Type %s' % (self.attackPattern, self.attackCounter, self.attackPattern[self.attackCounter])
        if self.standbySprite.holdType != None:
            color = self.standbySprite.holdType
        # NOTE(review): `color` is only bound when holdType is set -- if
        # holdType can be None here this raises NameError; confirm callers.
        sprite = self.addControlSprite(self.newBallX, self.newBallZ + self.spriteNotchPos * self.cellSizeZ, color)
        self.ballLoaded = 1
        self.updateSpritePos()
        newColor = self.predictAttackPattern(0)
        self.standbySprite.holdType = newColor
        self.standbySprite.setBallType(newColor, 1)
        self.standbySprite.face()
        self.attackCounter += 1
    self.standbySprite.runColor()
    for sprite in self.sprites:
        if sprite.deleteFlag:
            # NOTE(review): removes from the list being iterated; skipped
            # entries are only picked up on a later frame.
            self.sprites.remove(sprite)
        else:
            sprite.run(timeDelta)
            # Reflect off the side walls; stick at the top; ignore the bottom.
            if sprite.getX() > self.wallMaxX:
                sprite.setX(self.wallMaxX)
                sprite.reflectX()
            if sprite.getX() < self.wallMinX:
                sprite.setX(self.wallMinX)
                sprite.reflectX()
            if sprite.getZ() > self.wallMaxZ:
                self.stickInGrid(sprite, 1)
            if sprite.getZ() < self.wallMinZ:
                pass
    self.__colTest()
    if self.hasChanged and self.running:
        # Board changed this frame: drop floaters, pop bombs, rescan.
        self.clearFloaters()
        self.explodeBombs()
        self.findGridCog()
        spriteCount = 0
        whiteCount = 0
        for row in self.grid:
            for cell in row:
                if cell[0] != None:
                    self.cogX += cell[1]
                    self.cogZ += cell[2]
                    spriteCount += 1
                    if cell[0].colorType == 3:
                        whiteCount += 1
        if whiteCount == 0:
            # No white cogs left: the board is beaten.
            self.__finishBoard()
            self.flagNextLevel = 0
            self.killSprite(self.controlSprite)
            self.standbySprite.holdType = None
        self.colorGridFlag = 1
        self.hasChanged = 0
    if self.colorGridFlag:
        self.colorGridFlag = 0
        self.colorGrid()
    return Task.cont
def predictAttackPattern(self, numSteps=1):
    """Return the ball color scheduled `numSteps` past the current attack counter."""
    index = (self.attackCounter + numSteps) % len(self.attackPattern)
    return self.attackPattern[index]
def __stop(self):
    """Halt the per-frame game task and clear the running flag."""
    taskMgr.remove('GolfGreenGameTask')
    self.running = 0
def __testWin(self):
    """Take the win path once no sprite remains anywhere on the grid."""
    occupied = sum(1 for column in self.grid for cell in column if cell[0])
    if occupied == 0:
        self.__handleWin()
def __toRadians(self, angle):
    """Convert degrees to radians (stdlib math.radians replaces the hand-rolled formula)."""
    return math.radians(angle)
def __toDegrees(self, angle):
    """Convert radians to degrees (stdlib math.degrees replaces the hand-rolled formula)."""
    return math.degrees(angle)
def __colTest(self):
    """Pairwise sweep over self.sprites: collide active-vs-settled pairs in range.

    Pairs where both or neither ball is active are skipped; both must be
    flagged canCollide and within 1.65 ball-radii of each other.
    """
    if not hasattr(self, 'tick'):
        self.tick = 0
    self.tick += 1
    if self.tick > 5:
        self.tick = 0
    threshold = self.radiusBall * 1.65
    total = len(self.sprites)
    for first in range(total):
        for second in range(first, total):
            ballA = self.getSprite(first)
            ballB = self.getSprite(second)
            if ballA is None or ballB is None or first == second:
                continue
            if not (ballA.isActive or ballB.isActive):
                continue
            if self.testDistance(ballA.spriteBase, ballB.spriteBase) < threshold:
                if not (ballA.isActive and ballB.isActive):
                    if ballA.canCollide and ballB.canCollide:
                        self.__collide(ballA, ballB)
def getSprite(self, spriteIndex):
    """Return the live sprite at spriteIndex, or None when the index is out
    of range or the sprite is already marked for removal.

    The original had an unreachable trailing `return None` after the
    if/else; both branches already returned.
    """
    if spriteIndex >= len(self.sprites) or self.sprites[spriteIndex].markedForDeath:
        return None
    return self.sprites[spriteIndex]
def testDistance(self, nodeA, nodeB):
if nodeA.isEmpty() or nodeB.isEmpty():
return 10000
distX = nodeA.getX() - nodeB.getX()
distZ = nodeA.getZ() - nodeB.getZ()
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
return dist
def testPointDistance(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
dist = math.sqrt(distC)
if dist == 0:
dist = 1e-10
return dist
def testPointDistanceSquare(self, x1, z1, x2, z2):
distX = x1 - x2
distZ = z1 - z2
distC = distX * distX + distZ * distZ
if distC == 0:
distC = 1e-10
return distC
def angleTwoSprites(self, sprite1, sprite2):
    """Heading angle (radians) of the vector from sprite1 to sprite2 in the
    XZ plane, offset by pi/2 so a target along +X reads as 0."""
    dx = sprite2.getX() - sprite1.getX()
    dz = sprite2.getZ() - sprite1.getZ()
    return math.atan2(-dx, dz) + pi * 0.5
def angleTwoPoints(self, x1, z1, x2, z2):
    """Heading angle (radians) from (x1, z1) toward (x2, z2) in the XZ
    plane, offset by pi/2 so a target along +X reads as 0."""
    dx = x2 - x1
    dz = z2 - z1
    return math.atan2(-dx, dz) + pi * 0.5
def __collide(self, move, test):
    # Contact between a moving and a resting ball: kill all velocity on
    # both, fire their collide reactions, then snap each into the grid.
    test.velX = 0
    test.velZ = 0
    move.velX = 0
    move.velZ = 0
    test.collide()
    move.collide()
    self.stickInGrid(move)
    self.stickInGrid(test)
def generateInit(self):
    # DistributedObject pre-generate hook; delegate to the battle blocker base.
    self.notify.debug('generateInit')
    BattleBlocker.BattleBlocker.generateInit(self)

def generate(self):
    # NOTE(review): bypasses BattleBlocker.generate and calls the node path
    # entity base directly -- presumably deliberate; confirm against hierarchy.
    self.notify.debug('generate')
    BasicEntities.DistributedNodePathEntity.generate(self)

def announceGenerate(self):
    """Build the game's scene graph: base/frame/sprite nodes, the golf-green
    ground model, the flag, and the translucent play-field square."""
    self.notify.debug('announceGenerate')
    BattleBlocker.BattleBlocker.announceGenerate(self)
    # Hierarchy: baseNode -> frame (pitched flat, pushed out/up) -> spriteNode.
    self.baseNode = self.attachNewNode('GolfGreenGameBase')
    self.frame = self.baseNode.attachNewNode('GolfGreenGameFrame')
    self.spriteNode = self.frame.attachNewNode('GolfGreenGameSpriteNode')
    self.frame.setScale(1.0)
    self.frame.setP(90)
    self.spriteNotchPos = 0
    self.frame.setY(10.0)
    self.frame.setZ(2.0)
    self.spriteNode.setY(0.5)
    self.hasChanged = 0
    self.squareNode = self.frame.attachNewNode('GolfGreenGameBase')
    groundCircle = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_golf_green')
    groundCircle.reparentTo(self.baseNode)
    groundCircle.setScale(0.24)
    self.groundFlag = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_golf_flag')
    self.groundFlag.reparentTo(self.baseNode)
    self.groundFlag.setScale(0.5)
    self.groundFlag.setH(-45)
    self.groundFlag.setPos(3.0, 4.0, 0.0)
    groundSquare = BuildGeometry.addSquareGeom(self.squareNode, self.sizeX, self.sizeZ, color=Vec4(0.4, 0.4, 0.4, 0.5))
    self.centerZ = (self.minZ + self.maxZ) * 0.5
    self.squareNode.setZ((self.minZ + self.maxZ) * 0.5)
    self.squareNode.setP(-90)
    # Depth/bin tweaks keep the ground decals from z-fighting.
    groundCircle.setDepthWrite(False)
    groundCircle.setDepthTest(True)
    groundCircle.setBin('ground', 1)
    groundSquare[0].setDepthWrite(False)
    groundSquare[0].setDepthTest(False)
    groundSquare[0].setBin('ground', 2)
    self.squareNode.hide()
    self.load()
def initCollisionGeom(self):
    """Create the intangible trigger sphere that detects a toon walking up
    to the game and routes the event to __handleToonEnter."""
    self.actSphere = CollisionSphere(0, 0, 0, 11.5)
    self.actSphereNode = CollisionNode('gridgame-%s-%s' % (self.level.getLevelId(), self.entId))
    self.actSphereNode.addSolid(self.actSphere)
    self.actSphereNodePath = self.attachNewNode(self.actSphereNode)
    self.actSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
    self.actSphere.setTangible(0)
    self.enterEvent = 'enter' + self.actSphereNode.getName()
    self.accept(self.enterEvent, self.__handleToonEnter)

def __handleToonEnter(self, collEntry):
    # A toon entered the trigger sphere: ask the AI to let us join the game.
    self.sendUpdate('requestJoin', [])
def __setCamera(self):
    """Detach the camera from the avatar's smart-camera logic and park it on
    the game's focus point, looking down at the board."""
    camHeight = base.localAvatar.getClampedAvatarHeight()
    heightScaleFactor = camHeight * 0.3333333333
    defLookAt = Point3(0.0, 1.5, camHeight)
    cameraPoint = Point3(0.0, -16.0, 16.0)
    # NOTE(review): heightScaleFactor, defLookAt, cameraPoint, basePos and
    # modPos are computed but never used below -- looks like leftovers.
    # NOTE(review): stopUpdateSmartCamera is called three times in the
    # original; reason unknown -- confirm before collapsing to one call.
    base.localAvatar.stopUpdateSmartCamera()
    base.localAvatar.stopUpdateSmartCamera()
    base.localAvatar.stopUpdateSmartCamera()
    basePos = self.frame.getPos(render)
    modPos = Point3(basePos[0] + 0.0, basePos[1] + 12.0, basePos[2] + 12.0)
    camera.setPos(0, 0, 0)
    camera.setH(0)
    camera.setP(-70)
    camera.reparentTo(self.focusPoint)
    base.camLens.setFov(60, 46.8265)
    self.focusPoint.setPos(0, 12, 27)
    self.focusPoint.setH(180)
def acceptJoin(self, time, timeStamp, avIds):
self.timeStart = timeStamp
timePassed = globalClockDelta.localElapsedTime(self.timeStart)
timeleft = time - timePassed
self.timeTotal = time
if localAvatar.doId in avIds and localAvatar.doId not in self.joinedToons:
self.__startGame()
base.cr.playGame.getPlace().fsm.request('stopped')
self.sendUpdate('requestBoard', [0])
if not self.hasEntered:
self.level.countryClub.showInfoText(TTLocalizer.BustACogInstruction)
self.hasEntered = 1
for avId in self.joinedToons:
if avId not in avIds:
self.joinedToons.remove(avId)
self.removeToonHeadPanel(avId)
toon = base.cr.doId2do.get(avId)
if toon:
toon.startSmooth()
for avId in avIds:
if avId and avId not in self.joinedToons:
if avId not in self.everJoinedToons:
self.everJoinedToons.append(avId)
self.joinedToons.append(avId)
index = self.everJoinedToons.index(avId)
if index > 3:
print 'ERROR! green game has had more than 4 players, we are about to crash\n %s' % self.everJoinedToons
print 'Joining Toon is %s index is %s' % (avId, index)
toon = base.cr.doId2do.get(avId)
selfPos = self.getPos(render)
offset = self.toonPoints[index]
if index > 3:
print 'odd... we should have crashed by now'
standPoint = render.getRelativePoint(self, offset)
if toon:
toon.stopSmooth()
self.addToonHeadPanel(toon)
toon.setAnimState('run', 1.0)
animFunc = Func(toon.setAnimState, 'neutral', 1.0)
track = Sequence(LerpPosInterval(toon, 0.75, standPoint), LerpHprInterval(toon, 0.25, Point3(180, 0, 0)), animFunc, Func(self.clearToonTrack, avId), name=toon.uniqueName('gggEnter'), autoPause=1)
track.delayDelete = DelayDelete.DelayDelete(toon, 'GolfGreenGame.acceptJoin')
self.storeToonTrack(avId, track)
track.start()
def signalDone(self, success):
    """Tear the game down and show the success/failure message."""
    self.finished = 1
    self.soundDone.play()
    self.__leaveGame()
    self.__removeGame()
    self.scoreBoard.hide()
    self.cleanupTimer()
    if success:
        self.level.countryClub.showInfoText(TTLocalizer.BustACogSuccess)
    else:
        self.level.countryClub.showInfoText(TTLocalizer.BustACogFailure)

def boardCleared(self, avId):
    # NOTE(review): despite the name this triggers the failure path --
    # confirm that doFail here is intentional.
    self.doFail()

def setTimerStart(self, time, timeStamp):
    # Start the on-screen countdown once; calls while a timer is already up
    # are ignored.
    if self.timer == None:
        self.timeStart = timeStamp
        timePassed = globalClockDelta.localElapsedTime(self.timeStart)
        timeleft = time - timePassed
        self.timeTotal = time
        self.cleanupTimer()
        self.timer = ToontownTimer.ToontownTimer()
        self.timer.posBelowTopRightCorner()
        self.timer.setTime(timeleft)
        self.timer.countdown(timeleft, self.timerExpired)
    return

def cleanupTimer(self):
    # Stop and destroy the countdown widget if one exists.
    if self.timer:
        self.timer.stop()
        self.timer.destroy()
        self.timer = None
    return

def timerExpired(self):
    # Countdown hit zero; just drop the timer widget.
    self.cleanupTimer()
def useTime(self, time = None):
    """Adopt a new remaining-time value and (re)schedule the one-second
    countdown task while the game is active; refresh the score label."""
    if time != None:
        self.timeLeft = time
    if self.timerTask != None:
        taskMgr.remove(self.timerTaskName)
    if time != None and time > 0.0 and self.isActive:
        self.timerTask = taskMgr.doMethodLater(1.0, self.gameCountDown, self.timerTaskName)
    self.scoreLabel['text'] = TTLocalizer.GolfGreenGameScoreString % (self.boardsLeft, int(self.timeLeft))
    return

def gameCountDown(self, task):
    # Recompute time left from the synchronized start stamp.
    self.timeLeft = self.timeTotal - globalClockDelta.localElapsedTime(self.timeStart)
    # NOTE(review): returning task.done makes the doMethodLater task fire
    # only once -- confirm whether task.again was intended for a repeating
    # per-second tick.
    return task.done
def scoreData(self, total = 2, closed = 1, scoreList = 'hello world'):
    """Apply the authoritative per-player scores from the AI.

    Args:
        total: total number of boards in the round.
        closed: number of boards already finished.
        scoreList: iterable of (avId, score) pairs.
    """
    self.boardsLeft = total - closed
    # Reset every head panel to zero before applying the real scores.
    for panelIndex in self.toonPanels:
        panel = self.toonPanels[panelIndex]
        panel.extraData['text'] = TTLocalizer.GolfGreenGamePlayerScore % 0
    for entry in scoreList:
        # `in` replaces the Python-2-only dict.has_key; behavior identical.
        if entry[0] in self.toonPanels:
            panel = self.toonPanels[entry[0]]
            panel.extraData['text'] = TTLocalizer.GolfGreenGamePlayerScore % entry[1]
    self.scoreLabel['text'] = TTLocalizer.GolfGreenGameScoreString % self.boardsLeft
def informGag(self, track, level):
    """Pop up the bonus board announcing the gag (track/level) just won."""
    self.bonusBoard.show()
    self.bonusBoard['text'] = TTLocalizer.GolfGreenGameBonusGag % TTLocalizer.BattleGlobalAvPropStringsSingular[track][level]
    iconName = ToontownBattleGlobals.AvPropsNew[track][level]
    icon = self.invModel.find('**/%s' % iconName)
    self.bonusBoard['image'] = icon
    self.bonusBoard['image_scale'] = (1.0, 1, 1.0)
    # Auto-hide the board after a few seconds.
    taskMgr.doMethodLater(4.0, self.hideBonusBoard, 'hide bonus')
def helpOthers(self, avId):
    """Show the 'another toon helped you' bonus board (fire-ball gift).

    Args:
        avId: avatar id of the helping toon (ignored when it is ourselves).
    """
    if avId != localAvatar.doId and self.running:
        self.giftId = 7
        toonName = ''
        # FIX: use .get -- direct indexing raised KeyError for a toon that
        # already left the zone, defeating the `if toon:` guard below
        # (acceptJoin already uses .get for the same lookup).
        toon = base.cr.doId2do.get(avId)
        if toon:
            toonName = toon.getName()
        self.bonusBoard['text'] = TTLocalizer.GolfGreenGameGotHelp % toonName
        imageBall = loader.loadModel('phase_12/models/bossbotHQ/bust_a_cog_ball_fire')
        imageBall.setHpr(0, 90, 0)
        self.bonusBoard['image'] = imageBall
        self.bonusBoard['image_scale'] = 0.13
        self.bonusBoard.show()
        taskMgr.doMethodLater(4.0, self.hideBonusBoard, 'hide bonus')
def hideBonusBoard(self, task):
    """Task callback: hide the bonus board if it still exists."""
    board = self.bonusBoard
    if board and not board.isEmpty():
        board.hide()
def storeToonTrack(self, avId, track):
    # Remember the interval animating avId, stopping any previous one first.
    self.clearToonTrack(avId)
    self.__toonTracks[avId] = track

def clearToonTrack(self, avId):
    # Pause and forget avId's stored interval, releasing its DelayDelete so
    # the toon object can be cleaned up.
    oldTrack = self.__toonTracks.get(avId)
    if oldTrack:
        oldTrack.pause()
    if self.__toonTracks.get(avId):
        DelayDelete.cleanupDelayDeletes(self.__toonTracks[avId])
        del self.__toonTracks[avId]
def clearToonTracks(self):
    """Stop and discard every stored toon interval.

    Iterates a snapshot of the keys because clearToonTrack deletes entries
    from the dict while we walk it. Membership is re-checked with `in`
    (replacing the Python-2-only dict.has_key).
    """
    for avId in list(self.__toonTracks):
        if avId in self.__toonTracks:
            self.clearToonTrack(avId)
import io
from PIL import Image, ImageDraw, ImageFont
# Box/label colors: labels in labels_to_highlight draw in red, others blue.
default_color = 'blue'
highlight_color = 'red'
class DetectionOverlay:
    """Draws labeled bounding-box overlays (parsed from TF Example records)
    onto JPEG images, highlighting configured labels in a distinct color."""

    def __init__(self, args):
        # args is an argparse-style namespace; labels_to_highlight is a
        # ";"-separated list of label names to draw in highlight_color.
        self.args = args
        self.labels_to_highlight = args.labels_to_highlight.split(";")
        # NOTE(review): font path is relative to the current working directory.
        self.font = ImageFont.truetype("./fonts/OpenSans-Regular.ttf", 12)

    def apply_overlay(self, image_bytes, example):
        """Apply annotation overlay over input image.

        Args:
            image_bytes: JPEG image
            example: TF Example - such as via tf.train.Example().ParseFromString(record)
        Returns:
            image_bytes_with_overlay: JPEG image with annotation overlay.
        """
        bboxes = self.get_bbox_tuples(example.features.feature)
        image_bytes_with_overlay = self.draw_bboxes(image_bytes, bboxes)
        return image_bytes_with_overlay

    def get_bbox_tuples(self, feature):
        """From a TF Record Feature, get a list of tuples representing bounding boxes.

        Args:
            feature: TF Record Feature
        Returns:
            bboxes (list of tuples): [ (label, xmin, xmax, ymin, ymax), ... ]
        """
        bboxes = []
        if self.args.bbox_name_key in feature:
            # Box i's coordinates live at index i of the parallel float lists.
            for ibbox, label in enumerate (feature[self.args.bbox_name_key].bytes_list.value):
                bboxes.append( (label.decode("utf-8"),
                    feature[self.args.bbox_xmin_key].float_list.value[ibbox],
                    feature[self.args.bbox_xmax_key].float_list.value[ibbox],
                    feature[self.args.bbox_ymin_key].float_list.value[ibbox],
                    feature[self.args.bbox_ymax_key].float_list.value[ibbox]
                    ) )
        else:
            print("Bounding box key '%s' not present." % (self.args.bbox_name_key))
        return bboxes

    def bbox_color(self, label):
        # Highlighted labels stand out; everything else gets the default.
        if label in self.labels_to_highlight:
            return highlight_color
        else:
            return default_color

    def bboxes_to_pixels(self, bbox, im_width, im_height):
        """
        Convert bounding box coordinates to pixels.
        (It is common that bboxes are parametrized as percentage of image size
        instead of pixels.)

        Args:
            bboxes (tuple): (label, xmin, xmax, ymin, ymax)
            im_width (int): image width in pixels
            im_height (int): image height in pixels
        Returns:
            bboxes (tuple): (label, xmin, xmax, ymin, ymax)
        """
        if self.args.coordinates_in_pixels:
            return bbox
        else:
            label, xmin, xmax, ymin, ymax = bbox
            return [label, xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]

    def draw_bboxes(self, image_bytes, bboxes):
        """Draw bounding boxes onto image.

        Args:
            image_bytes: JPEG image.
            bboxes (list of tuples): [ (label, xmin, xmax, ymin, ymax), ... ]
        Returns:
            image_bytes: JPEG image including bounding boxes.
        """
        img = Image.open(io.BytesIO(image_bytes))
        draw = ImageDraw.Draw(img)
        width, height = img.size
        for bbox in bboxes:
            label, xmin, xmax, ymin, ymax = self.bboxes_to_pixels(bbox, width, height)
            draw.rectangle([xmin, ymin, xmax, ymax], outline=self.bbox_color(label))
            # White strip behind the label text for readability.
            # NOTE(review): ImageFont.getsize was removed in Pillow 10;
            # newer Pillow needs font.getbbox/getlength here -- confirm the
            # pinned Pillow version.
            w, h = self.font.getsize(label)
            draw.rectangle((xmin, ymin, xmin + w + 4, ymin + h), fill="white")
            draw.text((xmin+4, ymin), label, fill=self.bbox_color(label), font=self.font)
        with io.BytesIO() as output:
            # JPEG cannot store alpha/palette modes; convert before saving.
            if img.mode in ("RGBA", "P"):
                img = img.convert("RGB")
            img.save(output, format="JPEG")
            output_image = output.getvalue()
        return output_image
from PIL import Image, ImageDraw, ImageFont
# Box/label colors: labels in labels_to_highlight draw in red, others blue.
default_color = 'blue'
highlight_color = 'red'
class DetectionOverlay:
    """Draws labeled bounding-box overlays (parsed from TF Example records)
    onto JPEG images, highlighting configured labels in a distinct color."""

    def __init__(self, args):
        # args is an argparse-style namespace; labels_to_highlight is a
        # ";"-separated list of label names to draw in highlight_color.
        self.args = args
        self.labels_to_highlight = args.labels_to_highlight.split(";")
        # NOTE(review): font path is relative to the current working directory.
        self.font = ImageFont.truetype("./fonts/OpenSans-Regular.ttf", 12)

    def apply_overlay(self, image_bytes, example):
        """Apply annotation overlay over input image.

        Args:
            image_bytes: JPEG image
            example: TF Example - such as via tf.train.Example().ParseFromString(record)
        Returns:
            image_bytes_with_overlay: JPEG image with annotation overlay.
        """
        bboxes = self.get_bbox_tuples(example.features.feature)
        image_bytes_with_overlay = self.draw_bboxes(image_bytes, bboxes)
        return image_bytes_with_overlay

    def get_bbox_tuples(self, feature):
        """From a TF Record Feature, get a list of tuples representing bounding boxes.

        Args:
            feature: TF Record Feature
        Returns:
            bboxes (list of tuples): [ (label, xmin, xmax, ymin, ymax), ... ]
        """
        bboxes = []
        if self.args.bbox_name_key in feature:
            # Box i's coordinates live at index i of the parallel float lists.
            for ibbox, label in enumerate (feature[self.args.bbox_name_key].bytes_list.value):
                bboxes.append( (label.decode("utf-8"),
                    feature[self.args.bbox_xmin_key].float_list.value[ibbox],
                    feature[self.args.bbox_xmax_key].float_list.value[ibbox],
                    feature[self.args.bbox_ymin_key].float_list.value[ibbox],
                    feature[self.args.bbox_ymax_key].float_list.value[ibbox]
                    ) )
        else:
            print("Bounding box key '%s' not present." % (self.args.bbox_name_key))
        return bboxes

    def bbox_color(self, label):
        # Highlighted labels stand out; everything else gets the default.
        if label in self.labels_to_highlight:
            return highlight_color
        else:
            return default_color

    def bboxes_to_pixels(self, bbox, im_width, im_height):
        """
        Convert bounding box coordinates to pixels.
        (It is common that bboxes are parametrized as percentage of image size
        instead of pixels.)

        Args:
            bboxes (tuple): (label, xmin, xmax, ymin, ymax)
            im_width (int): image width in pixels
            im_height (int): image height in pixels
        Returns:
            bboxes (tuple): (label, xmin, xmax, ymin, ymax)
        """
        if self.args.coordinates_in_pixels:
            return bbox
        else:
            label, xmin, xmax, ymin, ymax = bbox
            return [label, xmin * im_width, xmax * im_width, ymin * im_height, ymax * im_height]

    def draw_bboxes(self, image_bytes, bboxes):
        """Draw bounding boxes onto image.

        Args:
            image_bytes: JPEG image.
            bboxes (list of tuples): [ (label, xmin, xmax, ymin, ymax), ... ]
        Returns:
            image_bytes: JPEG image including bounding boxes.
        """
        img = Image.open(io.BytesIO(image_bytes))
        draw = ImageDraw.Draw(img)
        width, height = img.size
        for bbox in bboxes:
            label, xmin, xmax, ymin, ymax = self.bboxes_to_pixels(bbox, width, height)
            draw.rectangle([xmin, ymin, xmax, ymax], outline=self.bbox_color(label))
            # White strip behind the label text for readability.
            # NOTE(review): ImageFont.getsize was removed in Pillow 10;
            # newer Pillow needs font.getbbox/getlength here -- confirm the
            # pinned Pillow version.
            w, h = self.font.getsize(label)
            draw.rectangle((xmin, ymin, xmin + w + 4, ymin + h), fill="white")
            draw.text((xmin+4, ymin), label, fill=self.bbox_color(label), font=self.font)
        with io.BytesIO() as output:
            # JPEG cannot store alpha/palette modes; convert before saving.
            if img.mode in ("RGBA", "P"):
                img = img.convert("RGB")
            img.save(output, format="JPEG")
            output_image = output.getvalue()
        return output_image
import torch
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill `tensor` in place from a normal(mean, std) truncated to [a, b]
    via inverse-CDF sampling, with gradients disabled. Returns the tensor."""
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: the implementation samples via the inverse CDF and clamps to
    [a, b] rather than literally redrawing out-of-range values.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with variance scaled by the layer's fan.

    Args:
        tensor: n-dimensional torch.Tensor to fill in place.
        scale: multiplicative factor applied to the variance.
        mode: 'fan_in', 'fan_out' or 'fan_avg' -- which fan normalizes the
            variance.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if `mode` or `distribution` is not recognized.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # FIX: an unknown mode previously fell through and crashed later
        # with a NameError on `denom`; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan_in variance scaling with a truncated normal,
    applied to `tensor` in place."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
# Copied from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill `tensor` in place from a normal(mean, std) truncated to [a, b]
    via inverse-CDF sampling, with gradients disabled. Returns the tensor."""
    # Cut & paste from PyTorch official master until it's in a few official releases - RW
    # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
    def norm_cdf(x):
        # Computes standard normal cumulative distribution function
        return (1. + math.erf(x / math.sqrt(2.))) / 2.

    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)

    with torch.no_grad():
        # Values are generated by using a truncated uniform distribution and
        # then using the inverse CDF for the normal distribution.
        # Get upper and lower cdf values
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)

        # Uniformly fill tensor with values from [l, u], then translate to
        # [2l-1, 2u-1].
        tensor.uniform_(2 * l - 1, 2 * u - 1)

        # Use inverse cdf transform for normal distribution to get truncated
        # standard normal
        tensor.erfinv_()

        # Transform to proper mean, std
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)

        # Clamp to ensure it's in the proper range
        tensor.clamp_(min=a, max=b)
        return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.

    NOTE: the implementation samples via the inverse CDF and clamps to
    [a, b] rather than literally redrawing out-of-range values.

    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value

    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize `tensor` in place with variance scaled by the layer's fan.

    Args:
        tensor: n-dimensional torch.Tensor to fill in place.
        scale: multiplicative factor applied to the variance.
        mode: 'fan_in', 'fan_out' or 'fan_avg' -- which fan normalizes the
            variance.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if `mode` or `distribution` is not recognized.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # FIX: an unknown mode previously fell through and crashed later
        # with a NameError on `denom`; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # constant is stddev of standard normal truncated to (-2, 2)
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: fan_in variance scaling with a truncated normal,
    applied to `tensor` in place."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
import copy
import logging
from typing import Any, Dict, List, Optional, Type
from kedro.io.core import (
AbstractDataSet,
DataSetAlreadyExistsError,
DataSetError,
DataSetNotFoundError,
generate_current_version,
)
from kedro.io.memory_data_set import MemoryDataSet
# Config-section keys under which the catalog and credentials are declared.
CATALOG_KEY = "catalog"
CREDENTIALS_KEY = "credentials"
def _get_credentials(credentials_name: str, credentials: Dict) -> Dict:
    """Return a set of credentials from the provided credentials dict.

    Args:
        credentials_name: Credentials name.
        credentials: A dictionary with all credentials.

    Returns:
        The set of requested credentials.

    Raises:
        KeyError: When the named credentials entry does not exist.
            (The original docstring wrongly described a data set lookup.)
    """
    try:
        return credentials[credentials_name]
    except KeyError as exc:
        # Chain the original error so tracebacks show the failing lookup.
        raise KeyError(
            "Unable to find credentials '{}': check your data "
            "catalog and credentials configuration. See "
            "https://kedro.readthedocs.io/en/latest/kedro.io.DataCatalog.html "
            "for an example.".format(credentials_name)
        ) from exc
class DataCatalog:
    """``DataCatalog`` stores instances of ``AbstractDataSet`` implementations
    to provide ``load`` and ``save`` capabilities from anywhere in the
    program. To use a ``DataCatalog``, you need to instantiate it with
    a dictionary of data sets. Then it will act as a single point of reference
    for your calls, relaying load and save functions
    to the underlying data sets. See ``from_config`` for building a catalog
    from parsed configuration files.
    """
def __init__(
    self,
    data_sets: Dict[str, AbstractDataSet] = None,
    feed_dict: Dict[str, Any] = None,
) -> None:
    """``DataCatalog`` stores instances of ``AbstractDataSet``
    implementations to provide ``load`` and ``save`` capabilities from
    anywhere in the program. To use a ``DataCatalog``, you need to
    instantiate it with a dictionary of data sets. Then it will act as a
    single point of reference for your calls, relaying load and save
    functions to the underlying data sets.

    Args:
        data_sets: A dictionary of data set names and data set instances.
        feed_dict: A feed dict with data to be added in memory.

    Example:
    ::

        >>> from kedro.io import CSVLocalDataSet
        >>>
        >>> cars = CSVLocalDataSet(filepath="cars.csv",
        >>>                        load_args=None,
        >>>                        save_args={"index": False})
        >>> io = DataCatalog(data_sets={'cars': cars})
    """
    # Fall back to an empty registry; feed_dict values are wrapped in
    # MemoryDataSet by add_feed_dict.
    self._data_sets = data_sets or {}
    if feed_dict:
        self.add_feed_dict(feed_dict)

@property
def _logger(self):
    # Module-scoped logger, resolved lazily on each access.
    return logging.getLogger(__name__)
@classmethod
def from_config(
    cls: Type,
    catalog: Optional[Dict[str, Dict[str, Any]]],
    credentials: Dict[str, Dict[str, Any]] = None,
    load_versions: Dict[str, str] = None,
    save_version: str = None,
) -> "DataCatalog":
    """Create a ``DataCatalog`` instance from configuration. This is a
    factory method used to provide developers with a way to instantiate
    ``DataCatalog`` with configuration parsed from configuration files.

    Args:
        catalog: A dictionary whose keys are the data set names and
            the values are dictionaries with the constructor arguments
            for classes implementing ``AbstractDataSet``. The data set
            class to be loaded is specified with the key ``type`` and their
            fully qualified class name. All ``kedro.io`` data set can be
            specified by their class name only, i.e. their module name
            can be omitted.
        credentials: A dictionary containing credentials for different
            data sets. Use the ``credentials`` key in a ``AbstractDataSet``
            to refer to the appropriate credentials as shown in the example
            below.
        load_versions: A mapping between dataset names and versions
            to load. Has no effect on data sets without enabled versioning.
        save_version: Version string to be used for ``save`` operations
            by all data sets with enabled versioning. It must: a) be a
            case-insensitive string that conforms with operating system
            filename limitations, b) always return the latest version when
            sorted in lexicographical order.

    Returns:
        An instantiated ``DataCatalog`` containing all specified
        data sets, created and ready to use.

    Raises:
        DataSetError: When the method fails to create any of the data
            sets from their config.

    Example:
    ::

        >>> config = {
        >>>     "cars": {
        >>>         "type": "CSVLocalDataSet",
        >>>         "filepath": "cars.csv",
        >>>         "save_args": {"index": False}
        >>>     },
        >>>     "boats": {
        >>>         "type": "CSVS3DataSet",
        >>>         "filepath": "boats.csv",
        >>>         "bucket_name": "mck-147789798-bucket",
        >>>         "credentials": "boats_credentials"
        >>>         "save_args": {"index": False}
        >>>     }
        >>> }
        >>>
        >>> credentials = {
        >>>     "boats_credentials": {
        >>>         "aws_access_key_id": "<your key id>",
        >>>         "aws_secret_access_key": "<your secret>"
        >>>     }
        >>> }
        >>>
        >>> catalog = DataCatalog.from_config(config, credentials)
        >>>
        >>> df = catalog.load("cars")
        >>> catalog.save("boats", df)
    """
    data_sets = {}
    # Deep-copy the inputs so entry mutation below (credentials pop)
    # does not leak back into the caller's parsed config.
    catalog = copy.deepcopy(catalog) or {}
    credentials = copy.deepcopy(credentials) or {}
    save_version = save_version or generate_current_version()
    load_versions = copy.deepcopy(load_versions) or {}

    for ds_name, ds_config in catalog.items():
        if "type" not in ds_config:
            raise DataSetError(
                "`type` is missing from DataSet '{}' "
                "catalog configuration".format(ds_name)
            )
        if CREDENTIALS_KEY in ds_config:
            # Resolve the credentials *name* into the actual dict.
            ds_config[CREDENTIALS_KEY] = _get_credentials(
                ds_config.pop(CREDENTIALS_KEY), credentials  # credentials name
            )
        data_sets[ds_name] = AbstractDataSet.from_config(
            ds_name, ds_config, load_versions.get(ds_name), save_version
        )
    return cls(data_sets=data_sets)
def load(self, name: str) -> Any:
    """Loads a registered data set.

    Args:
        name: A data set to be loaded.

    Returns:
        The loaded data as configured.

    Raises:
        DataSetNotFoundError: When a data set with the given name
            has not yet been registered.

    Example:
    ::

        >>> from kedro.io import CSVLocalDataSet, DataCatalog
        >>>
        >>> cars = CSVLocalDataSet(filepath="cars.csv",
        >>>                        load_args=None,
        >>>                        save_args={"index": False})
        >>> io = DataCatalog(data_sets={'cars': cars})
        >>>
        >>> df = io.load("cars")
    """
    if name in self._data_sets:
        self._logger.info(
            "Loading data from `%s` (%s)...",
            name,
            type(self._data_sets[name]).__name__,
        )
        # Delegate to the registered data set's own load implementation.
        return self._data_sets[name].load()
    raise DataSetNotFoundError("DataSet '{}' not found in the catalog".format(name))

def save(self, name: str, data: Any) -> None:
    """Save data to a registered data set.

    Args:
        name: A data set to be saved to.
        data: A data object to be saved as configured in the registered
            data set.

    Raises:
        DataSetNotFoundError: When a data set with the given name
            has not yet been registered.

    Example:
    ::

        >>> import pandas as pd
        >>>
        >>> from kedro.io import CSVLocalDataSet
        >>>
        >>> cars = CSVLocalDataSet(filepath="cars.csv",
        >>>                        load_args=None,
        >>>                        save_args={"index": False})
        >>> io = DataCatalog(data_sets={'cars': cars})
        >>>
        >>> df = pd.DataFrame({'col1': [1, 2],
        >>>                    'col2': [4, 5],
        >>>                    'col3': [5, 6]})
        >>> io.save("cars", df)
    """
    if name in self._data_sets:
        self._logger.info(
            "Saving data to `%s` (%s)...",
            name,
            type(self._data_sets[name]).__name__,
        )
        self._data_sets[name].save(data)
    else:
        raise DataSetNotFoundError(
            "DataSet '{}' not found in the catalog".format(name)
        )

def exists(self, name: str) -> bool:
    """Checks whether registered data set exists by calling its `exists()`
    method. Raises a warning and returns False if `exists()` is not
    implemented.

    Args:
        name: A data set to be checked.

    Returns:
        Whether the data set output exists.

    Raises:
        DataSetNotFoundError: When a data set with the given name
            has not yet been registered.
    """
    if name in self._data_sets:
        data_set = self._data_sets[name]
        if hasattr(data_set, "exists"):
            return data_set.exists()
        # exists() is optional on data sets; warn and assume "absent".
        self._logger.warning(
            "`exists()` not implemented for `%s`. "
            "Assuming output does not exist.",
            name,
        )
        return False
    raise DataSetNotFoundError("DataSet '{}' not found in the catalog".format(name))
def add(
    self, data_set_name: str, data_set: AbstractDataSet, replace: bool = False
) -> None:
    """Adds a new ``AbstractDataSet`` object to the ``DataCatalog``.

    Args:
        data_set_name: A unique data set name which has not been
            registered yet.
        data_set: A data set object to be associated with the given data
            set name.
        replace: Specifies whether to replace an existing ``DataSet``
            with the same name is allowed.

    Raises:
        DataSetAlreadyExistsError: When a data set with the same name
            has already been registered.

    Example:
    ::

        >>> from kedro.io import CSVLocalDataSet
        >>>
        >>> io = DataCatalog(data_sets={
        >>>     'cars': CSVLocalDataSet(filepath="cars.csv")
        >>> })
        >>>
        >>> io.add("boats", CSVLocalDataSet(filepath="boats.csv"))
    """
    if data_set_name in self._data_sets:
        if replace:
            # Overwriting is allowed but logged so it is never silent.
            self._logger.warning("Replacing DataSet '%s'", data_set_name)
        else:
            raise DataSetAlreadyExistsError(
                "DataSet '{}' has already been registered".format(data_set_name)
            )
    self._data_sets[data_set_name] = data_set

def add_all(
    self, data_sets: Dict[str, AbstractDataSet], replace: bool = False
) -> None:
    """Adds a group of new data sets to the ``DataCatalog``.

    Args:
        data_sets: A dictionary of ``DataSet`` names and data set
            instances.
        replace: Specifies whether to replace an existing ``DataSet``
            with the same name is allowed.

    Raises:
        DataSetAlreadyExistsError: When a data set with the same name
            has already been registered.

    Example:
    ::

        >>> from kedro.io import CSVLocalDataSet, ParquetLocalDataSet
        >>>
        >>> io = DataCatalog(data_sets={
        >>>     "cars": CSVLocalDataSet(filepath="cars.csv")
        >>> })
        >>> additional = {
        >>>     "planes": ParquetLocalDataSet("planes.parq"),
        >>>     "boats": CSVLocalDataSet(filepath="boats.csv")
        >>> }
        >>>
        >>> io.add_all(additional)
        >>>
        >>> assert io.list() == ["cars", "planes", "boats"]
    """
    for name, data_set in data_sets.items():
        self.add(name, data_set, replace)
def add_feed_dict(self, feed_dict: Dict[str, Any], replace: bool = False) -> None:
    """Adds instances of ``MemoryDataSet``, containing the data provided
    through feed_dict.

    Args:
        feed_dict: A feed dict with data to be added in memory.
        replace: Specifies whether to replace an existing ``DataSet``
            with the same name is allowed.

    Example:
    ::

        >>> import pandas as pd
        >>>
        >>> df = pd.DataFrame({'col1': [1, 2],
        >>>                    'col2': [4, 5],
        >>>                    'col3': [5, 6]})
        >>>
        >>> io = DataCatalog()
        >>> io.add_feed_dict({'data': df}, replace=True)
        >>>
        >>> assert io.load("data").equals(df)
    """
    for name, value in feed_dict.items():
        # Ready-made data sets are registered as-is; raw values get
        # wrapped in an in-memory data set.
        if isinstance(value, AbstractDataSet):
            data_set = value
        else:
            data_set = MemoryDataSet(data=value)
        self.add(name, data_set, replace)
def list(self) -> List[str]:
    """List of ``DataSet`` names registered in the catalog.

    Returns:
        A List of ``DataSet`` names, corresponding to the entries that are
        registered in the current catalog object.
    """
    return [name for name in self._data_sets]

def shallow_copy(self) -> "DataCatalog":
    """Returns a shallow copy of the current object.

    Returns:
        Copy of the current object (data set instances are shared).
    """
    return DataCatalog(dict(self._data_sets))
def __eq__(self, other):
    """Catalogs compare equal when they register the same data set mapping."""
    # FIX: comparing against an arbitrary object previously raised
    # AttributeError on other._data_sets; NotImplemented lets Python fall
    # back to its default (unequal) handling.
    if not isinstance(other, DataCatalog):
        return NotImplemented
    return self._data_sets == other._data_sets  # pylint: disable=protected-access
import logging
from typing import Any, Dict, List, Optional, Type
from kedro.io.core import (
AbstractDataSet,
DataSetAlreadyExistsError,
DataSetError,
DataSetNotFoundError,
generate_current_version,
)
from kedro.io.memory_data_set import MemoryDataSet
CATALOG_KEY = "catalog"
CREDENTIALS_KEY = "credentials"
def _get_credentials(credentials_name: str, credentials: Dict) -> Dict:
"""Return a set of credentials from the provided credentials dict.
Args:
credentials_name: Credentials name.
credentials: A dictionary with all credentials.
Returns:
The set of requested credentials.
Raises:
KeyError: When a data set with the given name has not yet been
registered.
"""
try:
return credentials[credentials_name]
except KeyError:
raise KeyError(
"Unable to find credentials '{}': check your data "
"catalog and credentials configuration. See "
"https://kedro.readthedocs.io/en/latest/kedro.io.DataCatalog.html "
"for an example.".format(credentials_name)
)
class DataCatalog:
"""``DataCatalog`` stores instances of ``AbstractDataSet`` implementations
to provide ``load`` and ``save`` capabilities from anywhere in the
program. To use a ``DataCatalog``, you need to instantiate it with
a dictionary of data sets. Then it will act as a single point of reference
for your calls, relaying load and save functions
to the underlying data sets.
"""
def __init__(
self,
data_sets: Dict[str, AbstractDataSet] = None,
feed_dict: Dict[str, Any] = None,
) -> None:
"""``DataCatalog`` stores instances of ``AbstractDataSet``
implementations to provide ``load`` and ``save`` capabilities from
anywhere in the program. To use a ``DataCatalog``, you need to
instantiate it with a dictionary of data sets. Then it will act as a
single point of reference for your calls, relaying load and save
functions to the underlying data sets.
Args:
data_sets: A dictionary of data set names and data set instances.
feed_dict: A feed dict with data to be added in memory.
Example:
::
>>> from kedro.io import CSVLocalDataSet
>>>
>>> cars = CSVLocalDataSet(filepath="cars.csv",
>>> load_args=None,
>>> save_args={"index": False})
>>> io = DataCatalog(data_sets={'cars': cars})
"""
self._data_sets = data_sets or {}
if feed_dict:
self.add_feed_dict(feed_dict)
@property
def _logger(self):
return logging.getLogger(__name__)
@classmethod
def from_config(
cls: Type,
catalog: Optional[Dict[str, Dict[str, Any]]],
credentials: Dict[str, Dict[str, Any]] = None,
load_versions: Dict[str, str] = None,
save_version: str = None,
) -> "DataCatalog":
"""Create a ``DataCatalog`` instance from configuration. This is a
factory method used to provide developers with a way to instantiate
``DataCatalog`` with configuration parsed from configuration files.
Args:
catalog: A dictionary whose keys are the data set names and
the values are dictionaries with the constructor arguments
for classes implementing ``AbstractDataSet``. The data set
class to be loaded is specified with the key ``type`` and their
fully qualified class name. All ``kedro.io`` data set can be
specified by their class name only, i.e. their module name
can be omitted.
credentials: A dictionary containing credentials for different
data sets. Use the ``credentials`` key in a ``AbstractDataSet``
to refer to the appropriate credentials as shown in the example
below.
load_versions: A mapping between dataset names and versions
to load. Has no effect on data sets without enabled versioning.
save_version: Version string to be used for ``save`` operations
by all data sets with enabled versioning. It must: a) be a
case-insensitive string that conforms with operating system
filename limitations, b) always return the latest version when
sorted in lexicographical order.
Returns:
An instantiated ``DataCatalog`` containing all specified
data sets, created and ready to use.
Raises:
DataSetError: When the method fails to create any of the data
sets from their config.
Example:
::
>>> config = {
>>> "cars": {
>>> "type": "CSVLocalDataSet",
>>> "filepath": "cars.csv",
>>> "save_args": {
>>> "index": False
>>> }
>>> },
>>> "boats": {
>>> "type": "CSVS3DataSet",
>>> "filepath": "boats.csv",
>>> "bucket_name": "mck-147789798-bucket",
>>> "credentials": "boats_credentials"
>>> "save_args": {
>>> "index": False
>>> }
>>> }
>>> }
>>>
>>> credentials = {
>>> "boats_credentials": {
>>> "aws_access_key_id": "<your key id>",
>>> "aws_secret_access_key": "<your secret>"
>>> }
>>> }
>>>
>>> catalog = DataCatalog.from_config(config, credentials)
>>>
>>> df = catalog.load("cars")
>>> catalog.save("boats", df)
"""
data_sets = {}
catalog = copy.deepcopy(catalog) or {}
credentials = copy.deepcopy(credentials) or {}
save_version = save_version or generate_current_version()
load_versions = copy.deepcopy(load_versions) or {}
for ds_name, ds_config in catalog.items():
if "type" not in ds_config:
raise DataSetError(
"`type` is missing from DataSet '{}' "
"catalog configuration".format(ds_name)
)
if CREDENTIALS_KEY in ds_config:
ds_config[CREDENTIALS_KEY] = _get_credentials(
ds_config.pop(CREDENTIALS_KEY), credentials # credentials name
)
data_sets[ds_name] = AbstractDataSet.from_config(
ds_name, ds_config, load_versions.get(ds_name), save_version
)
return cls(data_sets=data_sets)
def load(self, name: str) -> Any:
"""Loads a registered data set.
Args:
name: A data set to be loaded.
Returns:
The loaded data as configured.
Raises:
DataSetNotFoundError: When a data set with the given name
has not yet been registered.
Example:
::
>>> from kedro.io import CSVLocalDataSet, DataCatalog
>>>
>>> cars = CSVLocalDataSet(filepath="cars.csv",
>>> load_args=None,
>>> save_args={"index": False})
>>> io = DataCatalog(data_sets={'cars': cars})
>>>
>>> df = io.load("cars")
"""
if name in self._data_sets:
self._logger.info(
"Loading data from `%s` (%s)...",
name,
type(self._data_sets[name]).__name__,
)
return self._data_sets[name].load()
raise DataSetNotFoundError("DataSet '{}' not found in the catalog".format(name))
def save(self, name: str, data: Any) -> None:
"""Save data to a registered data set.
Args:
name: A data set to be saved to.
data: A data object to be saved as configured in the registered
data set.
Raises:
DataSetNotFoundError: When a data set with the given name
has not yet been registered.
Example:
::
>>> import pandas as pd
>>>
>>> from kedro.io import CSVLocalDataSet
>>>
>>> cars = CSVLocalDataSet(filepath="cars.csv",
>>> load_args=None,
>>> save_args={"index": False})
>>> io = DataCatalog(data_sets={'cars': cars})
>>>
>>> df = pd.DataFrame({'col1': [1, 2],
>>> 'col2': [4, 5],
>>> 'col3': [5, 6]})
>>> io.save("cars", df)
"""
if name in self._data_sets:
self._logger.info(
"Saving data to `%s` (%s)...",
name,
type(self._data_sets[name]).__name__,
)
self._data_sets[name].save(data)
else:
raise DataSetNotFoundError(
"DataSet '{}' not found in the catalog".format(name)
)
def exists(self, name: str) -> bool:
"""Checks whether registered data set exists by calling its `exists()`
method. Raises a warning and returns False if `exists()` is not
implemented.
Args:
name: A data set to be checked.
Returns:
Whether the data set output exists.
Raises:
DataSetNotFoundError: When a data set with the given name
has not yet been registered.
"""
if name in self._data_sets:
data_set = self._data_sets[name]
if hasattr(data_set, "exists"):
return data_set.exists()
self._logger.warning(
"`exists()` not implemented for `%s`. "
"Assuming output does not exist.",
name,
)
return False
raise DataSetNotFoundError("DataSet '{}' not found in the catalog".format(name))
def add(
self, data_set_name: str, data_set: AbstractDataSet, replace: bool = False
) -> None:
"""Adds a new ``AbstractDataSet`` object to the ``DataCatalog``.
Args:
data_set_name: A unique data set name which has not been
registered yet.
data_set: A data set object to be associated with the given data
set name.
replace: Specifies whether to replace an existing ``DataSet``
with the same name is allowed.
Raises:
DataSetAlreadyExistsError: When a data set with the same name
has already been registered.
Example:
::
>>> from kedro.io import CSVLocalDataSet
>>>
>>> io = DataCatalog(data_sets={
>>> 'cars': CSVLocalDataSet(filepath="cars.csv")
>>> })
>>>
>>> io.add("boats", CSVLocalDataSet(filepath="boats.csv"))
"""
if data_set_name in self._data_sets:
if replace:
self._logger.warning("Replacing DataSet '%s'", data_set_name)
else:
raise DataSetAlreadyExistsError(
"DataSet '{}' has already been registered".format(data_set_name)
)
self._data_sets[data_set_name] = data_set
def add_all(
self, data_sets: Dict[str, AbstractDataSet], replace: bool = False
) -> None:
"""Adds a group of new data sets to the ``DataCatalog``.
Args:
data_sets: A dictionary of ``DataSet`` names and data set
instances.
replace: Specifies whether to replace an existing ``DataSet``
with the same name is allowed.
Raises:
DataSetAlreadyExistsError: When a data set with the same name
has already been registered.
Example:
::
>>> from kedro.io import CSVLocalDataSet, ParquetLocalDataSet
>>>
>>> io = DataCatalog(data_sets={
>>> "cars": CSVLocalDataSet(filepath="cars.csv")
>>> })
>>> additional = {
>>> "planes": ParquetLocalDataSet("planes.parq"),
>>> "boats": CSVLocalDataSet(filepath="boats.csv")
>>> }
>>>
>>> io.add_all(additional)
>>>
>>> assert io.list() == ["cars", "planes", "boats"]
"""
for name, data_set in data_sets.items():
self.add(name, data_set, replace)
def add_feed_dict(self, feed_dict: Dict[str, Any], replace: bool = False) -> None:
"""Adds instances of ``MemoryDataSet``, containing the data provided
through feed_dict.
Args:
feed_dict: A feed dict with data to be added in memory.
replace: Specifies whether to replace an existing ``DataSet``
with the same name is allowed.
Example:
::
>>> import pandas as pd
>>>
>>> df = pd.DataFrame({'col1': [1, 2],
>>> 'col2': [4, 5],
>>> 'col3': [5, 6]})
>>>
>>> io = DataCatalog()
>>> io.add_feed_dict({
>>> 'data': df
>>> }, replace=True)
>>>
>>> assert io.load("data").equals(df)
"""
for data_set_name in feed_dict:
if isinstance(feed_dict[data_set_name], AbstractDataSet):
data_set = feed_dict[data_set_name]
else:
data_set = MemoryDataSet(data=feed_dict[data_set_name])
self.add(data_set_name, data_set, replace)
def list(self) -> List[str]:
"""List of ``DataSet`` names registered in the catalog.
Returns:
A List of ``DataSet`` names, corresponding to the entries that are
registered in the current catalog object.
"""
return list(self._data_sets.keys())
def shallow_copy(self) -> "DataCatalog":
"""Returns a shallow copy of the current object.
Returns:
Copy of the current object.
"""
return DataCatalog({**self._data_sets})
def __eq__(self, other):
return self._data_sets == other._data_sets # pylint: disable=protected-access | 0.904979 | 0.48377 |
import os
import sys
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
if sys.version_info.major == 2:
# python2
import ConfigParser as configparser
else:
# python3
import configparser
def pytest_addoption(parser):
group = parser.getgroup('testrail')
group.addoption(
'--testrail',
action='store_true',
help='Create and update testruns with TestRail')
group.addoption(
'--tr-config',
action='store',
default='testrail.cfg',
help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
group.addoption(
'--tr-url',
action='store',
help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
group.addoption(
'--tr-email',
action='store',
help='Email for the account on the TestRail server (config file: email in API section)')
group.addoption(
'--tr-password',
action='store',
help='Password for the account on the TestRail server (config file: password in API section)')
group.addoption(
'--tr-timeout',
action='store',
help='Set timeout for connecting to TestRail server')
group.addoption(
'--tr-testrun-assignedto-id',
action='store',
help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
group.addoption(
'--tr-testrun-project-id',
action='store',
help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-id',
action='store',
help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-include-all',
action='store_true',
default=None,
help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
group.addoption(
'--tr-testrun-name',
action='store',
default=None,
help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
group.addoption(
'--tr-testrun-description',
action='store',
default=None,
help='Description given to testrun, that appears in TestRail (config file: description in TESTRUN section)')
group.addoption(
'--tr-run-id',
action='store',
default=0,
required=False,
help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-plan-id',
action='store',
default=0,
required=False,
help='Identifier of testplan, that appears in TestRail (config file: plan_id in TESTRUN section). If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-version',
action='store',
default='',
required=False,
help='Indicate a version in Test Case result')
group.addoption(
'--tr-no-ssl-cert-check',
action='store_false',
default=None,
help='Do not check for valid SSL certificate on TestRail host')
group.addoption(
'--tr-close-on-complete',
action='store_true',
default=False,
required=False,
help='Close a test run on completion')
group.addoption(
'--tr-dont-publish-blocked',
action='store_false',
required=False,
help='Determine if results of "blocked" testcases (in TestRail) are published or not')
group.addoption(
'--tr-skip-missing',
action='store_true',
required=False,
help='Skip test cases that are not present in testrun')
group.addoption(
'--tr-milestone-id',
action='store',
default=None,
required=False,
help='Identifier of milestone, to be used in run creation (config file: milestone_id in TESTRUN section)'
)
def pytest_configure(config):
if config.getoption('--testrail'):
cfg_file_path = config.getoption('--tr-config')
config_manager = ConfigManager(cfg_file_path, config)
client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
config_manager.getoption('tr-email', 'email', 'API'),
config_manager.getoption('tr-password', 'password', 'API'),
timeout=config_manager.getoption('tr-timeout', 'timeout', 'API'))
config.pluginmanager.register(
PyTestRailPlugin(
client=client,
assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN',
is_bool=True, default=False),
cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True,
default=True),
tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
tr_description=config_manager.getoption('tr-testrun-description', 'description', 'TESTRUN'),
run_id=config.getoption('--tr-run-id'),
plan_id=config.getoption('--tr-plan-id', 'plan_id', 'TESTRUN'),
version=config.getoption('--tr-version'),
close_on_complete=config.getoption('--tr-close-on-complete'),
publish_blocked=config.getoption('--tr-dont-publish-blocked'),
skip_missing=config.getoption('--tr-skip-missing'),
milestone_id=config_manager.getoption('tr-milestone-id', 'milestone_id', 'TESTRUN')
),
# Name of plugin instance (allow to be used by other plugins)
name="pytest-testrail-instance"
)
class ConfigManager(object):
def __init__(self, cfg_file_path, config):
'''
Handles retrieving configuration values. Config options set in flags are given preferance over options set in the
config file.
:param cfg_file_path: Path to the config file containing information about the TestRail server.
:type cfg_file_path: str or None
:param config: Config object containing commandline flag options.
:type config: _pytest.config.Config
'''
self.cfg_file = None
if os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path):
self.cfg_file = configparser.ConfigParser()
self.cfg_file.read(cfg_file_path)
self.config = config
def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
# priority: cli > config file > default
# 1. return cli option (if set)
value = self.config.getoption('--{}'.format(flag))
if value is not None:
return value
# 2. return default if not config file path is specified
if section is None or self.cfg_file is None:
return default
if self.cfg_file.has_option(section, cfg_name):
# 3. return config file value
return self.cfg_file.getboolean(section, cfg_name) if is_bool else self.cfg_file.get(section, cfg_name)
else:
# 4. if entry not found in config file
return default | pytest_testrail/conftest.py | import os
import sys
from .plugin import PyTestRailPlugin
from .testrail_api import APIClient
if sys.version_info.major == 2:
# python2
import ConfigParser as configparser
else:
# python3
import configparser
def pytest_addoption(parser):
group = parser.getgroup('testrail')
group.addoption(
'--testrail',
action='store_true',
help='Create and update testruns with TestRail')
group.addoption(
'--tr-config',
action='store',
default='testrail.cfg',
help='Path to the config file containing information about the TestRail server (defaults to testrail.cfg)')
group.addoption(
'--tr-url',
action='store',
help='TestRail address you use to access TestRail with your web browser (config file: url in API section)')
group.addoption(
'--tr-email',
action='store',
help='Email for the account on the TestRail server (config file: email in API section)')
group.addoption(
'--tr-password',
action='store',
help='Password for the account on the TestRail server (config file: password in API section)')
group.addoption(
'--tr-timeout',
action='store',
help='Set timeout for connecting to TestRail server')
group.addoption(
'--tr-testrun-assignedto-id',
action='store',
help='ID of the user assigned to the test run (config file: assignedto_id in TESTRUN section)')
group.addoption(
'--tr-testrun-project-id',
action='store',
help='ID of the project the test run is in (config file: project_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-id',
action='store',
help='ID of the test suite containing the test cases (config file: suite_id in TESTRUN section)')
group.addoption(
'--tr-testrun-suite-include-all',
action='store_true',
default=None,
help='Include all test cases in specified test suite when creating test run (config file: include_all in TESTRUN section)')
group.addoption(
'--tr-testrun-name',
action='store',
default=None,
help='Name given to testrun, that appears in TestRail (config file: name in TESTRUN section)')
group.addoption(
'--tr-testrun-description',
action='store',
default=None,
help='Description given to testrun, that appears in TestRail (config file: description in TESTRUN section)')
group.addoption(
'--tr-run-id',
action='store',
default=0,
required=False,
help='Identifier of testrun, that appears in TestRail. If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-plan-id',
action='store',
default=0,
required=False,
help='Identifier of testplan, that appears in TestRail (config file: plan_id in TESTRUN section). If provided, option "--tr-testrun-name" will be ignored')
group.addoption(
'--tr-version',
action='store',
default='',
required=False,
help='Indicate a version in Test Case result')
group.addoption(
'--tr-no-ssl-cert-check',
action='store_false',
default=None,
help='Do not check for valid SSL certificate on TestRail host')
group.addoption(
'--tr-close-on-complete',
action='store_true',
default=False,
required=False,
help='Close a test run on completion')
group.addoption(
'--tr-dont-publish-blocked',
action='store_false',
required=False,
help='Determine if results of "blocked" testcases (in TestRail) are published or not')
group.addoption(
'--tr-skip-missing',
action='store_true',
required=False,
help='Skip test cases that are not present in testrun')
group.addoption(
'--tr-milestone-id',
action='store',
default=None,
required=False,
help='Identifier of milestone, to be used in run creation (config file: milestone_id in TESTRUN section)'
)
def pytest_configure(config):
if config.getoption('--testrail'):
cfg_file_path = config.getoption('--tr-config')
config_manager = ConfigManager(cfg_file_path, config)
client = APIClient(config_manager.getoption('tr-url', 'url', 'API'),
config_manager.getoption('tr-email', 'email', 'API'),
config_manager.getoption('tr-password', 'password', 'API'),
timeout=config_manager.getoption('tr-timeout', 'timeout', 'API'))
config.pluginmanager.register(
PyTestRailPlugin(
client=client,
assign_user_id=config_manager.getoption('tr-testrun-assignedto-id', 'assignedto_id', 'TESTRUN'),
project_id=config_manager.getoption('tr-testrun-project-id', 'project_id', 'TESTRUN'),
suite_id=config_manager.getoption('tr-testrun-suite-id', 'suite_id', 'TESTRUN'),
include_all=config_manager.getoption('tr-testrun-suite-include-all', 'include_all', 'TESTRUN',
is_bool=True, default=False),
cert_check=config_manager.getoption('tr-no-ssl-cert-check', 'no_ssl_cert_check', 'API', is_bool=True,
default=True),
tr_name=config_manager.getoption('tr-testrun-name', 'name', 'TESTRUN'),
tr_description=config_manager.getoption('tr-testrun-description', 'description', 'TESTRUN'),
run_id=config.getoption('--tr-run-id'),
plan_id=config.getoption('--tr-plan-id', 'plan_id', 'TESTRUN'),
version=config.getoption('--tr-version'),
close_on_complete=config.getoption('--tr-close-on-complete'),
publish_blocked=config.getoption('--tr-dont-publish-blocked'),
skip_missing=config.getoption('--tr-skip-missing'),
milestone_id=config_manager.getoption('tr-milestone-id', 'milestone_id', 'TESTRUN')
),
# Name of plugin instance (allow to be used by other plugins)
name="pytest-testrail-instance"
)
class ConfigManager(object):
def __init__(self, cfg_file_path, config):
'''
Handles retrieving configuration values. Config options set in flags are given preferance over options set in the
config file.
:param cfg_file_path: Path to the config file containing information about the TestRail server.
:type cfg_file_path: str or None
:param config: Config object containing commandline flag options.
:type config: _pytest.config.Config
'''
self.cfg_file = None
if os.path.isfile(cfg_file_path) or os.path.islink(cfg_file_path):
self.cfg_file = configparser.ConfigParser()
self.cfg_file.read(cfg_file_path)
self.config = config
def getoption(self, flag, cfg_name, section=None, is_bool=False, default=None):
# priority: cli > config file > default
# 1. return cli option (if set)
value = self.config.getoption('--{}'.format(flag))
if value is not None:
return value
# 2. return default if not config file path is specified
if section is None or self.cfg_file is None:
return default
if self.cfg_file.has_option(section, cfg_name):
# 3. return config file value
return self.cfg_file.getboolean(section, cfg_name) if is_bool else self.cfg_file.get(section, cfg_name)
else:
# 4. if entry not found in config file
return default | 0.287168 | 0.116966 |
def set_spines(ax, visible=False, spine_set=None):
if spine_set is None:
spine_set = ['left', 'right', 'top', 'bottom']
for s in spine_set:
ax.spines[s].set_visible(visible)
def no_ticks(ax, no_x=True, no_y=True):
if no_x:
ax.xaxis.set_ticks_position('none')
if no_y:
ax.yaxis.set_ticks_position('none')
def clean_axis(ax, right=True, left=False, top=True, bottom=False, allx=False):
if right or allx:
ax.spines['right'].set_visible(False)
if left or allx:
ax.spines['left'].set_visible(False)
ax.set_yticks([])
if top or allx:
ax.spines['top'].set_visible(False)
if bottom or allx:
ax.spines['bottom'].set_visible(False)
ax.set_xticks([])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def set_ticks_outer(ax, x=True, y=True):
if y:
ax.get_yaxis().set_tick_params(which='both', direction='out')
if x:
ax.get_xaxis().set_tick_params(which='both', direction='out')
def set_common_range(axes, x=False, y=True, setmax=True, setmin=True):
if y:
y_max = None
y_min = None
for ax in axes:
y_min = ax.get_ylim()[0] if y_min is None else min(y_min, ax.get_ylim()[0])
y_max = max(y_max, ax.get_ylim()[1])
for ax in axes:
if setmin and setmax:
ax.set_ylim([y_min, y_max])
elif setmin:
ax.set_ylim([y_min, ax.get_ylim()[1]])
elif setmax:
ax.set_ylim([ax.get_ylim()[0], y_max])
if x:
x_max = None
x_min = None
for ax in axes:
x_min = ax.get_xlim()[0] if x_min is None else min(x_min, ax.get_xlim()[0])
x_max = max(x_max, ax.get_xlim()[1])
for ax in axes:
if setmin and setmax:
ax.set_xlim([x_min, x_max])
elif setmin:
ax.set_xlim([x_min, ax.get_xlim()[1]])
elif setmax:
ax.set_xlim([ax.get_xlim()[0], x_max])
def label_bars(ax, rects, labels, rotation=0, fontsize=10):
assert len(labels) == len(rects)
for i, r in enumerate(rects):
#height = r.get_height()
height = ax.get_ylim()[1] * 0.5
ax.text(r.get_x() + r.get_width() / 2,
1.05 * height,
'%s' % str(labels[i]),
ha='center',
va='bottom',
rotation=rotation,
fontsize=fontsize ) | spladder/viz/axes.py | def set_spines(ax, visible=False, spine_set=None):
if spine_set is None:
spine_set = ['left', 'right', 'top', 'bottom']
for s in spine_set:
ax.spines[s].set_visible(visible)
def no_ticks(ax, no_x=True, no_y=True):
if no_x:
ax.xaxis.set_ticks_position('none')
if no_y:
ax.yaxis.set_ticks_position('none')
def clean_axis(ax, right=True, left=False, top=True, bottom=False, allx=False):
if right or allx:
ax.spines['right'].set_visible(False)
if left or allx:
ax.spines['left'].set_visible(False)
ax.set_yticks([])
if top or allx:
ax.spines['top'].set_visible(False)
if bottom or allx:
ax.spines['bottom'].set_visible(False)
ax.set_xticks([])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def set_ticks_outer(ax, x=True, y=True):
if y:
ax.get_yaxis().set_tick_params(which='both', direction='out')
if x:
ax.get_xaxis().set_tick_params(which='both', direction='out')
def set_common_range(axes, x=False, y=True, setmax=True, setmin=True):
if y:
y_max = None
y_min = None
for ax in axes:
y_min = ax.get_ylim()[0] if y_min is None else min(y_min, ax.get_ylim()[0])
y_max = max(y_max, ax.get_ylim()[1])
for ax in axes:
if setmin and setmax:
ax.set_ylim([y_min, y_max])
elif setmin:
ax.set_ylim([y_min, ax.get_ylim()[1]])
elif setmax:
ax.set_ylim([ax.get_ylim()[0], y_max])
if x:
x_max = None
x_min = None
for ax in axes:
x_min = ax.get_xlim()[0] if x_min is None else min(x_min, ax.get_xlim()[0])
x_max = max(x_max, ax.get_xlim()[1])
for ax in axes:
if setmin and setmax:
ax.set_xlim([x_min, x_max])
elif setmin:
ax.set_xlim([x_min, ax.get_xlim()[1]])
elif setmax:
ax.set_xlim([ax.get_xlim()[0], x_max])
def label_bars(ax, rects, labels, rotation=0, fontsize=10):
assert len(labels) == len(rects)
for i, r in enumerate(rects):
#height = r.get_height()
height = ax.get_ylim()[1] * 0.5
ax.text(r.get_x() + r.get_width() / 2,
1.05 * height,
'%s' % str(labels[i]),
ha='center',
va='bottom',
rotation=rotation,
fontsize=fontsize ) | 0.477798 | 0.368406 |
from __future__ import print_function
import os
import pytest
import pandas as pd
import numpy as np
from lifelines.estimation import NelsonAalenFitter, KaplanMeierFitter, AalenAdditiveFitter,\
CoxPHFitter, CoxTimeVaryingFitter
from lifelines.generate_datasets import generate_random_lifetimes, generate_hazard_rates
from lifelines.plotting import plot_lifetimes
from lifelines.datasets import load_waltons, load_regression_dataset, load_lcd,\
load_panel_test, load_stanford_heart_transplants
from lifelines.generate_datasets import cumulative_integral
@pytest.mark.plottest
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
class TestPlotting():
@pytest.fixture
def kmf(self):
return KaplanMeierFitter()
def setup_method(self, method):
pytest.importorskip("matplotlib")
from matplotlib import pyplot as plt
self.plt = plt
def test_negative_times_still_plots(self, block, kmf):
n = 40
T = np.linspace(-2, 3, n)
C = np.random.randint(2, size=n)
kmf.fit(T, C)
ax = kmf.plot()
self.plt.title('test_negative_times_still_plots')
self.plt.show(block=block)
return
def test_kmf_plotting(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
data2 = np.random.exponential(2, size=(200, 1))
data3 = np.random.exponential(4, size=(500, 1))
kmf.fit(data1, label='test label 1')
ax = kmf.plot()
kmf.fit(data2, label='test label 2')
kmf.plot(ax=ax)
kmf.fit(data3, label='test label 3')
kmf.plot(ax=ax)
self.plt.title("test_kmf_plotting")
self.plt.show(block=block)
return
def test_kmf_with_risk_counts(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
kmf.fit(data1)
kmf.plot(at_risk_counts=True)
self.plt.title("test_kmf_with_risk_counts")
self.plt.show(block=block)
def test_naf_plotting_with_custom_colours(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(500))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(color="r")
naf.fit(data2)
naf.plot(ax=ax, c="k")
self.plt.title('test_naf_plotting_with_custom_coloirs')
self.plt.show(block=block)
return
def test_aalen_additive_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline)
C = np.random.binomial(1, 1., size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.plot(iloc=slice(0, aaf.cumulative_hazards_.shape[0] - 100))
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_plot')
self.plt.show(block=block)
return
def test_aalen_additive_smoothed_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 150, 5000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline) + 0.1 * np.random.uniform(size=(n, 1))
C = np.random.binomial(1, 0.8, size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.smoothed_hazards_(1).iloc[0:aaf.cumulative_hazards_.shape[0] - 500].plot()
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_smoothed_plot')
self.plt.show(block=block)
return
def test_naf_plotting_slice(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(loc=slice(0, None))
naf.fit(data2)
naf.plot(ax=ax, ci_force_lines=True, iloc=slice(100, 180))
self.plt.title('test_naf_plotting_slice')
self.plt.show(block=block)
return
def test_plot_lifetimes_calendar(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
current = 10
birthtimes = current * np.random.uniform(size=(N,))
T, C = generate_random_lifetimes(hz, t, size=N, censor=current - birthtimes)
plot_lifetimes(T, event_observed=C, birthtimes=birthtimes, block=block)
def test_plot_lifetimes_relative(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
T, C = generate_random_lifetimes(hz, t, size=N, censor=True)
plot_lifetimes(T, event_observed=C, block=block)
def test_naf_plot_cumulative_hazard(self, block):
data1 = np.random.exponential(5, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot()
naf.plot_cumulative_hazard(ax=ax, ci_force_lines=True)
self.plt.title("I should have plotted the same thing, but different styles + color!")
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwidth_2(self, block):
data1 = np.random.exponential(5, size=(2000, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=1., loc=slice(0, 7.))
self.plt.title('test_naf_plot_cumulative_hazard_bandwidth_2')
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwith_1(self, block):
data1 = np.random.exponential(5, size=(2000, 1)) ** 2
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=5., iloc=slice(0, 1700))
self.plt.title('test_naf_plot_cumulative_hazard_bandwith_1')
self.plt.show(block=block)
return
def test_show_censor_with_discrete_date(self, block, kmf):
T = np.random.binomial(20, 0.1, size=100)
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_discrete_date')
self.plt.show(block=block)
return
def test_show_censor_with_index_0(self, block, kmf):
T = np.random.binomial(20, 0.9, size=100) # lifelines should auto put a 0 in.
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_index_0')
self.plt.show(block=block)
return
def test_flat_style_with_customer_censor_styles(self, block, kmf):
data1 = np.random.exponential(10, size=200)
kmf.fit(data1, label='test label 1')
kmf.plot(ci_force_lines=True, show_censors=True,
censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
self.plt.title('test_flat_style_no_censor')
self.plt.show(block=block)
return
def test_loglogs_plot(self, block, kmf):
data1 = np.random.exponential(10, size=200)
data2 = np.random.exponential(5, size=200)
kmf.fit(data1, label='test label 1')
ax = kmf.plot_loglogs()
kmf.fit(data2, label='test label 2')
ax = kmf.plot_loglogs(ax=ax)
self.plt.title('test_loglogs_plot')
self.plt.show(block=block)
return
def test_seaborn_doesnt_cause_kmf_plot_error(self, block, kmf, capsys):
import seaborn as sns
df = load_waltons()
T = df['T']
E = df['E']
kmf = KaplanMeierFitter()
kmf.fit(T, event_observed=E)
kmf.plot()
self.plt.title('test_seaborn_doesnt_cause_kmf_plot_error')
self.plt.show(block=block)
_, err = capsys.readouterr()
assert err == ""
def test_coxph_plotting(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot()
self.plt.title('test_coxph_plotting')
self.plt.show(block=block)
def test_coxph_plotting_with_subset_of_columns(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(columns=['var1', 'var2'])
self.plt.title('test_coxph_plotting_with_subset_of_columns')
self.plt.show(block=block)
def test_coxph_plotting_with_subset_of_columns_and_standardized(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(True, columns=['var1', 'var2'])
self.plt.title('test_coxph_plotting_with_subset_of_columns_and_standardized')
self.plt.show(block=block)
def test_coxph_plotting_normalized(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(True)
self.plt.title('test_coxph_plotting_normalized')
self.plt.show(block=block)
def test_coxtv_plotting_with_subset_of_columns_and_standardized(self, block):
df = load_stanford_heart_transplants()
ctv = CoxTimeVaryingFitter()
ctv.fit(df, id_col='id', event_col='event')
ctv.plot(True, columns=['age', 'year'])
self.plt.title('test_coxtv_plotting_with_subset_of_columns_and_standardized')
self.plt.show(block=block)
def test_kmf_left_censorship_plots(self, block):
kmf = KaplanMeierFitter()
lcd_dataset = load_lcd()
alluvial_fan = lcd_dataset.loc[lcd_dataset['group'] == 'alluvial_fan']
basin_trough = lcd_dataset.loc[lcd_dataset['group'] == 'basin_trough']
kmf.fit(alluvial_fan['T'], alluvial_fan['C'], left_censorship=True, label='alluvial_fan')
ax = kmf.plot()
kmf.fit(basin_trough['T'], basin_trough['C'], left_censorship=True, label='basin_trough')
ax = kmf.plot(ax=ax)
self.plt.title("test_kmf_left_censorship_plots")
self.plt.show(block=block)
return
def test_aaf_panel_dataset(self, block):
panel_dataset = load_panel_test()
aaf = AalenAdditiveFitter()
aaf.fit(panel_dataset, id_col='id', duration_col='t', event_col='E')
aaf.plot()
self.plt.title("test_aaf_panel_dataset")
self.plt.show(block=block)
return
def test_aalen_additive_fit_no_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline),
index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
X['T'] = T
X['E'] = np.random.binomial(1, 1, n)
aaf = AalenAdditiveFitter()
aaf.fit(X, 'T', 'E')
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(legend=False, ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col], legend=False)
self.plt.title("test_aalen_additive_fit_no_censor")
self.plt.show(block=block)
return
def test_aalen_additive_fit_with_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline),
index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
X['T'] = T
X['E'] = np.random.binomial(1, 0.99, n)
aaf = AalenAdditiveFitter()
aaf.fit(X, 'T', 'E')
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(legend=False, ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col], legend=False)
self.plt.title("test_aalen_additive_fit_with_censor")
self.plt.show(block=block)
return | tests/test_plotting.py | from __future__ import print_function
import os
import pytest
import pandas as pd
import numpy as np
from lifelines.estimation import NelsonAalenFitter, KaplanMeierFitter, AalenAdditiveFitter,\
CoxPHFitter, CoxTimeVaryingFitter
from lifelines.generate_datasets import generate_random_lifetimes, generate_hazard_rates
from lifelines.plotting import plot_lifetimes
from lifelines.datasets import load_waltons, load_regression_dataset, load_lcd,\
load_panel_test, load_stanford_heart_transplants
from lifelines.generate_datasets import cumulative_integral
@pytest.mark.plottest
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
class TestPlotting():
    @pytest.fixture
    def kmf(self):
        """Fresh KaplanMeierFitter supplied to each test that requests it."""
        return KaplanMeierFitter()
    def setup_method(self, method):
        """Import matplotlib lazily; skip the test if it is unavailable."""
        pytest.importorskip("matplotlib")
        from matplotlib import pyplot as plt
        self.plt = plt
def test_negative_times_still_plots(self, block, kmf):
n = 40
T = np.linspace(-2, 3, n)
C = np.random.randint(2, size=n)
kmf.fit(T, C)
ax = kmf.plot()
self.plt.title('test_negative_times_still_plots')
self.plt.show(block=block)
return
def test_kmf_plotting(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
data2 = np.random.exponential(2, size=(200, 1))
data3 = np.random.exponential(4, size=(500, 1))
kmf.fit(data1, label='test label 1')
ax = kmf.plot()
kmf.fit(data2, label='test label 2')
kmf.plot(ax=ax)
kmf.fit(data3, label='test label 3')
kmf.plot(ax=ax)
self.plt.title("test_kmf_plotting")
self.plt.show(block=block)
return
def test_kmf_with_risk_counts(self, block, kmf):
data1 = np.random.exponential(10, size=(100))
kmf.fit(data1)
kmf.plot(at_risk_counts=True)
self.plt.title("test_kmf_with_risk_counts")
self.plt.show(block=block)
def test_naf_plotting_with_custom_colours(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(500))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(color="r")
naf.fit(data2)
naf.plot(ax=ax, c="k")
self.plt.title('test_naf_plotting_with_custom_coloirs')
self.plt.show(block=block)
return
def test_aalen_additive_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline)
C = np.random.binomial(1, 1., size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.plot(iloc=slice(0, aaf.cumulative_hazards_.shape[0] - 100))
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_plot')
self.plt.show(block=block)
return
def test_aalen_additive_smoothed_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 150, 5000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline) + 0.1 * np.random.uniform(size=(n, 1))
C = np.random.binomial(1, 0.8, size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.smoothed_hazards_(1).iloc[0:aaf.cumulative_hazards_.shape[0] - 500].plot()
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_smoothed_plot')
self.plt.show(block=block)
return
def test_naf_plotting_slice(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(loc=slice(0, None))
naf.fit(data2)
naf.plot(ax=ax, ci_force_lines=True, iloc=slice(100, 180))
self.plt.title('test_naf_plotting_slice')
self.plt.show(block=block)
return
def test_plot_lifetimes_calendar(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
current = 10
birthtimes = current * np.random.uniform(size=(N,))
T, C = generate_random_lifetimes(hz, t, size=N, censor=current - birthtimes)
plot_lifetimes(T, event_observed=C, birthtimes=birthtimes, block=block)
def test_plot_lifetimes_relative(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
T, C = generate_random_lifetimes(hz, t, size=N, censor=True)
plot_lifetimes(T, event_observed=C, block=block)
def test_naf_plot_cumulative_hazard(self, block):
data1 = np.random.exponential(5, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot()
naf.plot_cumulative_hazard(ax=ax, ci_force_lines=True)
self.plt.title("I should have plotted the same thing, but different styles + color!")
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwidth_2(self, block):
data1 = np.random.exponential(5, size=(2000, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=1., loc=slice(0, 7.))
self.plt.title('test_naf_plot_cumulative_hazard_bandwidth_2')
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwith_1(self, block):
data1 = np.random.exponential(5, size=(2000, 1)) ** 2
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=5., iloc=slice(0, 1700))
self.plt.title('test_naf_plot_cumulative_hazard_bandwith_1')
self.plt.show(block=block)
return
def test_show_censor_with_discrete_date(self, block, kmf):
T = np.random.binomial(20, 0.1, size=100)
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_discrete_date')
self.plt.show(block=block)
return
def test_show_censor_with_index_0(self, block, kmf):
T = np.random.binomial(20, 0.9, size=100) # lifelines should auto put a 0 in.
C = np.random.binomial(1, 0.8, size=100)
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_index_0')
self.plt.show(block=block)
return
def test_flat_style_with_customer_censor_styles(self, block, kmf):
data1 = np.random.exponential(10, size=200)
kmf.fit(data1, label='test label 1')
kmf.plot(ci_force_lines=True, show_censors=True,
censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
self.plt.title('test_flat_style_no_censor')
self.plt.show(block=block)
return
def test_loglogs_plot(self, block, kmf):
data1 = np.random.exponential(10, size=200)
data2 = np.random.exponential(5, size=200)
kmf.fit(data1, label='test label 1')
ax = kmf.plot_loglogs()
kmf.fit(data2, label='test label 2')
ax = kmf.plot_loglogs(ax=ax)
self.plt.title('test_loglogs_plot')
self.plt.show(block=block)
return
def test_seaborn_doesnt_cause_kmf_plot_error(self, block, kmf, capsys):
import seaborn as sns
df = load_waltons()
T = df['T']
E = df['E']
kmf = KaplanMeierFitter()
kmf.fit(T, event_observed=E)
kmf.plot()
self.plt.title('test_seaborn_doesnt_cause_kmf_plot_error')
self.plt.show(block=block)
_, err = capsys.readouterr()
assert err == ""
def test_coxph_plotting(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot()
self.plt.title('test_coxph_plotting')
self.plt.show(block=block)
def test_coxph_plotting_with_subset_of_columns(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(columns=['var1', 'var2'])
self.plt.title('test_coxph_plotting_with_subset_of_columns')
self.plt.show(block=block)
def test_coxph_plotting_with_subset_of_columns_and_standardized(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(True, columns=['var1', 'var2'])
self.plt.title('test_coxph_plotting_with_subset_of_columns_and_standardized')
self.plt.show(block=block)
def test_coxph_plotting_normalized(self, block):
df = load_regression_dataset()
cp = CoxPHFitter()
cp.fit(df, "T", "E")
cp.plot(True)
self.plt.title('test_coxph_plotting_normalized')
self.plt.show(block=block)
def test_coxtv_plotting_with_subset_of_columns_and_standardized(self, block):
df = load_stanford_heart_transplants()
ctv = CoxTimeVaryingFitter()
ctv.fit(df, id_col='id', event_col='event')
ctv.plot(True, columns=['age', 'year'])
self.plt.title('test_coxtv_plotting_with_subset_of_columns_and_standardized')
self.plt.show(block=block)
def test_kmf_left_censorship_plots(self, block):
kmf = KaplanMeierFitter()
lcd_dataset = load_lcd()
alluvial_fan = lcd_dataset.loc[lcd_dataset['group'] == 'alluvial_fan']
basin_trough = lcd_dataset.loc[lcd_dataset['group'] == 'basin_trough']
kmf.fit(alluvial_fan['T'], alluvial_fan['C'], left_censorship=True, label='alluvial_fan')
ax = kmf.plot()
kmf.fit(basin_trough['T'], basin_trough['C'], left_censorship=True, label='basin_trough')
ax = kmf.plot(ax=ax)
self.plt.title("test_kmf_left_censorship_plots")
self.plt.show(block=block)
return
def test_aaf_panel_dataset(self, block):
panel_dataset = load_panel_test()
aaf = AalenAdditiveFitter()
aaf.fit(panel_dataset, id_col='id', duration_col='t', event_col='E')
aaf.plot()
self.plt.title("test_aaf_panel_dataset")
self.plt.show(block=block)
return
def test_aalen_additive_fit_no_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline),
index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
X['T'] = T
X['E'] = np.random.binomial(1, 1, n)
aaf = AalenAdditiveFitter()
aaf.fit(X, 'T', 'E')
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(legend=False, ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col], legend=False)
self.plt.title("test_aalen_additive_fit_no_censor")
self.plt.show(block=block)
return
def test_aalen_additive_fit_with_censor(self, block):
n = 2500
d = 6
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
X.columns = coef.columns
cumulative_hazards = pd.DataFrame(cumulative_integral(coef.values, timeline),
index=timeline, columns=coef.columns)
T = generate_random_lifetimes(hz, timeline)
X['T'] = T
X['E'] = np.random.binomial(1, 0.99, n)
aaf = AalenAdditiveFitter()
aaf.fit(X, 'T', 'E')
for i in range(d + 1):
ax = self.plt.subplot(d + 1, 1, i + 1)
col = cumulative_hazards.columns[i]
ax = cumulative_hazards[col].loc[:15].plot(legend=False, ax=ax)
ax = aaf.plot(loc=slice(0, 15), ax=ax, columns=[col], legend=False)
self.plt.title("test_aalen_additive_fit_with_censor")
self.plt.show(block=block)
return | 0.673943 | 0.544983 |
import logging
import os.path
import time
from collections import OrderedDict
import sys
import numpy as np
import torch.nn.functional as F
from torch import optim
from braindecode.models.deep4 import Deep4Net
from braindecode.datasets.bcic_iv_2a import BCICompetition4Set2A
from braindecode.experiments.experiment import Experiment
from braindecode.experiments.monitors import LossMonitor, MisclassMonitor, \
RuntimeMonitor
from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or
from braindecode.datautil.iterators import BalancedBatchSizeIterator
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
from braindecode.datautil.splitters import split_into_two_sets
from braindecode.torch_ext.constraints import MaxNormDefaultConstraint
from braindecode.torch_ext.util import set_random_seeds, np_to_var
from braindecode.mne_ext.signalproc import mne_apply
from braindecode.datautil.signalproc import (bandpass_cnt,
exponential_running_standardize)
from braindecode.datautil.trial_segment import create_signal_target_from_raw_mne
log = logging.getLogger(__name__)
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
    """Train a ConvNet on one subject of the BCI Competition IV 2a dataset.

    Loads the subject's train ('T') and evaluation ('E') GDF recordings and
    their .mat label files, preprocesses them (EOG/stim channel removal,
    scaling, bandpass, exponential running standardization), cuts trials,
    and runs a braindecode Experiment with early stopping on validation
    misclassification.

    :param data_folder: directory containing the A0xT/A0xE .gdf and .mat files
    :param subject_id: subject number (1-9); selects the file names
    :param low_cut_hz: lower bandpass cutoff in Hz (typically 0 or 4)
    :param model: 'shallow' (ShallowFBCSPNet) or 'deep' (Deep4Net)
    :param cuda: if True, move the model to GPU and train there
    :return: the finished Experiment; per-epoch metrics are in exp.epochs_df
    """
    ival = [-500, 4000]  # trial window around each marker (ms, per BCIC IV 2a convention)
    max_epochs = 1600
    max_increase_epochs = 160  # patience for the NoDecrease stop criterion below
    batch_size = 60
    high_cut_hz = 38  # upper bandpass cutoff in Hz
    factor_new = 1e-3  # decay factor of the exponential running standardization
    init_block_size = 1000
    valid_set_fraction = 0.2  # last 20% of training trials become the validation set
    train_filename = 'A{:02d}T.gdf'.format(subject_id)
    test_filename = 'A{:02d}E.gdf'.format(subject_id)
    train_filepath = os.path.join(data_folder, train_filename)
    test_filepath = os.path.join(data_folder, test_filename)
    # labels ship in .mat files named like the recordings
    train_label_filepath = train_filepath.replace('.gdf', '.mat')
    test_label_filepath = test_filepath.replace('.gdf', '.mat')
    train_loader = BCICompetition4Set2A(
        train_filepath, labels_filename=train_label_filepath)
    test_loader = BCICompetition4Set2A(
        test_filepath, labels_filename=test_label_filepath)
    train_cnt = train_loader.load()
    test_cnt = test_loader.load()
    # Preprocessing: drop the stimulus and EOG channels, keep the 22 EEG channels
    train_cnt = train_cnt.drop_channels(['STI 014', 'EOG-left',
                                         'EOG-central', 'EOG-right'])
    assert len(train_cnt.ch_names) == 22
    # convert to microvolt (x 1e6) for numerical stability of the next operations
    train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
    train_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz, train_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), train_cnt)
    # standardize with an exponential running mean/var; applied on a.T and
    # transposed back, so standardization runs along the other axis
    train_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T, factor_new=factor_new,
                                                  init_block_size=init_block_size,
                                                  eps=1e-4).T,
        train_cnt)
    # identical preprocessing chain for the evaluation recording
    test_cnt = test_cnt.drop_channels(['STI 014', 'EOG-left',
                                       'EOG-central', 'EOG-right'])
    assert len(test_cnt.ch_names) == 22
    test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
    test_cnt = mne_apply(
        lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz, test_cnt.info['sfreq'],
                               filt_order=3,
                               axis=1), test_cnt)
    test_cnt = mne_apply(
        lambda a: exponential_running_standardize(a.T, factor_new=factor_new,
                                                  init_block_size=init_block_size,
                                                  eps=1e-4).T,
        test_cnt)
    # marker codes 1-4 map to the four motor imagery classes
    marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2],),
                              ('Foot', [3]), ('Tongue', [4])])
    train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
    test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)
    train_set, valid_set = split_into_two_sets(
        train_set, first_set_fraction=1-valid_set_fraction)
    set_random_seeds(seed=20190706, cuda=cuda)
    n_classes = 4
    n_chans = int(train_set.X.shape[1])
    input_time_length = train_set.X.shape[2]
    if model == 'shallow':
        model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
                                final_conv_length='auto').create_network()
    elif model == 'deep':
        model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
                         final_conv_length='auto').create_network()
    if cuda:
        model.cuda()
    log.info("Model: \n{:s}".format(str(model)))
    optimizer = optim.Adam(model.parameters())
    iterator = BalancedBatchSizeIterator(batch_size=batch_size)
    # stop at max_epochs, or earlier once valid_misclass has not improved
    # for max_increase_epochs consecutive epochs
    stop_criterion = Or([MaxEpochs(max_epochs),
                         NoDecrease('valid_misclass', max_increase_epochs)])
    monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
    model_constraint = MaxNormDefaultConstraint()
    exp = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
                     loss_function=F.nll_loss, optimizer=optimizer,
                     model_constraint=model_constraint,
                     monitors=monitors,
                     stop_criterion=stop_criterion,
                     remember_best_column='valid_misclass',
                     run_after_early_stop=True, cuda=cuda)
    exp.run()
    return exp
if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    # Configure logging once, before the loop: calls to basicConfig after the
    # first are no-ops, so the original per-iteration call was dead weight.
    logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                        level=logging.DEBUG, stream=sys.stdout)
    # Should contain both .gdf files and .mat-labelfiles from competition
    data_folder = 'data/'
    low_cut_hz = 4  # 0 or 4
    model = 'shallow'  # 'shallow' or 'deep'
    cuda = True
    for subject_id in range(1, 10):
        exp = run_exp(data_folder, subject_id, low_cut_hz, model, cuda)
        log.info("\nLast 10 epochs")
        log.info("\n" + str(exp.epochs_df.iloc[-10:]))
        # Open the summary file only for the short write, instead of holding
        # it open across the whole (long) training run; append mode means
        # earlier subjects' results survive a crash mid-loop.
        with open("original_test.txt", "a") as file:
            file.write("Last 10 epochs \n ----------------------------------------------------------- \n")
            file.write("\n" + str(exp.epochs_df.iloc[-10:]))
            file.write('\nThe minimum is %.3f\n' % np.min(exp.epochs_df['test_misclass'].values))
import os.path
import time
from collections import OrderedDict
import sys
import numpy as np
import torch.nn.functional as F
from torch import optim
from braindecode.models.deep4 import Deep4Net
from braindecode.datasets.bcic_iv_2a import BCICompetition4Set2A
from braindecode.experiments.experiment import Experiment
from braindecode.experiments.monitors import LossMonitor, MisclassMonitor, \
RuntimeMonitor
from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or
from braindecode.datautil.iterators import BalancedBatchSizeIterator
from braindecode.models.shallow_fbcsp import ShallowFBCSPNet
from braindecode.datautil.splitters import split_into_two_sets
from braindecode.torch_ext.constraints import MaxNormDefaultConstraint
from braindecode.torch_ext.util import set_random_seeds, np_to_var
from braindecode.mne_ext.signalproc import mne_apply
from braindecode.datautil.signalproc import (bandpass_cnt,
exponential_running_standardize)
from braindecode.datautil.trial_segment import create_signal_target_from_raw_mne
log = logging.getLogger(__name__)
def run_exp(data_folder, subject_id, low_cut_hz, model, cuda):
ival = [-500, 4000]
max_epochs = 1600
max_increase_epochs = 160
batch_size = 60
high_cut_hz = 38
factor_new = 1e-3
init_block_size = 1000
valid_set_fraction = 0.2
train_filename = 'A{:02d}T.gdf'.format(subject_id)
test_filename = 'A{:02d}E.gdf'.format(subject_id)
train_filepath = os.path.join(data_folder, train_filename)
test_filepath = os.path.join(data_folder, test_filename)
train_label_filepath = train_filepath.replace('.gdf', '.mat')
test_label_filepath = test_filepath.replace('.gdf', '.mat')
train_loader = BCICompetition4Set2A(
train_filepath, labels_filename=train_label_filepath)
test_loader = BCICompetition4Set2A(
test_filepath, labels_filename=test_label_filepath)
train_cnt = train_loader.load()
test_cnt = test_loader.load()
# Preprocessing
train_cnt = train_cnt.drop_channels(['STI 014', 'EOG-left',
'EOG-central', 'EOG-right'])
assert len(train_cnt.ch_names) == 22
# lets convert to millvolt for numerical stability of next operations
train_cnt = mne_apply(lambda a: a * 1e6, train_cnt)
train_cnt = mne_apply(
lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz, train_cnt.info['sfreq'],
filt_order=3,
axis=1), train_cnt)
train_cnt = mne_apply(
lambda a: exponential_running_standardize(a.T, factor_new=factor_new,
init_block_size=init_block_size,
eps=1e-4).T,
train_cnt)
test_cnt = test_cnt.drop_channels(['STI 014', 'EOG-left',
'EOG-central', 'EOG-right'])
assert len(test_cnt.ch_names) == 22
test_cnt = mne_apply(lambda a: a * 1e6, test_cnt)
test_cnt = mne_apply(
lambda a: bandpass_cnt(a, low_cut_hz, high_cut_hz, test_cnt.info['sfreq'],
filt_order=3,
axis=1), test_cnt)
test_cnt = mne_apply(
lambda a: exponential_running_standardize(a.T, factor_new=factor_new,
init_block_size=init_block_size,
eps=1e-4).T,
test_cnt)
marker_def = OrderedDict([('Left Hand', [1]), ('Right Hand', [2],),
('Foot', [3]), ('Tongue', [4])])
train_set = create_signal_target_from_raw_mne(train_cnt, marker_def, ival)
test_set = create_signal_target_from_raw_mne(test_cnt, marker_def, ival)
train_set, valid_set = split_into_two_sets(
train_set, first_set_fraction=1-valid_set_fraction)
set_random_seeds(seed=20190706, cuda=cuda)
n_classes = 4
n_chans = int(train_set.X.shape[1])
input_time_length = train_set.X.shape[2]
if model == 'shallow':
model = ShallowFBCSPNet(n_chans, n_classes, input_time_length=input_time_length,
final_conv_length='auto').create_network()
elif model == 'deep':
model = Deep4Net(n_chans, n_classes, input_time_length=input_time_length,
final_conv_length='auto').create_network()
if cuda:
model.cuda()
log.info("Model: \n{:s}".format(str(model)))
optimizer = optim.Adam(model.parameters())
iterator = BalancedBatchSizeIterator(batch_size=batch_size)
stop_criterion = Or([MaxEpochs(max_epochs),
NoDecrease('valid_misclass', max_increase_epochs)])
monitors = [LossMonitor(), MisclassMonitor(), RuntimeMonitor()]
model_constraint = MaxNormDefaultConstraint()
exp = Experiment(model, train_set, valid_set, test_set, iterator=iterator,
loss_function=F.nll_loss, optimizer=optimizer,
model_constraint=model_constraint,
monitors=monitors,
stop_criterion=stop_criterion,
remember_best_column='valid_misclass',
run_after_early_stop=True, cuda=cuda)
exp.run()
return exp
if __name__ == '__main__':
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
for subject_id in range(1,10):
with open("original_test.txt", "a") as file:
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
level=logging.DEBUG, stream=sys.stdout)
# Should contain both .gdf files and .mat-labelfiles from competition
data_folder = 'data/'
low_cut_hz = 4 # 0 or 4
model = 'shallow' #'shallow' or 'deep'
cuda = True
exp = run_exp(data_folder, subject_id, low_cut_hz, model, cuda)
log.info("\nLast 10 epochs")
log.info("\n" + str(exp.epochs_df.iloc[-10:]))
file.write("Last 10 epochs \n ----------------------------------------------------------- \n")
file.write("\n" + str(exp.epochs_df.iloc[-10:]))
file.write('\nThe minimum is %.3f\n' % np.min(exp.epochs_df['test_misclass'].values)) | 0.49585 | 0.395835 |
import os
from os import path
from src.play_json import PlayNext, get_series_dirs, load_play_next
from src.status_data import STATUS_STRINGS
from src.config import Config
def _reset_target_link_dirs(config: Config) -> None:
    """Ensure every status/starred directory exists and contains no symlinks."""
    for target_dir in _get_all_target_dirs(config):
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
            continue
        # directory already exists: drop any symlinks left from a previous run
        for entry in os.listdir(target_dir):
            entry_path = path.join(target_dir, entry)
            if path.islink(entry_path):
                os.unlink(entry_path)
def _get_starred_target_dir(config: Config) -> str:
    """Directory under the link root that collects starred series."""
    return path.join(config.link_root, "starred")
def _get_all_target_dirs(config: Config) -> list[str]:
    """All link target dirs: one per status string plus the starred dir."""
    dirs = [path.join(config.link_root, status) for status in STATUS_STRINGS]
    dirs.append(_get_starred_target_dir(config))
    return dirs
# Cache for _get_link_target_path: maps each status string to its directory
# under the most recently seen link_root; rebuilt whenever link_root changes.
_last_link_root = None
_status_path_map = {}
def _get_link_target_path(config: Config, play_next: PlayNext) -> str:
    """Full symlink path for a series: <link_root>/<status>/<title>.

    The status->directory map is cached at module level and rebuilt only
    when the configured link_root changes.
    """
    global _status_path_map, _last_link_root
    link_root = config.link_root
    if link_root != _last_link_root:
        _status_path_map = {status: path.join(link_root, status)
                            for status in STATUS_STRINGS}
        _last_link_root = link_root
    # KeyError here means play_next.status is not one of STATUS_STRINGS
    return path.join(_status_path_map[str(play_next.status)], play_next.title)
def link(config: Config, series_path: str) -> None:
    """Symlink one series into its status directory, and into the starred
    directory as well when the series is marked starred."""
    # ! Quickly hacked it here, not sure if it's enough to make it work
    # skip hidden directories (leading dot in the basename)
    if path.basename(series_path).startswith("."):
        return
    play_next = load_play_next(series_path)
    os.symlink(series_path, _get_link_target_path(config, play_next))
    if play_next.starred:
        os.symlink(series_path,
                   path.join(_get_starred_target_dir(config), play_next.title))
def relink_all(config: Config) -> None:
    """Reset the link tree, then recreate links for every known series."""
    _reset_target_link_dirs(config)
    for series_path in get_series_dirs(config, True):
        link(config, series_path)
from os import path
from src.play_json import PlayNext, get_series_dirs, load_play_next
from src.status_data import STATUS_STRINGS
from src.config import Config
def _reset_target_link_dirs(config: Config) -> None:
all_dirs = _get_all_target_dirs(config)
for dir in all_dirs:
if os.path.exists(dir):
# remove symlink
link_paths = [p for f in os.listdir(dir) if path.islink(p := path.join(dir, f))]
for l in link_paths: os.unlink(l)
else:
os.makedirs(dir)
def _get_starred_target_dir(config: Config) -> str:
return path.join(config.link_root, "starred")
def _get_all_target_dirs(config: Config) -> list[str]:
starred_dir = _get_starred_target_dir(config)
return [ path.join(config.link_root, x) for x in STATUS_STRINGS ] + [ starred_dir ]
_last_link_root = None
_status_path_map = {}
def _get_link_target_path(config: Config, play_next: PlayNext) -> str:
global _status_path_map, _last_link_root
link_root = config.link_root
if link_root != _last_link_root:
_status_path_map = { x: path.join(link_root, x) for x in STATUS_STRINGS }
_last_link_root = link_root
target_dir = _status_path_map[str(play_next.status)]
return path.join(target_dir, play_next.title)
def link(config: Config, series_path: str) -> None:
# ! Quickly hacked it here, not sure if it's enough to make it work
filename = path.basename(series_path)
if filename.startswith("."): return
play_next = load_play_next(series_path)
link_target = _get_link_target_path(config, play_next)
os.symlink(series_path, link_target)
if play_next.starred:
starred_dir = _get_starred_target_dir(config)
target_path = path.join(starred_dir, play_next.title)
os.symlink(series_path, target_path)
def relink_all(config: Config) -> None:
_reset_target_link_dirs(config)
all_series_paths = get_series_dirs(config, True)
for series_path in all_series_paths:
link(config, series_path) | 0.310799 | 0.116061 |
import pytest
from fbmsg.models.incoming import Request, Entry, Message as iMessage
from fbmsg.models.messages import QuickReply, QuickReplyButton, Message, Button, Template
from fbmsg.models.settings import Analytics, MenuItem, PersistentMenu
class TestQuickReplies:
    def test_QuickReplyButton(self):
        """A button serializes to a text quick-reply dict; bad types raise."""
        assert QuickReplyButton("title", "payload").to_dict() == {
            'content_type': 'text', "title": "title", "payload": "payload"}
        with pytest.raises(TypeError):
            QuickReplyButton(1, "payload")
        with pytest.raises(TypeError):
            QuickReplyButton("title", None)

    def test_QuickReply(self):
        """A QuickReply rejects non-buttons and serializes buttons in order."""
        qr = QuickReply()
        with pytest.raises(TypeError):
            qr.add(1)
        qr.add(QuickReplyButton("title", "payload"))
        qr.add(QuickReplyButton("title", "payload"))
        expected = {'content_type': 'text', "title": "title", "payload": "payload"}
        assert qr.to_dict() == [expected, expected]
class TestTemplates:
    def test_Button(self):
        """Buttons serialize to {type, title}; non-string args are rejected."""
        with pytest.raises(TypeError):
            Button(1, 'test')
        with pytest.raises(TypeError):
            Button('test', 1)
        assert Button('test', 'test').to_dict() == {'type': 'test', 'title': 'test'}

    def test_Template(self):
        """Templates wrap type, text and buttons in the payload format."""
        with pytest.raises(TypeError):
            Template(1, 'test')
        with pytest.raises(TypeError):
            Template('test', 1)
        template = Template('test', 'test', buttons=[Button('test', 'test')])
        assert template.to_dict() == {
            "type": "template",
            "payload": {
                "template_type": 'test',
                "text": 'test',
                "buttons": [{'type': 'test', 'title': 'test'}],
            },
        }
class TestMessage:
    def test_Message_without_QuickReply(self):
        """A bare text message serializes to just its text."""
        assert Message(text="text").to_dict() == {"text": "text"}
        with pytest.raises(TypeError):
            Message(1)

    def test_Message_with_QuickReply(self):
        """Quick replies serialize alongside the text and can be replaced."""
        first = QuickReply()
        first.add(QuickReplyButton("title1", "payload1"))
        msg = Message(text="text", quick_reply=first)
        assert msg.to_dict() == {
            "text": "text",
            "quick_replies": [{'content_type': 'text', "title": "title1", "payload": "payload1"}],
        }
        with pytest.raises(TypeError):
            msg.set_quick_reply(1)
        with pytest.raises(TypeError):
            Message("Text", 1)
        second = QuickReply()
        second.add(QuickReplyButton("title2", "payload2"))
        msg.set_quick_reply(second)
        assert msg.to_dict() == {
            "text": "text",
            "quick_replies": [{'content_type': 'text', "title": "title2", "payload": "payload2"}],
        }
class TestIncoming:
def test_Message(self):
with pytest.raises(TypeError):
iMessage('', {}, 1, {})
with pytest.raises(TypeError):
iMessage({}, 1, 1, {})
with pytest.raises(TypeError):
iMessage({}, {}, '', {})
with pytest.raises(TypeError):
iMessage({}, {}, 1, 1)
with pytest.raises(TypeError):
iMessage({}, {}, 1, {}, 1, {})
with pytest.raises(TypeError):
iMessage({}, {}, 1, {}, {}, 1)
m = iMessage(**{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'},
'timestamp': 1543226751645,
'message': {'mid': 'test', 'seq': 15, 'text': 'test', 'quick_reply': {'payload': 'test'}},
'referral': {'ref': 'asfasdf', 'source': 'SHORTLINK', 'type': 'OPEN_THREAD'}})
def test_Entry(self):
with pytest.raises(TypeError):
Entry(1, 1, [])
with pytest.raises(TypeError):
Entry('', '', [])
with pytest.raises(TypeError):
Entry('', 1, 1)
Entry('test', 1, [{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'},
'timestamp': 1543226751645,
'message': {'mid': 'test', 'seq': 15, 'text': 'test', 'quick_reply': {'payload': 'test'}}}])
def test_Request(self):
with pytest.raises(TypeError):
Request(1, [])
with pytest.raises(TypeError):
Request('', '')
Request('', [{'id': '2278924455579804', 'time': 1543226752083, 'messaging': [
{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'}, 'timestamp': 1543226751645,
'message': {
'mid': 'OqcEBjJm5yIB4FXOi-QQNpkHi9Y8onq4GJ-SGf1uuw59FAZJh1mi1w5xENFgluiJNdemXElPHwwrWElCYLW26g',
'seq': 15, 'text': 'heloo'}}]}])
class TestSettings:
def test_MenuItem(self):
i = MenuItem(**{
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
})
with pytest.raises(TypeError):
MenuItem(1, '')
with pytest.raises(TypeError):
MenuItem('', 1)
assert i.to_dict() == {
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
}
def test_PersistentMenu(self):
with pytest.raises(TypeError):
PersistentMenu(1, '', False)
with pytest.raises(TypeError):
PersistentMenu([], 1, False)
with pytest.raises(TypeError):
PersistentMenu([], '', 1)
m = PersistentMenu()
i = MenuItem(**{
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
})
m.add(i)
assert m.to_dict() == {'locale': 'default', 'composer_input_disabled': False, 'call_to_actions': [
{'type': 'postback', 'title': 'Pay Bill', 'payload': 'PAYBILL_PAYLOAD'}]}
def test_Analytics(self):
a = Analytics([], 123, 123)
assert a.to_dict() == {
'custom_events': [],
'page_id': 123,
'page_scoped_user_id': 123,
'event': 'CUSTOM_APP_EVENTS',
'advertiser_tracking_enabled': True,
'application_tracking_enabled': True,
'extinfo': ['mb1'],
} | tests/test_models.py | import pytest
from fbmsg.models.incoming import Request, Entry, Message as iMessage
from fbmsg.models.messages import QuickReply, QuickReplyButton, Message, Button, Template
from fbmsg.models.settings import Analytics, MenuItem, PersistentMenu
class TestQuickReplies:
def test_QuickReplyButton(self):
button = QuickReplyButton("title", "payload")
assert button.to_dict() == {'content_type': 'text', "title": "title", "payload": "payload"}
with pytest.raises(TypeError):
QuickReplyButton(1, "payload")
with pytest.raises(TypeError):
QuickReplyButton("title", None)
def test_QuickReply(self):
qr = QuickReply()
with pytest.raises(TypeError):
qr.add(1)
button1 = QuickReplyButton("title", "payload")
button2 = QuickReplyButton("title", "payload")
qr.add(button1)
qr.add(button2)
assert qr.to_dict() == [
{'content_type': 'text', "title": "title", "payload": "payload"},
{'content_type': 'text', "title": "title", "payload": "payload"}
]
class TestTemplates:
def test_Button(self):
button = Button('test', 'test')
with pytest.raises(TypeError):
Button(1, 'test')
with pytest.raises(TypeError):
Button('test', 1)
assert button.to_dict() == {'type': 'test', 'title': 'test'}
def test_Template(self):
button = Button('test', 'test')
t = Template('test', 'test', buttons=[button])
with pytest.raises(TypeError):
Template(1, 'test')
with pytest.raises(TypeError):
Template('test', 1)
assert t.to_dict() == {
"type": "template",
"payload": {
"template_type": 'test',
"text": 'test',
"buttons": [{'type': 'test', 'title': 'test'}]
}
}
class TestMessage:
def test_Message_without_QuickReply(self):
msg = Message(text="text")
assert msg.to_dict() == {"text": "text"}
with pytest.raises(TypeError):
Message(1)
def test_Message_with_QuickReply(self):
qr1 = QuickReply()
button1 = QuickReplyButton("title1", "payload1")
qr1.add(button1)
msg = Message(text="text", quick_reply=qr1)
assert msg.to_dict() == {
"text": "text",
"quick_replies": [{'content_type': 'text', "title": "title1", "payload": "payload1"}]
}
with pytest.raises(TypeError):
msg.set_quick_reply(1)
with pytest.raises(TypeError):
Message("Text", 1)
qr2 = QuickReply()
button2 = QuickReplyButton("title2", "payload2")
qr2.add(button2)
msg.set_quick_reply(qr2)
assert msg.to_dict() == {
"text": "text",
"quick_replies": [{'content_type': 'text', "title": "title2", "payload": "payload2"}]
}
class TestIncoming:
def test_Message(self):
with pytest.raises(TypeError):
iMessage('', {}, 1, {})
with pytest.raises(TypeError):
iMessage({}, 1, 1, {})
with pytest.raises(TypeError):
iMessage({}, {}, '', {})
with pytest.raises(TypeError):
iMessage({}, {}, 1, 1)
with pytest.raises(TypeError):
iMessage({}, {}, 1, {}, 1, {})
with pytest.raises(TypeError):
iMessage({}, {}, 1, {}, {}, 1)
m = iMessage(**{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'},
'timestamp': 1543226751645,
'message': {'mid': 'test', 'seq': 15, 'text': 'test', 'quick_reply': {'payload': 'test'}},
'referral': {'ref': 'asfasdf', 'source': 'SHORTLINK', 'type': 'OPEN_THREAD'}})
def test_Entry(self):
with pytest.raises(TypeError):
Entry(1, 1, [])
with pytest.raises(TypeError):
Entry('', '', [])
with pytest.raises(TypeError):
Entry('', 1, 1)
Entry('test', 1, [{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'},
'timestamp': 1543226751645,
'message': {'mid': 'test', 'seq': 15, 'text': 'test', 'quick_reply': {'payload': 'test'}}}])
def test_Request(self):
with pytest.raises(TypeError):
Request(1, [])
with pytest.raises(TypeError):
Request('', '')
Request('', [{'id': '2278924455579804', 'time': 1543226752083, 'messaging': [
{'sender': {'id': '1169720893152609'}, 'recipient': {'id': '2278924455579804'}, 'timestamp': 1543226751645,
'message': {
'mid': 'OqcEBjJm5yIB4FXOi-QQNpkHi9Y8onq4GJ-SGf1uuw59FAZJh1mi1w5xENFgluiJNdemXElPHwwrWElCYLW26g',
'seq': 15, 'text': 'heloo'}}]}])
class TestSettings:
def test_MenuItem(self):
i = MenuItem(**{
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
})
with pytest.raises(TypeError):
MenuItem(1, '')
with pytest.raises(TypeError):
MenuItem('', 1)
assert i.to_dict() == {
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
}
def test_PersistentMenu(self):
with pytest.raises(TypeError):
PersistentMenu(1, '', False)
with pytest.raises(TypeError):
PersistentMenu([], 1, False)
with pytest.raises(TypeError):
PersistentMenu([], '', 1)
m = PersistentMenu()
i = MenuItem(**{
"title": "Pay Bill",
"type": "postback",
"payload": "PAYBILL_PAYLOAD"
})
m.add(i)
assert m.to_dict() == {'locale': 'default', 'composer_input_disabled': False, 'call_to_actions': [
{'type': 'postback', 'title': 'Pay Bill', 'payload': 'PAYBILL_PAYLOAD'}]}
def test_Analytics(self):
a = Analytics([], 123, 123)
assert a.to_dict() == {
'custom_events': [],
'page_id': 123,
'page_scoped_user_id': 123,
'event': 'CUSTOM_APP_EVENTS',
'advertiser_tracking_enabled': True,
'application_tracking_enabled': True,
'extinfo': ['mb1'],
} | 0.44746 | 0.397091 |
import numpy as np
import cv2
import math
img = np.array(cv2.imread(r"C:\Users\<NAME>\Desktop\CVIP_PROJECT3\original_imgs\original_imgs\noise.jpg",0))
out_img = np.zeros((img.shape))
b = np.array([[1,1,1],
[1,1,1],
[1,1,1]])
k_h = b.shape[0]
k_w = b.shape[1]
h= b.shape[0]//2
w= b.shape[1]//2
def erosion(sample):
img = sample
output_img = np.zeros((sample.shape))
for i in range(sample.shape[0]):
for j in range(sample.shape[1]):
cnt = 255
for x in range(i-h,i+h+1):
for y in range(j-w,j+w+1):
if(x>=0 and x<sample.shape[0] and y>=0 and y<sample.shape[1]) :
if(img[x][y]<cnt):
cnt = img[x][y]
output_img[i][j]=cnt
return output_img
def dilation(sample):
img = sample
out_img = np.zeros((sample.shape))
for i in range(sample.shape[0]):
for j in range(sample.shape[1]):
cnt = 0
for x in range(i-h,i+h+1):
for y in range(j-w,j+w+1):
if( x>=0 and x<sample.shape[0] and y>0 and y<sample.shape[1]) :
if(img[x][y]>cnt):
cnt = img[x][y]
out_img[i][j]=cnt
return out_img
#opening
e1 = erosion(img)
d1 = dilation(e1)
#closing
d2 = dilation(d1)
e2 = erosion(d2)
cv2.imwrite('res_noise1.png',e2)
cv2.imshow("open_close",e2)
cv2.waitKey(0)
cv2.destroyAllWindows()
#closing
d3 = dilation(img)
e3 = erosion(d3)
#opening
e4 = erosion(e3)
d4 = dilation(e4)
cv2.imwrite('res_noise2.png',d4)
cv2.imshow("close_open",d4)
cv2.waitKey(0)
cv2.destroyAllWindows()
def boundary_extraction(image1,image2):
erode_1 = erosion(image1)
op_1 = np.subtract(image1,erode_1)
cv2.imshow("first",op_1)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('res_bound1.png',op_1)
erode_2 = erosion(image2)
op_2 = np.subtract(image2,erode_2)
cv2.imshow('second',op_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('res_bound2.png',op_2)
#boundary extraction
boundary_extraction(e2,d4) | task1/task1.py | import numpy as np
import cv2
import math
img = np.array(cv2.imread(r"C:\Users\<NAME>\Desktop\CVIP_PROJECT3\original_imgs\original_imgs\noise.jpg",0))
out_img = np.zeros((img.shape))
b = np.array([[1,1,1],
[1,1,1],
[1,1,1]])
k_h = b.shape[0]
k_w = b.shape[1]
h= b.shape[0]//2
w= b.shape[1]//2
def erosion(sample):
img = sample
output_img = np.zeros((sample.shape))
for i in range(sample.shape[0]):
for j in range(sample.shape[1]):
cnt = 255
for x in range(i-h,i+h+1):
for y in range(j-w,j+w+1):
if(x>=0 and x<sample.shape[0] and y>=0 and y<sample.shape[1]) :
if(img[x][y]<cnt):
cnt = img[x][y]
output_img[i][j]=cnt
return output_img
def dilation(sample):
img = sample
out_img = np.zeros((sample.shape))
for i in range(sample.shape[0]):
for j in range(sample.shape[1]):
cnt = 0
for x in range(i-h,i+h+1):
for y in range(j-w,j+w+1):
if( x>=0 and x<sample.shape[0] and y>0 and y<sample.shape[1]) :
if(img[x][y]>cnt):
cnt = img[x][y]
out_img[i][j]=cnt
return out_img
#opening
e1 = erosion(img)
d1 = dilation(e1)
#closing
d2 = dilation(d1)
e2 = erosion(d2)
cv2.imwrite('res_noise1.png',e2)
cv2.imshow("open_close",e2)
cv2.waitKey(0)
cv2.destroyAllWindows()
#closing
d3 = dilation(img)
e3 = erosion(d3)
#opening
e4 = erosion(e3)
d4 = dilation(e4)
cv2.imwrite('res_noise2.png',d4)
cv2.imshow("close_open",d4)
cv2.waitKey(0)
cv2.destroyAllWindows()
def boundary_extraction(image1,image2):
erode_1 = erosion(image1)
op_1 = np.subtract(image1,erode_1)
cv2.imshow("first",op_1)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('res_bound1.png',op_1)
erode_2 = erosion(image2)
op_2 = np.subtract(image2,erode_2)
cv2.imshow('second',op_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imwrite('res_bound2.png',op_2)
#boundary extraction
boundary_extraction(e2,d4) | 0.161254 | 0.324195 |
import numpy as np
import pandas as pd
from base_test import ArkoudaTest
from context import arkouda as ak
SIZE = 1000
GROUPS = 32
verbose = True
OPS = frozenset(["mean", "min", "max", "sum", "prod"])
def groupby_to_arrays(df: pd.DataFrame, kname, vname, op):
g = df.groupby(kname)[vname]
agg = g.aggregate(op.replace("arg", "idx"))
keys = agg.index.values
return keys, agg.values
def make_arrays():
keys = np.random.randint(0, GROUPS, SIZE)
f = np.random.randn(SIZE)
# f.fill(5)
for i in range(SIZE):
if np.random.rand() < 0.2:
f[i] = np.nan
d = {"keys": keys, "float64": f}
return d
def compare_keys(pdkeys, akkeys, pdvals, akvals) -> int:
"""
Compares the numpy and arkouda arrays via the numpy.allclose method with the
default relative and absolute tolerances, returning 0 if the arrays are similar
element-wise within the tolerances, 1 if they are dissimilar.element
:return: 0 (identical) or 1 (dissimilar)
:rtype: int
"""
akkeys = akkeys.to_ndarray()
if not np.allclose(pdkeys, akkeys):
print("Different keys")
return 1
if not np.allclose(pdvals, akvals):
print(f"Different values (abs diff = {np.abs(pdvals - akvals).sum()})")
return 1
return 0
def run_test(verbose=True):
"""
The run_test method enables execution of ak.GroupBy and ak.GroupBy.Reductions
for mean, min, max, and sum
on a randomized set of arrays including nan values.
:return:
"""
d = make_arrays()
df = pd.DataFrame(d)
akdf = {k: ak.array(v) for k, v in d.items()}
akg = ak.GroupBy(akdf["keys"])
keyname = "keys"
tests = 0
failures = 0
not_impl = 0
tests += 1
pdkeys, pdvals = groupby_to_arrays(df, keyname, "float64", "count")
akkeys, akvals = akg.count()
akvals = akvals.to_ndarray()
for op in OPS:
tests += 1
do_check = True
try:
pdkeys, pdvals = groupby_to_arrays(df, keyname, "float64", op)
except Exception:
if verbose:
print("Pandas does not implement")
do_check = False
try:
akkeys, akvals = akg.aggregate(akdf["float64"], op, True)
akvals = akvals.to_ndarray()
except RuntimeError as E:
if verbose:
print("Arkouda error: ", E)
not_impl += 1
do_check = False
continue
if not do_check:
continue
for i in range(pdvals.size):
if np.isnan(pdvals[i]):
pdvals[i] = 0.0 # clear out any nans to match ak implementation
failures += compare_keys(pdkeys, akkeys, pdvals, akvals)
return failures
class NanTest(ArkoudaTest):
def test_nan(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test with nan values
"""
self.assertEqual(0, run_test()) | tests/nan_test.py | import numpy as np
import pandas as pd
from base_test import ArkoudaTest
from context import arkouda as ak
SIZE = 1000
GROUPS = 32
verbose = True
OPS = frozenset(["mean", "min", "max", "sum", "prod"])
def groupby_to_arrays(df: pd.DataFrame, kname, vname, op):
g = df.groupby(kname)[vname]
agg = g.aggregate(op.replace("arg", "idx"))
keys = agg.index.values
return keys, agg.values
def make_arrays():
keys = np.random.randint(0, GROUPS, SIZE)
f = np.random.randn(SIZE)
# f.fill(5)
for i in range(SIZE):
if np.random.rand() < 0.2:
f[i] = np.nan
d = {"keys": keys, "float64": f}
return d
def compare_keys(pdkeys, akkeys, pdvals, akvals) -> int:
"""
Compares the numpy and arkouda arrays via the numpy.allclose method with the
default relative and absolute tolerances, returning 0 if the arrays are similar
element-wise within the tolerances, 1 if they are dissimilar.element
:return: 0 (identical) or 1 (dissimilar)
:rtype: int
"""
akkeys = akkeys.to_ndarray()
if not np.allclose(pdkeys, akkeys):
print("Different keys")
return 1
if not np.allclose(pdvals, akvals):
print(f"Different values (abs diff = {np.abs(pdvals - akvals).sum()})")
return 1
return 0
def run_test(verbose=True):
"""
The run_test method enables execution of ak.GroupBy and ak.GroupBy.Reductions
for mean, min, max, and sum
on a randomized set of arrays including nan values.
:return:
"""
d = make_arrays()
df = pd.DataFrame(d)
akdf = {k: ak.array(v) for k, v in d.items()}
akg = ak.GroupBy(akdf["keys"])
keyname = "keys"
tests = 0
failures = 0
not_impl = 0
tests += 1
pdkeys, pdvals = groupby_to_arrays(df, keyname, "float64", "count")
akkeys, akvals = akg.count()
akvals = akvals.to_ndarray()
for op in OPS:
tests += 1
do_check = True
try:
pdkeys, pdvals = groupby_to_arrays(df, keyname, "float64", op)
except Exception:
if verbose:
print("Pandas does not implement")
do_check = False
try:
akkeys, akvals = akg.aggregate(akdf["float64"], op, True)
akvals = akvals.to_ndarray()
except RuntimeError as E:
if verbose:
print("Arkouda error: ", E)
not_impl += 1
do_check = False
continue
if not do_check:
continue
for i in range(pdvals.size):
if np.isnan(pdvals[i]):
pdvals[i] = 0.0 # clear out any nans to match ak implementation
failures += compare_keys(pdkeys, akkeys, pdvals, akvals)
return failures
class NanTest(ArkoudaTest):
def test_nan(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test with nan values
"""
self.assertEqual(0, run_test()) | 0.467332 | 0.405037 |
from service import Storage as stor
from service import Fusion as fus
class MatricPotential:
def __init__(self,db_host,db_user,db_pass):
self.RNI={}
self.ITN=0
self.water_needed=0
self.t_irri=0
self.database = stor.Storage(db_host, db_user, db_pass)
self.fusion = fus.Fusion()
#field_information
self.field_id = 0
self.farm_id = 0
self.crop_id = 0
self.irrigation_system_id =0
# crop information
self.critical_condition_moisture = 0
# irrigation system info
self.irrigation_type = 0
self.irrigation_ef = 0
self.irrigation_p = 0
# soil layer info
self.monitoring_points_ids=[]
self.date = ''
def set_field_info(self,field_id):
self.field_id = field_id
field_info = self.database.get_field_information(field_id)
self.farm_id = field_info[0]
self.crop_id = field_info[1]
self.irrigation_system_id = field_info[4]
# crop information
crop_info = self.database.get_crop_info(self.crop_id)
self.critical_condition_moisture = crop_info[7]
# irrigation system info
irrigation_info = self.database.get_irrigation_system_info(self.irrigation_system_id)
self.irrigation_type = irrigation_info[0]
self.irrigation_ef = irrigation_info[1]
self.irrigation_p = irrigation_info[2]
# soil layer info
self.monitoring_points_ids = self.database.get_monitoring_points(field_id)
#print("monitoring point ids {}".format(monitoring_points_ids))
# apply outlier remotion criteria on data
def data_preprocessing(self,date):
soil_layers = []
self.soil_layer_data = {}
self.date = date
for mp_id in self.monitoring_points_ids:
soil_layers = self.database.get_soil_layer_info(self.field_id, mp_id)
for layer in soil_layers:
soil_layer_id = layer[0]
layer_depth_type = layer[1]
moisture_sensor_type_id = layer[4]
u = self.database.get_moisture_data(self.date, field_id, mp_id, soil_layer_id)
criterias = self.database.get_moisture_remotion_criteria(moisture_sensor_type_id)
self.fusion.set_soil_moisture_remotion_criteria(criterias["min"], criterias["max"])
u = self.fusion.check_soil_moisture_remotion_criteria(u)
data = []
if layer_depth_type in self.soil_layer_data:
data = self.soil_layer_data[layer_depth_type]
data.append(u)
self.soil_layer_data.update({layer_depth_type:data})
def compute_RNI(self,field_id,date):
self.set_field_info(field_id)
self.data_preprocessing(date)
fusioned_data=0
depth_info=[]
self.RNI={}
afd=0#available ready water
rni=0
layer_depth = 0
field_condition_moisture = 0
residual_water_content = 0
saturation_water_content = 0
alpha_air_entry_suction = 0
n_pore_size_distribution = 0
current_humidity=0
critical_moisture=0
fusion_id=0
for key in self.soil_layer_data.keys():
layer=self.database.get_soil_depth(key)
layer_depth=layer[1]
fusion_id=layer[3]
field_capacity=layer[4]
residual_water_content=layer[5]
saturation_water_content=layer[6]
alpha_air_entry_suction=layer[7]
n_pore_size_distribution = layer[8]
fusion_type = self.database.get_fusion_method(fusion_id)
fusioned_data = self.fusion.apply_method(fusion_type["name"],
self.soil_layer_data[key])
current_humidity = self.vanGenuchten(residual_water_content,
saturation_water_content,
alpha_air_entry_suction,
n_pore_size_distribution,
fusioned_data)
fc_moisture = self.vanGenuchten(residual_water_content,saturation_water_content,
alpha_air_entry_suction,n_pore_size_distribution,
field_capacity)
critical_moisture = self.vanGenuchten(residual_water_content,
saturation_water_content,
alpha_air_entry_suction,
n_pore_size_distribution,
self.critical_condition_moisture)
afd=(fc_moisture-critical_moisture)*layer_depth
rni=(fc_moisture-current_humidity)*layer_depth
self.RNI.update({layer_depth:[current_humidity,critical_moisture,afd,rni]})
depths=self.RNI.keys()
min_depth = min(depths)
moisture = self.RNI[min_depth]
current_humidity=moisture[0]
critical_moisture=moisture[1]
self.water_needed=0
if current_humidity <= critical_moisture:
for key in self.RNI.keys():
moisture=self.RNI[key]
if key==min_depth:
self.water_needed+= moisture[2]
else:
self.water_needed += moisture[3]
self.ITN=self.water_need/self.irrigation_ef
return self.ITN
#model to compute soil moisture
def vanGenuchten(self,thetaR, thetaS, alpha, n, psiM):
num = thetaS - thetaR
den = (alpha * psiM) ** n
den = (den + 1) ** (1 - 1 / n)
theta = thetaR + (num / den)
return theta
def compute_irrigation_time(self):
self.t_irri=self.ITN/self.irrigation_p
return self.t_irri
if __name__ == '__main__':
irrigation_management = MatricPotential('localhost', 'root', '12345678')
date = "2016-02-01"
field_id = 1
water_needed=irrigation_management.compute_RNI(field_id,date)
print('water needed={}'.format(water_needed)) | framework/application/matric_potential.py | from service import Storage as stor
from service import Fusion as fus
class MatricPotential:
def __init__(self,db_host,db_user,db_pass):
self.RNI={}
self.ITN=0
self.water_needed=0
self.t_irri=0
self.database = stor.Storage(db_host, db_user, db_pass)
self.fusion = fus.Fusion()
#field_information
self.field_id = 0
self.farm_id = 0
self.crop_id = 0
self.irrigation_system_id =0
# crop information
self.critical_condition_moisture = 0
# irrigation system info
self.irrigation_type = 0
self.irrigation_ef = 0
self.irrigation_p = 0
# soil layer info
self.monitoring_points_ids=[]
self.date = ''
def set_field_info(self,field_id):
self.field_id = field_id
field_info = self.database.get_field_information(field_id)
self.farm_id = field_info[0]
self.crop_id = field_info[1]
self.irrigation_system_id = field_info[4]
# crop information
crop_info = self.database.get_crop_info(self.crop_id)
self.critical_condition_moisture = crop_info[7]
# irrigation system info
irrigation_info = self.database.get_irrigation_system_info(self.irrigation_system_id)
self.irrigation_type = irrigation_info[0]
self.irrigation_ef = irrigation_info[1]
self.irrigation_p = irrigation_info[2]
# soil layer info
self.monitoring_points_ids = self.database.get_monitoring_points(field_id)
#print("monitoring point ids {}".format(monitoring_points_ids))
# apply outlier remotion criteria on data
def data_preprocessing(self,date):
soil_layers = []
self.soil_layer_data = {}
self.date = date
for mp_id in self.monitoring_points_ids:
soil_layers = self.database.get_soil_layer_info(self.field_id, mp_id)
for layer in soil_layers:
soil_layer_id = layer[0]
layer_depth_type = layer[1]
moisture_sensor_type_id = layer[4]
u = self.database.get_moisture_data(self.date, field_id, mp_id, soil_layer_id)
criterias = self.database.get_moisture_remotion_criteria(moisture_sensor_type_id)
self.fusion.set_soil_moisture_remotion_criteria(criterias["min"], criterias["max"])
u = self.fusion.check_soil_moisture_remotion_criteria(u)
data = []
if layer_depth_type in self.soil_layer_data:
data = self.soil_layer_data[layer_depth_type]
data.append(u)
self.soil_layer_data.update({layer_depth_type:data})
def compute_RNI(self,field_id,date):
self.set_field_info(field_id)
self.data_preprocessing(date)
fusioned_data=0
depth_info=[]
self.RNI={}
afd=0#available ready water
rni=0
layer_depth = 0
field_condition_moisture = 0
residual_water_content = 0
saturation_water_content = 0
alpha_air_entry_suction = 0
n_pore_size_distribution = 0
current_humidity=0
critical_moisture=0
fusion_id=0
for key in self.soil_layer_data.keys():
layer=self.database.get_soil_depth(key)
layer_depth=layer[1]
fusion_id=layer[3]
field_capacity=layer[4]
residual_water_content=layer[5]
saturation_water_content=layer[6]
alpha_air_entry_suction=layer[7]
n_pore_size_distribution = layer[8]
fusion_type = self.database.get_fusion_method(fusion_id)
fusioned_data = self.fusion.apply_method(fusion_type["name"],
self.soil_layer_data[key])
current_humidity = self.vanGenuchten(residual_water_content,
saturation_water_content,
alpha_air_entry_suction,
n_pore_size_distribution,
fusioned_data)
fc_moisture = self.vanGenuchten(residual_water_content,saturation_water_content,
alpha_air_entry_suction,n_pore_size_distribution,
field_capacity)
critical_moisture = self.vanGenuchten(residual_water_content,
saturation_water_content,
alpha_air_entry_suction,
n_pore_size_distribution,
self.critical_condition_moisture)
afd=(fc_moisture-critical_moisture)*layer_depth
rni=(fc_moisture-current_humidity)*layer_depth
self.RNI.update({layer_depth:[current_humidity,critical_moisture,afd,rni]})
depths=self.RNI.keys()
min_depth = min(depths)
moisture = self.RNI[min_depth]
current_humidity=moisture[0]
critical_moisture=moisture[1]
self.water_needed=0
if current_humidity <= critical_moisture:
for key in self.RNI.keys():
moisture=self.RNI[key]
if key==min_depth:
self.water_needed+= moisture[2]
else:
self.water_needed += moisture[3]
self.ITN=self.water_need/self.irrigation_ef
return self.ITN
#model to compute soil moisture
def vanGenuchten(self,thetaR, thetaS, alpha, n, psiM):
num = thetaS - thetaR
den = (alpha * psiM) ** n
den = (den + 1) ** (1 - 1 / n)
theta = thetaR + (num / den)
return theta
def compute_irrigation_time(self):
self.t_irri=self.ITN/self.irrigation_p
return self.t_irri
if __name__ == '__main__':
irrigation_management = MatricPotential('localhost', 'root', '12345678')
date = "2016-02-01"
field_id = 1
water_needed=irrigation_management.compute_RNI(field_id,date)
print('water needed={}'.format(water_needed)) | 0.309858 | 0.136206 |
import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
_DenseToDenseSetOperationOutput = collections.namedtuple(
"DenseToDenseSetOperation",
["result_indices", "result_values", "result_shape"])
def dense_to_dense_set_operation(set1, set2, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of 2 `Tensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2: A `Tensor`. Must have the same type as `set1`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DenseToDenseSetOperation",
name, tld.op_callbacks, set1, set2, "set_operation", set_operation,
"validate_indices", validate_indices)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return dense_to_dense_set_operation_eager_fallback(
set1, set2, set_operation=set_operation,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DenseToDenseSetOperation", set1=set1, set2=set2,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DenseToDenseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
DenseToDenseSetOperation = tf_export("raw_ops.DenseToDenseSetOperation")(_ops.to_raw_op(dense_to_dense_set_operation))
def dense_to_dense_set_operation_eager_fallback(set1, set2, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2], ctx)
(set1, set2) = _inputs_T
_inputs_flat = [set1, set2]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"DenseToDenseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DenseToDenseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
_DenseToSparseSetOperationOutput = collections.namedtuple(
"DenseToSparseSetOperation",
["result_indices", "result_values", "result_shape"])
def dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of `Tensor` and `SparseTensor`.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
If `validate_indices` is `True`, this op validates the order and range of `set2`
indices.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: A `Tensor`. Must have the same type as `set1`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
max set size across `n-1` dimensions.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DenseToSparseSetOperation",
name, tld.op_callbacks, set1, set2_indices, set2_values, set2_shape,
"set_operation", set_operation, "validate_indices", validate_indices)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return dense_to_sparse_set_operation_eager_fallback(
set1, set2_indices, set2_values, set2_shape,
set_operation=set_operation, validate_indices=validate_indices,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DenseToSparseSetOperation", set1=set1, set2_indices=set2_indices,
set2_values=set2_values,
set2_shape=set2_shape,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DenseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
DenseToSparseSetOperation = tf_export("raw_ops.DenseToSparseSetOperation")(_ops.to_raw_op(dense_to_sparse_set_operation))
def dense_to_sparse_set_operation_eager_fallback(set1, set2_indices, set2_values, set2_shape, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2_values], ctx)
(set1, set2_values) = _inputs_T
set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)
set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)
_inputs_flat = [set1, set2_indices, set2_values, set2_shape]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"DenseToSparseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DenseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
def set_size(set_indices, set_values, set_shape, validate_indices=True, name=None):
r"""Number of unique elements along last dimension of input `set`.
Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
and `set_shape`. The last dimension contains values in a set, duplicates are
allowed but ignored.
If `validate_indices` is `True`, this op validates the order and range of `set`
indices.
Args:
set_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`.
set_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
1D `Tensor`, values of a `SparseTensor`.
set_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "SetSize", name,
tld.op_callbacks, set_indices, set_values, set_shape,
"validate_indices", validate_indices)
return _result
except _core._FallbackException:
try:
return set_size_eager_fallback(
set_indices, set_values, set_shape,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SetSize", set_indices=set_indices, set_values=set_values,
set_shape=set_shape, validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SetSize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
SetSize = tf_export("raw_ops.SetSize")(_ops.to_raw_op(set_size))
def set_size_eager_fallback(set_indices, set_values, set_shape, validate_indices, name, ctx):
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, (set_values,) = _execute.args_to_matching_eager([set_values], ctx)
set_indices = _ops.convert_to_tensor(set_indices, _dtypes.int64)
set_shape = _ops.convert_to_tensor(set_shape, _dtypes.int64)
_inputs_flat = [set_indices, set_values, set_shape]
_attrs = ("validate_indices", validate_indices, "T", _attr_T)
_result = _execute.execute(b"SetSize", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SetSize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_SparseToSparseSetOperationOutput = collections.namedtuple(
"SparseToSparseSetOperation",
["result_indices", "result_values", "result_shape"])
def sparse_to_sparse_set_operation(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of 2 `SparseTensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
order and range of `set1` and `set2` indices.
Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
If `validate_indices` is `True`, this op validates the order and range of `set1`
and `set2` indices.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set1_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set1_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
max set size across `0...n-1` dimensions.
set2_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: A `Tensor`. Must have the same type as `set1_values`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
max set size across `0...n-1` dimensions.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1_values`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "SparseToSparseSetOperation",
name, tld.op_callbacks, set1_indices, set1_values, set1_shape,
set2_indices, set2_values, set2_shape, "set_operation", set_operation,
"validate_indices", validate_indices)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return sparse_to_sparse_set_operation_eager_fallback(
set1_indices, set1_values, set1_shape, set2_indices, set2_values,
set2_shape, set_operation=set_operation,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SparseToSparseSetOperation", set1_indices=set1_indices,
set1_values=set1_values,
set1_shape=set1_shape,
set2_indices=set2_indices,
set2_values=set2_values,
set2_shape=set2_shape,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SparseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result
SparseToSparseSetOperation = tf_export("raw_ops.SparseToSparseSetOperation")(_ops.to_raw_op(sparse_to_sparse_set_operation))
def sparse_to_sparse_set_operation_eager_fallback(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1_values, set2_values], ctx)
(set1_values, set2_values) = _inputs_T
set1_indices = _ops.convert_to_tensor(set1_indices, _dtypes.int64)
set1_shape = _ops.convert_to_tensor(set1_shape, _dtypes.int64)
set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)
set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)
_inputs_flat = [set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"SparseToSparseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SparseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result | venv/Lib/site-packages/tensorflow/python/ops/gen_set_ops.py | import collections
from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export
_DenseToDenseSetOperationOutput = collections.namedtuple(
"DenseToDenseSetOperation",
["result_indices", "result_values", "result_shape"])
def dense_to_dense_set_operation(set1, set2, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of 2 `Tensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2: A `Tensor`. Must have the same type as `set1`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DenseToDenseSetOperation",
name, tld.op_callbacks, set1, set2, "set_operation", set_operation,
"validate_indices", validate_indices)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return dense_to_dense_set_operation_eager_fallback(
set1, set2, set_operation=set_operation,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DenseToDenseSetOperation", set1=set1, set2=set2,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DenseToDenseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
DenseToDenseSetOperation = tf_export("raw_ops.DenseToDenseSetOperation")(_ops.to_raw_op(dense_to_dense_set_operation))
def dense_to_dense_set_operation_eager_fallback(set1, set2, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2], ctx)
(set1, set2) = _inputs_T
_inputs_flat = [set1, set2]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"DenseToDenseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DenseToDenseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToDenseSetOperationOutput._make(_result)
return _result
_DenseToSparseSetOperationOutput = collections.namedtuple(
"DenseToSparseSetOperation",
["result_indices", "result_values", "result_shape"])
def dense_to_sparse_set_operation(set1, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of `Tensor` and `SparseTensor`.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
If `validate_indices` is `True`, this op validates the order and range of `set2`
indices.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
`Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`.
Dimension `n` contains values in a set, duplicates are allowed but ignored.
set2_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: A `Tensor`. Must have the same type as `set1`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the
max set size across `n-1` dimensions.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "DenseToSparseSetOperation",
name, tld.op_callbacks, set1, set2_indices, set2_values, set2_shape,
"set_operation", set_operation, "validate_indices", validate_indices)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return dense_to_sparse_set_operation_eager_fallback(
set1, set2_indices, set2_values, set2_shape,
set_operation=set_operation, validate_indices=validate_indices,
name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"DenseToSparseSetOperation", set1=set1, set2_indices=set2_indices,
set2_values=set2_values,
set2_shape=set2_shape,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"DenseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
DenseToSparseSetOperation = tf_export("raw_ops.DenseToSparseSetOperation")(_ops.to_raw_op(dense_to_sparse_set_operation))
def dense_to_sparse_set_operation_eager_fallback(set1, set2_indices, set2_values, set2_shape, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1, set2_values], ctx)
(set1, set2_values) = _inputs_T
set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)
set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)
_inputs_flat = [set1, set2_indices, set2_values, set2_shape]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"DenseToSparseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"DenseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _DenseToSparseSetOperationOutput._make(_result)
return _result
def set_size(set_indices, set_values, set_shape, validate_indices=True, name=None):
r"""Number of unique elements along last dimension of input `set`.
Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`,
and `set_shape`. The last dimension contains values in a set, duplicates are
allowed but ignored.
If `validate_indices` is `True`, this op validates the order and range of `set`
indices.
Args:
set_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`.
set_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
1D `Tensor`, values of a `SparseTensor`.
set_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int32`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "SetSize", name,
tld.op_callbacks, set_indices, set_values, set_shape,
"validate_indices", validate_indices)
return _result
except _core._FallbackException:
try:
return set_size_eager_fallback(
set_indices, set_values, set_shape,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SetSize", set_indices=set_indices, set_values=set_values,
set_shape=set_shape, validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SetSize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
SetSize = tf_export("raw_ops.SetSize")(_ops.to_raw_op(set_size))
def set_size_eager_fallback(set_indices, set_values, set_shape, validate_indices, name, ctx):
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, (set_values,) = _execute.args_to_matching_eager([set_values], ctx)
set_indices = _ops.convert_to_tensor(set_indices, _dtypes.int64)
set_shape = _ops.convert_to_tensor(set_shape, _dtypes.int64)
_inputs_flat = [set_indices, set_values, set_shape]
_attrs = ("validate_indices", validate_indices, "T", _attr_T)
_result = _execute.execute(b"SetSize", 1, inputs=_inputs_flat, attrs=_attrs,
ctx=ctx, name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SetSize", _inputs_flat, _attrs, _result)
_result, = _result
return _result
_SparseToSparseSetOperationOutput = collections.namedtuple(
"SparseToSparseSetOperation",
["result_indices", "result_values", "result_shape"])
def sparse_to_sparse_set_operation(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape, set_operation, validate_indices=True, name=None):
r"""Applies set operation along last dimension of 2 `SparseTensor` inputs.
See SetOperationOp::SetOperationFromContext for values of `set_operation`.
If `validate_indices` is `True`, `SparseToSparseSetOperation` validates the
order and range of `set1` and `set2` indices.
Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`,
and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same
as `set2`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`,
and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same
as `set1`. Dimension `n` contains values in a set, duplicates are allowed but
ignored.
If `validate_indices` is `True`, this op validates the order and range of `set1`
and `set2` indices.
Output `result` is a `SparseTensor` represented by `result_indices`,
`result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this
has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth`
dimension contains the result of `set_operation` applied to the corresponding
`[0...n-1]` dimension of `set`.
Args:
set1_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set1_values: A `Tensor`. Must be one of the following types: `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `string`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set1_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must
be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the
max set size across `0...n-1` dimensions.
set2_indices: A `Tensor` of type `int64`.
2D `Tensor`, indices of a `SparseTensor`. Must be in row-major
order.
set2_values: A `Tensor`. Must have the same type as `set1_values`.
1D `Tensor`, values of a `SparseTensor`. Must be in row-major
order.
set2_shape: A `Tensor` of type `int64`.
1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must
be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the
max set size across `0...n-1` dimensions.
set_operation: A `string`.
validate_indices: An optional `bool`. Defaults to `True`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (result_indices, result_values, result_shape).
result_indices: A `Tensor` of type `int64`.
result_values: A `Tensor`. Has the same type as `set1_values`.
result_shape: A `Tensor` of type `int64`.
"""
_ctx = _context._context or _context.context()
tld = _ctx._thread_local_data
if tld.is_eager:
try:
_result = pywrap_tfe.TFE_Py_FastPathExecute(
_ctx._context_handle, tld.device_name, "SparseToSparseSetOperation",
name, tld.op_callbacks, set1_indices, set1_values, set1_shape,
set2_indices, set2_values, set2_shape, "set_operation", set_operation,
"validate_indices", validate_indices)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result
except _core._FallbackException:
try:
return sparse_to_sparse_set_operation_eager_fallback(
set1_indices, set1_values, set1_shape, set2_indices, set2_values,
set2_shape, set_operation=set_operation,
validate_indices=validate_indices, name=name, ctx=_ctx)
except _core._SymbolicException:
pass # Add nodes to the TensorFlow graph.
except _core._NotOkStatusException as e:
_ops.raise_from_not_ok_status(e, name)
# Add nodes to the TensorFlow graph.
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_, _, _op, _outputs = _op_def_library._apply_op_helper(
"SparseToSparseSetOperation", set1_indices=set1_indices,
set1_values=set1_values,
set1_shape=set1_shape,
set2_indices=set2_indices,
set2_values=set2_values,
set2_shape=set2_shape,
set_operation=set_operation,
validate_indices=validate_indices,
name=name)
_result = _outputs[:]
if _execute.must_record_gradient():
_attrs = ("set_operation", _op.get_attr("set_operation"),
"validate_indices", _op._get_attr_bool("validate_indices"), "T",
_op._get_attr_type("T"))
_inputs_flat = _op.inputs
_execute.record_gradient(
"SparseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result
SparseToSparseSetOperation = tf_export("raw_ops.SparseToSparseSetOperation")(_ops.to_raw_op(sparse_to_sparse_set_operation))
def sparse_to_sparse_set_operation_eager_fallback(set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape, set_operation, validate_indices, name, ctx):
set_operation = _execute.make_str(set_operation, "set_operation")
if validate_indices is None:
validate_indices = True
validate_indices = _execute.make_bool(validate_indices, "validate_indices")
_attr_T, _inputs_T = _execute.args_to_matching_eager([set1_values, set2_values], ctx)
(set1_values, set2_values) = _inputs_T
set1_indices = _ops.convert_to_tensor(set1_indices, _dtypes.int64)
set1_shape = _ops.convert_to_tensor(set1_shape, _dtypes.int64)
set2_indices = _ops.convert_to_tensor(set2_indices, _dtypes.int64)
set2_shape = _ops.convert_to_tensor(set2_shape, _dtypes.int64)
_inputs_flat = [set1_indices, set1_values, set1_shape, set2_indices, set2_values, set2_shape]
_attrs = ("set_operation", set_operation, "validate_indices",
validate_indices, "T", _attr_T)
_result = _execute.execute(b"SparseToSparseSetOperation", 3,
inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
name=name)
if _execute.must_record_gradient():
_execute.record_gradient(
"SparseToSparseSetOperation", _inputs_flat, _attrs, _result)
_result = _SparseToSparseSetOperationOutput._make(_result)
return _result | 0.88658 | 0.512632 |
import math
import posixpath
from unittest import mock
from dvc.fs.base import FileSystem
from dvc.objects.db import ObjectDB
class _CallableOrNone:
"""Helper for testing if object is callable() or None."""
def __eq__(self, other):
return other is None or callable(other)
CallableOrNone = _CallableOrNone()
@mock.patch.object(ObjectDB, "_list_hashes_traverse")
@mock.patch.object(ObjectDB, "list_hashes_exists")
def test_hashes_exist(object_exists, traverse, dvc):
odb = ObjectDB(FileSystem(), None)
# remote does not support traverse
odb.fs.CAN_TRAVERSE = False
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = set(range(1000))
odb.hashes_exist(hashes)
object_exists.assert_called_with(hashes, None, None)
traverse.assert_not_called()
odb.fs.CAN_TRAVERSE = True
# large remote, small local
object_exists.reset_mock()
traverse.reset_mock()
with mock.patch.object(
odb, "_list_hashes", return_value=list(range(2048))
):
hashes = list(range(1000))
odb.hashes_exist(hashes)
# verify that _odb_paths_with_max() short circuits
# before returning all 2048 remote hashes
max_hashes = math.ceil(
odb._max_estimation_size(hashes)
/ pow(16, odb.fs.TRAVERSE_PREFIX_LEN)
)
assert max_hashes < 2048
object_exists.assert_called_with(
frozenset(range(max_hashes, 1000)), None, None
)
traverse.assert_not_called()
# large remote, large local
object_exists.reset_mock()
traverse.reset_mock()
odb.fs._JOBS = 16
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = list(range(1000000))
odb.hashes_exist(hashes)
object_exists.assert_not_called()
traverse.assert_called_with(
256 * pow(16, odb.fs.TRAVERSE_PREFIX_LEN),
set(range(256)),
jobs=None,
name=None,
)
@mock.patch.object(ObjectDB, "_list_hashes", return_value=[])
@mock.patch.object(ObjectDB, "_path_to_hash", side_effect=lambda x: x)
def test_list_hashes_traverse(_path_to_hash, list_hashes, dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
# parallel traverse
size = 256 / odb.fs._JOBS * odb.fs.LIST_OBJECT_PAGE_SIZE
list(odb._list_hashes_traverse(size, {0}))
for i in range(1, 16):
list_hashes.assert_any_call(f"{i:0{odb.fs.TRAVERSE_PREFIX_LEN}x}")
for i in range(1, 256):
list_hashes.assert_any_call(f"{i:02x}")
# default traverse (small remote)
size -= 1
list_hashes.reset_mock()
list(odb._list_hashes_traverse(size - 1, {0}))
list_hashes.assert_called_with(None)
def test_list_hashes(dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
with mock.patch.object(
odb, "_list_paths", return_value=["12/3456", "bar"]
):
hashes = list(odb._list_hashes())
assert hashes == ["123456"]
def test_list_paths(dvc):
path = "foo"
odb = ObjectDB(FileSystem(), path)
with mock.patch.object(odb.fs, "find", return_value=[]) as walk_mock:
for _ in odb._list_paths():
pass
walk_mock.assert_called_with(path, prefix=False)
for _ in odb._list_paths(prefix="000"):
pass
walk_mock.assert_called_with(
posixpath.join(path, "00", "0"), prefix=True
) | tests/unit/remote/test_base.py | import math
import posixpath
from unittest import mock
from dvc.fs.base import FileSystem
from dvc.objects.db import ObjectDB
class _CallableOrNone:
"""Helper for testing if object is callable() or None."""
def __eq__(self, other):
return other is None or callable(other)
CallableOrNone = _CallableOrNone()
@mock.patch.object(ObjectDB, "_list_hashes_traverse")
@mock.patch.object(ObjectDB, "list_hashes_exists")
def test_hashes_exist(object_exists, traverse, dvc):
odb = ObjectDB(FileSystem(), None)
# remote does not support traverse
odb.fs.CAN_TRAVERSE = False
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = set(range(1000))
odb.hashes_exist(hashes)
object_exists.assert_called_with(hashes, None, None)
traverse.assert_not_called()
odb.fs.CAN_TRAVERSE = True
# large remote, small local
object_exists.reset_mock()
traverse.reset_mock()
with mock.patch.object(
odb, "_list_hashes", return_value=list(range(2048))
):
hashes = list(range(1000))
odb.hashes_exist(hashes)
# verify that _odb_paths_with_max() short circuits
# before returning all 2048 remote hashes
max_hashes = math.ceil(
odb._max_estimation_size(hashes)
/ pow(16, odb.fs.TRAVERSE_PREFIX_LEN)
)
assert max_hashes < 2048
object_exists.assert_called_with(
frozenset(range(max_hashes, 1000)), None, None
)
traverse.assert_not_called()
# large remote, large local
object_exists.reset_mock()
traverse.reset_mock()
odb.fs._JOBS = 16
with mock.patch.object(odb, "_list_hashes", return_value=list(range(256))):
hashes = list(range(1000000))
odb.hashes_exist(hashes)
object_exists.assert_not_called()
traverse.assert_called_with(
256 * pow(16, odb.fs.TRAVERSE_PREFIX_LEN),
set(range(256)),
jobs=None,
name=None,
)
@mock.patch.object(ObjectDB, "_list_hashes", return_value=[])
@mock.patch.object(ObjectDB, "_path_to_hash", side_effect=lambda x: x)
def test_list_hashes_traverse(_path_to_hash, list_hashes, dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
# parallel traverse
size = 256 / odb.fs._JOBS * odb.fs.LIST_OBJECT_PAGE_SIZE
list(odb._list_hashes_traverse(size, {0}))
for i in range(1, 16):
list_hashes.assert_any_call(f"{i:0{odb.fs.TRAVERSE_PREFIX_LEN}x}")
for i in range(1, 256):
list_hashes.assert_any_call(f"{i:02x}")
# default traverse (small remote)
size -= 1
list_hashes.reset_mock()
list(odb._list_hashes_traverse(size - 1, {0}))
list_hashes.assert_called_with(None)
def test_list_hashes(dvc):
odb = ObjectDB(FileSystem(), None)
odb.fs_path = "foo"
with mock.patch.object(
odb, "_list_paths", return_value=["12/3456", "bar"]
):
hashes = list(odb._list_hashes())
assert hashes == ["123456"]
def test_list_paths(dvc):
path = "foo"
odb = ObjectDB(FileSystem(), path)
with mock.patch.object(odb.fs, "find", return_value=[]) as walk_mock:
for _ in odb._list_paths():
pass
walk_mock.assert_called_with(path, prefix=False)
for _ in odb._list_paths(prefix="000"):
pass
walk_mock.assert_called_with(
posixpath.join(path, "00", "0"), prefix=True
) | 0.666714 | 0.254625 |
from io import BytesIO
from hematite.raw import messages as M
from hematite.raw.parser import (StatusLine,
HTTPVersion,
HeadersWriter,
ResponseReader,
ResponseWriter,
parse_message_traits)
from hematite.raw.messages import Complete
from hematite.raw.datastructures import Headers
DEFAULT_STATUS_CODE = 200
DEFAULT_REASON = 'OK'
DEFAULT_HTTP_VERSION = HTTPVersion(1, 1)
# Does None/None make more sense for defaults, allowing the
# instantiation of a truly blank RawResponse?
class RawResponse(object):
def __init__(self, status_code=None, reason=None, headers=None, body=None,
http_version=None, **kwargs):
status_line = kwargs.pop('status_line', None)
if status_line:
self.status_line = status_line
else:
self.status_code = status_code
self.reason = reason
self.http_version = http_version
self.headers = headers or Headers()
self.body = body
traits = parse_message_traits(self.headers)
self.chunked = kwargs.pop('chunked', traits.chunked)
self.content_length = kwargs.pop('content_length',
traits.content_length)
self.connection_close = kwargs.pop('connection_close',
traits.connection_close)
if kwargs:
raise TypeError('got unexpected kwargs: %r' % kwargs.keys())
@property
def status_line(self):
return StatusLine(version=self.http_version,
status_code=self.status_code,
reason=self.reason)
@status_line.setter
def status_line(self, val):
if isinstance(val, bytes):
val = StatusLine.from_bytes(val)
try:
self.http_version, self.status_code, self.reason = val
except:
raise TypeError('expected StatusLine or tuple, not %r' % val)
def get_writer(self):
return ResponseWriter(status_line=self.status_line,
headers=HeadersWriter(self.headers),
body=[]) # TODO: bodies
def to_bytes(self):
writer = self.get_writer()
return b''.join(part for _state, part in writer.writer if
_state != Complete.type)
@classmethod
def from_bytes(cls, bytestr):
# TODO: generify
bio = BytesIO(bytestr)
reader = ResponseReader()
state = reader.state
while True:
if state is M.Complete:
break
elif state.type == M.NeedLine.type:
line = bio.readline() # TODO: limit?
next_state = M.HaveLine(value=line)
elif state.type == M.NeedData.type:
data = bio.read(state.amount)
# TODO: can this block or return None if empty etc?
next_state = M.HaveData(value=data)
elif state.type == M.NeedPeek.type:
peeked = bio.peek(state.amount)
if not peeked:
pass # TODO: again, what happens on end of stream
next_state = M.HavePeek(amount=peeked)
else:
raise RuntimeError('Unknown state %r' % (state,))
state = reader.send(next_state)
return reader.raw_response
def __repr__(self):
cn = self.__class__.__name__
parts = ['<%s "%s %s"' % (cn, self.status_code, self.reason)]
if self.content_length:
parts.append(' content_length=%s' % self.content_length)
if self.chunked:
parts.append(' +chunked')
if self.connection_close:
parts.append(' +connection_close')
if self.headers:
parts.append('\n Headers:\n ')
_hlines = ['%s: %s' % hi for hi in self.headers.items(multi=True)]
parts.append('\n '.join(_hlines))
if self.body:
parts.append('\n Body: %r' % self.body)
parts.append(' >')
return ''.join(parts) | hematite/raw/response.py |
from io import BytesIO
from hematite.raw import messages as M
from hematite.raw.parser import (StatusLine,
HTTPVersion,
HeadersWriter,
ResponseReader,
ResponseWriter,
parse_message_traits)
from hematite.raw.messages import Complete
from hematite.raw.datastructures import Headers
DEFAULT_STATUS_CODE = 200
DEFAULT_REASON = 'OK'
DEFAULT_HTTP_VERSION = HTTPVersion(1, 1)
# Does None/None make more sense for defaults, allowing the
# instantiation of a truly blank RawResponse?
class RawResponse(object):
def __init__(self, status_code=None, reason=None, headers=None, body=None,
http_version=None, **kwargs):
status_line = kwargs.pop('status_line', None)
if status_line:
self.status_line = status_line
else:
self.status_code = status_code
self.reason = reason
self.http_version = http_version
self.headers = headers or Headers()
self.body = body
traits = parse_message_traits(self.headers)
self.chunked = kwargs.pop('chunked', traits.chunked)
self.content_length = kwargs.pop('content_length',
traits.content_length)
self.connection_close = kwargs.pop('connection_close',
traits.connection_close)
if kwargs:
raise TypeError('got unexpected kwargs: %r' % kwargs.keys())
@property
def status_line(self):
return StatusLine(version=self.http_version,
status_code=self.status_code,
reason=self.reason)
@status_line.setter
def status_line(self, val):
if isinstance(val, bytes):
val = StatusLine.from_bytes(val)
try:
self.http_version, self.status_code, self.reason = val
except:
raise TypeError('expected StatusLine or tuple, not %r' % val)
def get_writer(self):
return ResponseWriter(status_line=self.status_line,
headers=HeadersWriter(self.headers),
body=[]) # TODO: bodies
def to_bytes(self):
writer = self.get_writer()
return b''.join(part for _state, part in writer.writer if
_state != Complete.type)
@classmethod
def from_bytes(cls, bytestr):
# TODO: generify
bio = BytesIO(bytestr)
reader = ResponseReader()
state = reader.state
while True:
if state is M.Complete:
break
elif state.type == M.NeedLine.type:
line = bio.readline() # TODO: limit?
next_state = M.HaveLine(value=line)
elif state.type == M.NeedData.type:
data = bio.read(state.amount)
# TODO: can this block or return None if empty etc?
next_state = M.HaveData(value=data)
elif state.type == M.NeedPeek.type:
peeked = bio.peek(state.amount)
if not peeked:
pass # TODO: again, what happens on end of stream
next_state = M.HavePeek(amount=peeked)
else:
raise RuntimeError('Unknown state %r' % (state,))
state = reader.send(next_state)
return reader.raw_response
def __repr__(self):
cn = self.__class__.__name__
parts = ['<%s "%s %s"' % (cn, self.status_code, self.reason)]
if self.content_length:
parts.append(' content_length=%s' % self.content_length)
if self.chunked:
parts.append(' +chunked')
if self.connection_close:
parts.append(' +connection_close')
if self.headers:
parts.append('\n Headers:\n ')
_hlines = ['%s: %s' % hi for hi in self.headers.items(multi=True)]
parts.append('\n '.join(_hlines))
if self.body:
parts.append('\n Body: %r' % self.body)
parts.append(' >')
return ''.join(parts) | 0.451085 | 0.09451 |
import os, sys
import logging
import json
import uuid
import config_utils as ch
logger = logging.getLogger(__name__)
def generate_uuid(all_uuids:list):
new_uuid = str(uuid.uuid4())
while new_uuid in all_uuids:
new_uuid = str(uuid.uuid4())
all_uuids.append(new_uuid)
return all_uuids, new_uuid
def generate_entity_contents(json_data:dict) -> dict:
"""Return entity lookup table w/ generated UUIDs.
Keyword arguments:
json_data -- json object, root of config file (required).
"""
entity_lookup = json_data
# find systems
try:
systems = json_data[ch.KEY_SYSTEMS]
except KeyError:
logger.warning("No '{}' property found in config file".format(ch.KEY_SYSTEMS))
systems = []
all_uuids = []
# iterate through systems
for i, system in enumerate(systems):
# system-specific actuator/sensor count
actuator_cnt = 0
actuator_ids = []
sensor_cnt = 0
sensor_ids = []
# get system name
try:
sys_name = system[ch.KEY_NAME].replace('/', '')
except KeyError:
sys_name = ch.get_default_system_name(i+1)
entity_lookup[ch.KEY_SYSTEMS][i][ch.KEY_NAME] = sys_name
logger.debug("Found system: '{}'".format(sys_name))
# add uuid for system
all_uuids, sys_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][ch.KEY_UUID] = sys_uuid
for container_type, container_type_singular in ch.CONTAINER_TYPES.items():
# find containers (tanks or crops)
try:
containers = system[container_type]
except KeyError:
logger.warning("No '{}' property found for system '{}'".format(container_type, i+1))
containers = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type] = containers
# iterate through containers of type tank or crop in system
for j, container in enumerate(containers):
# get container name
try:
container_name = "{}".format(container[ch.KEY_NAME].replace('/', ''))
except KeyError:
container_name = ch.get_default_container_name(container_type, j+1)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_NAME] = container_name
logger.debug("Found container: '{}'".format(container_name))
# add uuid for container
all_uuids, container_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_UUID] = container_uuid
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SYSTEM] = sys_name
# find actuators
try:
actuators = container[ch.KEY_ACTUATORS]
except KeyError:
logger.warning("No '{}' property found for container '{}'".format(ch.KEY_ACTUATORS, j+1))
actuators = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS] = actuators
# iterate through actuators
for k, actuator in enumerate(actuators):
logger.debug("Found actuator #{}".format(k+1))
# get actuator type
try:
actuator_type = actuator[ch.KEY_TYPE]
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', actuator #{}".format(ch.KEY_TYPE, sys_name, container_name, k+1))
continue
# get actuator drive type
try:
actuator[ch.KEY_ACTUATOR_DRIVE_TYPE]
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', actuator #{}".format(ch.KEY_ACTUATOR_DRIVE_TYPE, sys_name, container_name, k+1))
continue
# add uuid for actuator
all_uuids, actuator_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_UUID] = actuator_uuid
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_SYSTEM] = sys_name
# add container name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_TANK_OR_CROP] = container_name
# add actuator id
try:
actuator_id = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_ACTUATOR_ID]
if actuator_id in actuator_ids:
logger.error("Repeated actuator ID '{}' found for system '{}', container '{}', actuator '{}'".format(actuator_id, sys_name, container_name, actuator_type))
except KeyError:
while actuator_cnt in actuator_ids:
actuator_cnt += 1
actuator_id = actuator_cnt
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_ACTUATOR_ID] = actuator_id
actuator_cnt += 1
actuator_ids.append(actuator_id)
# find sensors
try:
sensors = container[ch.KEY_SENSORS]
except KeyError:
logger.warning("No '{}' property found for container '{}'".format(ch.KEY_SENSORS, j+1))
sensors = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS] = sensors
# iterate through sensors in container
for k, sensor in enumerate(sensors):
logger.debug("Found sensor #{}".format(k+1))
# get sensor type
try:
sensor_type = sensor[ch.KEY_TYPE].replace('/', '')
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', sensor #{}".format(ch.KEY_TYPE, sys_name, container_name, k+1))
continue
# add uuid for sensor
all_uuids, sensor_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_UUID] = sensor_uuid
# add sensor units
try:
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSORS_UNITS] = sensor[ch.KEY_SENSORS_UNITS]
except KeyError:
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSORS_UNITS] = ch.get_sensor_units(json_data, sensor_type)
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SYSTEM] = sys_name
# add container name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_TANK_OR_CROP] = container_name
# add sensor id
try:
sensor_id = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSOR_ID]
if sensor_id in sensor_ids:
logger.error("Repeated sensor ID '{}' found for system '{}', container '{}', sensor '{}'".format(sensor_id, sys_name, container_name, sensor_type))
except KeyError:
while sensor_cnt in sensor_ids:
sensor_cnt += 1
sensor_id = sensor_cnt
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSOR_ID] = sensor_id
sensor_cnt += 1
sensor_ids.append(sensor_id)
# delete empty sensors from list (if any)
all_sensors = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS]
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS] = [sensor for sensor in all_sensors if sensor != {}]
# delete empty actuators from list (if any)
all_actuators = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS]
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS] = [actuator for actuator in all_actuators if actuator != {}]
return entity_lookup
def generate_description_file(config_file:str, output_file:str=None) -> int:
""" Generates a description json file containing uuids for all entities. Expects a config json file at /home/$CONFIG_FILE.
Keyword arguments:
config_file -- path to config json file (required).
output_file -- filename of output description json. Omits writing if None (default).
"""
json_contents = ch.get_json_file_contents(config_file)
if json_contents is None:
logger.error("Internal Config file json data is null. Exiting")
sys.exit(1)
entity_lookup = generate_entity_contents(json_contents)
if output_file is not None:
with open(output_file, 'w') as f:
f.write(json.dumps(entity_lookup, indent=2))
logger.info("Wrote description file to '{}'".format(output_file))
return 0
def _init_logging():
s_handler = logging.StreamHandler()
s_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(name)s [%(levelname)s] %(message)s')
s_handler.setFormatter(formatter)
logger.addHandler(s_handler)
logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
_init_logging
logger.info("Starting description file generation")
# get config file
if os.getenv('CONFIG_FILE') != '':
config_file = '/home/{}'.format(os.getenv('CONFIG_FILE'))
else:
logger.error("CONFIG_FILE environment variable is not set.")
sys.exit(1)
# get description file
if os.getenv('DESCRIPTION_FILE') != '':
output_file = '/common/{}'.format(os.getenv('DESCRIPTION_FILE'))
else:
logger.warning("DESCRIPTION_FILE environment variable is not set.")
output_file = None
# run main program
generate_description_file(config_file, output_file=output_file)
logger.info("description file generation finished successfully")
sys.exit(0) | init/config/description_gen.py | import os, sys
import logging
import json
import uuid
import config_utils as ch
logger = logging.getLogger(__name__)
def generate_uuid(all_uuids:list):
new_uuid = str(uuid.uuid4())
while new_uuid in all_uuids:
new_uuid = str(uuid.uuid4())
all_uuids.append(new_uuid)
return all_uuids, new_uuid
def generate_entity_contents(json_data:dict) -> dict:
"""Return entity lookup table w/ generated UUIDs.
Keyword arguments:
json_data -- json object, root of config file (required).
"""
entity_lookup = json_data
# find systems
try:
systems = json_data[ch.KEY_SYSTEMS]
except KeyError:
logger.warning("No '{}' property found in config file".format(ch.KEY_SYSTEMS))
systems = []
all_uuids = []
# iterate through systems
for i, system in enumerate(systems):
# system-specific actuator/sensor count
actuator_cnt = 0
actuator_ids = []
sensor_cnt = 0
sensor_ids = []
# get system name
try:
sys_name = system[ch.KEY_NAME].replace('/', '')
except KeyError:
sys_name = ch.get_default_system_name(i+1)
entity_lookup[ch.KEY_SYSTEMS][i][ch.KEY_NAME] = sys_name
logger.debug("Found system: '{}'".format(sys_name))
# add uuid for system
all_uuids, sys_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][ch.KEY_UUID] = sys_uuid
for container_type, container_type_singular in ch.CONTAINER_TYPES.items():
# find containers (tanks or crops)
try:
containers = system[container_type]
except KeyError:
logger.warning("No '{}' property found for system '{}'".format(container_type, i+1))
containers = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type] = containers
# iterate through containers of type tank or crop in system
for j, container in enumerate(containers):
# get container name
try:
container_name = "{}".format(container[ch.KEY_NAME].replace('/', ''))
except KeyError:
container_name = ch.get_default_container_name(container_type, j+1)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_NAME] = container_name
logger.debug("Found container: '{}'".format(container_name))
# add uuid for container
all_uuids, container_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_UUID] = container_uuid
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SYSTEM] = sys_name
# find actuators
try:
actuators = container[ch.KEY_ACTUATORS]
except KeyError:
logger.warning("No '{}' property found for container '{}'".format(ch.KEY_ACTUATORS, j+1))
actuators = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS] = actuators
# iterate through actuators
for k, actuator in enumerate(actuators):
logger.debug("Found actuator #{}".format(k+1))
# get actuator type
try:
actuator_type = actuator[ch.KEY_TYPE]
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', actuator #{}".format(ch.KEY_TYPE, sys_name, container_name, k+1))
continue
# get actuator drive type
try:
actuator[ch.KEY_ACTUATOR_DRIVE_TYPE]
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', actuator #{}".format(ch.KEY_ACTUATOR_DRIVE_TYPE, sys_name, container_name, k+1))
continue
# add uuid for actuator
all_uuids, actuator_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_UUID] = actuator_uuid
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_SYSTEM] = sys_name
# add container name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_TANK_OR_CROP] = container_name
# add actuator id
try:
actuator_id = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_ACTUATOR_ID]
if actuator_id in actuator_ids:
logger.error("Repeated actuator ID '{}' found for system '{}', container '{}', actuator '{}'".format(actuator_id, sys_name, container_name, actuator_type))
except KeyError:
while actuator_cnt in actuator_ids:
actuator_cnt += 1
actuator_id = actuator_cnt
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS][k][ch.KEY_ACTUATOR_ID] = actuator_id
actuator_cnt += 1
actuator_ids.append(actuator_id)
# find sensors
try:
sensors = container[ch.KEY_SENSORS]
except KeyError:
logger.warning("No '{}' property found for container '{}'".format(ch.KEY_SENSORS, j+1))
sensors = []
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS] = sensors
# iterate through sensors in container
for k, sensor in enumerate(sensors):
logger.debug("Found sensor #{}".format(k+1))
# get sensor type
try:
sensor_type = sensor[ch.KEY_TYPE].replace('/', '')
except KeyError:
logger.warning("No '{}' property found for system '{}', container '{}', sensor #{}".format(ch.KEY_TYPE, sys_name, container_name, k+1))
continue
# add uuid for sensor
all_uuids, sensor_uuid = generate_uuid(all_uuids)
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_UUID] = sensor_uuid
# add sensor units
try:
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSORS_UNITS] = sensor[ch.KEY_SENSORS_UNITS]
except KeyError:
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSORS_UNITS] = ch.get_sensor_units(json_data, sensor_type)
# add system name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SYSTEM] = sys_name
# add container name
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_TANK_OR_CROP] = container_name
# add sensor id
try:
sensor_id = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSOR_ID]
if sensor_id in sensor_ids:
logger.error("Repeated sensor ID '{}' found for system '{}', container '{}', sensor '{}'".format(sensor_id, sys_name, container_name, sensor_type))
except KeyError:
while sensor_cnt in sensor_ids:
sensor_cnt += 1
sensor_id = sensor_cnt
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS][k][ch.KEY_SENSOR_ID] = sensor_id
sensor_cnt += 1
sensor_ids.append(sensor_id)
# delete empty sensors from list (if any)
all_sensors = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS]
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_SENSORS] = [sensor for sensor in all_sensors if sensor != {}]
# delete empty actuators from list (if any)
all_actuators = entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS]
entity_lookup[ch.KEY_SYSTEMS][i][container_type][j][ch.KEY_ACTUATORS] = [actuator for actuator in all_actuators if actuator != {}]
return entity_lookup
def generate_description_file(config_file:str, output_file:str=None) -> int:
""" Generates a description json file containing uuids for all entities. Expects a config json file at /home/$CONFIG_FILE.
Keyword arguments:
config_file -- path to config json file (required).
output_file -- filename of output description json. Omits writing if None (default).
"""
json_contents = ch.get_json_file_contents(config_file)
if json_contents is None:
logger.error("Internal Config file json data is null. Exiting")
sys.exit(1)
entity_lookup = generate_entity_contents(json_contents)
if output_file is not None:
with open(output_file, 'w') as f:
f.write(json.dumps(entity_lookup, indent=2))
logger.info("Wrote description file to '{}'".format(output_file))
return 0
def _init_logging():
s_handler = logging.StreamHandler()
s_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] %(name)s [%(levelname)s] %(message)s')
s_handler.setFormatter(formatter)
logger.addHandler(s_handler)
logger.setLevel(logging.DEBUG)
if __name__ == "__main__":
_init_logging
logger.info("Starting description file generation")
# get config file
if os.getenv('CONFIG_FILE') != '':
config_file = '/home/{}'.format(os.getenv('CONFIG_FILE'))
else:
logger.error("CONFIG_FILE environment variable is not set.")
sys.exit(1)
# get description file
if os.getenv('DESCRIPTION_FILE') != '':
output_file = '/common/{}'.format(os.getenv('DESCRIPTION_FILE'))
else:
logger.warning("DESCRIPTION_FILE environment variable is not set.")
output_file = None
# run main program
generate_description_file(config_file, output_file=output_file)
logger.info("description file generation finished successfully")
sys.exit(0) | 0.139426 | 0.040541 |
import re
import urllib
import urllib2
from lib import requests
from htmlentitydefs import name2codepoint
from util.tools import remove_spaces
import HTMLParser
h = HTMLParser.HTMLParser()
paste_url = 'http://paste.ml'
short_ignored = ['bit.ly', 'is.gd', 'goo.gl', 'links.ml']
exec_uri = 'http://eval.appspot.com/eval'
def http(method, rdata='all', uri=None, timeout=7, params=None, data=None, headers=None, **kwargs):
if not method:
raise 'No method specified'
if not uri:
raise 'Invalid URI supplied'
if not headers:
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, '
'like Gecko) Chrome/22.0.1229.79 Safari/537.4',
'Cache-Control': 'max-age=0',
'Accept-Encoding': 'gzip,deflate,sdch',
'X-Service': 'Code Python IRC Bot'
}
if method == 'get':
response = requests.get(uri, timeout=timeout, params=params, headers=headers, **kwargs)
elif method == 'post':
response = requests.post(uri, timeout=timeout, data=data, headers=headers, **kwargs)
elif method == 'head':
response = requests.head(uri, timeout=timeout, data=data, headers=headers, **kwargs)
else:
raise 'Method not supported'
if rdata == 'all':
return response
elif rdata == 'json':
return response.json()
elif rdata == 'text':
return response.text
elif rdata == 'headers':
return response.headers
else:
raise 'Return data not supported'
def post(uri, **args):
return http(method='post', rdata='all', uri=uri, **args)
def get(uri, **args):
return http(method='get', rdata='all', uri=uri, **args)
def json(uri, **args):
return http(method='get', rdata='json', uri=uri, **args)
def text(uri, **args):
return http(method='get', rdata='text', uri=uri, **args)
def headers(uri, **args):
return http(method='get', rdata='headers', uri=uri, **args)
def head(uri, **args):
return http(method='head', rdata='all', uri=uri, **args)
def quote(string):
return urllib2.quote(string)
def urlencode(data):
return urllib.urlencode(data)
r_entity = re.compile(r'&([^;\s]+);')
def findin(regex, string, least=1):
tmp = list(re.findall('(?m)' + regex, string))
if len(tmp) < 1:
raise Exception('No results found')
return tmp
def entity(match):
value = match.group(1).lower()
if value.startswith('#x'):
return unichr(int(value[2:], 16))
elif value.startswith('#'):
return unichr(int(value[1:]))
elif value in name2codepoint:
return unichr(name2codepoint[value])
return '[' + value + ']'
def escape(string):
return h.unescape(string)
def striptags(string):
return re.compile(r'(?ims)<[^>]+>').sub('', string).strip()
def clean(string):
string = string.replace('\r', '').replace('\n', '')
return remove_spaces(escape(string)).strip()
def decode(html):
return r_entity.sub(entity, html)
def uncharset(string):
try:
string = unicode(string, 'utf-8')
except:
pass
try:
string = string.encode('utf8', 'ignore')
except:
pass
try:
string = unicode(string, 'utf-8')
except:
pass
return string
r_string = re.compile(r'("(\\.|[^"\\])*")')
r_json = re.compile(r'^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]+$')
env = {'__builtins__': None, 'null': None, 'true': True, 'false': False}
def haste(string, extension='txt'):
data = post(paste_url + '/documents', data=string).json()
return '{uri}/{key}.{ext}'.format(uri=paste_url, key=data['key'], ext=extension)
def shorten(url):
try:
for bad in short_ignored:
if bad in url.lower():
return url
data = post('http://links.ml/add', data={'url': url}).json()
if not data['success']:
return url
return data['url']
except:
return url
def exec_py(data):
attempts = 0
while True:
if attempts == 2:
return "Failed to execute code."
attempts += 1
try:
data = text(exec_uri, params={"statement": data}).strip('\n')
if len(data) == 0:
continue
break
except:
continue
return data | util/web.py | import re
import urllib
import urllib2
from lib import requests
from htmlentitydefs import name2codepoint
from util.tools import remove_spaces
import HTMLParser
h = HTMLParser.HTMLParser()
paste_url = 'http://paste.ml'
short_ignored = ['bit.ly', 'is.gd', 'goo.gl', 'links.ml']
exec_uri = 'http://eval.appspot.com/eval'
def http(method, rdata='all', uri=None, timeout=7, params=None, data=None, headers=None, **kwargs):
if not method:
raise 'No method specified'
if not uri:
raise 'Invalid URI supplied'
if not headers:
headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, '
'like Gecko) Chrome/22.0.1229.79 Safari/537.4',
'Cache-Control': 'max-age=0',
'Accept-Encoding': 'gzip,deflate,sdch',
'X-Service': 'Code Python IRC Bot'
}
if method == 'get':
response = requests.get(uri, timeout=timeout, params=params, headers=headers, **kwargs)
elif method == 'post':
response = requests.post(uri, timeout=timeout, data=data, headers=headers, **kwargs)
elif method == 'head':
response = requests.head(uri, timeout=timeout, data=data, headers=headers, **kwargs)
else:
raise 'Method not supported'
if rdata == 'all':
return response
elif rdata == 'json':
return response.json()
elif rdata == 'text':
return response.text
elif rdata == 'headers':
return response.headers
else:
raise 'Return data not supported'
def post(uri, **args):
return http(method='post', rdata='all', uri=uri, **args)
def get(uri, **args):
return http(method='get', rdata='all', uri=uri, **args)
def json(uri, **args):
return http(method='get', rdata='json', uri=uri, **args)
def text(uri, **args):
return http(method='get', rdata='text', uri=uri, **args)
def headers(uri, **args):
return http(method='get', rdata='headers', uri=uri, **args)
def head(uri, **args):
return http(method='head', rdata='all', uri=uri, **args)
def quote(string):
return urllib2.quote(string)
def urlencode(data):
return urllib.urlencode(data)
r_entity = re.compile(r'&([^;\s]+);')
def findin(regex, string, least=1):
tmp = list(re.findall('(?m)' + regex, string))
if len(tmp) < 1:
raise Exception('No results found')
return tmp
def entity(match):
value = match.group(1).lower()
if value.startswith('#x'):
return unichr(int(value[2:], 16))
elif value.startswith('#'):
return unichr(int(value[1:]))
elif value in name2codepoint:
return unichr(name2codepoint[value])
return '[' + value + ']'
def escape(string):
return h.unescape(string)
def striptags(string):
return re.compile(r'(?ims)<[^>]+>').sub('', string).strip()
def clean(string):
string = string.replace('\r', '').replace('\n', '')
return remove_spaces(escape(string)).strip()
def decode(html):
return r_entity.sub(entity, html)
def uncharset(string):
try:
string = unicode(string, 'utf-8')
except:
pass
try:
string = string.encode('utf8', 'ignore')
except:
pass
try:
string = unicode(string, 'utf-8')
except:
pass
return string
r_string = re.compile(r'("(\\.|[^"\\])*")')
r_json = re.compile(r'^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]+$')
env = {'__builtins__': None, 'null': None, 'true': True, 'false': False}
def haste(string, extension='txt'):
data = post(paste_url + '/documents', data=string).json()
return '{uri}/{key}.{ext}'.format(uri=paste_url, key=data['key'], ext=extension)
def shorten(url):
try:
for bad in short_ignored:
if bad in url.lower():
return url
data = post('http://links.ml/add', data={'url': url}).json()
if not data['success']:
return url
return data['url']
except:
return url
def exec_py(data):
attempts = 0
while True:
if attempts == 2:
return "Failed to execute code."
attempts += 1
try:
data = text(exec_uri, params={"statement": data}).strip('\n')
if len(data) == 0:
continue
break
except:
continue
return data | 0.291082 | 0.135518 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
@testing_utils.skipUnlessGPU
class TestBertModel(unittest.TestCase):
"""
Test of Bert biencoder and crossencoder.
Checks that Both Biencoder and CrossEncoder of Bert can be trained
for about 100 samples on convai2
"""
def test_biencoder(self):
stdout, valid, test = testing_utils.train_model(
dict(
task='convai2',
model='bert_ranker/bi_encoder_ranker',
num_epochs=0.1,
batchsize=8,
learningrate=3e-4,
text_truncate=32,
validation_max_exs=20,
short_final_eval=True,
)
)
# can't conclude much from the biencoder after that little iterations.
# this test will just make sure it hasn't crashed and the accuracy isn't
# too high
self.assertLessEqual(
test['accuracy'],
0.5,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
def test_crossencoder(self):
stdout, valid, test = testing_utils.train_model(
dict(
task='convai2',
model='bert_ranker/cross_encoder_ranker',
num_epochs=0.002,
batchsize=1,
candidates="inline",
type_optimization="all_encoder_layers",
warmup_updates=100,
text_truncate=32,
label_truncate=32,
validation_max_exs=20,
short_final_eval=True,
)
)
# The cross encoder reaches an interesting state MUCH faster
# accuracy should be present and somewhere between 0.2 and 0.8
# (large interval so that it doesn't flake.)
self.assertGreaterEqual(
test['accuracy'],
0.03,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
self.assertLessEqual(
test['accuracy'],
0.8,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
if __name__ == '__main__':
unittest.main() | tests/nightly/gpu/test_bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import parlai.utils.testing as testing_utils
@testing_utils.skipUnlessGPU
class TestBertModel(unittest.TestCase):
"""
Test of Bert biencoder and crossencoder.
Checks that Both Biencoder and CrossEncoder of Bert can be trained
for about 100 samples on convai2
"""
def test_biencoder(self):
stdout, valid, test = testing_utils.train_model(
dict(
task='convai2',
model='bert_ranker/bi_encoder_ranker',
num_epochs=0.1,
batchsize=8,
learningrate=3e-4,
text_truncate=32,
validation_max_exs=20,
short_final_eval=True,
)
)
# can't conclude much from the biencoder after that little iterations.
# this test will just make sure it hasn't crashed and the accuracy isn't
# too high
self.assertLessEqual(
test['accuracy'],
0.5,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
def test_crossencoder(self):
stdout, valid, test = testing_utils.train_model(
dict(
task='convai2',
model='bert_ranker/cross_encoder_ranker',
num_epochs=0.002,
batchsize=1,
candidates="inline",
type_optimization="all_encoder_layers",
warmup_updates=100,
text_truncate=32,
label_truncate=32,
validation_max_exs=20,
short_final_eval=True,
)
)
# The cross encoder reaches an interesting state MUCH faster
# accuracy should be present and somewhere between 0.2 and 0.8
# (large interval so that it doesn't flake.)
self.assertGreaterEqual(
test['accuracy'],
0.03,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
self.assertLessEqual(
test['accuracy'],
0.8,
'test accuracy = {}\nLOG:\n{}'.format(test['accuracy'], stdout),
)
if __name__ == '__main__':
unittest.main() | 0.863765 | 0.492737 |
_sample_uploads_endpoints = [
[
"GetSampleV3",
"GET",
"/samples/entities/samples/v3",
"Retrieves the file associated with the given ID (SHA256)",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
},
{
"type": "string",
"default": False,
"description": "Flag whether the sample should be zipped and password protected with pass='<PASSWORD>'",
"name": "password_protected",
"in": "query"
}
]
],
[
"UploadSampleV3",
"POST",
"/samples/entities/samples/v3",
"Upload a file for further cloud analysis. After uploading, call the specific analysis API endpoint.",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"description": "Content of the uploaded sample in binary format. For example, use `--data-binary "
"@$FILE_PATH` when using cURL. Max file size: 100 MB.\n\nAccepted file formats:\n\n- Portable "
"executables: `.exe`, `.scr`, `.pif`, `.dll`, `.com`, `.cpl`, etc.\n- Office documents: `.doc`, "
"`.docx`, `.ppt`, `.pps`, `.pptx`, `.ppsx`, `.xls`, `.xlsx`, `.rtf`, `.pub`\n- PDF\n- APK\n- "
"Executable JAR\n- Windows script component: `.sct`\n- Windows shortcut: `.lnk`\n- Windows help: "
"`.chm`\n- HTML application: `.hta`\n- Windows script file: `.wsf`\n- Javascript: `.js`\n- Visual "
"Basic: `.vbs`, `.vbe`\n- Shockwave Flash: `.swf`\n- Perl: `.pl`\n- Powershell: `.ps1`, `.psd1`, "
"`.psm1`\n- Scalable vector graphics: `.svg`\n- Python: `.py`\n- Linux ELF executables\n- Email "
"files: MIME RFC 822 `.eml`, Outlook `.msg`.",
"name": "body",
"in": "body",
"required": True
},
{
"type": "file",
"description": "The binary file.",
"name": "upfile",
"in": "formData",
"required": True
},
{
"type": "string",
"description": "Name of the file.",
"name": "file_name",
"in": "query",
"required": True
},
{
"type": "string",
"description": "A descriptive comment to identify the file for other users.",
"name": "comment",
"in": "query"
},
{
"type": "boolean",
"default": True,
"description": "Defines visibility of this file in Falcon MalQuery, either via the API or the "
"Falcon console.\n\n- `true`: File is only shown to users within your customer account\n- `false`: "
"File can be seen by other CrowdStrike customers \n\nDefault: `true`.",
"name": "is_confidential",
"in": "query"
}
]
],
[
"DeleteSampleV3",
"DELETE",
"/samples/entities/samples/v3",
"Removes a sample, including file, meta and submissions from the collection",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
}
]
]
] | src/falconpy/_endpoint/_sample_uploads.py | _sample_uploads_endpoints = [
[
"GetSampleV3",
"GET",
"/samples/entities/samples/v3",
"Retrieves the file associated with the given ID (SHA256)",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
},
{
"type": "string",
"default": False,
"description": "Flag whether the sample should be zipped and password protected with pass='<PASSWORD>'",
"name": "password_protected",
"in": "query"
}
]
],
[
"UploadSampleV3",
"POST",
"/samples/entities/samples/v3",
"Upload a file for further cloud analysis. After uploading, call the specific analysis API endpoint.",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"description": "Content of the uploaded sample in binary format. For example, use `--data-binary "
"@$FILE_PATH` when using cURL. Max file size: 100 MB.\n\nAccepted file formats:\n\n- Portable "
"executables: `.exe`, `.scr`, `.pif`, `.dll`, `.com`, `.cpl`, etc.\n- Office documents: `.doc`, "
"`.docx`, `.ppt`, `.pps`, `.pptx`, `.ppsx`, `.xls`, `.xlsx`, `.rtf`, `.pub`\n- PDF\n- APK\n- "
"Executable JAR\n- Windows script component: `.sct`\n- Windows shortcut: `.lnk`\n- Windows help: "
"`.chm`\n- HTML application: `.hta`\n- Windows script file: `.wsf`\n- Javascript: `.js`\n- Visual "
"Basic: `.vbs`, `.vbe`\n- Shockwave Flash: `.swf`\n- Perl: `.pl`\n- Powershell: `.ps1`, `.psd1`, "
"`.psm1`\n- Scalable vector graphics: `.svg`\n- Python: `.py`\n- Linux ELF executables\n- Email "
"files: MIME RFC 822 `.eml`, Outlook `.msg`.",
"name": "body",
"in": "body",
"required": True
},
{
"type": "file",
"description": "The binary file.",
"name": "upfile",
"in": "formData",
"required": True
},
{
"type": "string",
"description": "Name of the file.",
"name": "file_name",
"in": "query",
"required": True
},
{
"type": "string",
"description": "A descriptive comment to identify the file for other users.",
"name": "comment",
"in": "query"
},
{
"type": "boolean",
"default": True,
"description": "Defines visibility of this file in Falcon MalQuery, either via the API or the "
"Falcon console.\n\n- `true`: File is only shown to users within your customer account\n- `false`: "
"File can be seen by other CrowdStrike customers \n\nDefault: `true`.",
"name": "is_confidential",
"in": "query"
}
]
],
[
"DeleteSampleV3",
"DELETE",
"/samples/entities/samples/v3",
"Removes a sample, including file, meta and submissions from the collection",
"sample_uploads",
[
{
"type": "string",
"description": "User UUID",
"name": "X-CS-USERUUID",
"in": "header"
},
{
"type": "string",
"description": "The file SHA256.",
"name": "ids",
"in": "query",
"required": True
}
]
]
] | 0.785473 | 0.396915 |
from ._application_gateways_operations import ApplicationGatewaysOperations
from ._express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from ._express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from ._express_route_circuits_operations import ExpressRouteCircuitsOperations
from ._express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from ._load_balancers_operations import LoadBalancersOperations
from ._network_interfaces_operations import NetworkInterfacesOperations
from ._network_security_groups_operations import NetworkSecurityGroupsOperations
from ._security_rules_operations import SecurityRulesOperations
from ._public_ip_addresses_operations import PublicIPAddressesOperations
from ._route_tables_operations import RouteTablesOperations
from ._routes_operations import RoutesOperations
from ._usages_operations import UsagesOperations
from ._virtual_networks_operations import VirtualNetworksOperations
from ._subnets_operations import SubnetsOperations
from ._virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from ._virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from ._local_network_gateways_operations import LocalNetworkGatewaysOperations
from ._network_management_client_operations import NetworkManagementClientOperationsMixin
__all__ = [
'ApplicationGatewaysOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'LoadBalancersOperations',
'NetworkInterfacesOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'PublicIPAddressesOperations',
'RouteTablesOperations',
'RoutesOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
'NetworkManagementClientOperationsMixin',
] | sdk/network/azure-mgmt-network/azure/mgmt/network/v2015_06_15/operations/__init__.py |
from ._application_gateways_operations import ApplicationGatewaysOperations
from ._express_route_circuit_authorizations_operations import ExpressRouteCircuitAuthorizationsOperations
from ._express_route_circuit_peerings_operations import ExpressRouteCircuitPeeringsOperations
from ._express_route_circuits_operations import ExpressRouteCircuitsOperations
from ._express_route_service_providers_operations import ExpressRouteServiceProvidersOperations
from ._load_balancers_operations import LoadBalancersOperations
from ._network_interfaces_operations import NetworkInterfacesOperations
from ._network_security_groups_operations import NetworkSecurityGroupsOperations
from ._security_rules_operations import SecurityRulesOperations
from ._public_ip_addresses_operations import PublicIPAddressesOperations
from ._route_tables_operations import RouteTablesOperations
from ._routes_operations import RoutesOperations
from ._usages_operations import UsagesOperations
from ._virtual_networks_operations import VirtualNetworksOperations
from ._subnets_operations import SubnetsOperations
from ._virtual_network_gateways_operations import VirtualNetworkGatewaysOperations
from ._virtual_network_gateway_connections_operations import VirtualNetworkGatewayConnectionsOperations
from ._local_network_gateways_operations import LocalNetworkGatewaysOperations
from ._network_management_client_operations import NetworkManagementClientOperationsMixin
__all__ = [
'ApplicationGatewaysOperations',
'ExpressRouteCircuitAuthorizationsOperations',
'ExpressRouteCircuitPeeringsOperations',
'ExpressRouteCircuitsOperations',
'ExpressRouteServiceProvidersOperations',
'LoadBalancersOperations',
'NetworkInterfacesOperations',
'NetworkSecurityGroupsOperations',
'SecurityRulesOperations',
'PublicIPAddressesOperations',
'RouteTablesOperations',
'RoutesOperations',
'UsagesOperations',
'VirtualNetworksOperations',
'SubnetsOperations',
'VirtualNetworkGatewaysOperations',
'VirtualNetworkGatewayConnectionsOperations',
'LocalNetworkGatewaysOperations',
'NetworkManagementClientOperationsMixin',
] | 0.542621 | 0.064036 |
import random
random.seed(42)
import numpy as np
np.random.seed(42)
import cvxpy as cp
from numpy.random import multivariate_normal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from copy import deepcopy
from ceml.sklearn import generate_counterfactual
from gnb_utils import compute_cf_gradient, compare_cf_gradients
from utils import plot_feature_diff
def sample_from_classdist(class_means, cov, n_samples_per_class=200):
X = []
y = []
for i in range(class_means.shape[0]):
y += [i for _ in range(n_samples_per_class)]
for _ in range(n_samples_per_class):
X.append(multivariate_normal(mean=class_means[i,:], cov=cov))
X = np.array(X)
y = np.array(y)
return X, y
def generate_drifting_dataset():
X0, y0 = sample_from_classdist(np.array([[0., 0.], [5., 0.]]), np.eye(2,2))
X1, y1 = sample_from_classdist(np.array([[5., 5.]]), np.eye(2,2))
return (X0, y0), (X1, y1)
if __name__ == "__main__":
# Create data set:
# Old data: Two blobs on the same axis - can be separated with a threshold on the first feature
# New data: Another blob of the first class located approx. above the blob of the second class from the old data set => Second feature must be used when separating both classes!
batches = generate_drifting_dataset()
X0, y0 = batches[0]
X1, y1 = batches[1]
Xcftest, ycftest = sample_from_classdist(np.array([[0., 5.]]), np.eye(2,2))
print(X0.shape, X1.shape, Xcftest.shape)
# Test data for computing counterfactual explanations:
# Blob above the blob of the first class from the old data set => Classification should still work fine since the second feature is not important for the first data set and the first feature remains important even after adapting to the new data set
# Fit model on the first batch of data
model = GaussianNB()
model.partial_fit(X0, y0, classes=[0, 1])
print("Accuracy: {0}".format(accuracy_score(y0, model.predict(X0))))
# Adapt model to new second batch of data
model_old = deepcopy(model)
print("Accuracy on new data before adaptation: {0}".format(accuracy_score(y1, model.predict(X1))))
model.partial_fit(X1, y1)
print("Accuracy on new data after adaptation: {0}".format(accuracy_score(y1, model.predict(X1))))
print("Accuracy after adaptation on old data: {0}".format(accuracy_score(y0, model.predict(X0))))
# Find interesting samples
cftest_scores = []
for i in range(Xcftest.shape[0]): # Only consider samples from a hold out set of samples
x = Xcftest[i,:]
y_target = 0 if ycftest[i] == 1 else 1
gradA = compute_cf_gradient(model_old, x, y_target)
gradB = compute_cf_gradient(model, x, y_target)
score = compare_cf_gradients(gradA, gradB)
cftest_scores.append(score)
cftest_scores_sorting = np.argsort(cftest_scores)
# Compute counterfactuals under old and new model
print("Accuracy on test data - old model: {0}".format(accuracy_score(ycftest, model_old.predict(Xcftest))))
print("Accuracy on test data: {0}".format(accuracy_score(ycftest, model.predict(Xcftest))))
cf_new = []
cf_old = []
for i in range(Xcftest.shape[0]):
x = Xcftest[i,:]
y = ycftest[i]
y_target = 0 if y == 1 else 1
if model_old.predict([x]) != y or model.predict([x]) != y: # Check if both models classifiy the sample correctly!
print("Skipping misslcassified sample!")
continue
x_cf, _, delta_cf = generate_counterfactual(model_old, x, y_target=y_target, return_as_dict=False, optimizer="mp", optimizer_args={"solver": cp.MOSEK})
cf_old.append((x, x_cf, y_target, delta_cf))
x_cf, _, delta_cf = generate_counterfactual(model, x, y_target=y_target, return_as_dict=False, optimizer="mp", optimizer_args={"solver": cp.MOSEK})
cf_new.append((x, x_cf, y_target, delta_cf))
# Compare counterfactuals
feature_diff = []
for cf0, cf1 in zip(cf_old, cf_new):
diff = cf0[3] - cf1[3]
feature_diff.append(diff)
feature_diff = np.array(feature_diff)
plot_feature_diff(feature_diff, file_path_out="exp_results/gaussianblobs_notrelevant.pdf")
print(f"Mean difference: {np.mean(feature_diff,axis=0)}, Variance: {np.var(feature_diff, axis=0)}")
# => Interpretation/Insight: The second feature becomes more important after adapting to the new data set! This is consistent with the ground truth :) | Code/model_feature_drift.py | import random
random.seed(42)
import numpy as np
np.random.seed(42)
import cvxpy as cp
from numpy.random import multivariate_normal
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from copy import deepcopy
from ceml.sklearn import generate_counterfactual
from gnb_utils import compute_cf_gradient, compare_cf_gradients
from utils import plot_feature_diff
def sample_from_classdist(class_means, cov, n_samples_per_class=200):
X = []
y = []
for i in range(class_means.shape[0]):
y += [i for _ in range(n_samples_per_class)]
for _ in range(n_samples_per_class):
X.append(multivariate_normal(mean=class_means[i,:], cov=cov))
X = np.array(X)
y = np.array(y)
return X, y
def generate_drifting_dataset():
X0, y0 = sample_from_classdist(np.array([[0., 0.], [5., 0.]]), np.eye(2,2))
X1, y1 = sample_from_classdist(np.array([[5., 5.]]), np.eye(2,2))
return (X0, y0), (X1, y1)
if __name__ == "__main__":
# Create data set:
# Old data: Two blobs on the same axis - can be separated with a threshold on the first feature
# New data: Another blob of the first class located approx. above the blob of the second class from the old data set => Second feature must be used when separating both classes!
batches = generate_drifting_dataset()
X0, y0 = batches[0]
X1, y1 = batches[1]
Xcftest, ycftest = sample_from_classdist(np.array([[0., 5.]]), np.eye(2,2))
print(X0.shape, X1.shape, Xcftest.shape)
# Test data for computing counterfactual explanations:
# Blob above the blob of the first class from the old data set => Classification should still work fine since the second feature is not important for the first data set and the first feature remains important even after adapting to the new data set
# Fit model on the first batch of data
model = GaussianNB()
model.partial_fit(X0, y0, classes=[0, 1])
print("Accuracy: {0}".format(accuracy_score(y0, model.predict(X0))))
# Adapt model to new second batch of data
model_old = deepcopy(model)
print("Accuracy on new data before adaptation: {0}".format(accuracy_score(y1, model.predict(X1))))
model.partial_fit(X1, y1)
print("Accuracy on new data after adaptation: {0}".format(accuracy_score(y1, model.predict(X1))))
print("Accuracy after adaptation on old data: {0}".format(accuracy_score(y0, model.predict(X0))))
# Find interesting samples
cftest_scores = []
for i in range(Xcftest.shape[0]): # Only consider samples from a hold out set of samples
x = Xcftest[i,:]
y_target = 0 if ycftest[i] == 1 else 1
gradA = compute_cf_gradient(model_old, x, y_target)
gradB = compute_cf_gradient(model, x, y_target)
score = compare_cf_gradients(gradA, gradB)
cftest_scores.append(score)
cftest_scores_sorting = np.argsort(cftest_scores)
# Compute counterfactuals under old and new model
print("Accuracy on test data - old model: {0}".format(accuracy_score(ycftest, model_old.predict(Xcftest))))
print("Accuracy on test data: {0}".format(accuracy_score(ycftest, model.predict(Xcftest))))
cf_new = []
cf_old = []
for i in range(Xcftest.shape[0]):
x = Xcftest[i,:]
y = ycftest[i]
y_target = 0 if y == 1 else 1
if model_old.predict([x]) != y or model.predict([x]) != y: # Check if both models classifiy the sample correctly!
print("Skipping misslcassified sample!")
continue
x_cf, _, delta_cf = generate_counterfactual(model_old, x, y_target=y_target, return_as_dict=False, optimizer="mp", optimizer_args={"solver": cp.MOSEK})
cf_old.append((x, x_cf, y_target, delta_cf))
x_cf, _, delta_cf = generate_counterfactual(model, x, y_target=y_target, return_as_dict=False, optimizer="mp", optimizer_args={"solver": cp.MOSEK})
cf_new.append((x, x_cf, y_target, delta_cf))
# Compare counterfactuals
feature_diff = []
for cf0, cf1 in zip(cf_old, cf_new):
diff = cf0[3] - cf1[3]
feature_diff.append(diff)
feature_diff = np.array(feature_diff)
plot_feature_diff(feature_diff, file_path_out="exp_results/gaussianblobs_notrelevant.pdf")
print(f"Mean difference: {np.mean(feature_diff,axis=0)}, Variance: {np.var(feature_diff, axis=0)}")
# => Interpretation/Insight: The second feature becomes more important after adapting to the new data set! This is consistent with the ground truth :) | 0.737914 | 0.665492 |
from __future__ import print_function
from collections import OrderedDict
from knack.util import CLIError
from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date
def transform_extension_search_results_table_output(result):
    """Convert marketplace extension search results into table-friendly rows."""
    return [_transform_extension_search_result_row(item) for item in result]
def _transform_extension_search_result_row(row):
table_row = OrderedDict()
table_row['Publisher Id'] = row['publisher']['publisherName']
table_row['Extension Id'] = row['extensionName']
table_row['Name'] = row['displayName']
return table_row
def transform_extension_table_output(result):
    """Wrap a single installed-extension result as a one-row table."""
    return [_transform_extension_row(result)]
def transform_extensions_table_output(result):
    """Convert a list of installed extensions into table rows, sorted for display."""
    ordered = sorted(result, key=_get_extension_key)
    return [_transform_extension_row(item) for item in ordered]
def _transform_extension_row(row):
    """Build one table row summarizing an installed extension.

    Values are trimmed for display; the publish timestamp is reduced to a date.
    """
    # NOTE: 'Version ' and 'Last Updated ' intentionally keep a trailing space
    # to reproduce the original column headers exactly.
    return OrderedDict([
        ('Publisher Id', trim_for_display(row['publisherId'], 10)),
        ('Extension Id', trim_for_display(row['extensionId'], 20)),
        ('Name', trim_for_display(row['extensionName'], 20)),
        ('Version ', trim_for_display(row['version'], 20)),
        ('Last Updated ', date_time_to_only_date(row['lastPublished'])),
        ('States', trim_for_display(row['installState']['flags'], 20)),
        ('Flags', trim_for_display(row['flags'], 20)),
    ])
def transform_projects_table_output(result):
    """Convert a list of team projects into table rows, sorted for display."""
    return [_transform_project_row(item) for item in sorted(result, key=_get_project_key)]
def transform_project_table_output(result):
    """Wrap a single team-project result as a one-row table."""
    return [_transform_project_row(result)]
def _transform_project_row(row):
    """Build one table row for a team project.

    Process template and source-control columns are added only when the
    corresponding capability data is present on the project.
    """
    # Imported lazily (as in the rest of this module's helpers) to avoid a
    # circular import with the project command module.
    from .project import (PROCESS_TEMPLATE_CAPABILITY_NAME,
                          VERSION_CONTROL_CAPABILITY_NAME,
                          VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME)
    table_row = OrderedDict([
        ('ID', row['id']),
        ('Name', row['name']),
        ('Visibility', row['visibility'].capitalize()),
    ])
    if 'capabilities' in row:
        capabilities = row['capabilities']
        process = capabilities.get(PROCESS_TEMPLATE_CAPABILITY_NAME, {})
        if 'templateName' in process:
            table_row['Process'] = process['templateName']
        version_control = capabilities.get(VERSION_CONTROL_CAPABILITY_NAME, {})
        if VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME in version_control:
            table_row['Source Control'] = version_control[VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME]
    return table_row
def transform_service_endpoints_table_output(result):
    """Convert a list of service endpoints into table rows, sorted for display."""
    ordered = sorted(result, key=_get_service_endpoint_key)
    return [_transform_service_endpoint_row(item) for item in ordered]
def _transform_service_endpoint_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Name'] = row['name']
table_row['Type'] = row['type']
table_row['Is Ready'] = row['isReady']
table_row['Created By'] = row['createdBy']['displayName']
return table_row
def transform_groups_table_output(result):
    """Convert a graph-group listing into table rows.

    When the service paginated the result, print the continuation token so the
    user can request the next page.
    """
    token = result['continuationToken']
    if token is not None:
        print('Showing only 500 groups. ' +
              'To list next set of groups use this token as --continuation-token argument and run the command again.' +
              ' TOKEN:', token)
    return [_transform_group_row(item) for item in result['graphGroups']]
def transform_group_table_output(result):
    """Wrap a single group result as a one-row table."""
    return [_transform_group_show_table_output(result)]
def _transform_group_show_table_output(row):
table_row = OrderedDict()
table_row['Name'] = row['principalName']
table_row['Description'] = row['description']
return table_row
def _transform_group_row(row):
table_row = OrderedDict()
table_row['Name'] = row['principalName']
table_row['Descriptor'] = row['descriptor']
return table_row
def transform_memberships_table_output(result):
    """Convert a membership lookup (mapping of descriptor -> subject) into table rows."""
    return [_transform_membership_row(subject) for subject in result.values()]
def transform_membership_table_output(result):
    """Convert a membership lookup into rows of name, subject kind and email.

    Users are shown by display name; every other subject kind (e.g. groups)
    is shown by principal name.
    """
    table_output = []
    for subject in result.values():
        name_key = 'displayName' if subject['subjectKind'] == 'user' else 'principalName'
        table_output.append(OrderedDict([
            ('Name', subject[name_key]),
            ('Type', subject['subjectKind']),
            ('Email', subject['mailAddress']),
        ]))
    return table_output
def _transform_membership_row(row):
table_row = OrderedDict()
if row['subjectKind'] == 'user':
table_row['Name'] = row['displayName']
else:
table_row['Name'] = row['principalName']
table_row['Type'] = row['subjectKind']
table_row['Email'] = row['mailAddress']
table_row['Descriptor'] = row['descriptor']
return table_row
def transform_namespaces_table_output(result):
    """Render security namespaces as table rows."""
    return [_transform_namespace_row(entry) for entry in result]

def _transform_namespace_row(row):
    """Build one table row (id, name) for a security namespace."""
    return OrderedDict([
        ('Id', row['namespaceId']),
        ('Name', row['name']),
    ])

def transform_namespace_table_output(result):
    """Render the permission actions of the first returned namespace."""
    return [_transform_namespace_details_row(action)
            for action in result[0]['actions']]

def _transform_namespace_details_row(row):
    """Build one table row for a namespace permission action."""
    return OrderedDict([
        ('Name', row['name']),
        ('Permission Description', row['displayName']),
        ('Permission Bit', row['bit']),
    ])
def transform_acl_output(result):
table_output = []
for item in result:
table_output.append(_transform_acl_details_row(item))
return table_output
def _transform_acl_details_row(row):
if len(row['acesDictionary']) > 1:
raise CLIError('More than one entry found in Aces dictionary for this user/group.')
table_row = OrderedDict()
table_row['token'] = row['token']
ace = list(row['acesDictionary'].values())[0]
if row['includeExtendedInfo']:
if ace['extendedInfo']['effectiveAllow'] is not None:
table_row['Effective Allow'] = ace['extendedInfo']['effectiveAllow']
else:
table_row['Effective Allow'] = 0
if ace['extendedInfo']['effectiveDeny'] is not None:
table_row['Effective Deny'] = ace['extendedInfo']['effectiveDeny']
else:
table_row['Effective Deny'] = 0
return table_row
def transform_resolve_permission_bits(result):
table_output = []
ace_entry = list(result[0]['acesDictionary'].values())[0]
permissions = ace_entry['resolvedPermissions']
for permission in permissions:
table_output.append(_transform_resolve_bits_row(permission))
return table_output
def _transform_resolve_bits_row(row):
table_row = OrderedDict()
table_row['Name'] = row['name']
table_row['Bit'] = row['bit']
table_row['Permission Description'] = row['displayName']
table_row['Permission Value'] = row['effectivePermission']
return table_row
def transform_teams_table_output(result):
    """Render teams as table rows, sorted by team name."""
    return [_transform_team_row(entry) for entry in sorted(result, key=_get_team_key)]

def transform_team_table_output(result):
    """Render a single team as a one-row table."""
    return [_transform_team_row(result)]

def _transform_team_row(row):
    """Build one table row (id, name, description) for a team."""
    return OrderedDict([
        ('ID', row['id']),
        ('Name', row['name']),
        ('Description', row['description']),
    ])

def transform_wikis_table_output(result):
    """Render wikis as table rows, sorted by wiki name."""
    return [_transform_wiki_row(entry) for entry in sorted(result, key=_get_wiki_key)]

def transform_wiki_table_output(result):
    """Render a single wiki as a one-row table."""
    return [_transform_wiki_row(result)]

def _transform_wiki_row(row):
    """Build one table row (id, name, type) for a wiki."""
    return OrderedDict([
        ('ID', row['id']),
        ('Name', row['name']),
        ('Type', row['type']),
    ])

def transform_wiki_page_table_output(result):
    """Render a single wiki page as a one-row table."""
    return [_transform_wiki_page_row(result)]

def _transform_wiki_page_row(row):
    """Build one table row for a wiki page; the path is single-quoted for display."""
    return OrderedDict([
        ('ETag', row['eTag']),
        ('Page Path', "'{}'".format(row['page']['path'])),
        ('Is Parent', row['page']['isParentPage']),
        ('order', row['page']['order']),
    ])
def transform_team_members_table_output(result):
table_output = []
for item in sorted(result, key=_get_member_key):
table_output.append(_transform_team_member_row(item))
return table_output
def _transform_team_member_row(row):
table_row = OrderedDict()
table_row['ID'] = row['identity']['id']
table_row['Name'] = row['identity']['displayName']
table_row['Email'] = row['identity']['uniqueName']
return table_row
def transform_users_table_output(result):
members = result['members']
table_output = []
for item in members:
table_output.append(_transform_user_row(item))
return table_output
def transform_user_table_output(result):
table_output = [_transform_user_row(result)]
return table_output
def _transform_user_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Display Name'] = row['user']['displayName']
table_row['Email'] = row['user']['mailAddress']
table_row['License Type'] = row['accessLevel']['accountLicenseType']
table_row['Access Level'] = row['accessLevel']['licenseDisplayName']
table_row['Status'] = row['accessLevel']['status']
return table_row
def _get_extension_key(extension):
return extension['extensionName'].lower()
def _get_permission_key(permission_row):
return permission_row['displayName'].lower()
def _get_service_endpoint_key(service_endpoint_row):
return service_endpoint_row['name'].lower()
def _get_project_key(project_row):
return project_row['name'].lower()
def _get_team_key(team_row):
return team_row['name'].lower()
def _get_wiki_key(wiki_row):
return wiki_row['name'].lower()
def _get_member_key(member_row):
return member_row['identity']['uniqueName'].lower() | azure-devops/azext_devops/dev/team/_format.py |
from __future__ import print_function
from collections import OrderedDict
from knack.util import CLIError
from azext_devops.dev.common.format import trim_for_display, date_time_to_only_date
def transform_extension_search_results_table_output(result):
    """Convert marketplace extension search results into table rows."""
    return [_transform_extension_search_result_row(entry) for entry in result]

def _transform_extension_search_result_row(row):
    """Build one table row (publisher id, extension id, name) from a search result."""
    return OrderedDict([
        ('Publisher Id', row['publisher']['publisherName']),
        ('Extension Id', row['extensionName']),
        ('Name', row['displayName']),
    ])
def transform_extension_table_output(result):
    """Render a single installed extension as a one-row table."""
    return [_transform_extension_row(result)]

def transform_extensions_table_output(result):
    """Render installed extensions as table rows, sorted by extension name."""
    return [_transform_extension_row(entry)
            for entry in sorted(result, key=_get_extension_key)]

def _transform_extension_row(row):
    """Build one table row for an installed extension, trimming wide fields."""
    # NOTE: the 'Version ' and 'Last Updated ' keys keep their trailing space
    # from the original column headings.
    return OrderedDict([
        ('Publisher Id', trim_for_display(row['publisherId'], 10)),
        ('Extension Id', trim_for_display(row['extensionId'], 20)),
        ('Name', trim_for_display(row['extensionName'], 20)),
        ('Version ', trim_for_display(row['version'], 20)),
        ('Last Updated ', date_time_to_only_date(row['lastPublished'])),
        ('States', trim_for_display(row['installState']['flags'], 20)),
        ('Flags', trim_for_display(row['flags'], 20)),
    ])
def transform_projects_table_output(result):
    """Render team projects as table rows, sorted by project name."""
    return [_transform_project_row(entry)
            for entry in sorted(result, key=_get_project_key)]

def transform_project_table_output(result):
    """Render a single team project as a one-row table."""
    return [_transform_project_row(result)]

def _transform_project_row(row):
    """Build one table row for a project.

    Process and source-control columns are only emitted when the matching
    capability entries are present in the payload.
    """
    # Imported at call time (presumably to avoid a circular import at module
    # load — confirm against the package layout).
    from .project import (PROCESS_TEMPLATE_CAPABILITY_NAME,
                          VERSION_CONTROL_CAPABILITY_NAME,
                          VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME)
    table_row = OrderedDict()
    table_row['ID'] = row['id']
    table_row['Name'] = row['name']
    table_row['Visibility'] = row['visibility'].capitalize()
    if 'capabilities' in row:
        capabilities = row['capabilities']
        if PROCESS_TEMPLATE_CAPABILITY_NAME in capabilities:
            process_capabilities = capabilities[PROCESS_TEMPLATE_CAPABILITY_NAME]
            if 'templateName' in process_capabilities:
                table_row['Process'] = process_capabilities['templateName']
        if VERSION_CONTROL_CAPABILITY_NAME in capabilities:
            version_capabilities = capabilities[VERSION_CONTROL_CAPABILITY_NAME]
            if VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME in version_capabilities:
                table_row['Source Control'] = version_capabilities[VERSION_CONTROL_CAPABILITY_ATTRIBUTE_NAME]
    return table_row
def transform_service_endpoints_table_output(result):
table_output = []
for item in sorted(result, key=_get_service_endpoint_key):
table_output.append(_transform_service_endpoint_row(item))
return table_output
def _transform_service_endpoint_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Name'] = row['name']
table_row['Type'] = row['type']
table_row['Is Ready'] = row['isReady']
table_row['Created By'] = row['createdBy']['displayName']
return table_row
def transform_groups_table_output(result):
    """Render one page of groups as table rows, warning when more pages exist."""
    if result['continuationToken'] is not None:
        # The service pages results; surface the token so the user can fetch the next page.
        print('Showing only 500 groups. ' +
              'To list next set of groups use this token as --continuation-token argument and run the command again.' +
              ' TOKEN:', result['continuationToken'])
    return [_transform_group_row(group) for group in result['graphGroups']]

def transform_group_table_output(result):
    """Render a single group as a one-row table."""
    return [_transform_group_show_table_output(result)]

def _transform_group_show_table_output(row):
    """Build the detailed (show) table row for a group."""
    return OrderedDict([
        ('Name', row['principalName']),
        ('Description', row['description']),
    ])

def _transform_group_row(row):
    """Build the list table row for a group."""
    return OrderedDict([
        ('Name', row['principalName']),
        ('Descriptor', row['descriptor']),
    ])
def transform_memberships_table_output(result):
table_output = []
for item in result:
table_output.append(_transform_membership_row(result[item]))
return table_output
def transform_membership_table_output(result):
table_output = []
for item in result:
table_row = OrderedDict()
row = result[item]
if row['subjectKind'] == 'user':
table_row['Name'] = row['displayName']
else:
table_row['Name'] = row['principalName']
table_row['Type'] = row['subjectKind']
table_row['Email'] = row['mailAddress']
table_output.append(table_row)
return table_output
def _transform_membership_row(row):
table_row = OrderedDict()
if row['subjectKind'] == 'user':
table_row['Name'] = row['displayName']
else:
table_row['Name'] = row['principalName']
table_row['Type'] = row['subjectKind']
table_row['Email'] = row['mailAddress']
table_row['Descriptor'] = row['descriptor']
return table_row
def transform_namespaces_table_output(result):
    """Render security namespaces as table rows."""
    return [_transform_namespace_row(entry) for entry in result]

def _transform_namespace_row(row):
    """Build one table row (id, name) for a security namespace."""
    return OrderedDict([
        ('Id', row['namespaceId']),
        ('Name', row['name']),
    ])

def transform_namespace_table_output(result):
    """Render the permission actions of the first returned namespace."""
    return [_transform_namespace_details_row(action)
            for action in result[0]['actions']]

def _transform_namespace_details_row(row):
    """Build one table row for a namespace permission action."""
    return OrderedDict([
        ('Name', row['name']),
        ('Permission Description', row['displayName']),
        ('Permission Bit', row['bit']),
    ])
def transform_acl_output(result):
table_output = []
for item in result:
table_output.append(_transform_acl_details_row(item))
return table_output
def _transform_acl_details_row(row):
if len(row['acesDictionary']) > 1:
raise CLIError('More than one entry found in Aces dictionary for this user/group.')
table_row = OrderedDict()
table_row['token'] = row['token']
ace = list(row['acesDictionary'].values())[0]
if row['includeExtendedInfo']:
if ace['extendedInfo']['effectiveAllow'] is not None:
table_row['Effective Allow'] = ace['extendedInfo']['effectiveAllow']
else:
table_row['Effective Allow'] = 0
if ace['extendedInfo']['effectiveDeny'] is not None:
table_row['Effective Deny'] = ace['extendedInfo']['effectiveDeny']
else:
table_row['Effective Deny'] = 0
return table_row
def transform_resolve_permission_bits(result):
table_output = []
ace_entry = list(result[0]['acesDictionary'].values())[0]
permissions = ace_entry['resolvedPermissions']
for permission in permissions:
table_output.append(_transform_resolve_bits_row(permission))
return table_output
def _transform_resolve_bits_row(row):
table_row = OrderedDict()
table_row['Name'] = row['name']
table_row['Bit'] = row['bit']
table_row['Permission Description'] = row['displayName']
table_row['Permission Value'] = row['effectivePermission']
return table_row
def transform_teams_table_output(result):
    """Render teams as table rows, sorted by team name."""
    return [_transform_team_row(entry) for entry in sorted(result, key=_get_team_key)]

def transform_team_table_output(result):
    """Render a single team as a one-row table."""
    return [_transform_team_row(result)]

def _transform_team_row(row):
    """Build one table row (id, name, description) for a team."""
    return OrderedDict([
        ('ID', row['id']),
        ('Name', row['name']),
        ('Description', row['description']),
    ])

def transform_wikis_table_output(result):
    """Render wikis as table rows, sorted by wiki name."""
    return [_transform_wiki_row(entry) for entry in sorted(result, key=_get_wiki_key)]

def transform_wiki_table_output(result):
    """Render a single wiki as a one-row table."""
    return [_transform_wiki_row(result)]

def _transform_wiki_row(row):
    """Build one table row (id, name, type) for a wiki."""
    return OrderedDict([
        ('ID', row['id']),
        ('Name', row['name']),
        ('Type', row['type']),
    ])

def transform_wiki_page_table_output(result):
    """Render a single wiki page as a one-row table."""
    return [_transform_wiki_page_row(result)]

def _transform_wiki_page_row(row):
    """Build one table row for a wiki page; the path is single-quoted for display."""
    return OrderedDict([
        ('ETag', row['eTag']),
        ('Page Path', "'{}'".format(row['page']['path'])),
        ('Is Parent', row['page']['isParentPage']),
        ('order', row['page']['order']),
    ])
def transform_team_members_table_output(result):
table_output = []
for item in sorted(result, key=_get_member_key):
table_output.append(_transform_team_member_row(item))
return table_output
def _transform_team_member_row(row):
table_row = OrderedDict()
table_row['ID'] = row['identity']['id']
table_row['Name'] = row['identity']['displayName']
table_row['Email'] = row['identity']['uniqueName']
return table_row
def transform_users_table_output(result):
members = result['members']
table_output = []
for item in members:
table_output.append(_transform_user_row(item))
return table_output
def transform_user_table_output(result):
table_output = [_transform_user_row(result)]
return table_output
def _transform_user_row(row):
table_row = OrderedDict()
table_row['ID'] = row['id']
table_row['Display Name'] = row['user']['displayName']
table_row['Email'] = row['user']['mailAddress']
table_row['License Type'] = row['accessLevel']['accountLicenseType']
table_row['Access Level'] = row['accessLevel']['licenseDisplayName']
table_row['Status'] = row['accessLevel']['status']
return table_row
def _get_extension_key(extension):
return extension['extensionName'].lower()
def _get_permission_key(permission_row):
return permission_row['displayName'].lower()
def _get_service_endpoint_key(service_endpoint_row):
return service_endpoint_row['name'].lower()
def _get_project_key(project_row):
return project_row['name'].lower()
def _get_team_key(team_row):
return team_row['name'].lower()
def _get_wiki_key(wiki_row):
return wiki_row['name'].lower()
def _get_member_key(member_row):
return member_row['identity']['uniqueName'].lower() | 0.502686 | 0.129183 |
import sys
import os.path
import tornado
import logging
from tornado.options import define, options
from server import OTAServerApplication
from coap import COAP_PORT, COAP_HOST
# Root logging configuration: timestamped, column-aligned records at DEBUG level.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)14s - '
                    '%(levelname)5s - %(message)s')
logger = logging.getLogger("otaserver")
# Default asset locations, resolved relative to this module's directory:
# web UI static files and the firmware upload store.
STATIC_PATH = os.path.join(os.path.dirname(__file__), "static")
UPLOAD_PATH = os.path.join(os.path.dirname(__file__), "firmwares")
def parse_command_line():
    """Declare the OTA server's command line options and parse ``sys.argv``."""
    option_specs = (
        ("static-path", STATIC_PATH,
         "Static files path (containing npm package.json file)."),
        ("upload-path", UPLOAD_PATH,
         "Path where uploaded files are stored."),
        ("http_host", "localhost", "Web application HTTP host."),
        ("http_port", 8080, "Web application HTTP port."),
        ("with_coap_server", True, "Use own CoAP server."),
        ("coap_host", COAP_HOST, "CoAP server host."),
        ("coap_port", COAP_PORT, "CoAP server port."),
        ("root_url", "", "Root Url to service Application."),
        ("notify_url", "suit/trigger", "Device update trigger url."),
        ("debug", False, "Enable debug mode."),
    )
    for name, default, help_text in option_specs:
        define(name, default=default, help=help_text)
    options.parse_command_line()
def run(arguments=None):
    """Start a broker instance.

    Args:
        arguments: optional list of command line arguments; when given it
            replaces ``sys.argv[1:]`` before option parsing so the server
            can be launched programmatically.
    """
    # BUGFIX: the previous signature used a mutable default ([]); ``None``
    # avoids sharing a single list object across calls.
    if arguments:
        sys.argv[1:] = arguments
    parse_command_line()
    if options.debug:
        logger.setLevel(logging.DEBUG)
    if not os.path.exists(options.upload_path):
        if options.upload_path == UPLOAD_PATH:
            # Create the default upload directory on first run.
            os.makedirs(UPLOAD_PATH)
        else:
            # A custom path must already exist; refuse to guess and bail out.
            logger.error("Upload path does not exist, '{}' was given."
                         .format(options.upload_path))
            return
    try:
        app = OTAServerApplication()
        app.listen(options.http_port)
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        logger.debug("Stopping application")
        tornado.ioloop.IOLoop.instance().stop()
# Allow launching the OTA server directly: ``python main.py [options]``.
if __name__ == '__main__':
    run()
import sys
import os.path
import tornado
import logging
from tornado.options import define, options
from server import OTAServerApplication
from coap import COAP_PORT, COAP_HOST
# Root logging configuration: timestamped, column-aligned records at DEBUG level.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)14s - '
                    '%(levelname)5s - %(message)s')
logger = logging.getLogger("otaserver")
# Default asset locations, resolved relative to this module's directory:
# web UI static files and the firmware upload store.
STATIC_PATH = os.path.join(os.path.dirname(__file__), "static")
UPLOAD_PATH = os.path.join(os.path.dirname(__file__), "firmwares")
def parse_command_line():
    """Declare the OTA server's command line options and parse ``sys.argv``."""
    option_specs = (
        ("static-path", STATIC_PATH,
         "Static files path (containing npm package.json file)."),
        ("upload-path", UPLOAD_PATH,
         "Path where uploaded files are stored."),
        ("http_host", "localhost", "Web application HTTP host."),
        ("http_port", 8080, "Web application HTTP port."),
        ("with_coap_server", True, "Use own CoAP server."),
        ("coap_host", COAP_HOST, "CoAP server host."),
        ("coap_port", COAP_PORT, "CoAP server port."),
        ("root_url", "", "Root Url to service Application."),
        ("notify_url", "suit/trigger", "Device update trigger url."),
        ("debug", False, "Enable debug mode."),
    )
    for name, default, help_text in option_specs:
        define(name, default=default, help=help_text)
    options.parse_command_line()
def run(arguments=None):
    """Start a broker instance.

    Args:
        arguments: optional list of command line arguments; when given it
            replaces ``sys.argv[1:]`` before option parsing so the server
            can be launched programmatically.
    """
    # BUGFIX: the previous signature used a mutable default ([]); ``None``
    # avoids sharing a single list object across calls.
    if arguments:
        sys.argv[1:] = arguments
    parse_command_line()
    if options.debug:
        logger.setLevel(logging.DEBUG)
    if not os.path.exists(options.upload_path):
        if options.upload_path == UPLOAD_PATH:
            # Create the default upload directory on first run.
            os.makedirs(UPLOAD_PATH)
        else:
            # A custom path must already exist; refuse to guess and bail out.
            logger.error("Upload path does not exist, '{}' was given."
                         .format(options.upload_path))
            return
    try:
        app = OTAServerApplication()
        app.listen(options.http_port)
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        logger.debug("Stopping application")
        tornado.ioloop.IOLoop.instance().stop()
# Allow launching the OTA server directly: ``python main.py [options]``.
if __name__ == '__main__':
    run()
import torch.nn as nn
import torch
class Listener(nn.Module):
    r"""
    Converts low level speech signals into higher level features.

    Args:
        in_features (int): frequency dimension of the input spectrogram
        hidden_size (int): the number of features in the hidden state `h`
        device: device on which intermediate tensors are allocated
        dropout_p (float, optional): dropout probability for the output sequence (default: 0.5)
        n_layers (int, optional): number of recurrent layers (default: 5)
        bidirectional (bool, optional): if True, becomes a bidirectional encoder (default: True)
        rnn_cell (str, optional): type of RNN cell, one of lstm / gru / rnn (default: gru)
        use_pyramidal (bool): whether to use pyramidal rnns for time resolution reduction (default: True)

    Inputs: inputs
        - **inputs** (batch, time, in_features): input spectrogram features.

    Returns: output, hidden
        - **output** (batch, reduced_time, hidden_size * num_directions): encoded features
        - **hidden**: final hidden state of the last RNN (a tuple for LSTM)

    Examples::
        >>> listener = Listener(in_features, hidden_size, device, dropout_p=0.5, n_layers=5)
        >>> output, hidden = listener(inputs)
    """
    def __init__(self, in_features, hidden_size, device, dropout_p=0.5, n_layers=5,
                 bidirectional=True, rnn_cell='gru', use_pyramidal=True):
        super(Listener, self).__init__()
        assert rnn_cell.lower() in ('lstm', 'gru', 'rnn'), 'rnn_cell should be lstm or gru or rnn'
        assert n_layers > 1, 'n_layers should be bigger than 1'
        if use_pyramidal:
            # 2 bottom + 2 middle layers leave n_layers - 4 layers for the top RNN.
            assert n_layers > 4, 'Pyramidal Listener`s n_layers should be bigger than 4'
        self.use_pyramidal = use_pyramidal
        self.rnn_cell = nn.LSTM if rnn_cell.lower() == 'lstm' else nn.GRU if rnn_cell.lower() == 'gru' else nn.RNN
        self.device = device
        # VGG-style front end: two 2x2 max pools shrink both the time and the
        # frequency axes by a factor of 4 while growing channels to 256.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=64),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(num_features=64),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=128),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=128),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=256),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Flattened feature size after the conv stack: 256 channels times
        # (in_features // 4) frequency bins.
        # BUGFIX: the previous expression ((in_features - 1) << 6 when odd,
        # else in_features << 6) disagreed with the conv output for widths
        # that are 2 or 3 mod 4, causing a runtime shape mismatch.
        in_features = (in_features >> 2) << 8
        if use_pyramidal:
            # BUGFIX: nn.LSTM/GRU/RNN take `input_size`, not `in_features`.
            self.bottom_rnn = self.rnn_cell(
                input_size=in_features,
                hidden_size=hidden_size,
                num_layers=2,
                batch_first=True,
                bidirectional=bidirectional,
                dropout=dropout_p
            )
            # BUGFIX: a unidirectional bottom RNN outputs hidden_size features
            # per step, not 0.
            self.middle_rnn = PyramidalRNN(
                rnn_cell=rnn_cell,
                in_features=hidden_size << 1 if bidirectional else hidden_size,
                hidden_size=hidden_size,
                dropout_p=dropout_p,
                n_layers=2,
                device=device
            )
            # The middle PyramidalRNN is always bidirectional, so the top RNN
            # consumes hidden_size * 2 features.
            self.top_rnn = PyramidalRNN(
                rnn_cell=rnn_cell,
                in_features=hidden_size << 1,
                hidden_size=hidden_size,
                dropout_p=dropout_p,
                n_layers=n_layers - 4,
                device=device
            )
        else:
            self.rnn = self.rnn_cell(
                input_size=in_features,
                hidden_size=hidden_size,
                num_layers=n_layers,
                batch_first=True,
                bidirectional=bidirectional,
                dropout=dropout_p
            )
    def forward(self, inputs):
        """
        Encode a batch of spectrograms.

        Args:
            inputs (batch, time, in_features): input spectrogram features.

        Returns: output, hidden
            - **output** (batch, reduced_time, hidden_size * num_directions)
            - **hidden**: final hidden state of the last RNN stage
        """
        # Add a channel axis for the 2-D conv stack, then flatten the
        # (channels, frequency) axes back into a single feature dimension.
        x = self.conv(inputs.unsqueeze(1)).to(self.device)
        x = x.transpose(1, 2)
        x = x.contiguous().view(x.size(0), x.size(1), x.size(2) * x.size(3)).to(self.device)
        if self.training:
            self.flatten_parameters()
        if self.use_pyramidal:
            output, hidden = self.bottom_rnn(x)
            output, hidden = self.middle_rnn(output)
            output, hidden = self.top_rnn(output)
        else:
            output, hidden = self.rnn(x)
        return output, hidden
    def flatten_parameters(self):
        """ flatten parameters for fast training """
        if self.use_pyramidal:
            self.bottom_rnn.flatten_parameters()
            self.middle_rnn.flatten_parameters()
            self.top_rnn.flatten_parameters()
        else:
            self.rnn.flatten_parameters()
class PyramidalRNN(nn.Module):
    r"""
    Pyramidal RNN that halves the time resolution of its input.

    Consecutive frame pairs are concatenated along the feature axis before
    being fed to a bidirectional RNN, so the output sequence is half as long
    as the input (odd-length inputs are zero-padded by one frame first).

    Args:
        rnn_cell (str): type of RNN cell, one of lstm / gru / rnn
        in_features (int): size of each input frame
        hidden_size (int): the number of features in the hidden state `h`
        dropout_p (float): dropout probability between stacked RNN layers
        device: device used to allocate the padding frame
        n_layers (int, optional): number of recurrent layers (default: 2)

    Inputs: inputs
        - **inputs** (batch, time, in_features): input feature sequence.

    Returns: output, hidden
        - **output** (batch, time // 2, hidden_size * 2): encoded features
        - **hidden**: final hidden state (a tuple for LSTM)

    Examples::
        >>> rnn = PyramidalRNN(rnn_cell, in_features, hidden_size, dropout_p, device)
        >>> output, hidden = rnn(inputs)
    """
    def __init__(self, rnn_cell, in_features, hidden_size, dropout_p, device, n_layers=2):
        super(PyramidalRNN, self).__init__()
        assert rnn_cell.lower() in ('lstm', 'gru', 'rnn'), 'rnn_cell should be lstm or gru or rnn'
        cell_name = rnn_cell.lower()
        if cell_name == 'lstm':
            self.rnn_cell = nn.LSTM
        elif cell_name == 'gru':
            self.rnn_cell = nn.GRU
        else:
            self.rnn_cell = nn.RNN
        # Each RNN step consumes two concatenated frames, hence the doubled size.
        self.rnn = self.rnn_cell(
            input_size=in_features << 1,
            hidden_size=hidden_size,
            num_layers=n_layers,
            bidirectional=True,
            batch_first=True,
            dropout=dropout_p
        )
        self.device = device
    def forward(self, inputs):
        """
        Halve the time resolution of *inputs* and encode it with the RNN.

        Args:
            inputs (batch, time, in_features): input feature sequence.

        Returns: output, hidden
            - **output** (batch, time // 2, hidden_size * 2)
        """
        batch, steps, frame_size = inputs.size(0), inputs.size(1), inputs.size(2)
        if steps % 2:
            # Zero-pad odd-length sequences so frames can be paired up.
            padding = torch.zeros((batch, 1, frame_size)).to(self.device)
            inputs = torch.cat([inputs, padding], dim=1)
            steps += 1
        paired = inputs.contiguous().view(batch, steps // 2, frame_size * 2)
        return self.rnn(paired)
    def flatten_parameters(self):
        """Compact RNN weights into contiguous memory for faster execution."""
        self.rnn.flatten_parameters()
import torch
class Listener(nn.Module):
    r"""
    Converts low level speech signals into higher level features.

    Args:
        in_features (int): frequency dimension of the input spectrogram
        hidden_size (int): the number of features in the hidden state `h`
        device: device on which intermediate tensors are allocated
        dropout_p (float, optional): dropout probability for the output sequence (default: 0.5)
        n_layers (int, optional): number of recurrent layers (default: 5)
        bidirectional (bool, optional): if True, becomes a bidirectional encoder (default: True)
        rnn_cell (str, optional): type of RNN cell, one of lstm / gru / rnn (default: gru)
        use_pyramidal (bool): whether to use pyramidal rnns for time resolution reduction (default: True)

    Inputs: inputs
        - **inputs** (batch, time, in_features): input spectrogram features.

    Returns: output, hidden
        - **output** (batch, reduced_time, hidden_size * num_directions): encoded features
        - **hidden**: final hidden state of the last RNN (a tuple for LSTM)

    Examples::
        >>> listener = Listener(in_features, hidden_size, device, dropout_p=0.5, n_layers=5)
        >>> output, hidden = listener(inputs)
    """
    def __init__(self, in_features, hidden_size, device, dropout_p=0.5, n_layers=5,
                 bidirectional=True, rnn_cell='gru', use_pyramidal=True):
        super(Listener, self).__init__()
        assert rnn_cell.lower() in ('lstm', 'gru', 'rnn'), 'rnn_cell should be lstm or gru or rnn'
        assert n_layers > 1, 'n_layers should be bigger than 1'
        if use_pyramidal:
            # 2 bottom + 2 middle layers leave n_layers - 4 layers for the top RNN.
            assert n_layers > 4, 'Pyramidal Listener`s n_layers should be bigger than 4'
        self.use_pyramidal = use_pyramidal
        self.rnn_cell = nn.LSTM if rnn_cell.lower() == 'lstm' else nn.GRU if rnn_cell.lower() == 'gru' else nn.RNN
        self.device = device
        # VGG-style front end: two 2x2 max pools shrink both the time and the
        # frequency axes by a factor of 4 while growing channels to 256.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=64),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(num_features=64),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=128),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=128),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1),
            nn.Hardtanh(0, 20, inplace=True),
            nn.BatchNorm2d(num_features=256),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Flattened feature size after the conv stack: 256 channels times
        # (in_features // 4) frequency bins.
        # BUGFIX: the previous expression ((in_features - 1) << 6 when odd,
        # else in_features << 6) disagreed with the conv output for widths
        # that are 2 or 3 mod 4, causing a runtime shape mismatch.
        in_features = (in_features >> 2) << 8
        if use_pyramidal:
            # BUGFIX: nn.LSTM/GRU/RNN take `input_size`, not `in_features`.
            self.bottom_rnn = self.rnn_cell(
                input_size=in_features,
                hidden_size=hidden_size,
                num_layers=2,
                batch_first=True,
                bidirectional=bidirectional,
                dropout=dropout_p
            )
            # BUGFIX: a unidirectional bottom RNN outputs hidden_size features
            # per step, not 0.
            self.middle_rnn = PyramidalRNN(
                rnn_cell=rnn_cell,
                in_features=hidden_size << 1 if bidirectional else hidden_size,
                hidden_size=hidden_size,
                dropout_p=dropout_p,
                n_layers=2,
                device=device
            )
            # The middle PyramidalRNN is always bidirectional, so the top RNN
            # consumes hidden_size * 2 features.
            self.top_rnn = PyramidalRNN(
                rnn_cell=rnn_cell,
                in_features=hidden_size << 1,
                hidden_size=hidden_size,
                dropout_p=dropout_p,
                n_layers=n_layers - 4,
                device=device
            )
        else:
            self.rnn = self.rnn_cell(
                input_size=in_features,
                hidden_size=hidden_size,
                num_layers=n_layers,
                batch_first=True,
                bidirectional=bidirectional,
                dropout=dropout_p
            )
    def forward(self, inputs):
        """
        Encode a batch of spectrograms.

        Args:
            inputs (batch, time, in_features): input spectrogram features.

        Returns: output, hidden
            - **output** (batch, reduced_time, hidden_size * num_directions)
            - **hidden**: final hidden state of the last RNN stage
        """
        # Add a channel axis for the 2-D conv stack, then flatten the
        # (channels, frequency) axes back into a single feature dimension.
        x = self.conv(inputs.unsqueeze(1)).to(self.device)
        x = x.transpose(1, 2)
        x = x.contiguous().view(x.size(0), x.size(1), x.size(2) * x.size(3)).to(self.device)
        if self.training:
            self.flatten_parameters()
        if self.use_pyramidal:
            output, hidden = self.bottom_rnn(x)
            output, hidden = self.middle_rnn(output)
            output, hidden = self.top_rnn(output)
        else:
            output, hidden = self.rnn(x)
        return output, hidden
    def flatten_parameters(self):
        """ flatten parameters for fast training """
        if self.use_pyramidal:
            self.bottom_rnn.flatten_parameters()
            self.middle_rnn.flatten_parameters()
            self.top_rnn.flatten_parameters()
        else:
            self.rnn.flatten_parameters()
class PyramidalRNN(nn.Module):
    r"""
    Pyramidal RNN that halves the time resolution of its input.

    Consecutive frame pairs are concatenated along the feature axis before
    being fed to a bidirectional RNN, so the output sequence is half as long
    as the input (odd-length inputs are zero-padded by one frame first).

    Args:
        rnn_cell (str): type of RNN cell, one of lstm / gru / rnn
        in_features (int): size of each input frame
        hidden_size (int): the number of features in the hidden state `h`
        dropout_p (float): dropout probability between stacked RNN layers
        device: device used to allocate the padding frame
        n_layers (int, optional): number of recurrent layers (default: 2)

    Inputs: inputs
        - **inputs** (batch, time, in_features): input feature sequence.

    Returns: output, hidden
        - **output** (batch, time // 2, hidden_size * 2): encoded features
        - **hidden**: final hidden state (a tuple for LSTM)

    Examples::
        >>> rnn = PyramidalRNN(rnn_cell, in_features, hidden_size, dropout_p, device)
        >>> output, hidden = rnn(inputs)
    """
    def __init__(self, rnn_cell, in_features, hidden_size, dropout_p, device, n_layers=2):
        super(PyramidalRNN, self).__init__()
        assert rnn_cell.lower() in ('lstm', 'gru', 'rnn'), 'rnn_cell should be lstm or gru or rnn'
        cell_name = rnn_cell.lower()
        if cell_name == 'lstm':
            self.rnn_cell = nn.LSTM
        elif cell_name == 'gru':
            self.rnn_cell = nn.GRU
        else:
            self.rnn_cell = nn.RNN
        # Each RNN step consumes two concatenated frames, hence the doubled size.
        self.rnn = self.rnn_cell(
            input_size=in_features << 1,
            hidden_size=hidden_size,
            num_layers=n_layers,
            bidirectional=True,
            batch_first=True,
            dropout=dropout_p
        )
        self.device = device
    def forward(self, inputs):
        """
        Halve the time resolution of *inputs* and encode it with the RNN.

        Args:
            inputs (batch, time, in_features): input feature sequence.

        Returns: output, hidden
            - **output** (batch, time // 2, hidden_size * 2)
        """
        batch, steps, frame_size = inputs.size(0), inputs.size(1), inputs.size(2)
        if steps % 2:
            # Zero-pad odd-length sequences so frames can be paired up.
            padding = torch.zeros((batch, 1, frame_size)).to(self.device)
            inputs = torch.cat([inputs, padding], dim=1)
            steps += 1
        paired = inputs.contiguous().view(batch, steps // 2, frame_size * 2)
        return self.rnn(paired)
    def flatten_parameters(self):
        """Compact RNN weights into contiguous memory for faster execution."""
        self.rnn.flatten_parameters()
from msrest.serialization import Model
class TunnelConnectionHealth(Model):
    """Health information of a single VPN tunnel of a
    VirtualNetworkGatewayConnection.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar tunnel: Tunnel name.
    :vartype tunnel: str
    :ivar connection_status: Virtual network Gateway connection status.
     Possible values include: 'Unknown', 'Connecting', 'Connected',
     'NotConnected'
    :vartype connection_status: str or
     ~azure.mgmt.network.v2017_03_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this
     connection
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: The Egress Bytes Transferred in this
     connection
    :vartype egress_bytes_transferred: long
    :ivar last_connection_established_utc_time: The time at which connection
     was established in Utc format.
    :vartype last_connection_established_utc_time: str
    """

    # Every property is server-populated and therefore read-only.
    _validation = {
        'tunnel': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'last_connection_established_utc_time': {'readonly': True},
    }

    # Python attribute name -> wire (JSON) key and msrest type.
    _attribute_map = {
        'tunnel': {'key': 'tunnel', 'type': 'str'},
        'connection_status': {'key': 'connectionStatus', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
        'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(TunnelConnectionHealth, self).__init__(**kwargs)
        # All read-only fields start out unset on the client side.
        for read_only_attr in self._validation:
            setattr(self, read_only_attr, None)
from msrest.serialization import Model
class TunnelConnectionHealth(Model):
    """Health information of a single VPN tunnel of a
    VirtualNetworkGatewayConnection.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar tunnel: Tunnel name.
    :vartype tunnel: str
    :ivar connection_status: Virtual network Gateway connection status.
     Possible values include: 'Unknown', 'Connecting', 'Connected',
     'NotConnected'
    :vartype connection_status: str or
     ~azure.mgmt.network.v2017_03_01.models.VirtualNetworkGatewayConnectionStatus
    :ivar ingress_bytes_transferred: The Ingress Bytes Transferred in this
     connection
    :vartype ingress_bytes_transferred: long
    :ivar egress_bytes_transferred: The Egress Bytes Transferred in this
     connection
    :vartype egress_bytes_transferred: long
    :ivar last_connection_established_utc_time: The time at which connection
     was established in Utc format.
    :vartype last_connection_established_utc_time: str
    """

    # Every property is server-populated and therefore read-only.
    _validation = {
        'tunnel': {'readonly': True},
        'connection_status': {'readonly': True},
        'ingress_bytes_transferred': {'readonly': True},
        'egress_bytes_transferred': {'readonly': True},
        'last_connection_established_utc_time': {'readonly': True},
    }

    # Python attribute name -> wire (JSON) key and msrest type.
    _attribute_map = {
        'tunnel': {'key': 'tunnel', 'type': 'str'},
        'connection_status': {'key': 'connectionStatus', 'type': 'str'},
        'ingress_bytes_transferred': {'key': 'ingressBytesTransferred', 'type': 'long'},
        'egress_bytes_transferred': {'key': 'egressBytesTransferred', 'type': 'long'},
        'last_connection_established_utc_time': {'key': 'lastConnectionEstablishedUtcTime', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(TunnelConnectionHealth, self).__init__(**kwargs)
        # All read-only fields start out unset on the client side.
        for read_only_attr in self._validation:
            setattr(self, read_only_attr, None)
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class SparseSegmentSqrtN(Op):
    ''' The operation computes the sum along sparse segments of a tensor and divides it by the square root of N, where N is a number of rows in a segment.
        For more details, see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-segment-sqrt-n.

        Three inputs:
            - [0, required] Data tensor from which rows are selected for the sum divided by sqrt of N (ND),
            - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D),
            - [2, required] Tensor of segment IDs to which selected rows belong.
              Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input.
              Values must be sorted and can be repeated. (1D).

        One output:
            - [0, required] The output has the same shape as the data tensor, except for dimension 0, which has a size equal to a number of segments (ND).
    '''
    op = 'SparseSegmentSqrtN'

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': __class__.op,
            'op': __class__.op,
            'version': 'experimental',
            'infer': __class__.infer,
            'in_ports_count': 3,
            'out_ports_count': 1,
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        return []

    @staticmethod
    def infer(node: Node):
        """Shape (and, for constant inputs, value) inference."""
        # check a number of input/output edges
        assert len(node.in_nodes()) == 3
        assert len(node.out_nodes()) == 1

        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        segment_ids_shape = node.in_port(2).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        segment_ids_value = node.in_port(2).data.get_value()

        # check input shapes
        assert data_shape is not None, \
            "Shape for input data tensor to SparseSegmentSqrtN must be defined"
        assert indices_shape is not None and indices_shape.size == 1, \
            "SparseSegmentSqrtN supports only 1D indices tensor"
        assert segment_ids_shape is not None and segment_ids_shape.size == 1, \
            "SparseSegmentSqrtN supports only 1D segment IDs tensor"
        # np.array_equal instead of `==`: comparing numpy arrays with `==`
        # produces an element-wise array, which is fragile in a bare assert.
        assert np.array_equal(segment_ids_shape, indices_shape), \
            "Indices and segment IDs tensors must have the same shape"

        # compute the output shape on a copy: the original assigned the shape
        # array itself, so output_shape[0] = ... mutated the input's shape
        output_shape = data_shape.copy()
        output_shape[0] = segment_ids_shape[0]
        node.out_port(0).data.set_shape(output_shape)

        # value inference is possible only when all inputs are constant
        if data_value is None or indices_value is None or segment_ids_value is None:
            return

        # check that values in segment_ids are sorted
        for i in range(1, len(segment_ids_value)):
            assert segment_ids_value[i-1] <= segment_ids_value[i], \
                "Values in segment IDs are not sorted"
        num_segments = int(segment_ids_value[-1]) + 1

        # check that indices are in a range [0, data_shape[0])
        assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \
            "Some value in indices tensor is out of range"

        # sum the selected rows per segment, then divide each segment by
        # sqrt of its row count.  np.int64/np.float64 replace the np.int /
        # np.float aliases deprecated in NumPy 1.20 and removed in 1.24.
        num_adds = np.zeros(num_segments, dtype=np.int64)
        output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float64)
        output_shape = output_value.shape
        for i in range(len(segment_ids_value)):
            segment_id = int(segment_ids_value[i])
            indice = int(indices_value[i])
            output_value[segment_id, :] += data_value[indice, :]
            num_adds[segment_id] += 1
        num_adds = np.sqrt(num_adds)
        for segment_id in range(num_segments):
            if num_adds[segment_id] != 0:
                output_value[segment_id, :] /= num_adds[segment_id]
        node.out_port(0).data.set_shape(output_shape)
        node.out_port(0).data.set_value(output_value)
import numpy as np
from mo.graph.graph import Node, Graph
from mo.ops.op import Op
class SparseSegmentSqrtN(Op):
    ''' The operation computes the sum along sparse segments of a tensor and divides it by the square root of N, where N is a number of rows in a segment.
        For more details, see https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/sparse-segment-sqrt-n.

        Three inputs:
            - [0, required] Data tensor from which rows are selected for the sum divided by sqrt of N (ND),
            - [1, required] Tensor of indices of selected rows from the first input tensor along 0 dimension (1D),
            - [2, required] Tensor of segment IDs to which selected rows belong.
              Selected rows belonging to the same segment are summed up. The tensor has the same size as the second input.
              Values must be sorted and can be repeated. (1D).

        One output:
            - [0, required] The output has the same shape as the data tensor, except for dimension 0, which has a size equal to a number of segments (ND).
    '''
    op = 'SparseSegmentSqrtN'

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': __class__.op,
            'op': __class__.op,
            'version': 'experimental',
            'infer': __class__.infer,
            'in_ports_count': 3,
            'out_ports_count': 1,
        }
        super().__init__(graph, mandatory_props, attrs)

    def supported_attrs(self):
        return []

    @staticmethod
    def infer(node: Node):
        """Shape (and, for constant inputs, value) inference."""
        # check a number of input/output edges
        assert len(node.in_nodes()) == 3
        assert len(node.out_nodes()) == 1

        data_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        segment_ids_shape = node.in_port(2).data.get_shape()
        data_value = node.in_port(0).data.get_value()
        indices_value = node.in_port(1).data.get_value()
        segment_ids_value = node.in_port(2).data.get_value()

        # check input shapes
        assert data_shape is not None, \
            "Shape for input data tensor to SparseSegmentSqrtN must be defined"
        assert indices_shape is not None and indices_shape.size == 1, \
            "SparseSegmentSqrtN supports only 1D indices tensor"
        assert segment_ids_shape is not None and segment_ids_shape.size == 1, \
            "SparseSegmentSqrtN supports only 1D segment IDs tensor"
        # np.array_equal instead of `==`: comparing numpy arrays with `==`
        # produces an element-wise array, which is fragile in a bare assert.
        assert np.array_equal(segment_ids_shape, indices_shape), \
            "Indices and segment IDs tensors must have the same shape"

        # compute the output shape on a copy: the original assigned the shape
        # array itself, so output_shape[0] = ... mutated the input's shape
        output_shape = data_shape.copy()
        output_shape[0] = segment_ids_shape[0]
        node.out_port(0).data.set_shape(output_shape)

        # value inference is possible only when all inputs are constant
        if data_value is None or indices_value is None or segment_ids_value is None:
            return

        # check that values in segment_ids are sorted
        for i in range(1, len(segment_ids_value)):
            assert segment_ids_value[i-1] <= segment_ids_value[i], \
                "Values in segment IDs are not sorted"
        num_segments = int(segment_ids_value[-1]) + 1

        # check that indices are in a range [0, data_shape[0])
        assert np.all(indices_value >= 0) and np.all(indices_value < data_shape[0]), \
            "Some value in indices tensor is out of range"

        # sum the selected rows per segment, then divide each segment by
        # sqrt of its row count.  np.int64/np.float64 replace the np.int /
        # np.float aliases deprecated in NumPy 1.20 and removed in 1.24.
        num_adds = np.zeros(num_segments, dtype=np.int64)
        output_value = np.zeros([num_segments] + data_shape[1:].tolist(), dtype=np.float64)
        output_shape = output_value.shape
        for i in range(len(segment_ids_value)):
            segment_id = int(segment_ids_value[i])
            indice = int(indices_value[i])
            output_value[segment_id, :] += data_value[indice, :]
            num_adds[segment_id] += 1
        num_adds = np.sqrt(num_adds)
        for segment_id in range(num_segments):
            if num_adds[segment_id] != 0:
                output_value[segment_id, :] /= num_adds[segment_id]
        node.out_port(0).data.set_shape(output_shape)
        node.out_port(0).data.set_value(output_value)
import re
from collections import namedtuple
from collections import OrderedDict
import json
# Geometric / netlist primitives shared by all parsers below.
Point = namedtuple('Point', ['x', 'y'])
# Axis-aligned rectangle given by lower-left and upper-right corners.
Rect = namedtuple('Rect', ['ll', 'ur'])
# A named connectable port of a block on a routing layer.
Port = namedtuple('Port', ['port_nm', 'layer', 'rect'])
# An internal routing obstruction ('INT' lines) on a routing layer.
Blockage = namedtuple('Blockage', ['layer', 'rect'])
# A top-level terminal attached to a net on a routing layer.
Terminal = namedtuple('Terminal', ['net_nm', 'layer'])
class Placement:
    """Parser for a placement result ('.pl') file.

    Holds the die bounding box, the placement (origin Point plus
    orientation flag) of every block, and per-net position entries.
    """
    def __init__( self):
        self.die = None                       # Rect: die area, set by parse()
        self.block_placement = OrderedDict()  # block name -> (name, origin Point, orientation)
        self.net_wire_lengths = []            # (net name, Point) entries
    def __repr__( self):
        return 'Placement(' + str(self.die) + "," + str(self.block_placement) + "," + str(self.net_wire_lengths) + ')'
    def semantic( self):
        # A die rectangle must have been parsed and must be non-inverted.
        assert self.die is not None
        assert self.die.ll.x <= self.die.ur.x
        assert self.die.ll.y <= self.die.ur.y
    def parse( self, fp):
        """Parse the file line by line; any unmatched line triggers an assert."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        # 'DIE {llx, lly} {urx, ury}'
        p_die = re.compile( r'^DIE\s*'
                            r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
                            r'\s*'
                            r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
                            r'\s*$')
        # '<net> <x> <y>' position entry (three fields)
        p_triple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s*$')
        # '<block> <x> <y> <orient>' placement entry (four fields)
        p_quadruple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$')
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_die.match(line)
            if m:
                self.die = Rect( Point( int(m.groups()[0]), int(m.groups()[1])), Point( int(m.groups()[2]), int(m.groups()[3])))
                continue
            # p_triple is anchored at end-of-line, so a four-field
            # placement line cannot be consumed here by mistake.
            m = p_triple.match(line)
            if m:
                self.net_wire_lengths.append( ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2]))))
                continue
            m = p_quadruple.match(line)
            if m:
                self.block_placement[m.groups()[0]] = ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2])), m.groups()[3])
                continue
            assert False, line  # unrecognized line
class Constraint:
    """Base class for all placement constraints parsed from a '.const' file."""

    def __init__( self):
        pass
class SymmNet(Constraint):
    """Symmetric-net constraint over two pin lists.

    `lst0` and `lst1` are attached externally by Constraints.parse();
    each holds a net name followed by pin references.
    """

    def __init__( self):
        pass
    def __repr__( self):
        return "SymmNet({},{})".format(self.lst0, self.lst1)
    def semantic( self):
        # Each side must carry a net name plus at least one pin.
        assert len(self.lst0) >= 2
        assert len(self.lst1) >= 2
class CritNet(Constraint):
    """Criticality constraint for one net; `level` is 'mid' or 'min'."""

    def __init__( self):
        pass
    def __repr__( self):
        return "CritNet({},{})".format(self.net_nm, self.level)
    def semantic( self):
        # Only two criticality levels are recognised.
        assert self.level in ('mid', 'min')
class ShieldNet(Constraint):
    """Shield-net constraint; its payload is not parsed yet."""

    def __init__( self):
        pass
    def __repr__( self):
        return "ShieldNet()"
    def semantic( self):
        # Nothing to validate yet.
        pass
class MatchBlock(Constraint):
    """Block-matching constraint; its payload is not parsed yet."""

    def __init__( self):
        pass
    def __repr__( self):
        return "MatchBlock()"
    def semantic( self):
        # Nothing to validate yet.
        pass
class Constraints:
    """Parser for a constraint ('.const') file.

    Produces a list of Constraint subclasses (SymmNet, CritNet,
    ShieldNet, MatchBlock) in file order.
    """
    def __init__( self):
        self.constraints = []
    def __repr__( self):
        return ','.join( [ str(x) for x in self.constraints])
    def semantic( self):
        # Each constraint validates itself.
        for c in self.constraints:
            c.semantic()
    def parse( self, fp):
        """Parse lines of the form 'Tag ( payload )'; unmatched lines assert."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_constraint = re.compile( r'^(SymmNet|CritNet|ShieldNet|MatchBlock)'
                                   r'\s*\('
                                   r'(.*)'
                                   r'\)\s*$')
        # '{a,b,...} , {c,d,...}' -- SymmNet payload
        p_bracecommasep = re.compile( r'^{(.+)}\s*,\s*{(.+)}$')
        # 'net , level' -- CritNet payload
        p_commasep = re.compile( r'^(\S+)\s*,\s*(\S+)$')
        # 'block/pin' reference inside a pin list
        p_pin = re.compile( r'^(.+)/(.+)$')
        def toLst( s):
            # 'net,blk/pin,term' -> [net, (blk, pin), ('terminal', term)]
            lst = s.split(',')
            assert len(lst) >= 2, lst
            result = lst[0:1]
            for e in lst[1:]:
                m = p_pin.match( e)
                if m:
                    block_nm = m.groups()[0]
                    formal_nm = m.groups()[1]
                    result.append( ( block_nm, formal_nm))
                    continue
                # No '/' present: treat as a reference to a top-level terminal.
                result.append( ( 'terminal', e))
            return result
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_constraint.match(line)
            if m:
                tag = m.groups()[0]
                rest = m.groups()[1].strip( ' ')
                if tag == 'SymmNet':
                    c = SymmNet()
                    mm = p_bracecommasep.match( rest)
                    assert mm, rest
                    c.lst0 = toLst( mm.groups()[0])
                    c.lst1 = toLst( mm.groups()[1])
                elif tag == 'CritNet':
                    c = CritNet()
                    mm = p_commasep.match( rest)
                    assert mm, rest
                    c.net_nm = mm.groups()[0]
                    c.level = mm.groups()[1]
                elif tag == 'ShieldNet':
                    # Payload currently ignored.
                    c = ShieldNet()
                    pass
                elif tag == 'MatchBlock':
                    # Payload currently ignored.
                    c = MatchBlock()
                    pass
                else:
                    assert False
                self.constraints.append( c)
                continue
            assert False, line  # unrecognized line
class Net:
    """A single net: name, declared pin count and (block, pin) list."""

    def __init__( self):
        self.net_nm = None     # set by Netlist.parse from the 'name : N' header
        self.pin_count = None  # declared pin count from the same header
        self.pin_lst = []      # (block name | 'terminal', pin name) pairs
    def __repr__( self):
        fields = (self.net_nm, str(self.pin_count), str(self.pin_lst))
        return "Net(" + ",".join(fields) + ")"
class Netlist:
    """Parser for a '.nets' file: summary params plus net -> pin lists."""
    def __init__( self):
        self.params = OrderedDict()  # 'NumNets' / 'NumPins' -> int
        self.nets = OrderedDict()    # net name -> Net
        self.pins = {}               # (block, pin) -> net name; filled by semantic()
    def __repr__( self):
        return "Netlist(" + str(self.params) + "," + str(self.nets) + ")"
    def semantic( self):
        """Cross-check declared counts and build the pin -> net index."""
        assert self.params['NumNets'] == len(self.nets)
        # 'terminal' pseudo-pins do not count towards NumPins.
        nPins = sum( [ len([ x for x in v.pin_lst if x[0] != 'terminal']) for (k,v) in self.nets.items()])
        assert self.params['NumPins'] == nPins, (self.params['NumPins'], nPins)
        for (k,v) in self.nets.items():
            assert v.pin_count is not None, k
            assert v.pin_count == len(v.pin_lst), (k, v.pin_count, len(v.pin_lst))
            for pin in v.pin_lst:
                # Each pin may appear on exactly one net.
                # NOTE(review): this mutates self.pins, so calling
                # semantic() a second time would trip the duplicate check.
                assert pin not in self.pins, (k, pin,'not in',self.pins)
                self.pins[pin] = k
    def parse( self, fp):
        """Parse 'Key : N' params, 'net : N' headers and 'block pin' lines."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_assignments = re.compile( r'^(NumNets|NumPins)\s*:\s*(\d+)\s*$')
        p_net_and_count = re.compile( r'^(\S+)\s*:\s*(\d+)\s*$')
        p_pairs = re.compile( r'^(\S+)\s+(\S+)\s*$')
        net = None  # Net currently collecting pin lines
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            # p_assignments must be tried before p_net_and_count,
            # which would also match a 'NumNets : 8' line.
            m = p_assignments.match(line)
            if m:
                self.params[m.groups()[0]] = int(m.groups()[1])
                continue
            m = p_net_and_count.match(line)
            if m:
                net = Net()
                net.net_nm = m.groups()[0]
                net.pin_count = int(m.groups()[1])
                self.nets[net.net_nm] = net
                continue
            m = p_pairs.match(line)
            if m:
                net.pin_lst.append( (m.groups()[0], m.groups()[1]))
                continue
            assert False, line  # unrecognized line
class Block:
    """A hard rectilinear block: name, bounding box, ports and blockages."""

    def __init__( self, nm):
        self.nm = nm
        self.rect = None        # outline Rect, set from the blocks file
        self.port_count = None  # declared count from the BLOCK header
        self.port_lst = []
        self.blockage_lst = []
    def __repr__( self):
        fields = (self.nm, str(self.port_count), str(self.port_lst))
        return 'Block(' + ','.join(fields) + ')'
    def semantic( self):
        # A BLOCK header must have been seen and all its ports parsed.
        assert self.port_count is not None
        assert self.port_count == len(self.port_lst), (self.port_count, len(self.port_lst))
class Blocks:
    """Parser for a '.blocks' file: block outlines, ports, internal
    blockages ('INT' lines) and top-level terminals.
    """
    def __init__( self):
        self.params = {}                # header counts, e.g. 'NumTerminals'
        self.block_lst = OrderedDict()  # block name -> Block
        self.terminal_lst = []          # Terminal entries
    def __repr__( self):
        return 'Blocks(' + str(self.params) + "," + str(self.block_lst) + "," + str(self.terminal_lst) + ')'
    def semantic( self):
        """Check the declared counts and validate each parsed block."""
        # Only hard rectilinear blocks are supported by this parser.
        assert self.params['NumSoftRectangularBlocks'] == 0
        assert self.params['NumHardRectilinearBlocks'] == len(self.block_lst)
        assert self.params['NumTerminals'] == len(self.terminal_lst)
        for (k,v) in self.block_lst.items():
            v.semantic()
    def parse( self, fp):
        """Parse the file; any non-comment line that matches no pattern asserts."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_assignments = re.compile( r'^(NumSoftRectangularBlocks|NumHardRectilinearBlocks|NumTerminals)\s*:\s*(\d+)\s*$')
        # '<name> hardrectilinear <n> (x, y) ...' outline line
        p_outline = re.compile( r'^(\S+)\s+(hardrectilinear)\s+'
                                r'(\d+)\s+'
                                r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*)*)'
                                r'$')
        p_block = re.compile( r'^BLOCK\s+(\S+)\s*:\s*(\d+)\s*$')
        # '<port> <layer> (x, y) x4' port / blockage line
        p_port = re.compile( r'^(\S+)\s+(\S+)\s+'
                             r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*){4})'
                             r'$')
        p_terminal = re.compile( r'^(\S+)\s+(\S+)\s+terminal\s*$')
        p_pair = re.compile( r'^\s*\(\s*(-?\d+)\s*,\s*(-?\d+)\s*\)(.*)$')
        def parse_pair_list( s):
            # Convert a run of '(x, y)' pairs into a list of Points.
            result = []
            rest = s
            while True:
                m = p_blank.match( rest)
                if m: return result
                m = p_pair.match( rest)
                assert m, rest
                x = int(m.groups()[0])
                y = int(m.groups()[1])
                rest = m.groups()[2]
                result.append( Point(x=x,y=y))
        block = None  # Block currently receiving port lines (set by BLOCK header)
        for line in fp:
            line = line.rstrip('\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_assignments.match(line)
            if m:
                self.params[m.groups()[0]] = int(m.groups()[1])
                continue
            m = p_outline.match(line)
            if m:
                block_nm = m.groups()[0]
                block = Block( block_nm)
                type_nm = m.groups()[1]
                point_count = int(m.groups()[2])
                point_lst = parse_pair_list( m.groups()[3])
                assert point_count == len(point_lst)
                # Outlines must be rectangles listed ll, ul, ur, lr.
                assert point_count == 4
                rect = Rect( ll=point_lst[0], ur=point_lst[2])
                for p in point_lst:
                    assert rect.ll.x <= p.x
                    assert rect.ll.y <= p.y
                    assert rect.ur.x >= p.x
                    assert rect.ur.y >= p.y
                block.rect = rect
                self.block_lst[block_nm] = block
                block = None
                continue
            m = p_block.match(line)
            if m:
                block_nm = m.groups()[0]
                # Ports may only follow an already-declared outline.
                assert block_nm in self.block_lst
                block = self.block_lst[block_nm]
                block.port_count = int(m.groups()[1])
                continue
            m = p_port.match(line)
            if m:
                port_nm = m.groups()[0]
                layer = m.groups()[1]
                point_lst = parse_pair_list( m.groups()[2])
                assert len(point_lst) == 4
                rect = Rect( ll=point_lst[0], ur=point_lst[2])
                # Containment of all four points is deliberately NOT
                # asserted here: real data contains degenerate outlines
                # (the original kept the checks commented out).
                if port_nm == 'INT':
                    # 'INT' entries are internal obstructions, not ports.
                    # Bug fix: the original appended the stale `port`
                    # variable from a previous iteration (NameError if the
                    # first port line is an INT) instead of the Blockage.
                    blockage = Blockage( layer, rect)
                    block.blockage_lst.append( blockage)
                else:
                    port = Port( port_nm, layer, rect)
                    block.port_lst.append( port)
                continue
            m = p_terminal.match(line)
            if m:
                net_nm = m.groups()[0]
                layer = m.groups()[1]
                self.terminal_lst.append( Terminal( net_nm, layer))
                continue
            assert False, line  # unrecognized line
import io
def test_n3():
    # End-to-end check: parse a complete 3-block example (with ports,
    # INT blockages and terminals) and run the semantic checks.
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, 0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_negative():
    # Same example as test_n3 but with '-0' coordinates sprinkled in, to
    # exercise the '-?' sign handling in the coordinate regexes.
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (-0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (-0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, -0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_shortened():
    # Minimal single-block example: one outline, one BLOCK section.
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 1
NumTerminals : 0
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_net():
    # Parse a '.nets' file with both block pins and terminal pseudo-pins,
    # then run the count cross-checks in Netlist.semantic().
    s = """#UMN nets 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumNets : 8
NumPins : 11
net2 : 2
L1_MM3_MM2 D1
terminal net2
net8 : 2
L1_MM4_MM5 D1
L1_MM1_MM0 D1
net10 : 2
L1_MM3_MM2 D2
L1_MM1_MM0 S
net11 : 2
L1_MM4_MM5 D2
L1_MM1_MM0 D2
net14 : 2
terminal net14
L1_MM1_MM0 G2
net17 : 2
terminal net17
L1_MM1_MM0 G1
gnd! : 2
L1_MM3_MM2 S
terminal gnd!
vdd! : 2
L1_MM4_MM5 S
terminal vdd!
"""
    with io.StringIO(s) as fp:
        nl = Netlist()
        nl.parse( fp)
        nl.semantic()
def test_consts():
    # Parse SymmNet and CritNet constraints and validate them.
    s = """SymmNet ( {net8,L1_MM1_MM0/D1,L1_MM4_MM5/D1} , {net11,L1_MM1_MM0/D2,L1_MM4_MM5/D2} )
SymmNet ( {net17,L1_MM1_MM0/G1,net17} , {net14,L1_MM1_MM0/G2,net14} )
CritNet ( net8 , min )
CritNet ( net10 , mid )
"""
    with io.StringIO(s) as fp:
        cs = Constraints()
        cs.parse( fp)
        cs.semantic()
        print( cs)
def test_pl():
    # Parse a placement file: DIE line, block placements (4 fields) and
    # terminal positions (3 fields).
    s = """# TAMU blocks 1.0
DIE {0, 0} {648, 2620}
L1_MM4_MM5 0 0 N
L1_MM1_MM0 648 889 FN
L1_MM3_MM2 0 2620 FS
net2 648 1932
net14 0 1649
net17 648 1652
gnd! 0 1851
vdd! 0 768
"""
    with io.StringIO(s) as fp:
        p = Placement()
        p.parse( fp)
        p.semantic()
        print( p)
class Design:
    """Aggregates the four input files of one design and emits Viewer JSON."""
    def __init__(self):
        pass
    def parse( self, ibasename, obasename):
        """Read <ibasename>.{blocks,nets,const} and <obasename>.pl,
        running each parser's semantic checks as it goes."""
        with open( ibasename + '.blocks', 'rt') as fp:
            self.blocks = Blocks()
            self.blocks.parse( fp)
            self.blocks.semantic()
        with open( ibasename + '.nets', 'rt') as fp:
            self.nl = Netlist()
            self.nl.parse( fp)
            self.nl.semantic()
        with open( ibasename + '.const', 'rt') as fp:
            self.cs = Constraints()
            self.cs.parse( fp)
            self.cs.semantic()
        with open( obasename + '.pl', 'rt') as fp:
            self.p = Placement()
            self.p.parse( fp)
            self.p.semantic()
    def write_json_for_viewer( self, fp):
        """Write out bbox for instances as well as terminals
        Need:
        bbox -- [llx,lly,urx,ury]
        globalRoutes -- []
        globalRouteGrid -- []
        terminals -- each in array { 'netName': , 'layer': , 'gid': , 'rect': [llx,lly,urx,ury]}
        """
        d = {}
        d['bbox'] = [self.p.die.ll.x, self.p.die.ll.y, self.p.die.ur.x, self.p.die.ur.y]
        d['cellBoundaries'] = []
        d['globalRoutes'] = []
        d['globalRouteGrid'] = []
        d['terminals'] = []
        def translateLayer( layer):
            # Only metal1 appears in this data set; anything else asserts.
            if layer == 'M1':
                return 'metal1'
            else:
                assert False, layer
        # fake terminal for diearea
        dd = {}
        dd['netName'] = 'top'
        dd['layer'] = 'diearea'
        dd['gid'] = -1
        dd['rect'] = d['bbox']
        d['terminals'].append( dd)
        for (nm,block) in self.blocks.block_lst.items():
            assert nm == block.nm
            plc = self.p.block_placement[block.nm]
            o = plc[1]     # placement origin Point
            flip = plc[2]  # orientation flag: N / FN / FS / S
            sx,sy = 1,1
            if flip == 'FN': # apparently means mirror across y axis; origin at top left
                sx = -1
            elif flip == 'FS': # apparently means mirror across x axis; origin at bot right
                sy = -1
            elif flip == 'S': # apparently means mirror across both x and y axes; origin at top right
                sx,sy = -1,-1
            elif flip == 'N': # no flip
                pass
            else:
                assert False, flip
            def hit( x, y):
                # Apply the mirror then translate by the placement origin.
                return x*sx+o.x, y*sy+o.y
            def transformRect( r):
                llx,lly = hit( r.ll.x, r.ll.y)
                urx,ury = hit( r.ur.x, r.ur.y)
                # Make sure the rectangles are not empty
                if llx > urx: urx,llx = llx,urx
                if lly > ury: ury,lly = lly,ury
                return [llx,lly,urx,ury]
            r = block.rect
            # fake terminal for cell area
            dd = {}
            dd['netName'] = block.nm
            dd['layer'] = 'cellarea'
            dd['gid'] = -1
            dd['rect'] = transformRect( r)
            d['terminals'].append( dd)
            for port in block.port_lst:
                r = port.rect
                formal = port.port_nm
                # Map the block-local pin name to the net it connects to.
                actual = self.nl.pins[ (block.nm, formal)]
                dd = {}
                dd['netName'] = actual
                dd['layer'] = translateLayer( port.layer)
                dd['gid'] = -1
                dd['rect'] = transformRect( r)
                d['terminals'].append( dd)
        json.dump(d, fp, sort_keys=True, indent=4)
        fp.write( '\n')
    def print( self):
        # Debug dump of every parsed section.
        print( self.blocks)
        print( self.nl)
        print( self.cs)
        print( self.p)
import argparse
if __name__ == "__main__":
    # Command-line driver: parse one placed design and dump Viewer JSON.
    parser = argparse.ArgumentParser( description="Reads results of Placement/Placer and generates a JSON file for the Viewer")
    parser.add_argument( "-n", "--block_name", type=str, default="n3")
    parser.add_argument( "-id", "--input_dir", type=str, default="../Placement/testcase")
    parser.add_argument( "-od", "--output_dir", type=str, default="../Placement/testcase")
    parser.add_argument( "-j", "--json_output_file", type=str, default="../Viewer/INPUT/mydesign_dr_globalrouting.json")
    args = parser.parse_args()
    block_name = args.block_name
    design = Design()
    # Inputs (.blocks/.nets/.const) live in input_dir; the placement
    # result (.pl) in output_dir.
    design.parse( args.input_dir + '/' + block_name, args.output_dir + '/' + block_name)
    with open( args.json_output_file, 'wt') as fp:
        design.write_json_for_viewer( fp)
import re
from collections import namedtuple
from collections import OrderedDict
import json
# Geometric / netlist primitives shared by all parsers below.
Point = namedtuple('Point', ['x', 'y'])
# Axis-aligned rectangle given by lower-left and upper-right corners.
Rect = namedtuple('Rect', ['ll', 'ur'])
# A named connectable port of a block on a routing layer.
Port = namedtuple('Port', ['port_nm', 'layer', 'rect'])
# An internal routing obstruction ('INT' lines) on a routing layer.
Blockage = namedtuple('Blockage', ['layer', 'rect'])
# A top-level terminal attached to a net on a routing layer.
Terminal = namedtuple('Terminal', ['net_nm', 'layer'])
class Placement:
    """Parser for a placement result ('.pl') file.

    Holds the die bounding box, the placement (origin Point plus
    orientation flag) of every block, and per-net position entries.
    """
    def __init__( self):
        self.die = None                       # Rect: die area, set by parse()
        self.block_placement = OrderedDict()  # block name -> (name, origin Point, orientation)
        self.net_wire_lengths = []            # (net name, Point) entries
    def __repr__( self):
        return 'Placement(' + str(self.die) + "," + str(self.block_placement) + "," + str(self.net_wire_lengths) + ')'
    def semantic( self):
        # A die rectangle must have been parsed and must be non-inverted.
        assert self.die is not None
        assert self.die.ll.x <= self.die.ur.x
        assert self.die.ll.y <= self.die.ur.y
    def parse( self, fp):
        """Parse the file line by line; any unmatched line triggers an assert."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        # 'DIE {llx, lly} {urx, ury}'
        p_die = re.compile( r'^DIE\s*'
                            r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
                            r'\s*'
                            r'{\s*(-?\d+)\s*,\s*(-?\d+)\s*}'
                            r'\s*$')
        # '<net> <x> <y>' position entry (three fields)
        p_triple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s*$')
        # '<block> <x> <y> <orient>' placement entry (four fields)
        p_quadruple = re.compile( r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s*$')
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_die.match(line)
            if m:
                self.die = Rect( Point( int(m.groups()[0]), int(m.groups()[1])), Point( int(m.groups()[2]), int(m.groups()[3])))
                continue
            # p_triple is anchored at end-of-line, so a four-field
            # placement line cannot be consumed here by mistake.
            m = p_triple.match(line)
            if m:
                self.net_wire_lengths.append( ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2]))))
                continue
            m = p_quadruple.match(line)
            if m:
                self.block_placement[m.groups()[0]] = ( m.groups()[0], Point( int(m.groups()[1]), int(m.groups()[2])), m.groups()[3])
                continue
            assert False, line  # unrecognized line
class Constraint:
    """Base class for all placement constraints parsed from a '.const' file."""

    def __init__( self):
        pass
class SymmNet(Constraint):
    """Symmetric-net constraint over two pin lists.

    `lst0` and `lst1` are attached externally by Constraints.parse();
    each holds a net name followed by pin references.
    """

    def __init__( self):
        pass
    def __repr__( self):
        return "SymmNet({},{})".format(self.lst0, self.lst1)
    def semantic( self):
        # Each side must carry a net name plus at least one pin.
        assert len(self.lst0) >= 2
        assert len(self.lst1) >= 2
class CritNet(Constraint):
    """Criticality constraint for one net; `level` is 'mid' or 'min'."""

    def __init__( self):
        pass
    def __repr__( self):
        return "CritNet({},{})".format(self.net_nm, self.level)
    def semantic( self):
        # Only two criticality levels are recognised.
        assert self.level in ('mid', 'min')
class ShieldNet(Constraint):
    """Shield-net constraint; its payload is not parsed yet."""

    def __init__( self):
        pass
    def __repr__( self):
        return "ShieldNet()"
    def semantic( self):
        # Nothing to validate yet.
        pass
class MatchBlock(Constraint):
    """Block-matching constraint; its payload is not parsed yet."""

    def __init__( self):
        pass
    def __repr__( self):
        return "MatchBlock()"
    def semantic( self):
        # Nothing to validate yet.
        pass
class Constraints:
    """Parser for a constraint ('.const') file.

    Produces a list of Constraint subclasses (SymmNet, CritNet,
    ShieldNet, MatchBlock) in file order.
    """
    def __init__( self):
        self.constraints = []
    def __repr__( self):
        return ','.join( [ str(x) for x in self.constraints])
    def semantic( self):
        # Each constraint validates itself.
        for c in self.constraints:
            c.semantic()
    def parse( self, fp):
        """Parse lines of the form 'Tag ( payload )'; unmatched lines assert."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_constraint = re.compile( r'^(SymmNet|CritNet|ShieldNet|MatchBlock)'
                                   r'\s*\('
                                   r'(.*)'
                                   r'\)\s*$')
        # '{a,b,...} , {c,d,...}' -- SymmNet payload
        p_bracecommasep = re.compile( r'^{(.+)}\s*,\s*{(.+)}$')
        # 'net , level' -- CritNet payload
        p_commasep = re.compile( r'^(\S+)\s*,\s*(\S+)$')
        # 'block/pin' reference inside a pin list
        p_pin = re.compile( r'^(.+)/(.+)$')
        def toLst( s):
            # 'net,blk/pin,term' -> [net, (blk, pin), ('terminal', term)]
            lst = s.split(',')
            assert len(lst) >= 2, lst
            result = lst[0:1]
            for e in lst[1:]:
                m = p_pin.match( e)
                if m:
                    block_nm = m.groups()[0]
                    formal_nm = m.groups()[1]
                    result.append( ( block_nm, formal_nm))
                    continue
                # No '/' present: treat as a reference to a top-level terminal.
                result.append( ( 'terminal', e))
            return result
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_constraint.match(line)
            if m:
                tag = m.groups()[0]
                rest = m.groups()[1].strip( ' ')
                if tag == 'SymmNet':
                    c = SymmNet()
                    mm = p_bracecommasep.match( rest)
                    assert mm, rest
                    c.lst0 = toLst( mm.groups()[0])
                    c.lst1 = toLst( mm.groups()[1])
                elif tag == 'CritNet':
                    c = CritNet()
                    mm = p_commasep.match( rest)
                    assert mm, rest
                    c.net_nm = mm.groups()[0]
                    c.level = mm.groups()[1]
                elif tag == 'ShieldNet':
                    # Payload currently ignored.
                    c = ShieldNet()
                    pass
                elif tag == 'MatchBlock':
                    # Payload currently ignored.
                    c = MatchBlock()
                    pass
                else:
                    assert False
                self.constraints.append( c)
                continue
            assert False, line  # unrecognized line
class Net:
    """A single net: name, declared pin count and (block, pin) list."""

    def __init__( self):
        self.net_nm = None     # set by Netlist.parse from the 'name : N' header
        self.pin_count = None  # declared pin count from the same header
        self.pin_lst = []      # (block name | 'terminal', pin name) pairs
    def __repr__( self):
        fields = (self.net_nm, str(self.pin_count), str(self.pin_lst))
        return "Net(" + ",".join(fields) + ")"
class Netlist:
    """Parser for a '.nets' file: summary params plus net -> pin lists."""
    def __init__( self):
        self.params = OrderedDict()  # 'NumNets' / 'NumPins' -> int
        self.nets = OrderedDict()    # net name -> Net
        self.pins = {}               # (block, pin) -> net name; filled by semantic()
    def __repr__( self):
        return "Netlist(" + str(self.params) + "," + str(self.nets) + ")"
    def semantic( self):
        """Cross-check declared counts and build the pin -> net index."""
        assert self.params['NumNets'] == len(self.nets)
        # 'terminal' pseudo-pins do not count towards NumPins.
        nPins = sum( [ len([ x for x in v.pin_lst if x[0] != 'terminal']) for (k,v) in self.nets.items()])
        assert self.params['NumPins'] == nPins, (self.params['NumPins'], nPins)
        for (k,v) in self.nets.items():
            assert v.pin_count is not None, k
            assert v.pin_count == len(v.pin_lst), (k, v.pin_count, len(v.pin_lst))
            for pin in v.pin_lst:
                # Each pin may appear on exactly one net.
                # NOTE(review): this mutates self.pins, so calling
                # semantic() a second time would trip the duplicate check.
                assert pin not in self.pins, (k, pin,'not in',self.pins)
                self.pins[pin] = k
    def parse( self, fp):
        """Parse 'Key : N' params, 'net : N' headers and 'block pin' lines."""
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_assignments = re.compile( r'^(NumNets|NumPins)\s*:\s*(\d+)\s*$')
        p_net_and_count = re.compile( r'^(\S+)\s*:\s*(\d+)\s*$')
        p_pairs = re.compile( r'^(\S+)\s+(\S+)\s*$')
        net = None  # Net currently collecting pin lines
        for line in fp:
            line = line.rstrip( '\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            # p_assignments must be tried before p_net_and_count,
            # which would also match a 'NumNets : 8' line.
            m = p_assignments.match(line)
            if m:
                self.params[m.groups()[0]] = int(m.groups()[1])
                continue
            m = p_net_and_count.match(line)
            if m:
                net = Net()
                net.net_nm = m.groups()[0]
                net.pin_count = int(m.groups()[1])
                self.nets[net.net_nm] = net
                continue
            m = p_pairs.match(line)
            if m:
                net.pin_lst.append( (m.groups()[0], m.groups()[1]))
                continue
            assert False, line  # unrecognized line
class Block:
    """One hard rectilinear block: bounding rect, ports and routing blockages."""

    def __init__( self, nm):
        self.nm = nm            # block name
        self.rect = None        # bounding Rect, set while parsing the outline
        self.port_count = None  # declared port count (from the BLOCK line)
        self.port_lst = []      # Port objects
        self.blockage_lst = []  # Blockage objects ('INT' rows)

    def __repr__( self):
        fields = (self.nm, str(self.port_count), str(self.port_lst))
        return 'Block(' + ','.join(fields) + ')'

    def semantic( self):
        """Check that the declared port count matches the parsed ports."""
        assert self.port_count is not None
        assert self.port_count == len(self.port_lst), (self.port_count, len(self.port_lst))
class Blocks:
    """Parsed contents of a .blocks file: block outlines, ports, blockages
    and terminal declarations."""

    def __init__( self):
        self.params = {}                # header counters (NumTerminals, ...)
        self.block_lst = OrderedDict()  # block name -> Block, in file order
        self.terminal_lst = []          # Terminal objects

    def __repr__( self):
        return 'Blocks(' + str(self.params) + "," + str(self.block_lst) + "," + str(self.terminal_lst) + ')'

    def semantic( self):
        """Cross-check header counts, then run each block's own checks."""
        assert self.params['NumSoftRectangularBlocks'] == 0
        assert self.params['NumHardRectilinearBlocks'] == len(self.block_lst)
        assert self.params['NumTerminals'] == len(self.terminal_lst)
        for (k,v) in self.block_lst.items():
            v.semantic()

    def parse( self, fp):
        """Parse a .blocks stream.

        Recognized lines: '#' comments, blanks, 'Param : N' headers,
        '<name> hardrectilinear 4 (x,y)...' outlines, 'BLOCK <name> : N'
        section starts, '<port> <layer> (x,y)x4' port/blockage rows, and
        '<net> <layer> terminal' declarations.  Raises AssertionError on
        anything unrecognized.
        """
        p_comment = re.compile( r'^#.*$')
        p_blank = re.compile( r'^\s*$')
        p_assignments = re.compile( r'^(NumSoftRectangularBlocks|NumHardRectilinearBlocks|NumTerminals)\s*:\s*(\d+)\s*$')
        p_outline = re.compile( r'^(\S+)\s+(hardrectilinear)\s+'
                                r'(\d+)\s+'
                                r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*)*)'
                                r'$')
        p_block = re.compile( r'^BLOCK\s+(\S+)\s*:\s*(\d+)\s*$')
        p_port = re.compile( r'^(\S+)\s+(\S+)\s+'
                             r'((\(\s*(-?\d+)\s*\,\s*(-?\d+)\s*\)\s*){4})'
                             r'$')
        p_terminal = re.compile( r'^(\S+)\s+(\S+)\s+terminal\s*$')
        p_pair = re.compile( r'^\s*\(\s*(-?\d+)\s*,\s*(-?\d+)\s*\)(.*)$')
        def parse_pair_list( s):
            # Consume a run of '(x, y)' pairs into a list of Points.
            result = []
            rest = s
            while True:
                m = p_blank.match( rest)
                if m: return result
                m = p_pair.match( rest)
                assert m, rest
                x = int(m.groups()[0])
                y = int(m.groups()[1])
                rest = m.groups()[2]
                result.append( Point(x=x,y=y))
        block = None
        for line in fp:
            line = line.rstrip('\n')
            m = p_comment.match(line)
            if m: continue
            m = p_blank.match(line)
            if m: continue
            m = p_assignments.match(line)
            if m:
                self.params[m.groups()[0]] = int(m.groups()[1])
                continue
            m = p_outline.match(line)
            if m:
                block_nm = m.groups()[0]
                block = Block( block_nm)
                type_nm = m.groups()[1]  # always 'hardrectilinear' per the regex
                point_count = int(m.groups()[2])
                point_lst = parse_pair_list( m.groups()[3])
                assert point_count == len(point_lst)
                assert point_count == 4
                # Corners come as ll, ul, ur, lr: [0] and [2] span the bbox.
                rect = Rect( ll=point_lst[0], ur=point_lst[2])
                for p in point_lst:
                    assert rect.ll.x <= p.x
                    assert rect.ll.y <= p.y
                    assert rect.ur.x >= p.x
                    assert rect.ur.y >= p.y
                block.rect = rect
                self.block_lst[block_nm] = block
                block = None
                continue
            m = p_block.match(line)
            if m:
                block_nm = m.groups()[0]
                assert block_nm in self.block_lst
                block = self.block_lst[block_nm]
                block.port_count = int(m.groups()[1])
                continue
            m = p_port.match(line)
            if m:
                port_nm = m.groups()[0]
                layer = m.groups()[1]
                point_lst = parse_pair_list( m.groups()[2])
                assert len(point_lst) == 4
                rect = Rect( ll=point_lst[0], ur=point_lst[2])
                # Corner/bbox sanity checks are deliberately disabled here:
                # some inputs list port corners inconsistently (see test data).
                # assert rect.ll.x <= p.x, (p, 'should be inside', rect)
                # assert rect.ll.y <= p.y, (p, 'should be inside', rect)
                # assert rect.ur.x >= p.x, (p, 'should be inside', rect)
                # assert rect.ur.y >= p.y, (p, 'should be inside', rect)
                if port_nm == 'INT':
                    # 'INT' rows are internal routing blockages, not ports.
                    blockage = Blockage( layer, rect)
                    # BUG FIX: previously appended the stale `port` from an
                    # earlier iteration instead of the blockage just built.
                    block.blockage_lst.append( blockage)
                else:
                    port = Port( port_nm, layer, rect)
                    block.port_lst.append( port)
                continue
            m = p_terminal.match(line)
            if m:
                net_nm = m.groups()[0]
                layer = m.groups()[1]
                self.terminal_lst.append( Terminal( net_nm, layer))
                continue
            assert False, line
import io
def test_n3():
    """Parse and semantically check the full 3-block 'n3' .blocks example."""
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, 0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_negative():
    """Same as test_n3 but with '-0' coordinates, exercising the signed-int
    part of the coordinate regexes."""
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 3
NumTerminals : 5
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
L1_MM1_MM0 hardrectilinear 4 (0, 0) (-0, 842) (648, 842) (648, 0)
L1_MM3_MM2 hardrectilinear 4 (-0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
BLOCK L1_MM1_MM0 : 5
G1 M1 (108, 684) (108, 842) (148, 842) (148, 684)
G2 M1 (504, 684) (504, 836) (544, 836) (544, 684)
D1 M1 (88, 4) (88, 146) (128, 146) (128, 4)
S M1 (236, 796) (236, 836) (412, 836) (412, 796)
D2 M1 (520, -0) (520, 146) (560, 146) (560, 0)
INT M1 (196, 612) (196, 836) (236, 836) (196, 836)
INT M1 (412, 612) (412, 836) (452, 836) (412, 836)
BLOCK L1_MM3_MM2 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (236, 749) (236, 789) (412, 789) (412, 749)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (236, 619)
INT M1 (412, 619) (412, 789) (452, 789) (452, 619)
INT M1 (89, 39) (89, 148) (125, 148) (125, 39)
INT M1 (89, 39) (89, 75) (471, 75) (471, 39)
gnd! M1 terminal
vdd! M1 terminal
net2 M1 terminal
net14 M1 terminal
net17 M1 terminal
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_shortened():
    """Parse and semantically check a minimal one-block .blocks example."""
    s = """#UMN blocks 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumSoftRectangularBlocks : 0
NumHardRectilinearBlocks : 1
NumTerminals : 0
L1_MM4_MM5 hardrectilinear 4 (0, 0) (0, 789) (648, 789) (648, 0)
BLOCK L1_MM4_MM5 : 3
D1 M1 (520, 615) (520, 761) (560, 761) (560, 615)
S M1 (196, 748) (196, 788) (236, 788) (236, 748)
D2 M1 (88, 615) (88, 757) (128, 757) (128, 615)
INT M1 (196, 619) (196, 789) (236, 789) (196, 789)
INT M1 (412, 619) (412, 789) (452, 789) (412, 789)
"""
    with io.StringIO(s) as fp:
        blocks = Blocks()
        blocks.parse( fp)
        blocks.semantic()
def test_net():
    """Parse and semantically check an 8-net .nets example."""
    s = """#UMN nets 1.0
# Created : July 09 19:15:43
# User : <EMAIL>
# Platform : Linux
NumNets : 8
NumPins : 11
net2 : 2
L1_MM3_MM2 D1
terminal net2
net8 : 2
L1_MM4_MM5 D1
L1_MM1_MM0 D1
net10 : 2
L1_MM3_MM2 D2
L1_MM1_MM0 S
net11 : 2
L1_MM4_MM5 D2
L1_MM1_MM0 D2
net14 : 2
terminal net14
L1_MM1_MM0 G2
net17 : 2
terminal net17
L1_MM1_MM0 G1
gnd! : 2
L1_MM3_MM2 S
terminal gnd!
vdd! : 2
L1_MM4_MM5 S
terminal vdd!
"""
    with io.StringIO(s) as fp:
        nl = Netlist()
        nl.parse( fp)
        nl.semantic()
def test_consts():
    """Parse and semantically check SymmNet/CritNet constraint lines."""
    s = """SymmNet ( {net8,L1_MM1_MM0/D1,L1_MM4_MM5/D1} , {net11,L1_MM1_MM0/D2,L1_MM4_MM5/D2} )
SymmNet ( {net17,L1_MM1_MM0/G1,net17} , {net14,L1_MM1_MM0/G2,net14} )
CritNet ( net8 , min )
CritNet ( net10 , mid )
"""
    with io.StringIO(s) as fp:
        cs = Constraints()
        cs.parse( fp)
        cs.semantic()
        print( cs)
def test_pl():
    """Parse and semantically check a .pl placement example."""
    s = """# TAMU blocks 1.0
DIE {0, 0} {648, 2620}
L1_MM4_MM5 0 0 N
L1_MM1_MM0 648 889 FN
L1_MM3_MM2 0 2620 FS
net2 648 1932
net14 0 1649
net17 648 1652
gnd! 0 1851
vdd! 0 768
"""
    with io.StringIO(s) as fp:
        p = Placement()
        p.parse( fp)
        p.semantic()
        print( p)
class Design:
    """Aggregates the parsed .blocks/.nets/.const inputs and the .pl placement."""

    def __init__(self):
        pass

    def parse( self, ibasename, obasename):
        """Parse <ibasename>.{blocks,nets,const} and <obasename>.pl,
        running each parser's semantic checks as it goes."""
        with open( ibasename + '.blocks', 'rt') as fp:
            self.blocks = Blocks()
            self.blocks.parse( fp)
            self.blocks.semantic()
        with open( ibasename + '.nets', 'rt') as fp:
            self.nl = Netlist()
            self.nl.parse( fp)
            self.nl.semantic()
        with open( ibasename + '.const', 'rt') as fp:
            self.cs = Constraints()
            self.cs.parse( fp)
            self.cs.semantic()
        with open( obasename + '.pl', 'rt') as fp:
            self.p = Placement()
            self.p.parse( fp)
            self.p.semantic()

    def write_json_for_viewer( self, fp):
        """Write out bbox for instances as well as terminals
        Need:
        bbox -- [llx,lly,urx,ury]
        globalRoutes -- []
        globalRouteGrid -- []
        terminals -- each in array { 'netName': , 'layer': , 'gid': , 'rect': [llx,lly,urx,ury]}
        """
        d = {}
        d['bbox'] = [self.p.die.ll.x, self.p.die.ll.y, self.p.die.ur.x, self.p.die.ur.y]
        d['cellBoundaries'] = []
        d['globalRoutes'] = []
        d['globalRouteGrid'] = []
        d['terminals'] = []
        def translateLayer( layer):
            # Viewer layer names differ from the .blocks layer names.
            if layer == 'M1':
                return 'metal1'
            else:
                assert False, layer
        # fake terminal for diearea
        dd = {}
        dd['netName'] = 'top'
        dd['layer'] = 'diearea'
        dd['gid'] = -1
        dd['rect'] = d['bbox']
        d['terminals'].append( dd)
        for (nm,block) in self.blocks.block_lst.items():
            assert nm == block.nm
            plc = self.p.block_placement[block.nm]
            o = plc[1]     # placement origin
            flip = plc[2]  # orientation code: N / FN / FS / S
            sx,sy = 1,1
            if flip == 'FN': # apparently means mirror across y axis; origin at top left
                sx = -1
            elif flip == 'FS': # apparently means mirror across x axis; origin at bot right
                sy = -1
            elif flip == 'S': # apparently means mirror across both x and y axes; origin at top right
                sx,sy = -1,-1
            elif flip == 'N': # no flip
                pass
            else:
                assert False, flip
            def hit( x, y):
                # Apply the mirror, then translate by the placement origin.
                return x*sx+o.x, y*sy+o.y
            def transformRect( r):
                llx,lly = hit( r.ll.x, r.ll.y)
                urx,ury = hit( r.ur.x, r.ur.y)
                # Make sure the rectangles are not empty
                if llx > urx: urx,llx = llx,urx
                if lly > ury: ury,lly = lly,ury
                return [llx,lly,urx,ury]
            r = block.rect
            # fake terminal for cell area
            dd = {}
            dd['netName'] = block.nm
            dd['layer'] = 'cellarea'
            dd['gid'] = -1
            dd['rect'] = transformRect( r)
            d['terminals'].append( dd)
            for port in block.port_lst:
                r = port.rect
                formal = port.port_nm
                # Resolve the formal pin to its net via the netlist's pin map.
                actual = self.nl.pins[ (block.nm, formal)]
                dd = {}
                dd['netName'] = actual
                dd['layer'] = translateLayer( port.layer)
                dd['gid'] = -1
                dd['rect'] = transformRect( r)
                d['terminals'].append( dd)
        json.dump(d, fp, sort_keys=True, indent=4)
        fp.write( '\n')

    def print( self):
        # NOTE: shadows the builtin name at class scope; the calls in the
        # body still resolve to the builtin print.
        print( self.blocks)
        print( self.nl)
        print( self.cs)
        print( self.p)
import argparse
if __name__ == "__main__":
    # Read Placement/Placer results and emit a JSON file for the Viewer.
    parser = argparse.ArgumentParser( description="Reads results of Placement/Placer and generates a JSON file for the Viewer")
    parser.add_argument( "-n", "--block_name", type=str, default="n3")
    parser.add_argument( "-id", "--input_dir", type=str, default="../Placement/testcase")
    parser.add_argument( "-od", "--output_dir", type=str, default="../Placement/testcase")
    parser.add_argument( "-j", "--json_output_file", type=str, default="../Viewer/INPUT/mydesign_dr_globalrouting.json")
    args = parser.parse_args()
    block_name = args.block_name
    design = Design()
    # Inputs: <input_dir>/<name>.{blocks,nets,const}; placement: <output_dir>/<name>.pl
    design.parse( args.input_dir + '/' + block_name, args.output_dir + '/' + block_name)
    with open( args.json_output_file, 'wt') as fp:
        design.write_json_for_viewer( fp)
from setuptools import find_packages
import sys

requires = ['gevent', 'flask', 'ujson', 'redis']

# Config files go to /etc for a system-prefix install, otherwise to
# <prefix>/etc (e.g. inside a virtualenv).
if sys.prefix == '/usr':
    etc_prefix = '/etc'
else:
    etc_prefix = sys.prefix + '/etc'

author = "Plivo Inc"
author_email = "<EMAIL>"
maintainer = "Plivo Inc"
maintainer_email = "<EMAIL>"
license = "MPL 1.1"

setup_args = {
    'name':'plivo',
    'version':'0.1.1',
    'description':'Plivo Framework - Rapid Telephony Application Prototyping Framework',
    'url':'http://github.com/plivo/plivoframework',
    'author':author,
    'author_email':author_email,
    'maintainer':maintainer,
    'maintainer_email':maintainer_email,
    'platforms':['linux'],
    'long_description':'Framework to prototype telephony applications rapidly in any language',
    'package_dir':{'': 'src'},
    'packages':find_packages('src'),
    'include_package_data':True,
    'scripts':['src/bin/plivo-rest',
               'src/bin/plivo-outbound',
               'src/bin/plivo-cache',
               'src/bin/plivo-postinstall',
               'src/bin/wavdump.py',
               'src/bin/wavstream.sh',
               'src/bin/cacheserver',
               'src/bin/plivo'],
    'data_files':[(etc_prefix+'/plivo/', ['src/config/default.conf', 'src/config/cache.conf',
                                          'src/initscripts/centos/plivo', 'src/initscripts/centos/plivocache']),
                  ],
    'keywords':"telecom voip telephony freeswitch ivr rest",
    'license':license,
    'zip_safe':False,
    # Duplicate "Programming Language :: Python" entry removed.
    'classifiers':[
        "Programming Language :: Python",
        "Operating System :: POSIX",
        "Topic :: Internet",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Communications",
        "Topic :: Multimedia",
        "Environment :: Console",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Intended Audience :: Telecommunications Industry",
        "License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)",
        # BUG FIX: "1 - Beta" is not a valid trove classifier; beta status
        # is "Development Status :: 4 - Beta".
        "Development Status :: 4 - Beta"]
    }

# NOTE(review): this fallback is dead code -- find_packages is imported from
# setuptools unconditionally at the top, so a missing setuptools would have
# raised ImportError there already.  Kept for historical parity.
try:
    from setuptools import setup
    setup_args['install_requires'] = requires
except ImportError:
    from distutils.core import setup
    setup_args['requires'] = requires

# setup
setup(**setup_args)
import sys
requires = ['gevent', 'flask', 'ujson', 'redis']
if sys.prefix == '/usr':
etc_prefix = '/etc'
else:
etc_prefix = sys.prefix + '/etc'
author = "Plivo Inc"
author_email = "<EMAIL>"
maintainer = "Plivo Inc"
maintainer_email = "<EMAIL>"
license = "MPL 1.1"
setup_args = {
'name':'plivo',
'version':'0.1.1',
'description':'Plivo Framework - Rapid Telephony Application Prototyping Framework',
'url':'http://github.com/plivo/plivoframework',
'author':author,
'author_email':author_email,
'maintainer':maintainer,
'maintainer_email':maintainer_email,
'platforms':['linux'],
'long_description':'Framework to prototype telephony applications rapidly in any language',
'package_dir':{'': 'src'},
'packages':find_packages('src'),
'include_package_data':True,
'scripts':['src/bin/plivo-rest',
'src/bin/plivo-outbound',
'src/bin/plivo-cache',
'src/bin/plivo-postinstall',
'src/bin/wavdump.py',
'src/bin/wavstream.sh',
'src/bin/cacheserver',
'src/bin/plivo'],
'data_files':[(etc_prefix+'/plivo/', ['src/config/default.conf', 'src/config/cache.conf',
'src/initscripts/centos/plivo', 'src/initscripts/centos/plivocache']),
],
'keywords':"telecom voip telephony freeswitch ivr rest",
'license':license,
'zip_safe':False,
'classifiers':[
"Programming Language :: Python",
"Operating System :: POSIX",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Communications",
"Topic :: Multimedia",
"Environment :: Console",
"Environment :: Web Environment",
"Programming Language :: Python",
"Intended Audience :: Developers",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: Mozilla Public License 1.1 (MPL 1.1)",
"Development Status :: 1 - Beta"]
}
try:
from setuptools import setup
setup_args['install_requires'] = requires
except ImportError:
from distutils.core import setup
setup_args['requires'] = requires
# setup
setup(**setup_args) | 0.282295 | 0.0713 |
import os
import re
try:
from email.mime.text import MIMEText
except ImportError:
from email.MIMEText import MIMEText
from smtplib import SMTP, SMTPException
from urlparse import urlparse
from zine.utils.validators import is_valid_email, check
_mail_split_re = re.compile(r'^(.*?)(?:\s+<(.+)>)?$')


def split_email(s):
    """Split a mail address:

    >>> split_email("<NAME>")
    ('<NAME>', None)
    >>> split_email("<NAME> <<EMAIL>>")
    ('<NAME>', '<EMAIL>')
    >>> split_email("<EMAIL>")
    (None, '<EMAIL>')
    """
    realname, address = _mail_split_re.search(s).groups()
    # "Name <addr>" form: both parts present.
    if address:
        return realname, address
    # Bare address: no display name.
    if check(is_valid_email, realname):
        return None, realname
    # Display name only.
    return realname, None
def send_email(subject, text, to_addrs, quiet=True):
    """Send a mail using the `EMail` class.  This will log the email
    instead if the application configuration wants to log email.
    """
    mail = EMail(subject, text, to_addrs)
    if mail.app.cfg['log_email_only']:
        return mail.log()
    return mail.send_quiet() if quiet else mail.send()
class EMail(object):
    """Represents one E-Mail message that can be sent."""

    def __init__(self, subject=None, text='', to_addrs=None):
        self.app = app = get_application()
        # Collapse the subject onto one line (defends against header injection).
        self.subject = u' '.join(subject.splitlines())
        self.text = text
        from_addr = app.cfg['blog_email']
        if not from_addr:
            # Fall back to noreply@<blog host>, with any :port stripped.
            from_addr = 'noreply@' + urlparse(app.cfg['blog_url']) \
                 [1].split(':')[0]
        self.from_addr = u'%s <%s>' % (
            app.cfg['blog_title'],
            from_addr
        )
        self.to_addrs = []
        if isinstance(to_addrs, basestring):
            self.add_addr(to_addrs)
        else:
            for addr in to_addrs:
                self.add_addr(addr)

    def add_addr(self, addr):
        """Add an mail address to the list of recipients"""
        lines = addr.splitlines()
        # Reject multi-line addresses (header injection).
        if len(lines) != 1:
            raise ValueError('invalid value for email address')
        self.to_addrs.append(lines[0])

    def as_message(self):
        """Return the email as MIMEText object."""
        if not self.subject or not self.text or not self.to_addrs:
            raise RuntimeError("Not all mailing parameters filled in")
        msg = MIMEText(self.text.encode('utf-8'))

        #: MIMEText sucks, it does not override the values on
        #: setitem, it appends them.  We get rid of some that
        #: are predefined under some versions of python
        del msg['Content-Transfer-Encoding']
        del msg['Content-Type']

        msg['From'] = self.from_addr.encode('utf-8')
        msg['To'] = ', '.join(x.encode('utf-8') for x in self.to_addrs)
        msg['Subject'] = self.subject.encode('utf-8')
        msg['Content-Transfer-Encoding'] = '8bit'
        msg['Content-Type'] = 'text/plain; charset=utf-8'
        return msg

    def format(self, sep='\r\n'):
        """Format the message into a string."""
        return sep.join(self.as_message().as_string().splitlines())

    def log(self):
        """Logs the email"""
        # Appends the formatted message to <instance_folder>/mail.log.
        f = open(os.path.join(self.app.instance_folder, 'mail.log'), 'a')
        try:
            f.write('%s\n%s\n\n' % ('-' * 79, self.format('\n').rstrip()))
        finally:
            f.close()

    def send(self):
        """Send the message.

        Raises RuntimeError (wrapping SMTPException) on connection, TLS,
        login or delivery failures.
        """
        try:
            smtp = SMTP(self.app.cfg['smtp_host'], self.app.cfg['smtp_port'])
        except SMTPException, e:
            raise RuntimeError(str(e))

        if self.app.cfg['smtp_use_tls']:
            #smtp.set_debuglevel(1)
            smtp.ehlo()
            if not smtp.esmtp_features.has_key('starttls'):
                # XXX: untranslated because python exceptions do not support
                # unicode messages.
                raise RuntimeError('TLS enabled but server does not '
                                   'support TLS')
            smtp.starttls()
            # Re-EHLO after STARTTLS, as required by the SMTP protocol.
            smtp.ehlo()

        if self.app.cfg['smtp_user']:
            try:
                smtp.login(self.app.cfg['smtp_user'],
                           self.app.cfg['smtp_password'])
            except SMTPException, e:
                raise RuntimeError(str(e))
        msgtext = self.format()
        try:
            try:
                return smtp.sendmail(self.from_addr, self.to_addrs, msgtext)
            except SMTPException, e:
                raise RuntimeError(str(e))
        finally:
            if self.app.cfg['smtp_use_tls']:
                # avoid false failure detection when the server closes
                # the SMTP connection with TLS enabled
                import socket
                try:
                    smtp.quit()
                except socket.sslerror:
                    pass
            else:
                smtp.quit()

    def send_quiet(self):
        """Send the message, swallowing exceptions."""
        try:
            return self.send()
        except Exception:
            return
from zine.application import get_application | zine/utils/mail.py | import os
import re
try:
from email.mime.text import MIMEText
except ImportError:
from email.MIMEText import MIMEText
from smtplib import SMTP, SMTPException
from urlparse import urlparse
from zine.utils.validators import is_valid_email, check
_mail_split_re = re.compile(r'^(.*?)(?:\s+<(.+)>)?$')
def split_email(s):
"""Split a mail address:
>>> split_email("<NAME>")
('<NAME>', None)
>>> split_email("<NAME> <<EMAIL>>")
('<NAME>', '<EMAIL>')
>>> split_email("<EMAIL>")
(None, '<EMAIL>')
"""
p1, p2 = _mail_split_re.search(s).groups()
if p2:
return p1, p2
elif check(is_valid_email, p1):
return None, p1
return p1, None
def send_email(subject, text, to_addrs, quiet=True):
"""Send a mail using the `EMail` class. This will log the email instead
if the application configuration wants to log email.
"""
e = EMail(subject, text, to_addrs)
if e.app.cfg['log_email_only']:
return e.log()
if quiet:
return e.send_quiet()
return e.send()
class EMail(object):
"""Represents one E-Mail message that can be sent."""
def __init__(self, subject=None, text='', to_addrs=None):
self.app = app = get_application()
self.subject = u' '.join(subject.splitlines())
self.text = text
from_addr = app.cfg['blog_email']
if not from_addr:
from_addr = 'noreply@' + urlparse(app.cfg['blog_url']) \
[1].split(':')[0]
self.from_addr = u'%s <%s>' % (
app.cfg['blog_title'],
from_addr
)
self.to_addrs = []
if isinstance(to_addrs, basestring):
self.add_addr(to_addrs)
else:
for addr in to_addrs:
self.add_addr(addr)
def add_addr(self, addr):
"""Add an mail address to the list of recipients"""
lines = addr.splitlines()
if len(lines) != 1:
raise ValueError('invalid value for email address')
self.to_addrs.append(lines[0])
def as_message(self):
"""Return the email as MIMEText object."""
if not self.subject or not self.text or not self.to_addrs:
raise RuntimeError("Not all mailing parameters filled in")
msg = MIMEText(self.text.encode('utf-8'))
#: MIMEText sucks, it does not override the values on
#: setitem, it appends them. We get rid of some that
#: are predefined under some versions of python
del msg['Content-Transfer-Encoding']
del msg['Content-Type']
msg['From'] = self.from_addr.encode('utf-8')
msg['To'] = ', '.join(x.encode('utf-8') for x in self.to_addrs)
msg['Subject'] = self.subject.encode('utf-8')
msg['Content-Transfer-Encoding'] = '8bit'
msg['Content-Type'] = 'text/plain; charset=utf-8'
return msg
def format(self, sep='\r\n'):
"""Format the message into a string."""
return sep.join(self.as_message().as_string().splitlines())
def log(self):
"""Logs the email"""
f = open(os.path.join(self.app.instance_folder, 'mail.log'), 'a')
try:
f.write('%s\n%s\n\n' % ('-' * 79, self.format('\n').rstrip()))
finally:
f.close()
def send(self):
"""Send the message."""
try:
smtp = SMTP(self.app.cfg['smtp_host'], self.app.cfg['smtp_port'])
except SMTPException, e:
raise RuntimeError(str(e))
if self.app.cfg['smtp_use_tls']:
#smtp.set_debuglevel(1)
smtp.ehlo()
if not smtp.esmtp_features.has_key('starttls'):
# XXX: untranslated because python exceptions do not support
# unicode messages.
raise RuntimeError('TLS enabled but server does not '
'support TLS')
smtp.starttls()
smtp.ehlo()
if self.app.cfg['smtp_user']:
try:
smtp.login(self.app.cfg['smtp_user'],
self.app.cfg['smtp_password'])
except SMTPException, e:
raise RuntimeError(str(e))
msgtext = self.format()
try:
try:
return smtp.sendmail(self.from_addr, self.to_addrs, msgtext)
except SMTPException, e:
raise RuntimeError(str(e))
finally:
if self.app.cfg['smtp_use_tls']:
# avoid false failure detection when the server closes
# the SMTP connection with TLS enabled
import socket
try:
smtp.quit()
except socket.sslerror:
pass
else:
smtp.quit()
def send_quiet(self):
"""Send the message, swallowing exceptions."""
try:
return self.send()
except Exception:
return
from zine.application import get_application | 0.478041 | 0.099077 |
import time
from typing import Tuple, List
import numpy as np
import robot_con.yumi.yumi_robot as yr
import robot_con.yumi.yumi_state as ys
class Yumi_Controller:
def __init__(self, debug: bool = False):
"""
is_add_all: Set True, the function `move_jntspace_path` will send multiple joint angles at once.
Otherwise, it will send single joint angle at once
"""
self.rbtx = yr.YuMiRobot(debug=debug)
self._is_add_all = True
@property
def lft_arm_hnd(self):
return self.rbtx.left
@property
def rgt_arm_hnd(self):
return self.rbtx.right
def get_pose(self, component_name: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Get pose of the robot computed by YUMI Server
:return 1x3 position vector and 3x3 rotation matrix
"""
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm', 'rgt_arm']!")
pose = armx.get_pose()
pos = pose.translation
rot = pose.rotation
return pos, rot
def move_jnts(self, component_name: str, jnt_vals: np.ndarray, speed_n: int = 100):
"""
move one arm joints of the yumi
:param component_name
:param jnt_vals: 1x7 np.array
:param speed_n: speed number. If speed_n = 100, then speed will be set to the corresponding v100
specified in RAPID. Loosely, n is translational speed in milimeters per second
Please refer to page 1186 of
https://library.e.abb.com/public/688894b98123f87bc1257cc50044e809/Technical%20reference%20manual_RAPID_3HAC16581-1_revJ_en.pdf
"""
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm', 'rgt_arm']!")
if speed_n == -1:
armx.set_speed_max()
else:
self.rbtx.set_v(speed_n)
armjnts = np.rad2deg(jnt_vals)
ajstate = ys.YuMiState(armjnts)
armx.movetstate_sgl(ajstate)
def contactL(self, component_name: str, jnt_vals: np.ndarray, desired_torque: float = .5) -> bool:
"""
Use contactL. Move the robot to a target pose. The robot will stop in advance if the torque reach desired torque
:return True if the robot reach target pose else False
"""
if component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['rgt_arm']!")
armjnts = np.rad2deg(jnt_vals)
ajstate = ys.YuMiState(armjnts)
return armx.contactL(ajstate, desired_torque)
def get_jnt_values(self, component_name: str):
"""
get the joint angles of both arms
:return: 1x6 array
"""
if component_name == "all":
lftjnts = self._get_arm_jnts("lft")
rgtjnts = self._get_arm_jnts("rgt")
return np.array(lftjnts + rgtjnts)
elif component_name in ["lft_arm", "lft_hnd"]:
return self._get_arm_jnts("lft")
elif component_name in ["rgt_arm", "rgt_hnd"]:
return self._get_arm_jnts("rgt")
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
def move_jntspace_path(self, component_name: str, path: List[np.ndarray], speed_n: int = 100) -> bool:
"""
:param speed_n: speed number. If speed_n = 100, then speed will be set to the corresponding v100
specified in RAPID. Loosely, n is translational speed in milimeters per second
Please refer to page 1186 of
https://library.e.abb.com/public/688894b98123f87bc1257cc50044e809/Technical%20reference%20manual_RAPID_3HAC16581-1_revJ_en.pdf
"""
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
statelist = []
st = time.time()
for armjnts in path:
armjnts = np.rad2deg(armjnts)
ajstate = ys.YuMiState(armjnts)
statelist.append(ajstate)
et = time.time()
print("time calculating sending information", et - st)
# set the speed of the robot
if speed_n == -1:
armx.set_speed_max()
else:
self.rbtx.set_v(speed_n)
exec_result = armx.movetstate_cont(statelist, is_add_all=self._is_add_all)
return exec_result
def calibrate_gripper(self):
"""
Calibrate the gripper
:param speed : float, optional
Max speed of the gripper in mm/s.
Defaults to 10 mm/s. If None, will use maximum speed in RAPID.
:param force : float, optional
Hold force used by the gripper in N.
Defaults to 10 N. If None, will use maximum force the gripper can provide (20N).
"""
self.rgt_arm_hnd.calibrate_gripper()
self.lft_arm_hnd.calibrate_gripper()
def __set_gripper_force(self, component_name: str, force: float = 10):
"""
TODO: this program has bug. Fix it later.
:param force: Hold force by the gripper in Newton.
"""
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
armx.set_gripper_force(force=force)
def set_gripper_speed(self, component_name: str, speed: int = 10):
"""
:param speed: In mm/s.
"""
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
armx.set_gripper_max_speed(max_speed=speed)
def move_gripper(self, component_name: str, width: float):
"""
Moves the gripper to the given width in meters.
width : float
Target width in meters, range[0 , 0.025]
if you want to fully close or fully open the gripper,
please use the open_gripper or close_gripper!!
Otherwise the program may stuck
"""
assert 0 <= width < yr.YMC.MAX_GRIPPER_WIDTH
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
armx.move_gripper(width=width / 2)
def open_gripper(self, component_name: str):
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
armx.open_gripper()
def close_gripper(self, component_name: str, force: float = 10):
assert 0 <= force <= yr.YMC.MAX_GRIPPER_FORCE
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
armx.close_gripper(force=force)
def get_gripper_width(self, component_name: str):
if component_name in ["lft_arm", "lft_hnd"]:
armx = self.rbtx.left
elif component_name in ["rgt_arm", "rgt_hnd"]:
armx = self.rbtx.right
else:
raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")
return armx.get_gripper_width() * 2
def _get_arm_jnts(self, armname: str):
if armname == "rgt":
return np.deg2rad(self.rbtx.right.get_state().joints)
elif armname == "lft":
return np.deg2rad(self.rbtx.left.get_state().joints)
else:
raise ValueError("Arm name must be right or left!")
def get_hc_img(self, armname: str):
if armname == "rgt":
self.rbtx.right.write_handcamimg_ftp()
elif armname == "lft":
self.rbtx.left.write_handcamimg_ftp()
else:
raise ValueError("Arm name must be right or left!")
def toggle_vac(self, toggletag, armname):
if armname == "rgt":
self.rbtx.right.toggle_vacuum(toggletag)
elif armname == "lft":
self.rbtx.left.toggle_vacuum(toggletag)
def get_pressure(self, armname):
if armname == "rgt":
return self.rbtx.right.get_pressure()
elif armname == "lft":
return self.rbtx.left.get_pressure()
def stop(self):
    """Forward a stop request to the underlying YuMi robot connection."""
    self.rbtx.stop()
if __name__ == "__main__":
    # Manual smoke test against a live robot: set right-gripper speed,
    # open it, read the width, close with 5 N, read the width again.
    ycc = Yumi_Controller(debug=False)
    ycc.set_gripper_speed("rgt_arm", 10)
    ycc.open_gripper("rgt_hnd")
    a = ycc.get_gripper_width("rgt_hnd")
    print(a)
    ycc.close_gripper("rgt_hnd", force=5)
    a = ycc.get_gripper_width("rgt_hnd")
    print(a)
    # ycc.get_pose("rgt_arm")
    # ycc.calibrate_gripper()
    # print(ycc.get_jnt_values("rgt_arm"))
# ycc.set_gripper_speed("rgt_arm", 10) | robot_con/yumi/yumi_con.py | import time
from typing import Tuple, List
import numpy as np
import robot_con.yumi.yumi_robot as yr
import robot_con.yumi.yumi_state as ys
class Yumi_Controller:
    """Thin controller around a YuMiRobot connection.

    The left arm is addressed as "lft_arm"/"lft_hnd" and the right arm as
    "rgt_arm"/"rgt_hnd".  Joint values cross this interface in radians while
    the underlying server works in degrees.
    """

    def __init__(self, debug: bool = False):
        """
        is_add_all: Set True, the function `move_jntspace_path` will send multiple joint angles at once.
        Otherwise, it will send single joint angle at once
        """
        self.rbtx = yr.YuMiRobot(debug=debug)
        self._is_add_all = True

    @property
    def lft_arm_hnd(self):
        return self.rbtx.left

    @property
    def rgt_arm_hnd(self):
        return self.rbtx.right

    def _get_armx(self, component_name: str):
        """Resolve a component name to the matching single-arm interface.

        Factored out of every public method, each of which previously
        repeated the same if/elif chain.
        :raises ValueError: on an unknown component name
        """
        if component_name in ("lft_arm", "lft_hnd"):
            return self.rbtx.left
        if component_name in ("rgt_arm", "rgt_hnd"):
            return self.rbtx.right
        raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")

    def get_pose(self, component_name: str) -> Tuple[np.ndarray, np.ndarray]:
        """
        Get pose of the robot computed by YUMI Server
        :return 1x3 position vector and 3x3 rotation matrix
        """
        pose = self._get_armx(component_name).get_pose()
        return pose.translation, pose.rotation

    def move_jnts(self, component_name: str, jnt_vals: np.ndarray, speed_n: int = 100):
        """
        move one arm joints of the yumi
        :param component_name
        :param jnt_vals: 1x7 np.array, in radians
        :param speed_n: speed number. If speed_n = 100, then speed will be set to the corresponding v100
               specified in RAPID. Loosely, n is translational speed in milimeters per second.
               -1 selects the maximum speed.
               Please refer to page 1186 of
               https://library.e.abb.com/public/688894b98123f87bc1257cc50044e809/Technical%20reference%20manual_RAPID_3HAC16581-1_revJ_en.pdf
        """
        armx = self._get_armx(component_name)
        if speed_n == -1:
            armx.set_speed_max()
        else:
            self.rbtx.set_v(speed_n)
        armx.movetstate_sgl(ys.YuMiState(np.rad2deg(jnt_vals)))

    def contactL(self, component_name: str, jnt_vals: np.ndarray, desired_torque: float = .5) -> bool:
        """
        Use contactL. Move the robot to a target pose. The robot will stop in advance if the torque reach desired torque
        :return True if the robot reach target pose else False
        """
        # contactL is only available on the right arm.
        if component_name not in ("rgt_arm", "rgt_hnd"):
            raise ValueError("Component_name must be in ['rgt_arm']!")
        return self.rbtx.right.contactL(ys.YuMiState(np.rad2deg(jnt_vals)), desired_torque)

    def get_jnt_values(self, component_name: str):
        """
        Get the joint angles in radians.
        :return: array of one arm's joints, or both arms (lft then rgt)
            concatenated when component_name is "all"
        """
        if component_name == "all":
            # BUGFIX: the original did `np.array(lftjnts + rgtjnts)`, which
            # element-wise ADDS the two deg2rad arrays instead of joining them.
            return np.concatenate(
                [self._get_arm_jnts("lft"), self._get_arm_jnts("rgt")])
        if component_name in ("lft_arm", "lft_hnd"):
            return self._get_arm_jnts("lft")
        if component_name in ("rgt_arm", "rgt_hnd"):
            return self._get_arm_jnts("rgt")
        raise ValueError("Component_name must be in ['lft_arm/lft_hnd', 'rgt_arm/rgt_hnd']!")

    def move_jntspace_path(self, component_name: str, path: List[np.ndarray], speed_n: int = 100) -> bool:
        """
        Execute a joint-space path on one arm.
        :param path: list of joint-value arrays in radians
        :param speed_n: speed number (see move_jnts); -1 selects maximum speed
        :return: execution result of movetstate_cont
        """
        armx = self._get_armx(component_name)
        st = time.time()
        statelist = [ys.YuMiState(np.rad2deg(armjnts)) for armjnts in path]
        et = time.time()
        print("time calculating sending information", et - st)
        # set the speed of the robot
        if speed_n == -1:
            armx.set_speed_max()
        else:
            self.rbtx.set_v(speed_n)
        return armx.movetstate_cont(statelist, is_add_all=self._is_add_all)

    def calibrate_gripper(self):
        """Calibrate both grippers (right first, then left)."""
        self.rgt_arm_hnd.calibrate_gripper()
        self.lft_arm_hnd.calibrate_gripper()

    def __set_gripper_force(self, component_name: str, force: float = 10):
        """
        TODO: this program has bug. Fix it later.
        :param force: Hold force by the gripper in Newton.
        """
        self._get_armx(component_name).set_gripper_force(force=force)

    def set_gripper_speed(self, component_name: str, speed: int = 10):
        """
        :param speed: In mm/s.
        """
        self._get_armx(component_name).set_gripper_max_speed(max_speed=speed)

    def move_gripper(self, component_name: str, width: float):
        """
        Moves the gripper to the given width in meters.
        :param width: target width in meters, range [0, 0.025).
            To fully close or fully open the gripper use open_gripper /
            close_gripper instead, otherwise the program may get stuck.
        :raises ValueError: on out-of-range width or unknown component name
        """
        # Explicit raise instead of `assert`, which is stripped under -O.
        if not 0 <= width < yr.YMC.MAX_GRIPPER_WIDTH:
            raise ValueError(
                "width must be within [0, %s), got %s"
                % (yr.YMC.MAX_GRIPPER_WIDTH, width))
        # Half of the jaw width is sent — mirrors get_gripper_width's *2.
        self._get_armx(component_name).move_gripper(width=width / 2)

    def open_gripper(self, component_name: str):
        """Fully open the selected gripper."""
        self._get_armx(component_name).open_gripper()

    def close_gripper(self, component_name: str, force: float = 10):
        """
        Fully close the selected gripper.
        :param force: hold force in Newton, within [0, yr.YMC.MAX_GRIPPER_FORCE]
        :raises ValueError: on out-of-range force or unknown component name
        """
        if not 0 <= force <= yr.YMC.MAX_GRIPPER_FORCE:
            raise ValueError(
                "force must be within [0, %s], got %s"
                % (yr.YMC.MAX_GRIPPER_FORCE, force))
        self._get_armx(component_name).close_gripper(force=force)

    def get_gripper_width(self, component_name: str):
        """Return the current jaw opening (twice the per-side reading)."""
        return self._get_armx(component_name).get_gripper_width() * 2

    def _get_arm_jnts(self, armname: str):
        """Return one arm's joint values in radians ("rgt" or "lft")."""
        if armname == "rgt":
            return np.deg2rad(self.rbtx.right.get_state().joints)
        if armname == "lft":
            return np.deg2rad(self.rbtx.left.get_state().joints)
        raise ValueError("Arm name must be right or left!")

    def get_hc_img(self, armname: str):
        """Trigger one arm's hand camera to write its image via FTP."""
        if armname == "rgt":
            self.rbtx.right.write_handcamimg_ftp()
        elif armname == "lft":
            self.rbtx.left.write_handcamimg_ftp()
        else:
            raise ValueError("Arm name must be right or left!")

    def toggle_vac(self, toggletag, armname):
        """Toggle one arm's vacuum gripper.

        :raises ValueError: on an unknown arm name — previously silently ignored.
        """
        if armname == "rgt":
            self.rbtx.right.toggle_vacuum(toggletag)
        elif armname == "lft":
            self.rbtx.left.toggle_vacuum(toggletag)
        else:
            raise ValueError("Arm name must be right or left!")

    def get_pressure(self, armname):
        """Return one arm's vacuum pressure.

        :raises ValueError: on an unknown arm name — previously returned None.
        """
        if armname == "rgt":
            return self.rbtx.right.get_pressure()
        if armname == "lft":
            return self.rbtx.left.get_pressure()
        raise ValueError("Arm name must be right or left!")

    def stop(self):
        """Forward a stop request to the robot connection."""
        self.rbtx.stop()
if __name__ == "__main__":
    # Manual smoke test against a live robot: set right-gripper speed,
    # open it, read the width, close with 5 N, read the width again.
    ycc = Yumi_Controller(debug=False)
    ycc.set_gripper_speed("rgt_arm", 10)
    ycc.open_gripper("rgt_hnd")
    a = ycc.get_gripper_width("rgt_hnd")
    print(a)
    ycc.close_gripper("rgt_hnd", force=5)
    a = ycc.get_gripper_width("rgt_hnd")
    print(a)
    # ycc.get_pose("rgt_arm")
    # ycc.calibrate_gripper()
    # print(ycc.get_jnt_values("rgt_arm"))
    # ycc.set_gripper_speed("rgt_arm", 10)
# Fast R-CNN detector with a SlowFast 3D backbone for AVA action detection.
model = dict(
    type='FastRCNN',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,  # backbone weights are provided via `load_from` below
        resample_rate=4,
        speed_ratio=4,
        channel_ratio=8,
        # Slow pathway: full-width ResNet3d-50 with lateral fusion from fast.
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            spatial_strides=(1, 2, 2, 1)),
        # Fast pathway: thin ResNet3d-50 (base_channels=8), no lateral input.
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            spatial_strides=(1, 2, 2, 1))),
    # RoI head: pool 3D features per person box and classify the 81 AVA
    # classes as a multi-label problem.
    roi_head=dict(
        type='AVARoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor3D',
            roi_layer_type='RoIAlign',
            output_size=8,
            with_temporal_pool=True),
        bbox_head=dict(
            type='BBoxHeadAVA',
            in_channels=2304,
            num_classes=81,
            multilabel=True,
            dropout_ratio=0.5)),
    train_cfg=dict(
        rcnn=dict(
            # High IoU thresholds: proposals are pre-computed person boxes
            # (see `proposal_file_*` below), not dense anchors.
            assigner=dict(
                type='MaxIoUAssignerAVA',
                pos_iou_thr=0.9,
                neg_iou_thr=0.9,
                min_pos_iou=0.9),
            sampler=dict(
                type='RandomSampler',
                num=32,
                pos_fraction=1,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=1.0,
            debug=False)),
    # At test time, keep only action scores above this threshold.
    test_cfg=dict(rcnn=dict(action_thr=0.002)))
dataset_type = 'AVADataset'
data_root = 'data/ava/rawframes'
anno_root = 'data/ava/annotations'
ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
'recall_93.9.pkl')
proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
dict(type='RawFrameDecode'),
dict(type='RandomRescale', scale_range=(256, 320)),
dict(type='RandomCrop', size=256),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW', collapse=True),
# Rename is needed to use mmdet detectors
dict(type='Rename', mapping=dict(imgs='img')),
dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
dict(
type='ToDataContainer',
fields=[
dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
]),
dict(
type='Collect',
keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
meta_keys=['scores', 'entity_ids'])
]
# The testing is w/o. any cropping / flipping
val_pipeline = [
dict(
type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW', collapse=True),
# Rename is needed to use mmdet detectors
dict(type='Rename', mapping=dict(imgs='img')),
dict(type='ToTensor', keys=['img', 'proposals']),
dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
dict(
type='Collect',
keys=['img', 'proposals'],
meta_keys=['scores', 'img_shape'],
nested=True)
]
data = dict(
videos_per_gpu=5,
workers_per_gpu=2,
val_dataloader=dict(videos_per_gpu=1),
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
exclude_file=exclude_file_train,
pipeline=train_pipeline,
label_file=label_file,
proposal_file=proposal_file_train,
person_det_score_thr=0.9,
data_prefix=data_root),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
exclude_file=exclude_file_val,
pipeline=val_pipeline,
label_file=label_file,
proposal_file=proposal_file_val,
person_det_score_thr=0.9,
data_prefix=data_root))
data['test'] = data['val']
# Optimizer: this lr (0.075) is tuned for an 8-GPU setup — scale it when
# changing the GPU count / total batch size.
optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))  # gradient clipping
# learning policy: step decay at epochs 10 and 15 with a 5-epoch linear warmup
lr_config = dict(
    policy='step',
    step=[10, 15],
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=5,  # counted in epochs because warmup_by_epoch=True
    warmup_ratio=0.1)
total_epochs = 20
checkpoint_config = dict(interval=1)  # save a checkpoint every epoch
workflow = [('train', 1)]
evaluation = dict(interval=1, save_best='mAP@0.5IOU')  # keep best-mAP checkpoint
log_config = dict(
    interval=20, hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/ava/'
            'slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb')
# Initialize from a Kinetics-400 pre-trained SlowFast checkpoint.
load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
             'slowfast_r50_8x8x1_256e_kinetics400_rgb/'
             'slowfast_r50_8x8x1_256e_kinetics400_rgb_20200704-73547d2b.pth')
resume_from = None
find_unused_parameters = False | configs/detection/ava/slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py | model = dict(
type='FastRCNN',
backbone=dict(
type='ResNet3dSlowFast',
pretrained=None,
resample_rate=4,
speed_ratio=4,
channel_ratio=8,
slow_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=True,
fusion_kernel=7,
conv1_kernel=(1, 7, 7),
dilations=(1, 1, 1, 1),
conv1_stride_t=1,
pool1_stride_t=1,
inflate=(0, 0, 1, 1),
spatial_strides=(1, 2, 2, 1)),
fast_pathway=dict(
type='resnet3d',
depth=50,
pretrained=None,
lateral=False,
base_channels=8,
conv1_kernel=(5, 7, 7),
conv1_stride_t=1,
pool1_stride_t=1,
spatial_strides=(1, 2, 2, 1))),
roi_head=dict(
type='AVARoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor3D',
roi_layer_type='RoIAlign',
output_size=8,
with_temporal_pool=True),
bbox_head=dict(
type='BBoxHeadAVA',
in_channels=2304,
num_classes=81,
multilabel=True,
dropout_ratio=0.5)),
train_cfg=dict(
rcnn=dict(
assigner=dict(
type='MaxIoUAssignerAVA',
pos_iou_thr=0.9,
neg_iou_thr=0.9,
min_pos_iou=0.9),
sampler=dict(
type='RandomSampler',
num=32,
pos_fraction=1,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=1.0,
debug=False)),
test_cfg=dict(rcnn=dict(action_thr=0.002)))
dataset_type = 'AVADataset'
data_root = 'data/ava/rawframes'
anno_root = 'data/ava/annotations'
ann_file_train = f'{anno_root}/ava_train_v2.1.csv'
ann_file_val = f'{anno_root}/ava_val_v2.1.csv'
exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.1.csv'
exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.1.csv'
label_file = f'{anno_root}/ava_action_list_v2.1_for_activitynet_2018.pbtxt'
proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.'
'recall_93.9.pkl')
proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleAVAFrames', clip_len=32, frame_interval=2),
dict(type='RawFrameDecode'),
dict(type='RandomRescale', scale_range=(256, 320)),
dict(type='RandomCrop', size=256),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW', collapse=True),
# Rename is needed to use mmdet detectors
dict(type='Rename', mapping=dict(imgs='img')),
dict(type='ToTensor', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
dict(
type='ToDataContainer',
fields=[
dict(key=['proposals', 'gt_bboxes', 'gt_labels'], stack=False)
]),
dict(
type='Collect',
keys=['img', 'proposals', 'gt_bboxes', 'gt_labels'],
meta_keys=['scores', 'entity_ids'])
]
# The testing is w/o. any cropping / flipping
val_pipeline = [
dict(
type='SampleAVAFrames', clip_len=32, frame_interval=2, test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW', collapse=True),
# Rename is needed to use mmdet detectors
dict(type='Rename', mapping=dict(imgs='img')),
dict(type='ToTensor', keys=['img', 'proposals']),
dict(type='ToDataContainer', fields=[dict(key='proposals', stack=False)]),
dict(
type='Collect',
keys=['img', 'proposals'],
meta_keys=['scores', 'img_shape'],
nested=True)
]
data = dict(
videos_per_gpu=5,
workers_per_gpu=2,
val_dataloader=dict(videos_per_gpu=1),
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
exclude_file=exclude_file_train,
pipeline=train_pipeline,
label_file=label_file,
proposal_file=proposal_file_train,
person_det_score_thr=0.9,
data_prefix=data_root),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
exclude_file=exclude_file_val,
pipeline=val_pipeline,
label_file=label_file,
proposal_file=proposal_file_val,
person_det_score_thr=0.9,
data_prefix=data_root))
data['test'] = data['val']
optimizer = dict(type='SGD', lr=0.075, momentum=0.9, weight_decay=0.00001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
step=[10, 15],
warmup='linear',
warmup_by_epoch=True,
warmup_iters=5,
warmup_ratio=0.1)
total_epochs = 20
checkpoint_config = dict(interval=1)
workflow = [('train', 1)]
evaluation = dict(interval=1, save_best='mAP@0.5IOU')
log_config = dict(
interval=20, hooks=[
dict(type='TextLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/ava/'
'slowfast_kinetics_pretrained_r50_8x8x1_20e_ava_rgb')
load_from = ('https://download.openmmlab.com/mmaction/recognition/slowfast/'
'slowfast_r50_8x8x1_256e_kinetics400_rgb/'
'slowfast_r50_8x8x1_256e_kinetics400_rgb_20200704-73547d2b.pth')
resume_from = None
find_unused_parameters = False | 0.580709 | 0.206774 |
from torchtext import data
from torchtext.data import Field, RawField
from typing import List, Tuple
import pickle
import gzip
import numpy as np
import torch
def load_dataset_file(filename):
    """Deserialize a pickled, gzip-compressed dataset file and return the object."""
    with gzip.open(filename, "rb") as handle:
        return pickle.load(handle)
class SignTranslationDataset(data.Dataset):
    """Defines a dataset for machine translation."""

    @staticmethod
    def sort_key(ex):
        # Batch together examples whose sign-sequence and text lengths are
        # similar, via torchtext's interleaved sort key.
        return data.interleave_keys(len(ex.sgn), len(ex.txt))

    def __init__(
        self,
        path: str,
        fields: Tuple[RawField, RawField, Field, Field, Field, Field],
        **kwargs
    ):
        """Create a SignTranslationDataset given paths and fields.

        Arguments:
            path: Common prefix of paths to the data files for both languages.
            exts: A tuple containing the extension to path for each language.
            fields: A tuple containing the fields that will be used for data
                in each language.
            Remaining keyword arguments: Passed to the constructor of
            data.Dataset.
        """
        if not isinstance(fields[0], (tuple, list)):
            # Attach the canonical field names expected downstream.
            fields = [
                ("sequence", fields[0]),
                ("signer", fields[1]),
                ("sgn", fields[2]),
                ("features", fields[3]),
                ("gls", fields[4]),
                ("txt", fields[5]),
            ]
        if not isinstance(path, list):
            path = [path]
        # Key under which pre-extracted CNN features are stored per sample.
        name = 'cnn_features_max'
        samples = {}
        for annotation_file in path:
            tmp = load_dataset_file(annotation_file)
            for _, s in tmp.items():
                seq_id = s["name"]
                if seq_id in samples:
                    # Sequence already seen in another annotation file:
                    # metadata must agree; sign/feature tensors are
                    # concatenated along axis 1.
                    assert samples[seq_id]["name"] == s["name"]
                    assert samples[seq_id]["signer"] == s["signer"]
                    assert samples[seq_id]["gloss"] == s["gloss"]
                    # NOTE(review): comparing feature tensors with `==`
                    # yields an elementwise result (ambiguous truth value for
                    # multi-element tensors), and asserting equality right
                    # before concatenating new features looks contradictory —
                    # TODO confirm the intended check.
                    assert samples[seq_id]["features"] == s[name]
                    assert samples[seq_id]["text"] == s["text"]
                    samples[seq_id]["sign"] = torch.cat([samples[seq_id]["sign"], s["sign"]], axis=1)
                    samples[seq_id]["features"] = torch.cat([samples[seq_id]["features"], s[name]], axis=1)
                else:
                    samples[seq_id] = {
                        "name": s["name"],
                        "signer": s["signer"],
                        "gloss": s["gloss"],
                        "text": s["text"],
                        "sign": s["sign"],
                        "features": s[name]
                    }
        examples = []
        for s in samples:
            sample = samples[s]
            examples.append(
                data.Example.fromlist(
                    [
                        sample["name"],
                        sample["signer"],
                        # This is for numerical stability
                        sample["sign"] + 1e-8,
                        sample['features'] + 1e-8,
                        sample["gloss"].strip(),
                        sample["text"].strip(),
                    ],
                    fields,
                )
            )
        super().__init__(examples, fields, **kwargs)
from torchtext.data import Field, RawField
from typing import List, Tuple
import pickle
import gzip
import numpy as np
import torch
def load_dataset_file(filename):
with gzip.open(filename, "rb") as f:
loaded_object = pickle.load(f)
return loaded_object
class SignTranslationDataset(data.Dataset):
"""Defines a dataset for machine translation."""
@staticmethod
def sort_key(ex):
return data.interleave_keys(len(ex.sgn), len(ex.txt))
def __init__(
self,
path: str,
fields: Tuple[RawField, RawField, Field, Field, Field, Field],
**kwargs
):
"""Create a SignTranslationDataset given paths and fields.
Arguments:
path: Common prefix of paths to the data files for both languages.
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
Remaining keyword arguments: Passed to the constructor of
data.Dataset.
"""
if not isinstance(fields[0], (tuple, list)):
fields = [
("sequence", fields[0]),
("signer", fields[1]),
("sgn", fields[2]),
("features", fields[3]),
("gls", fields[4]),
("txt", fields[5]),
]
if not isinstance(path, list):
path = [path]
name = 'cnn_features_max'
samples = {}
for annotation_file in path:
tmp = load_dataset_file(annotation_file)
for _, s in tmp.items():
seq_id = s["name"]
if seq_id in samples:
assert samples[seq_id]["name"] == s["name"]
assert samples[seq_id]["signer"] == s["signer"]
assert samples[seq_id]["gloss"] == s["gloss"]
assert samples[seq_id]["features"] == s[name]
assert samples[seq_id]["text"] == s["text"]
samples[seq_id]["sign"] = torch.cat([samples[seq_id]["sign"], s["sign"]], axis=1)
samples[seq_id]["features"] = torch.cat([samples[seq_id]["features"], s[name]], axis=1)
else:
samples[seq_id] = {
"name": s["name"],
"signer": s["signer"],
"gloss": s["gloss"],
"text": s["text"],
"sign": s["sign"],
"features": s[name]
}
examples = []
for s in samples:
sample = samples[s]
examples.append(
data.Example.fromlist(
[
sample["name"],
sample["signer"],
# This is for numerical stability
sample["sign"] + 1e-8,
sample['features'] + 1e-8,
sample["gloss"].strip(),
sample["text"].strip(),
],
fields,
)
)
super().__init__(examples, fields, **kwargs) | 0.880759 | 0.348562 |
import asyncio
from datetime import date
import logging
import re
import traceback
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.dispatcher import FSMContext
from aiogram.utils import exceptions, executor
from aiogram.utils.markdown import text
import config
import regexps
from deliverer import Deliverer
from exceptions import NoTimeInStringException
from log_manager import LogManager
from scheduler import Scheduler
from states import Form
from vk_manager import VKM
def setup_logging():
    """Configure and return the 'memstrual_log' logger.

    DEBUG and above go to the file at config.LOG_PATH; INFO and above go to
    the console. Both handlers share one timestamped formatter.
    """
    # create logger
    logger = logging.getLogger('memstrual_log')
    logger.setLevel(logging.DEBUG)
    # Guard against repeated invocation: without this, every call appended
    # another pair of handlers and each record was emitted multiple times.
    if logger.handlers:
        return logger
    # create file handler which logs even debug messages
    fh = logging.FileHandler(config.LOG_PATH)
    fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    return logger
# Single shared event loop used by the bot, the schedulers and the delivery queue.
loop = asyncio.get_event_loop()
bot = Bot(token=config.API_TOKEN, loop=loop)
# FSM conversation state is kept in Redis.
storage = RedisStorage2(host=config.REDIS_HOST,
                        port=config.REDIS_PORT,
                        password=config.REDIS_PASSWORD)
dp = Dispatcher(bot, storage=storage)
vk = VKM()
scheduler = Scheduler()
log_manager = LogManager(bot)
deliverer = Deliverer.get_instance(bot, dp, vk)
# Pre-compiled URL matcher used to detect links in incoming messages.
url_regexp = re.compile(regexps.WEB_URL_REGEX)
logger = setup_logging()
async def manage_post_sending(state, chat_id, user_id, seconds):
    """Queue the pending post for delivery `seconds` from now.

    seconds < 0: the requested time already passed — tell the user and bail.
    seconds == 0: deliver immediately; > 0: confirm the scheduled time first.
    """
    if seconds < 0:
        await bot.send_message(chat_id,
                               'Это время уже прошло, введи другое.')
        return
    elif seconds > 0:
        post_time = scheduler.get_str_datetime_in_future(seconds)
        post_date_message = 'Будет отправлено ' + post_time + '.'
        await bot.send_message(chat_id,
                               post_date_message)
    async with state.proxy() as data:
        # Hand the stored message over to the delivery queue and clear it.
        await deliverer.append(
            post_time=scheduler.get_datetime_in_future(seconds),
            chat_id=chat_id,
            message_id=data['message_to_schedule_id'],
            user_id=user_id)
        data['message_to_schedule_id'] = None
    # Return the conversation to operational mode.
    await Form.operational_mode.set()
@dp.message_handler(commands=['start'])
async def cmd_start(message: types.Message):
    """
    Conversation's entry point: greet the user and explain the setup steps.
    """
    logger.info('Старт работы бота.')
    # Set state
    await Form.initial.set()
    line1 = 'Привет, этот бот автоматически постит посылаемый ему контент ' +\
        'в заданные тобой группу ВК и канал в телеграме.'
    line2 = 'Для начала нужно настроить подключение.'
    line3 = 'Жми /vk или /channel и следуй инструкциям.'
    instructions = text(text(line1), text(line2), '', text(line3), sep='\n')
    await bot.send_message(message.chat.id,
                           instructions)


@dp.message_handler(commands=['getlog'], state='*')
async def cmd_getlog(message: types.Message):
    """Send the current log file to the requesting chat (works in any state)."""
    logger.info('Отдаю лог.')
    await log_manager.send_log(chat_id=message.chat.id)


@dp.message_handler(commands=['dellog'], state='*')
async def cmd_dellog(message: types.Message):
    """Wipe the log file and confirm to the user (works in any state)."""
    log_manager.wipe_log()
    logger.info('Удалил лог.')
    await bot.send_message(message.chat.id, 'Удалил лог.')
@dp.message_handler(commands=['reset'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'reset', state='*')
async def cmd_reset(message: types.Message, state: FSMContext):
    """Drop all stored FSM data and restart the configuration flow."""
    logger.info('Сброс.')
    await state.finish()
    await Form.initial.set()
    await bot.send_message(message.chat.id,
                           'Стер себе память, настраивай заново теперь.')


@dp.message_handler(commands=['channel'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'channel',
                    state='*')
async def cmd_channel(message: types.Message):
    """Start the Telegram-channel setup dialogue."""
    logger.info('Настраиваем канал.')
    await Form.channel_name.set()
    line1 = 'Сперва сделай бота админом канала.'
    line2 = 'Потом пришли мне имя канала в формате @название_канала.'
    instructions = text(text(line1), text(line2), sep='\n')
    await bot.send_message(message.chat.id, instructions)


@dp.message_handler(commands=['vk'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'vk', state='*')
async def cmd_vk(message: types.Message):
    """Start the VK setup dialogue: explain how to obtain an access token."""
    logger.info('Настраиваем ВК.')
    await Form.vk_token.set()
    line2 = 'Перейди по ссылке и скопируй из адресной строки весь ' +\
        'текст, находящийся между \"access_token=\" и \"&\".'
    instructions = text(
        text('Для вк нужно получить токен (если его еще у тебя нет).'),
        text(line2),
        text('В результате получится длинная строка из букв и цифр.'),
        sep='\n')
    # Inline keyboard with a single button linking to the token page.
    keyboard = types.InlineKeyboardMarkup(row_width=2)
    url_button = types.InlineKeyboardButton(text="Получить токен",
                                            url=config.VK_TOKEN_LINK)
    keyboard.add(url_button)
    await bot.send_message(message.chat.id,
                           instructions,
                           reply_markup=keyboard)
    await bot.send_message(message.chat.id, "Введи токен:")
@dp.message_handler(state=Form.channel_name)
async def process_channel(message: types.Message, state: FSMContext):
    """
    Process user channel name: validate the "@name" format, store it, and
    switch the conversation to operational mode.
    """
    logger.info('Обрабатываем ввод имени канала.')
    # Save name to storage and go to next step
    channel_tg = message.text.strip()
    # `startswith` also handles an empty message safely: the previous
    # `channel_tg[0]` raised IndexError on whitespace-only input.
    if not channel_tg.startswith('@'):
        await bot.send_message(message.chat.id, 'Нет @ в начале имени.')
        return
    async with state.proxy() as data:
        data['channel_tg'] = channel_tg
    await bot.send_message(message.chat.id, 'Можно попробовать слать мемы.')
    await Form.operational_mode.set()
@dp.message_handler(state=Form.vk_token)
async def process_token(message: types.Message, state: FSMContext):
    """
    Process user token: store it, verify it against VK, then ask for the group id.
    """
    logger.info('Обрабатываем ввод токена ВК.')
    vk_token = message.text
    async with state.proxy() as data:
        data['vk_token'] = vk_token
    # Verify the token before moving to the next setup step.
    test_result, test_message = await vk.test_token(vk_token)
    await bot.send_message(message.chat.id, test_message)
    if test_result:
        await Form.group_id.set()
        await bot.send_message(message.chat.id, 'Введи ID группы:')
    else:
        # Authorization failed — ask the user to retry.
        await bot.send_message(
            message.chat.id,
            'Авторизация чето не удалась, я хз, повтори')


@dp.message_handler(state=Form.group_id)
async def process_group_id(message: types.Message, state: FSMContext):
    """Validate the VK group id with the stored token; on success go operational."""
    logger.info('Обрабатываем ввод ИД группы ВК.')
    group_id = message.text
    async with state.proxy() as data:
        data['group_id'] = group_id
        vk_token = data['vk_token']
    test_result, test_message = await vk.test_group_id(group_id, vk_token)
    await bot.send_message(message.chat.id, test_message)
    if test_result:
        await Form.operational_mode.set()
        await bot.send_message(message.chat.id,
                               'Можно попробовать слать мемы.')
    else:
        # Authorization failed — ask the user to retry.
        await bot.send_message(message.chat.id,
                               'Авторизация чето не удалась, я хз, повтори')
@dp.callback_query_handler(state=Form.datetime_input)
async def callback_inline(call, state: FSMContext):
    """Handle the day-selection inline buttons.

    "сейчас" (now) posts immediately; today/tomorrow/day-after store the
    chosen date in FSM storage and re-render the keyboard with that day
    highlighted.
    """
    logger.info('Обрабатываем нажатие кнопки дня публикации.')
    if call.data == "сегодня":
        post_date = scheduler.get_today_date()
    elif call.data == "завтра":
        post_date = scheduler.get_today_date(1)
    elif call.data == "послезавтра":
        post_date = scheduler.get_today_date(2)
    elif call.data == "сейчас":
        # "now": schedule with zero delay and skip the date bookkeeping.
        await manage_post_sending(state,
                                  call.message.chat.id,
                                  call.message.chat.id,
                                  seconds=0)
        return
    else:
        # Unknown payload: fall back to today.
        post_date = scheduler.get_today_date()
    post_date = date.isoformat(post_date)
    async with state.proxy() as data:
        data['post_date'] = post_date
    keyboard = scheduler.get_day_selection(call.data)
    try:
        await bot.edit_message_reply_markup(
            chat_id=call.message.chat.id,
            message_id=call.message.message_id,
            reply_markup=keyboard)
    except exceptions.MessageNotModified:
        # The keyboard did not change (e.g. the same button was pressed
        # again): reset the selection and the stored date back to today.
        keyboard = scheduler.get_day_selection()
        post_date = scheduler.get_today_date()
        post_date = date.isoformat(post_date)
        async with state.proxy() as data:
            data['post_date'] = post_date
        await bot.edit_message_reply_markup(
            chat_id=call.message.chat.id,
            message_id=call.message.message_id,
            reply_markup=keyboard)


@dp.message_handler(state=Form.datetime_input,
                    content_types=types.ContentType.TEXT)
async def process_postdate(message: types.Message, state: FSMContext):
    """Parse the posting time typed by the user, or treat the text as a new post."""
    logger.info('Обрабатываем ввод времени публикации (или сброс ввода).')
    # If the message contains a link, it is clearly a new post — drop the old one.
    if url_regexp.split(message.text)[1:]:
        # Clear the pending-post reference, just in case...
        async with state.proxy() as data:
            data['message_to_schedule_id'] = None
        # ...and hand the message over to the link handler.
        await process_text(message, state)
        return
    else:
        # No link: try to interpret the text as the time to postpone to.
        async with state.proxy() as data:
            post_date = date.fromisoformat(data['post_date'])
        try:
            seconds = scheduler.parse_time_input(post_date, message.text)
        except NoTimeInStringException:
            # Nothing resembling a time — forward to the ordinary post handler.
            await process_text(message, state)
            return
    await manage_post_sending(state,
                              message.chat.id,
                              message.from_user.id,
                              seconds)
@dp.message_handler(state=Form.datetime_input,
                    content_types=types.ContentType.PHOTO)
async def break_input_by_photo(message: types.Message, state: FSMContext):
    """A new photo during time input aborts it; the photo becomes a new post."""
    logger.info('Обрабатываем сброс ввода времени через новую картинку.')
    # Update user's state
    await Form.operational_mode.set()
    await process_photos(message, state)


@dp.message_handler(state=Form.operational_mode,
                    content_types=types.ContentType.PHOTO)
async def process_photos(message: types.Message, state: FSMContext):
    """Queue an incoming photo for posting; on failure log the traceback and alert."""
    logger.info('Обрабатываем посылку картинки.')
    try:
        await scheduler.schedule_post(state, message)
    except Exception:
        # Append the traceback to the log file, echo it to stderr and
        # trigger the log manager's alert.
        with open(config.LOG_PATH, "a") as logfile:
            traceback.print_exc(file=logfile)
        traceback.print_exc()
        await log_manager.panic_sending()


@dp.message_handler(state=Form.operational_mode,
                    content_types=types.ContentType.TEXT)
async def process_text(message: types.Message, state: FSMContext):
    """Queue an incoming text/link message for posting; on failure log and alert."""
    logger.info('Обрабатываем посылку текста.')
    try:
        await scheduler.schedule_post(state, message)
    except Exception:
        # Same failure handling as for photos: persist and surface the error.
        with open(config.LOG_PATH, "a") as logfile:
            traceback.print_exc(file=logfile)
        traceback.print_exc()
        await log_manager.panic_sending()
@dp.message_handler(state=Form.initial)
async def no_way(message: types.Message):
    """In the unconfigured state, nudge the user to set up delivery targets."""
    logger.info('Обработка ввода при ненастроенных получателях.')
    line1 = 'Пока не настроишь места для пересылки, тут будет скучновато.'
    line2 = 'Жми /vk или /channel и следуй инструкциям.'
    instructions = text(text(line1), text(line2), sep='\n')
    await bot.send_message(message.chat.id,
                           instructions)
@dp.message_handler(state=None)
async def to_start(message: types.Message):
    """Any message arriving with no FSM state restarts the conversation."""
    logger.info('При вводе любого сообщения стартуем.')
    await cmd_start(message)
async def checking_queue_after_pause():
    """Give the bot a moment to come up, then start polling the delivery queue."""
    await asyncio.sleep(5)
    await deliverer.start_checking()
async def checking_log_after_pause():
    """After a short delay, start the log manager's periodic checking."""
    await asyncio.sleep(20)
    await log_manager.start_checking()
async def startup(dispatcher: Dispatcher):
    """aiogram on_startup hook: launch the background checker coroutines."""
    logger.info('Старт бота.')
    # Start checking the delivery queue right away; everything it needs exists.
    asyncio.run_coroutine_threadsafe(checking_queue_after_pause(), loop)
    # Start watching the log file for overflow.
    asyncio.run_coroutine_threadsafe(checking_log_after_pause(), loop)
async def shutdown(dispatcher: Dispatcher):
    """aiogram on_shutdown hook: close the FSM storage cleanly."""
    logger.info('Убиваем бота.')
    await dispatcher.storage.close()
    await dispatcher.storage.wait_closed()
if __name__ == '__main__':
executor.start_polling(dp,
loop=loop,
skip_updates=True,
on_startup=startup,
on_shutdown=shutdown) | main.py | import asyncio
from datetime import date
import logging
import re
import traceback
from aiogram import Bot, Dispatcher, types
from aiogram.contrib.fsm_storage.redis import RedisStorage2
from aiogram.dispatcher import FSMContext
from aiogram.utils import exceptions, executor
from aiogram.utils.markdown import text
import config
import regexps
from deliverer import Deliverer
from exceptions import NoTimeInStringException
from log_manager import LogManager
from scheduler import Scheduler
from states import Form
from vk_manager import VKM
def setup_logging():
    """Configure and return the application logger.

    Attaches two handlers to the 'memstrual_log' logger: a file handler
    (config.LOG_PATH) at DEBUG level and a console handler at INFO level,
    both sharing one timestamped format.
    """
    log = logging.getLogger('memstrual_log')
    log.setLevel(logging.DEBUG)
    shared_format = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # File sink captures everything, including debug messages.
    file_handler = logging.FileHandler(config.LOG_PATH)
    file_handler.setLevel(logging.DEBUG)
    # Console sink is kept quieter.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    for handler in (file_handler, console_handler):
        handler.setFormatter(shared_format)
        log.addHandler(handler)
    return log
# Single shared event loop; also handed to executor.start_polling below.
loop = asyncio.get_event_loop()
bot = Bot(token=config.API_TOKEN, loop=loop)
# FSM state lives in Redis so it survives bot restarts.
storage = RedisStorage2(host=config.REDIS_HOST,
                        port=config.REDIS_PORT,
                        password=config.REDIS_PASSWORD)
dp = Dispatcher(bot, storage=storage)
# Service singletons shared by all handlers below.
vk = VKM()
scheduler = Scheduler()
log_manager = LogManager(bot)
deliverer = Deliverer.get_instance(bot, dp, vk)
# Precompiled URL detector used to distinguish new posts from time input.
url_regexp = re.compile(regexps.WEB_URL_REGEX)
logger = setup_logging()
async def manage_post_sending(state, chat_id, user_id, seconds):
    """Queue the stored message for delivery in *seconds* seconds.

    Negative *seconds* means the requested moment has already passed: ask
    for another time and keep the current state.  Zero means "send now"
    (queued without a confirmation message).  The message to deliver is
    taken from FSM storage under 'message_to_schedule_id'.
    """
    if seconds < 0:
        await bot.send_message(chat_id,
                               'Это время уже прошло, введи другое.')
        return
    elif seconds > 0:
        # Confirm the computed delivery time back to the user.
        post_time = scheduler.get_str_datetime_in_future(seconds)
        post_date_message = 'Будет отправлено ' + post_time + '.'
        await bot.send_message(chat_id,
                               post_date_message)
    async with state.proxy() as data:
        # Hand the message over to the delivery queue and clear the slot.
        await deliverer.append(
            post_time=scheduler.get_datetime_in_future(seconds),
            chat_id=chat_id,
            message_id=data['message_to_schedule_id'],
            user_id=user_id)
        data['message_to_schedule_id'] = None
    # Return to the normal operating mode.
    await Form.operational_mode.set()
@dp.message_handler(commands=['start'])
async def cmd_start(message: types.Message):
    """
    Conversation's entry point: greet the user and point at the setup
    commands (/vk, /channel) while entering the unconfigured state.
    """
    logger.info('Старт работы бота.')
    # Start every conversation from the unconfigured state.
    await Form.initial.set()
    line1 = 'Привет, этот бот автоматически постит посылаемый ему контент ' +\
        'в заданные тобой группу ВК и канал в телеграме.'
    line2 = 'Для начала нужно настроить подключение.'
    line3 = 'Жми /vk или /channel и следуй инструкциям.'
    instructions = text(text(line1), text(line2), '', text(line3), sep='\n')
    await bot.send_message(message.chat.id,
                           instructions)
@dp.message_handler(commands=['getlog'], state='*')
async def cmd_getlog(message: types.Message):
    """Send the current log file to the requesting chat (works in any state)."""
    logger.info('Отдаю лог.')
    await log_manager.send_log(chat_id=message.chat.id)
@dp.message_handler(commands=['dellog'], state='*')
async def cmd_dellog(message: types.Message):
    """Wipe the log file and confirm to the user (works in any state)."""
    log_manager.wipe_log()
    logger.info('Удалил лог.')
    await bot.send_message(message.chat.id, 'Удалил лог.')
@dp.message_handler(commands=['reset'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'reset', state='*')
async def cmd_reset(message: types.Message, state: FSMContext):
    """Drop all stored FSM data and return to the initial state."""
    logger.info('Сброс.')
    # state.finish() clears the stored data as well as the state itself.
    await state.finish()
    await Form.initial.set()
    await bot.send_message(message.chat.id,
                           'Стер себе память, настраивай заново теперь.')
@dp.message_handler(commands=['channel'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'channel',
                    state='*')
async def cmd_channel(message: types.Message):
    """Begin the Telegram-channel setup dialog (works in any state)."""
    logger.info('Настраиваем канал.')
    await Form.channel_name.set()
    line1 = 'Сперва сделай бота админом канала.'
    line2 = 'Потом пришли мне имя канала в формате @название_канала.'
    instructions = text(text(line1), text(line2), sep='\n')
    await bot.send_message(message.chat.id, instructions)
@dp.message_handler(commands=['vk'], state='*')
@dp.message_handler(lambda message: message.text.lower() == 'vk', state='*')
async def cmd_vk(message: types.Message):
    """Begin the VK setup dialog: explain how to obtain an access token."""
    logger.info('Настраиваем ВК.')
    await Form.vk_token.set()
    line2 = 'Перейди по ссылке и скопируй из адресной строки весь ' +\
        'текст, находящийся между \"access_token=\" и \"&\".'
    instructions = text(
        text('Для вк нужно получить токен (если его еще у тебя нет).'),
        text(line2),
        text('В результате получится длинная строка из букв и цифр.'),
        sep='\n')
    # Inline button that opens the OAuth link for obtaining the token.
    keyboard = types.InlineKeyboardMarkup(row_width=2)
    url_button = types.InlineKeyboardButton(text="Получить токен",
                                            url=config.VK_TOKEN_LINK)
    keyboard.add(url_button)
    await bot.send_message(message.chat.id,
                           instructions,
                           reply_markup=keyboard)
    await bot.send_message(message.chat.id, "Введи токен:")
@dp.message_handler(state=Form.channel_name)
async def process_channel(message: types.Message, state: FSMContext):
    """
    Process user channel name: store it in FSM storage and move on to
    the operational mode, or ask again when the name is malformed.
    """
    logger.info('Обрабатываем ввод имени канала.')
    # Save name to storage and go to next step
    channel_tg = message.text.strip()
    # startswith() also handles an empty message safely: the previous
    # `channel_tg[0] != '@'` check raised IndexError on empty input.
    if not channel_tg.startswith('@'):
        await bot.send_message(message.chat.id, 'Нет @ в начале имени.')
        return
    async with state.proxy() as data:
        data['channel_tg'] = channel_tg
    await bot.send_message(message.chat.id, 'Можно попробовать слать мемы.')
    await Form.operational_mode.set()
@dp.message_handler(state=Form.vk_token)
async def process_token(message: types.Message, state: FSMContext):
    """
    Process user token: store it, verify it against the VK API, then
    either proceed to asking for a group id or ask the user to retry.
    """
    logger.info('Обрабатываем ввод токена ВК.')
    vk_token = message.text
    async with state.proxy() as data:
        data['vk_token'] = vk_token
    test_result, test_message = await vk.test_token(vk_token)
    await bot.send_message(message.chat.id, test_message)
    if test_result:
        await Form.group_id.set()
        await bot.send_message(message.chat.id, 'Введи ID группы:')
    else:
        # Authorization failed for some reason; ask the user to retry.
        await bot.send_message(
            message.chat.id,
            'Авторизация чето не удалась, я хз, повтори')
@dp.message_handler(state=Form.group_id)
async def process_group_id(message: types.Message, state: FSMContext):
    """Store the VK group id, verify it with the saved token, and proceed."""
    logger.info('Обрабатываем ввод ИД группы ВК.')
    group_id = message.text
    async with state.proxy() as data:
        data['group_id'] = group_id
        # The token was saved earlier by process_token.
        vk_token = data['vk_token']
    test_result, test_message = await vk.test_group_id(group_id, vk_token)
    await bot.send_message(message.chat.id, test_message)
    if test_result:
        await Form.operational_mode.set()
        await bot.send_message(message.chat.id,
                               'Можно попробовать слать мемы.')
    else:
        # Authorization against the group failed; ask the user to retry.
        await bot.send_message(message.chat.id,
                               'Авторизация чето не удалась, я хз, повтори')
@dp.callback_query_handler(state=Form.datetime_input)
async def callback_inline(call, state: FSMContext):
    """Handle the inline-keyboard day choice for a scheduled post.

    Stores the chosen date in FSM storage as an ISO string and re-renders
    the keyboard with the chosen day marked; the "сейчас" button queues
    the post immediately instead.
    """
    logger.info('Обрабатываем нажатие кнопки дня публикации.')
    if call.data == "сегодня":
        post_date = scheduler.get_today_date()
    elif call.data == "завтра":
        post_date = scheduler.get_today_date(1)
    elif call.data == "послезавтра":
        post_date = scheduler.get_today_date(2)
    elif call.data == "сейчас":
        # NOTE(review): user_id is passed as call.message.chat.id here,
        # while process_postdate passes message.from_user.id — these
        # coincide in private chats; confirm intent for group chats.
        await manage_post_sending(state,
                                  call.message.chat.id,
                                  call.message.chat.id,
                                  seconds=0)
        return
    else:
        # Unknown callback payload: fall back to today's date.
        post_date = scheduler.get_today_date()
    post_date = date.isoformat(post_date)
    async with state.proxy() as data:
        data['post_date'] = post_date
    keyboard = scheduler.get_day_selection(call.data)
    try:
        await bot.edit_message_reply_markup(
            chat_id=call.message.chat.id,
            message_id=call.message.message_id,
            reply_markup=keyboard)
    except exceptions.MessageNotModified:
        # The same button was pressed twice, so Telegram rejects the no-op
        # edit: reset the keyboard to its default and store today's date.
        keyboard = scheduler.get_day_selection()
        post_date = scheduler.get_today_date()
        post_date = date.isoformat(post_date)
        async with state.proxy() as data:
            data['post_date'] = post_date
        await bot.edit_message_reply_markup(
            chat_id=call.message.chat.id,
            message_id=call.message.message_id,
            reply_markup=keyboard)
@dp.message_handler(state=Form.datetime_input,
                    content_types=types.ContentType.TEXT)
async def process_postdate(message: types.Message, state: FSMContext):
    """Parse the publish-time input, or abandon scheduling for new content.

    In the datetime-input state an incoming text is either a time for the
    pending post or brand-new content that cancels the scheduling dialog.
    """
    logger.info('Обрабатываем ввод времени публикации (или сброс ввода).')
    # If the message contains a URL it is clearly a new post; drop the old one.
    if url_regexp.split(message.text)[1:]:
        # Clear the pending-post slot, just in case.
        async with state.proxy() as data:
            data['message_to_schedule_id'] = None
        # ...and hand the message over to the link/text handler.
        await process_text(message, state)
        return
    else:
        # No link present, so try to parse the postpone-until time.
        async with state.proxy() as data:
            # 'post_date' was stored as an ISO date string by callback_inline.
            post_date = date.fromisoformat(data['post_date'])
        try:
            seconds = scheduler.parse_time_input(post_date, message.text)
        except NoTimeInStringException:
            # Nothing resembling a time in the text: forward the message
            # to the regular post handler instead.
            await process_text(message, state)
            return
        await manage_post_sending(state,
                                  message.chat.id,
                                  message.from_user.id,
                                  seconds)
@dp.message_handler(state=Form.datetime_input,
                    content_types=types.ContentType.PHOTO)
async def break_input_by_photo(message: types.Message, state: FSMContext):
    """A new photo aborts the time-input dialog and is scheduled as usual."""
    logger.info('Обрабатываем сброс ввода времени через новую картинку.')
    # Leave the datetime-input state before delegating to the photo handler.
    await Form.operational_mode.set()
    await process_photos(message, state)
@dp.message_handler(state=Form.operational_mode,
                    content_types=types.ContentType.PHOTO)
async def process_photos(message: types.Message, state: FSMContext):
    """Schedule an incoming photo; on any failure, log and raise the alarm."""
    logger.info('Обрабатываем посылку картинки.')
    try:
        await scheduler.schedule_post(state, message)
    except Exception:
        # Best-effort error path: append the traceback to the log file,
        # echo it to stderr, then notify through the log manager.
        with open(config.LOG_PATH, "a") as logfile:
            traceback.print_exc(file=logfile)
        traceback.print_exc()
        await log_manager.panic_sending()
@dp.message_handler(state=Form.operational_mode,
                    content_types=types.ContentType.TEXT)
async def process_text(message: types.Message, state: FSMContext):
    """Schedule an incoming text post; on any failure, log and raise the alarm."""
    logger.info('Обрабатываем посылку текста.')
    try:
        await scheduler.schedule_post(state, message)
    except Exception:
        # Best-effort error path: append the traceback to the log file,
        # echo it to stderr, then notify through the log manager.
        with open(config.LOG_PATH, "a") as logfile:
            traceback.print_exc(file=logfile)
        traceback.print_exc()
        await log_manager.panic_sending()
@dp.message_handler(state=Form.initial)
async def no_way(message: types.Message):
    """In the unconfigured state, nudge the user to set up delivery targets."""
    logger.info('Обработка ввода при ненастроенных получателях.')
    line1 = 'Пока не настроишь места для пересылки, тут будет скучновато.'
    line2 = 'Жми /vk или /channel и следуй инструкциям.'
    instructions = text(text(line1), text(line2), sep='\n')
    await bot.send_message(message.chat.id,
                           instructions)
@dp.message_handler(state=None)
async def to_start(message: types.Message):
    """Any message arriving with no FSM state restarts the conversation."""
    logger.info('При вводе любого сообщения стартуем.')
    await cmd_start(message)
async def checking_queue_after_pause():
    """Give the bot a moment to come up, then start polling the delivery queue."""
    await asyncio.sleep(5)
    await deliverer.start_checking()
async def checking_log_after_pause():
    """After a short delay, start the log manager's periodic checking."""
    await asyncio.sleep(20)
    await log_manager.start_checking()
async def startup(dispatcher: Dispatcher):
    """aiogram on_startup hook: launch the background checker coroutines."""
    logger.info('Старт бота.')
    # Start checking the delivery queue right away; everything it needs exists.
    asyncio.run_coroutine_threadsafe(checking_queue_after_pause(), loop)
    # Start watching the log file for overflow.
    asyncio.run_coroutine_threadsafe(checking_log_after_pause(), loop)
async def shutdown(dispatcher: Dispatcher):
    """aiogram on_shutdown hook: close the FSM storage cleanly."""
    logger.info('Убиваем бота.')
    await dispatcher.storage.close()
    await dispatcher.storage.wait_closed()
if __name__ == '__main__':
executor.start_polling(dp,
loop=loop,
skip_updates=True,
on_startup=startup,
on_shutdown=shutdown) | 0.303009 | 0.096068 |
import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader
import torchvision
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from tqdm import tqdm
from absolute_pooling import MaxAbsPool2d
from sharpened_cosine_similarity import SharpenedCosineSimilarity
# Training hyperparameters and model widths.
batch_size = 128
max_lr = .05
n_classes = 10
n_epochs = 100
n_runs = 1000
n_units_1 = 32
n_units_2 = 64
n_units_3 = 128
# Allow for a version tag to be provided at the command line, as in
# `python <script>.py my_experiment`; fall back to a throwaway "test" tag.
if len(sys.argv) > 1:
    version = sys.argv[1]
else:
    version = "test"
# Lay out the destinations for all the results.
# (Plain "results" strings: the originals were f-strings with no placeholders.)
accuracy_results_path = os.path.join("results", f"accuracy_{version}.npy")
accuracy_history_path = os.path.join(
    "results", f"accuracy_history_{version}.npy")
loss_results_path = os.path.join("results", f"loss_{version}.npy")
os.makedirs("results", exist_ok=True)
# CIFAR-10 training split with light augmentation (pad-and-crop + h-flip).
# download=True fetches the dataset on first use.
training_set = CIFAR10(
    root=os.path.join('.', 'data', 'CIFAR10'),
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        # transforms.ColorJitter(
        #     brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]))
# Test split: tensor conversion only, no augmentation.
testing_set = CIFAR10(
    root=os.path.join('.', 'data', 'CIFAR10'),
    train=False,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()]))
training_loader = DataLoader(
    training_set,
    batch_size=batch_size,
    shuffle=True)
testing_loader = DataLoader(
    testing_set,
    batch_size=batch_size,
    shuffle=False)
class Network(nn.Module):
    """Three sharpened-cosine-similarity stages, each followed by abs-max
    pooling, then a single linear classifier over the flattened features.

    Built for CIFAR-10 (3x32x32 inputs); with these kernel/padding choices
    the final feature map is presumably n_units_3 x 4 x 4, which the linear
    layer's in_features assumes.
    """

    def __init__(self):
        super().__init__()
        self.scs1 = SharpenedCosineSimilarity(
            in_channels=3, out_channels=n_units_1, kernel_size=3, padding=0)
        self.pool1 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.scs2 = SharpenedCosineSimilarity(
            in_channels=n_units_1, out_channels=n_units_2, kernel_size=3, padding=1)
        self.pool2 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.scs3 = SharpenedCosineSimilarity(
            in_channels=n_units_2, out_channels=n_units_3, kernel_size=3, padding=1)
        self.pool3 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.out = nn.Linear(in_features=n_units_3 * 4 * 4, out_features=n_classes)

    def forward(self, t):
        """Run the three scs/pool stages, flatten, and classify."""
        for scs, pool in ((self.scs1, self.pool1),
                          (self.scs2, self.pool2),
                          (self.scs3, self.pool3)):
            t = pool(scs(t))
        return self.out(t.reshape(-1, n_units_3 * 4 * 4))
# Restore any previously generated results; fall back to fresh, empty
# accumulators when the files are missing or unreadable.
try:
    accuracy_results = np.load(accuracy_results_path).tolist()
    accuracy_histories = np.load(accuracy_history_path).tolist()
    loss_results = np.load(loss_results_path).tolist()
except Exception:
    accuracy_results, accuracy_histories, loss_results = [], [], []
steps_per_epoch = len(training_loader)
# Repeat the full training many times to collect accuracy statistics.
for i_run in range(n_runs):
    # Fresh model, optimizer, and one-cycle schedule for every run.
    network = Network()
    optimizer = optim.Adam(network.parameters(), lr=max_lr)
    scheduler = OneCycleLR(
        optimizer,
        max_lr=max_lr,
        steps_per_epoch=steps_per_epoch,
        epochs=n_epochs)
    epoch_accuracy_history = []
    for i_epoch in range(n_epochs):
        epoch_start_time = time.time()
        epoch_training_loss = 0
        epoch_testing_loss = 0
        epoch_training_num_correct = 0
        epoch_testing_num_correct = 0
        with tqdm(enumerate(training_loader)) as tqdm_training_loader:
            for batch_idx, batch in tqdm_training_loader:
                images, labels = batch
                preds = network(images)
                loss = F.cross_entropy(preds, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # OneCycleLR is stepped once per batch, not per epoch.
                scheduler.step()
                # Weight the mean batch loss by the actual batch size:
                # the final batch can be smaller than
                # training_loader.batch_size, which the previous
                # accumulation over-weighted.
                epoch_training_loss += loss.item() * labels.shape[0]
                epoch_training_num_correct += (
                    preds.argmax(dim=1).eq(labels).sum().item())
                tqdm_training_loader.set_description(
                    f'Step: {batch_idx + 1}/{steps_per_epoch}, '
                    f'Epoch: {i_epoch + 1}/{n_epochs}, '
                    f'Run: {i_run + 1}/{n_runs}'
                )
        epoch_duration = time.time() - epoch_start_time
        training_loss = epoch_training_loss / len(training_loader.dataset)
        training_accuracy = (
            epoch_training_num_correct / len(training_loader.dataset))
        # At the end of each epoch run the testing data through an
        # evaluation pass to see how the model is doing.
        # Specify no_grad() to prevent a nasty out-of-memory condition.
        with torch.no_grad():
            for batch in testing_loader:
                images, labels = batch
                preds = network(images)
                loss = F.cross_entropy(preds, labels)
                # Same actual-batch-size weighting as in training.
                epoch_testing_loss += loss.item() * labels.shape[0]
                epoch_testing_num_correct += (
                    preds.argmax(dim=1).eq(labels).sum().item())
        testing_loss = epoch_testing_loss / len(testing_loader.dataset)
        testing_accuracy = (
            epoch_testing_num_correct / len(testing_loader.dataset))
        epoch_accuracy_history.append(testing_accuracy)
        print(
            f"run: {i_run} "
            f"epoch: {i_epoch} "
            f"duration: {epoch_duration:.04} "
            f"training loss: {training_loss:.04} "
            f"testing loss: {testing_loss:.04} "
            f"training accuracy: {100 * training_accuracy:.04}% "
            f"testing accuracy: {100 * testing_accuracy:.04}%"
        )
    # Persist results after every run so an interruption loses little work.
    accuracy_histories.append(epoch_accuracy_history)
    accuracy_results.append(testing_accuracy)
    loss_results.append(testing_loss)
    np.save(accuracy_history_path, np.array(accuracy_histories))
    np.save(accuracy_results_path, np.array(accuracy_results))
np.save(loss_results_path, np.array(loss_results)) | model_cifar10_15_9.py | import os
import sys
import time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader
import torchvision
from torchvision.datasets import CIFAR10
import torchvision.transforms as transforms
from tqdm import tqdm
from absolute_pooling import MaxAbsPool2d
from sharpened_cosine_similarity import SharpenedCosineSimilarity
# Training hyperparameters and model widths.
batch_size = 128
max_lr = .05
n_classes = 10
n_epochs = 100
n_runs = 1000
n_units_1 = 32
n_units_2 = 64
n_units_3 = 128
# Allow for a version tag to be provided at the command line, as in
# `python <script>.py my_experiment`; fall back to a throwaway "test" tag.
if len(sys.argv) > 1:
    version = sys.argv[1]
else:
    version = "test"
# Lay out the destinations for all the results.
# (Plain "results" strings: the originals were f-strings with no placeholders.)
accuracy_results_path = os.path.join("results", f"accuracy_{version}.npy")
accuracy_history_path = os.path.join(
    "results", f"accuracy_history_{version}.npy")
loss_results_path = os.path.join("results", f"loss_{version}.npy")
os.makedirs("results", exist_ok=True)
# CIFAR-10 training split with light augmentation (pad-and-crop + h-flip).
# download=True fetches the dataset on first use.
training_set = CIFAR10(
    root=os.path.join('.', 'data', 'CIFAR10'),
    train=True,
    download=True,
    transform=transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        # transforms.ColorJitter(
        #     brightness=0.2, contrast=0.2, saturation=0.2, hue=0.2),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor()
    ]))
# Test split: tensor conversion only, no augmentation.
testing_set = CIFAR10(
    root=os.path.join('.', 'data', 'CIFAR10'),
    train=False,
    download=True,
    transform=transforms.Compose([transforms.ToTensor()]))
training_loader = DataLoader(
    training_set,
    batch_size=batch_size,
    shuffle=True)
testing_loader = DataLoader(
    testing_set,
    batch_size=batch_size,
    shuffle=False)
class Network(nn.Module):
    """Three sharpened-cosine-similarity stages, each followed by abs-max
    pooling, then a single linear classifier over the flattened features.

    Built for CIFAR-10 (3x32x32 inputs); with these kernel/padding choices
    the final feature map is presumably n_units_3 x 4 x 4, which the linear
    layer's in_features assumes.
    """

    def __init__(self):
        super().__init__()
        self.scs1 = SharpenedCosineSimilarity(
            in_channels=3, out_channels=n_units_1, kernel_size=3, padding=0)
        self.pool1 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.scs2 = SharpenedCosineSimilarity(
            in_channels=n_units_1, out_channels=n_units_2, kernel_size=3, padding=1)
        self.pool2 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.scs3 = SharpenedCosineSimilarity(
            in_channels=n_units_2, out_channels=n_units_3, kernel_size=3, padding=1)
        self.pool3 = MaxAbsPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.out = nn.Linear(in_features=n_units_3 * 4 * 4, out_features=n_classes)

    def forward(self, t):
        """Run the three scs/pool stages, flatten, and classify."""
        for scs, pool in ((self.scs1, self.pool1),
                          (self.scs2, self.pool2),
                          (self.scs3, self.pool3)):
            t = pool(scs(t))
        return self.out(t.reshape(-1, n_units_3 * 4 * 4))
# Restore any previously generated results; fall back to fresh, empty
# accumulators when the files are missing or unreadable.
try:
    accuracy_results = np.load(accuracy_results_path).tolist()
    accuracy_histories = np.load(accuracy_history_path).tolist()
    loss_results = np.load(loss_results_path).tolist()
except Exception:
    accuracy_results, accuracy_histories, loss_results = [], [], []
steps_per_epoch = len(training_loader)
# Repeat the full training many times to collect accuracy statistics.
for i_run in range(n_runs):
    # Fresh model, optimizer, and one-cycle schedule for every run.
    network = Network()
    optimizer = optim.Adam(network.parameters(), lr=max_lr)
    scheduler = OneCycleLR(
        optimizer,
        max_lr=max_lr,
        steps_per_epoch=steps_per_epoch,
        epochs=n_epochs)
    epoch_accuracy_history = []
    for i_epoch in range(n_epochs):
        epoch_start_time = time.time()
        epoch_training_loss = 0
        epoch_testing_loss = 0
        epoch_training_num_correct = 0
        epoch_testing_num_correct = 0
        with tqdm(enumerate(training_loader)) as tqdm_training_loader:
            for batch_idx, batch in tqdm_training_loader:
                images, labels = batch
                preds = network(images)
                loss = F.cross_entropy(preds, labels)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                # OneCycleLR is stepped once per batch, not per epoch.
                scheduler.step()
                # Weight the mean batch loss by the actual batch size:
                # the final batch can be smaller than
                # training_loader.batch_size, which the previous
                # accumulation over-weighted.
                epoch_training_loss += loss.item() * labels.shape[0]
                epoch_training_num_correct += (
                    preds.argmax(dim=1).eq(labels).sum().item())
                tqdm_training_loader.set_description(
                    f'Step: {batch_idx + 1}/{steps_per_epoch}, '
                    f'Epoch: {i_epoch + 1}/{n_epochs}, '
                    f'Run: {i_run + 1}/{n_runs}'
                )
        epoch_duration = time.time() - epoch_start_time
        training_loss = epoch_training_loss / len(training_loader.dataset)
        training_accuracy = (
            epoch_training_num_correct / len(training_loader.dataset))
        # At the end of each epoch run the testing data through an
        # evaluation pass to see how the model is doing.
        # Specify no_grad() to prevent a nasty out-of-memory condition.
        with torch.no_grad():
            for batch in testing_loader:
                images, labels = batch
                preds = network(images)
                loss = F.cross_entropy(preds, labels)
                # Same actual-batch-size weighting as in training.
                epoch_testing_loss += loss.item() * labels.shape[0]
                epoch_testing_num_correct += (
                    preds.argmax(dim=1).eq(labels).sum().item())
        testing_loss = epoch_testing_loss / len(testing_loader.dataset)
        testing_accuracy = (
            epoch_testing_num_correct / len(testing_loader.dataset))
        epoch_accuracy_history.append(testing_accuracy)
        print(
            f"run: {i_run} "
            f"epoch: {i_epoch} "
            f"duration: {epoch_duration:.04} "
            f"training loss: {training_loss:.04} "
            f"testing loss: {testing_loss:.04} "
            f"training accuracy: {100 * training_accuracy:.04}% "
            f"testing accuracy: {100 * testing_accuracy:.04}%"
        )
    # Persist results after every run so an interruption loses little work.
    accuracy_histories.append(epoch_accuracy_history)
    accuracy_results.append(testing_accuracy)
    loss_results.append(testing_loss)
    np.save(accuracy_history_path, np.array(accuracy_histories))
    np.save(accuracy_results_path, np.array(accuracy_results))
np.save(loss_results_path, np.array(loss_results)) | 0.735547 | 0.311348 |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from flask import Flask, Blueprint, send_file, jsonify, request
import os
import atexit
import signal
import csv
import sys
import time
import threading
def run(
    build_dir,
    port,
    output,
    csv_headers,
    json=False,
    database_task_name=None,
    debug=False,
):
    """Serve a one-row-at-a-time review UI on *port*.

    Rows come either from a MephistoDB task (*database_task_name*) or from
    stdin as CSV / JSON lines (*json* selects the format; *csv_headers*
    skips the first CSV row).  Each submitted review is appended to
    *output*, or echoed to stdout when *output* is "".  Blocks in
    app.run() until every row has been reviewed.
    """
    global index_file, app
    global ready_for_next, current_data, finished
    global counter

    if not debug or output == "":
        # disable noisy logging of flask, https://stackoverflow.com/a/18379764
        import logging

        flask_log = logging.getLogger("werkzeug")
        flask_log.disabled = True
        flask_cli = sys.modules["flask.cli"]
        flask_cli.show_server_banner = lambda *x: None

    app = Flask(
        __name__,
        root_path=os.getcwd(),
        static_url_path="/static",
        static_folder=build_dir + "/static",
    )

    def json_reader(f):
        """Yield one parsed object per JSON line of *f*."""
        import json

        for jsonline in f:
            yield json.loads(jsonline)

    def mephistoDBReader():
        """Yield a printable representation of each unit of the task."""
        from mephisto.abstractions.databases.local_database import LocalMephistoDB
        from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser

        db = LocalMephistoDB()
        mephisto_data_browser = MephistoDataBrowser(db=db)

        def format_data_for_review(data):
            # NOTE(review): `contents` is unused; the lookup is kept because
            # it fails fast when a unit has no "data" key — confirm intent.
            contents = data["data"]
            return f"{data}"

        units = mephisto_data_browser.get_units_for_task_name(database_task_name)
        for unit in units:
            yield format_data_for_review(mephisto_data_browser.get_data_from_unit(unit))

    def consume_data():
        """Feed rows to the UI one at a time, gated by ready_for_next."""
        global ready_for_next, current_data, finished, counter

        if database_task_name is not None:
            data_source = mephistoDBReader()
        elif json:
            data_source = json_reader(iter(sys.stdin.readline, ""))
        else:
            data_source = csv.reader(iter(sys.stdin.readline, ""))
            if csv_headers:
                next(data_source)

        finished = False
        counter = 0
        for row in data_source:
            ready_for_next = threading.Event()
            current_data = row
            counter += 1
            # Block until the reviewer submits a verdict for this row.
            ready_for_next.wait()
        finished = True

    @app.route("/data_for_current_task")
    def data():
        global current_data, finished

        if finished:
            # All rows reviewed: shut the development server down.
            # NOTE(review): werkzeug.server.shutdown is deprecated in newer
            # Werkzeug releases — confirm against the pinned version.
            func = request.environ.get("werkzeug.server.shutdown")
            if func is None:
                raise RuntimeError("Not running with the Werkzeug Server")
            func()

        return jsonify(
            {"finished": finished, "data": current_data if not finished else None}
        )

    @app.route("/submit_current_task", methods=["GET", "POST"])
    def next_task():
        global current_data, ready_for_next, finished, counter

        result = (
            request.get_json(force=True)
            if request.method == "POST"
            # BUG FIX: was `request.ags`, an AttributeError on GET requests.
            else request.args.get("result")
        )

        if output == "":
            sys.stdout.write("{}\n".format(result))
            sys.stdout.flush()
        else:
            with open(output, "a+") as f:
                f.write("{}\n".format(result))

        # Unblock consume_data so the next row gets published.
        ready_for_next.set()
        time.sleep(0)
        return jsonify({"finished": finished, "counter": counter})

    @app.route("/")
    def index():
        global index_file
        return send_file(build_dir + "/index.html")

    @app.after_request
    def after_request(response):
        # Permissive CORS for the local review UI; disable caching so the
        # browser always fetches the current row.
        response.headers.add("Access-Control-Allow-Origin", "*")
        response.headers.add(
            "Access-Control-Allow-Headers", "Content-Type,Authorization"
        )
        response.headers.add(
            "Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"
        )
        response.headers.add("Cache-Control", "no-store")
        return response

    thread = threading.Thread(target=consume_data)
    thread.start()

    if sys.stdout.isatty():
        print("Running on http://127.0.0.1:{}/ (Press CTRL+C to quit)".format(port))
app.run(debug=False, port=port) | mephisto/client/review/review_server.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from flask import Flask, Blueprint, send_file, jsonify, request
import os
import atexit
import signal
import csv
import sys
import time
import threading
def run(
    build_dir,
    port,
    output,
    csv_headers,
    json=False,
    database_task_name=None,
    debug=False,
):
    """Serve a one-row-at-a-time review UI on *port*.

    Rows come either from a MephistoDB task (*database_task_name*) or from
    stdin as CSV / JSON lines (*json* selects the format; *csv_headers*
    skips the first CSV row).  Each submitted review is appended to
    *output*, or echoed to stdout when *output* is "".  Blocks in
    app.run() until every row has been reviewed.
    """
    global index_file, app
    global ready_for_next, current_data, finished
    global counter

    if not debug or output == "":
        # disable noisy logging of flask, https://stackoverflow.com/a/18379764
        import logging

        flask_log = logging.getLogger("werkzeug")
        flask_log.disabled = True
        flask_cli = sys.modules["flask.cli"]
        flask_cli.show_server_banner = lambda *x: None

    app = Flask(
        __name__,
        root_path=os.getcwd(),
        static_url_path="/static",
        static_folder=build_dir + "/static",
    )

    def json_reader(f):
        """Yield one parsed object per JSON line of *f*."""
        import json

        for jsonline in f:
            yield json.loads(jsonline)

    def mephistoDBReader():
        """Yield a printable representation of each unit of the task."""
        from mephisto.abstractions.databases.local_database import LocalMephistoDB
        from mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser

        db = LocalMephistoDB()
        mephisto_data_browser = MephistoDataBrowser(db=db)

        def format_data_for_review(data):
            # NOTE(review): `contents` is unused; the lookup is kept because
            # it fails fast when a unit has no "data" key — confirm intent.
            contents = data["data"]
            return f"{data}"

        units = mephisto_data_browser.get_units_for_task_name(database_task_name)
        for unit in units:
            yield format_data_for_review(mephisto_data_browser.get_data_from_unit(unit))

    def consume_data():
        """Feed rows to the UI one at a time, gated by ready_for_next."""
        global ready_for_next, current_data, finished, counter

        if database_task_name is not None:
            data_source = mephistoDBReader()
        elif json:
            data_source = json_reader(iter(sys.stdin.readline, ""))
        else:
            data_source = csv.reader(iter(sys.stdin.readline, ""))
            if csv_headers:
                next(data_source)

        finished = False
        counter = 0
        for row in data_source:
            ready_for_next = threading.Event()
            current_data = row
            counter += 1
            # Block until the reviewer submits a verdict for this row.
            ready_for_next.wait()
        finished = True

    @app.route("/data_for_current_task")
    def data():
        global current_data, finished

        if finished:
            # All rows reviewed: shut the development server down.
            # NOTE(review): werkzeug.server.shutdown is deprecated in newer
            # Werkzeug releases — confirm against the pinned version.
            func = request.environ.get("werkzeug.server.shutdown")
            if func is None:
                raise RuntimeError("Not running with the Werkzeug Server")
            func()

        return jsonify(
            {"finished": finished, "data": current_data if not finished else None}
        )

    @app.route("/submit_current_task", methods=["GET", "POST"])
    def next_task():
        global current_data, ready_for_next, finished, counter

        result = (
            request.get_json(force=True)
            if request.method == "POST"
            # BUG FIX: was `request.ags`, an AttributeError on GET requests.
            else request.args.get("result")
        )

        if output == "":
            sys.stdout.write("{}\n".format(result))
            sys.stdout.flush()
        else:
            with open(output, "a+") as f:
                f.write("{}\n".format(result))

        # Unblock consume_data so the next row gets published.
        ready_for_next.set()
        time.sleep(0)
        return jsonify({"finished": finished, "counter": counter})

    @app.route("/")
    def index():
        global index_file
        return send_file(build_dir + "/index.html")

    @app.after_request
    def after_request(response):
        # Permissive CORS for the local review UI; disable caching so the
        # browser always fetches the current row.
        response.headers.add("Access-Control-Allow-Origin", "*")
        response.headers.add(
            "Access-Control-Allow-Headers", "Content-Type,Authorization"
        )
        response.headers.add(
            "Access-Control-Allow-Methods", "GET,PUT,POST,DELETE,OPTIONS"
        )
        response.headers.add("Cache-Control", "no-store")
        return response

    thread = threading.Thread(target=consume_data)
    thread.start()

    if sys.stdout.isatty():
        print("Running on http://127.0.0.1:{}/ (Press CTRL+C to quit)".format(port))
app.run(debug=False, port=port) | 0.516108 | 0.096025 |
import re
from django.contrib.auth import get_user_model, password_validation
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from rest_framework.authtoken.serializers import AuthTokenSerializer
from .models import (
ActionToken,
)
# Resolve the active user model once at import time (honors AUTH_USER_MODEL).
User = get_user_model()
# Validator for phone numbers
def phone_number_validator(phone):
    """Normalize *phone* and validate its format.

    Separator characters (space, dash, dot, parentheses) are stripped;
    the remainder must be an optional '+' country code of 1-2 digits
    followed by 9-10 digits.  Returns the cleaned number, or raises
    serializers.ValidationError when it does not match.
    """
    reg = re.compile('^([+][0-9]{1,2})?[0-9]{9,10}$')
    cleaned = phone
    for separator in " -.()":
        cleaned = cleaned.replace(separator, '')
    if reg.match(cleaned) is None:
        raise serializers.ValidationError(_("Invalid format."))
    return cleaned
class UserUpdateSerializer(serializers.HyperlinkedModelSerializer):
    """
    Set certain fields such as university, academic_level and email to read
    only.  Also implements a guarded password change: 'new_password' is
    applied only when the current 'password' is supplied and correct.
    """
    id = serializers.ReadOnlyField()
    # Hidden: the username is forced back to the instance's existing
    # username inside update(), never taken from the request payload.
    username = serializers.HiddenField(
        default=None,
        help_text=_("A valid username."),
    )
    # Consumed (popped) inside update(), so it is never echoed in responses.
    new_password = serializers.CharField(
        max_length=128,
        required=False,
        help_text=_("A valid password."),
    )
    phone = serializers.CharField(
        allow_blank=True,
        allow_null=True,
        label=_('Phone number'),
        max_length=17,
        required=False,
        help_text=_("A valid phone number."),
    )
    other_phone = serializers.CharField(
        allow_blank=True,
        allow_null=True,
        label=_('Other number'),
        max_length=17,
        required=False,
        help_text=_("A valid phone number."),
    )

    def validate_phone(self, value):
        """Normalize/validate the main phone number."""
        return phone_number_validator(value)

    def validate_other_phone(self, value):
        """Normalize/validate the secondary phone number."""
        return phone_number_validator(value)

    def update(self, instance, validated_data):
        """Update the user; changing the password requires the current one.

        Raises serializers.ValidationError when 'new_password' is given
        without 'password', when the new password fails Django's password
        validators, or when the supplied current password is wrong.
        """
        # The username can never change: pin it to the current value.
        validated_data['username'] = instance.username
        if 'new_password' in validated_data:
            try:
                old_pw = validated_data.pop('password')
            except KeyError:
                raise serializers.ValidationError({
                    'password': _("This field is required.")
                })
            new_pw = validated_data.pop('new_password')
            try:
                password_validation.validate_password(password=new_pw)
            except ValidationError as err:
                raise serializers.ValidationError({
                    'new_password': err.messages
                })
            if instance.check_password(old_pw):
                instance.set_password(new_pw)
                instance.save()
            else:
                msg = {'password': _("Bad password")}
                raise serializers.ValidationError(msg)
        return super().update(instance, validated_data)

    class Meta:
        model = User
        fields = '__all__'
        extra_kwargs = {
            'password': {
                'write_only': True,
                'help_text': _("A valid password."),
            },
            # FIX: this key had been corrupted to '<PASSWORD>' (an
            # anonymization artifact); restore the intended DRF option.
            # Note extra_kwargs is ignored for explicitly declared fields
            # such as new_password above, so this entry is redundant but
            # harmless documentation of intent.
            'new_password': {'write_only': True},
            'birthdate': {
                'help_text': _("Date in the format 'dd/mm/yyyy'"),
            },
            'gender': {
                'allow_blank': False,
                'help_text': _("(M)ale, (F)emale, (T)rans, (A)nonymous"),
            },
            'first_name': {
                'allow_blank': False,
                'help_text': _("A valid first name."),
            },
            'last_name': {
                'allow_blank': False,
                'help_text': _("A valid last name."),
            },
        }
        read_only_fields = (
            'id',
            'url',
            'is_staff',
            'is_superuser',
            'is_active',
            'date_joined',
            'last_login',
            'groups',
            'user_permissions',
            'email',
        )
class UserSerializer(UserUpdateSerializer):
"""
Complete serializer for user creation
"""
# Remove the new_password field.
new_password = None
email = serializers.EmailField(
label=_('Email address'),
max_length=254,
required=True,
validators=[
UniqueValidator(
queryset=User.objects.all(),
message=_(
"An account for the specified email "
"address already exists."
),
),
],
help_text=_("A valid email address."),
)
def validate_password(self, value):
try:
password_validation.validate_password(password=value)
except ValidationError as err:
raise serializers.ValidationError(err.messages)
return value
def validate(self, attrs):
attrs['username'] = attrs['email']
return attrs
def create(self, validated_data):
"""
Validate choosen password and create User object.
"""
user = User(**validated_data)
# Hash the user's password
user.set_password(validated_data['password'])
# Put user inactive by default
user.is_active = False
user.save()
# Create an ActivationToken to activate user in the future
ActionToken.objects.create(
user=user,
type='account_activation',
)
return user
class Meta:
model = User
fields = '__all__'
extra_kwargs = {
'password': {
'style': {'input_type': 'password'},
'write_only': True,
'help_text': _("A valid password."),
},
'gender': {
'allow_blank': False,
'help_text': _("(M)ale, (F)emale, (T)rans, (A)nonymous")
},
'birthdate': {
'help_text': _("Date in the format 'dd/mm/yyyy'"),
},
'first_name': {
'allow_blank': False,
'help_text': _("A valid first name."),
},
'last_name': {
'allow_blank': False,
'help_text': _("A valid last name."),
},
}
read_only_fields = (
'id',
'url',
'is_staff',
'is_superuser',
'is_active',
'date_joined',
'last_login',
'groups',
'user_permissions',
)
class CustomAuthTokenSerializer(AuthTokenSerializer):
"""
Subclass of default AuthTokenSerializer to enable email authentication
"""
username = serializers.CharField(
label=_("Username"),
required=True,
help_text=_("A valid username."),
)
password = serializers.CharField(
label=_("Password"),
style={'input_type': 'password'},
trim_whitespace=False,
required=True,
help_text=_("A valid password."),
)
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
try:
user_obj = User.objects.get(email=username)
username = user_obj.username
except User.DoesNotExist:
pass
user = authenticate(request=self.context.get('request'),
username=username, password=password)
if not user:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
class ResetPasswordSerializer(serializers.Serializer):
email = serializers.EmailField(
label=_('Email address'),
max_length=254,
required=True,
help_text=_("A valid email address."),
)
def validate_email(self, value):
if User.objects.filter(email=value):
return value
raise serializers.ValidationError(
_("No account associated to this email address.")
)
def validate(self, attrs):
return User.objects.get(email=attrs['email'])
class ChangePasswordSerializer(serializers.Serializer):
token = serializers.CharField(
required=True,
help_text=_("Action token authorizing password change."),
)
new_password = serializers.CharField(
required=True,
help_text=_("Desired password"),
)
class UsersActivationSerializer(serializers.Serializer):
activation_token = serializers.CharField(
required=True,
help_text=_("Action token authorizing user activation."),
) | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/serializers.py | import re
from django.contrib.auth import get_user_model, password_validation
from django.core.exceptions import ValidationError
from django.contrib.auth import authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from rest_framework.authtoken.serializers import AuthTokenSerializer
from .models import (
ActionToken,
)
User = get_user_model()
# Validator for phone numbers
def phone_number_validator(phone):
reg = re.compile('^([+][0-9]{1,2})?[0-9]{9,10}$')
char_list = " -.()"
for i in char_list:
phone = phone.replace(i, '')
if not reg.match(phone):
raise serializers.ValidationError(_("Invalid format."))
return phone
class UserUpdateSerializer(serializers.HyperlinkedModelSerializer):
"""
Set certain fields such as university, academic_level and email to read
only.
"""
id = serializers.ReadOnlyField()
username = serializers.HiddenField(
default=None,
help_text=_("A valid username."),
)
new_password = serializers.CharField(
max_length=128,
required=False,
help_text=_("A valid password."),
)
phone = serializers.CharField(
allow_blank=True,
allow_null=True,
label=_('Phone number'),
max_length=17,
required=False,
help_text=_("A valid phone number."),
)
other_phone = serializers.CharField(
allow_blank=True,
allow_null=True,
label=_('Other number'),
max_length=17,
required=False,
help_text=_("A valid phone number."),
)
def validate_phone(self, value):
return phone_number_validator(value)
def validate_other_phone(self, value):
return phone_number_validator(value)
def update(self, instance, validated_data):
validated_data['username'] = instance.username
if 'new_password' in validated_data.keys():
try:
old_pw = validated_data.pop('password')
except KeyError:
raise serializers.ValidationError({
'password': _("This field is required.")
})
new_pw = validated_data.pop('new_password')
try:
password_validation.validate_password(password=new_pw)
except ValidationError as err:
raise serializers.ValidationError({
'new_password': err.messages
})
if instance.check_password(old_pw):
instance.set_password(new_pw)
instance.save()
else:
msg = {'password': _("Bad password")}
raise serializers.ValidationError(msg)
return super().update(instance, validated_data)
class Meta:
model = User
fields = '__all__'
extra_kwargs = {
'password': {
'write_only': True,
'help_text': _("A valid password."),
},
'new_password': {'<PASSWORD>': True},
'birthdate': {
'help_text': _("Date in the format 'dd/mm/yyyy'"),
},
'gender': {
'allow_blank': False,
'help_text': _("(M)ale, (F)emale, (T)rans, (A)nonymous"),
},
'first_name': {
'allow_blank': False,
'help_text': _("A valid first name."),
},
'last_name': {
'allow_blank': False,
'help_text': _("A valid last name."),
},
}
read_only_fields = (
'id',
'url',
'is_staff',
'is_superuser',
'is_active',
'date_joined',
'last_login',
'groups',
'user_permissions',
'email',
)
class UserSerializer(UserUpdateSerializer):
"""
Complete serializer for user creation
"""
# Remove the new_password field.
new_password = None
email = serializers.EmailField(
label=_('Email address'),
max_length=254,
required=True,
validators=[
UniqueValidator(
queryset=User.objects.all(),
message=_(
"An account for the specified email "
"address already exists."
),
),
],
help_text=_("A valid email address."),
)
def validate_password(self, value):
try:
password_validation.validate_password(password=value)
except ValidationError as err:
raise serializers.ValidationError(err.messages)
return value
def validate(self, attrs):
attrs['username'] = attrs['email']
return attrs
def create(self, validated_data):
"""
Validate choosen password and create User object.
"""
user = User(**validated_data)
# Hash the user's password
user.set_password(validated_data['password'])
# Put user inactive by default
user.is_active = False
user.save()
# Create an ActivationToken to activate user in the future
ActionToken.objects.create(
user=user,
type='account_activation',
)
return user
class Meta:
model = User
fields = '__all__'
extra_kwargs = {
'password': {
'style': {'input_type': 'password'},
'write_only': True,
'help_text': _("A valid password."),
},
'gender': {
'allow_blank': False,
'help_text': _("(M)ale, (F)emale, (T)rans, (A)nonymous")
},
'birthdate': {
'help_text': _("Date in the format 'dd/mm/yyyy'"),
},
'first_name': {
'allow_blank': False,
'help_text': _("A valid first name."),
},
'last_name': {
'allow_blank': False,
'help_text': _("A valid last name."),
},
}
read_only_fields = (
'id',
'url',
'is_staff',
'is_superuser',
'is_active',
'date_joined',
'last_login',
'groups',
'user_permissions',
)
class CustomAuthTokenSerializer(AuthTokenSerializer):
"""
Subclass of default AuthTokenSerializer to enable email authentication
"""
username = serializers.CharField(
label=_("Username"),
required=True,
help_text=_("A valid username."),
)
password = serializers.CharField(
label=_("Password"),
style={'input_type': 'password'},
trim_whitespace=False,
required=True,
help_text=_("A valid password."),
)
def validate(self, attrs):
username = attrs.get('username')
password = attrs.get('password')
try:
user_obj = User.objects.get(email=username)
username = user_obj.username
except User.DoesNotExist:
pass
user = authenticate(request=self.context.get('request'),
username=username, password=password)
if not user:
msg = _('Unable to log in with provided credentials.')
raise serializers.ValidationError(msg, code='authorization')
attrs['user'] = user
return attrs
class ResetPasswordSerializer(serializers.Serializer):
email = serializers.EmailField(
label=_('Email address'),
max_length=254,
required=True,
help_text=_("A valid email address."),
)
def validate_email(self, value):
if User.objects.filter(email=value):
return value
raise serializers.ValidationError(
_("No account associated to this email address.")
)
def validate(self, attrs):
return User.objects.get(email=attrs['email'])
class ChangePasswordSerializer(serializers.Serializer):
token = serializers.CharField(
required=True,
help_text=_("Action token authorizing password change."),
)
new_password = serializers.CharField(
required=True,
help_text=_("Desired password"),
)
class UsersActivationSerializer(serializers.Serializer):
activation_token = serializers.CharField(
required=True,
help_text=_("Action token authorizing user activation."),
) | 0.52829 | 0.094385 |
from __future__ import annotations
import copy
from typing import List, Tuple, Union, TYPE_CHECKING
from .account import Account
if TYPE_CHECKING:
from ..strategy.base import BaseStrategy
from .executor import BaseExecutor
from .decision import BaseTradeDecision
from .position import Position
from .exchange import Exchange
from .backtest import backtest_loop
from .backtest import collect_data_loop
from .utils import CommonInfrastructure
from .decision import Order
from ..utils import init_instance_by_config
from ..log import get_module_logger
from ..config import C
# make import more user-friendly by adding `from qlib.backtest import STH`
logger = get_module_logger("backtest caller")
def get_exchange(
exchange=None,
freq="day",
start_time=None,
end_time=None,
codes="all",
subscribe_fields=[],
open_cost=0.0015,
close_cost=0.0025,
min_cost=5.0,
limit_threshold=None,
deal_price: Union[str, Tuple[str], List[str]] = None,
**kwargs,
):
"""get_exchange
Parameters
----------
# exchange related arguments
exchange: Exchange().
subscribe_fields: list
subscribe fields.
open_cost : float
open transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
close_cost : float
close transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
min_cost : float
min transaction cost. It is an absolute amount of cost instead of a ratio of your order's deal amount.
e.g. You must pay at least 5 yuan of commission regardless of your order's deal amount.
trade_unit : int
Included in kwargs. Please refer to the docs of `__init__` of `Exchange`
deal_price: Union[str, Tuple[str], List[str]]
The `deal_price` supports following two types of input
- <deal_price> : str
- (<buy_price>, <sell_price>): Tuple[str] or List[str]
<deal_price>, <buy_price> or <sell_price> := <price>
<price> := str
- for example '$close', '$open', '$vwap' ("close" is OK. `Exchange` will help to prepend
"$" to the expression)
limit_threshold : float
limit move 0.1 (10%) for example, long and short with same limit.
Returns
-------
:class: Exchange
an initialized Exchange object
"""
if limit_threshold is None:
limit_threshold = C.limit_threshold
if exchange is None:
logger.info("Create new exchange")
exchange = Exchange(
freq=freq,
start_time=start_time,
end_time=end_time,
codes=codes,
deal_price=deal_price,
subscribe_fields=subscribe_fields,
limit_threshold=limit_threshold,
open_cost=open_cost,
close_cost=close_cost,
min_cost=min_cost,
**kwargs,
)
return exchange
else:
return init_instance_by_config(exchange, accept_types=Exchange)
def create_account_instance(
start_time, end_time, benchmark: str, account: Union[float, int, dict], pos_type: str = "Position"
) -> Account:
"""
# TODO: is very strange pass benchmark_config in the account(maybe for report)
# There should be a post-step to process the report.
Parameters
----------
start_time
start time of the benchmark
end_time
end time of the benchmark
benchmark : str
the benchmark for reporting
account : Union[
float,
{
"cash": float,
"stock1": Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
},
]
information for describing how to creating the account
For `float`:
Using Account with only initial cash
For `dict`:
key "cash" means initial cash.
key "stock1" means the information of first stock with amount and price(optional).
...
"""
if isinstance(account, (int, float)):
pos_kwargs = {"init_cash": account}
elif isinstance(account, dict):
init_cash = account["cash"]
del account["cash"]
pos_kwargs = {
"init_cash": init_cash,
"position_dict": account,
}
else:
raise ValueError("account must be in (int, float, Position)")
kwargs = {
"init_cash": account,
"benchmark_config": {
"benchmark": benchmark,
"start_time": start_time,
"end_time": end_time,
},
"pos_type": pos_type,
}
kwargs.update(pos_kwargs)
return Account(**kwargs)
def get_strategy_executor(
start_time,
end_time,
strategy: BaseStrategy,
executor: BaseExecutor,
benchmark: str = "SH000300",
account: Union[float, int, Position] = 1e9,
exchange_kwargs: dict = {},
pos_type: str = "Position",
):
# NOTE:
# - for avoiding recursive import
# - typing annotations is not reliable
from ..strategy.base import BaseStrategy
from .executor import BaseExecutor
trade_account = create_account_instance(
start_time=start_time, end_time=end_time, benchmark=benchmark, account=account, pos_type=pos_type
)
exchange_kwargs = copy.copy(exchange_kwargs)
if "start_time" not in exchange_kwargs:
exchange_kwargs["start_time"] = start_time
if "end_time" not in exchange_kwargs:
exchange_kwargs["end_time"] = end_time
trade_exchange = get_exchange(**exchange_kwargs)
common_infra = CommonInfrastructure(trade_account=trade_account, trade_exchange=trade_exchange)
trade_strategy = init_instance_by_config(strategy, accept_types=BaseStrategy, common_infra=common_infra)
trade_executor = init_instance_by_config(executor, accept_types=BaseExecutor, common_infra=common_infra)
return trade_strategy, trade_executor
def backtest(
start_time,
end_time,
strategy,
executor,
benchmark="SH000300",
account=1e9,
exchange_kwargs={},
pos_type: str = "Position",
):
"""initialize the strategy and executor, then backtest function for the interaction of the outermost strategy and executor in the nested decision execution
Parameters
----------
start_time : pd.Timestamp|str
closed start time for backtest
**NOTE**: This will be applied to the outmost executor's calendar.
end_time : pd.Timestamp|str
closed end time for backtest
**NOTE**: This will be applied to the outmost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
strategy : Union[str, dict, BaseStrategy]
for initializing outermost portfolio strategy. Please refer to the docs of init_instance_by_config for more information.
executor : Union[str, dict, BaseExecutor]
for initializing the outermost executor.
benchmark: str
the benchmark for reporting.
account : Union[float, int, Position]
information for describing how to creating the account
For `float` or `int`:
Using Account with only initial cash
For `Position`:
Using Account with a Position
exchange_kwargs : dict
the kwargs for initializing Exchange
pos_type : str
the type of Position.
Returns
-------
portfolio_metrics_dict: Dict[PortfolioMetrics]
it records the trading portfolio_metrics information
indicator_dict: Dict[Indicator]
it computes the trading indicator
It is organized in a dict format
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
portfolio_metrics, indicator = backtest_loop(start_time, end_time, trade_strategy, trade_executor)
return portfolio_metrics, indicator
def collect_data(
start_time,
end_time,
strategy,
executor,
benchmark="SH000300",
account=1e9,
exchange_kwargs={},
pos_type: str = "Position",
return_value: dict = None,
):
"""initialize the strategy and executor, then collect the trade decision data for rl training
please refer to the docs of the backtest for the explanation of the parameters
Yields
-------
object
trade decision
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
yield from collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value=return_value)
def format_decisions(
decisions: List[BaseTradeDecision],
) -> Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
"""
format the decisions collected by `qlib.backtest.collect_data`
The decisions will be organized into a tree-like structure.
Parameters
----------
decisions : List[BaseTradeDecision]
decisions collected by `qlib.backtest.collect_data`
Returns
-------
Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
reformat the list of decisions into a more user-friendly format
<decisions> := Tuple[<freq>, List[Tuple[<decision>, <sub decisions>]]]
- <sub decisions> := `<decisions> in lower level` | None
- <freq> := "day" | "30min" | "1min" | ...
- <decision> := <instance of BaseTradeDecision>
"""
if len(decisions) == 0:
return None
cur_freq = decisions[0].strategy.trade_calendar.get_freq()
res = (cur_freq, [])
last_dec_idx = 0
for i, dec in enumerate(decisions[1:], 1):
if dec.strategy.trade_calendar.get_freq() == cur_freq:
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 : i])))
last_dec_idx = i
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 :])))
return res | qlib/backtest/__init__.py | from __future__ import annotations
import copy
from typing import List, Tuple, Union, TYPE_CHECKING
from .account import Account
if TYPE_CHECKING:
from ..strategy.base import BaseStrategy
from .executor import BaseExecutor
from .decision import BaseTradeDecision
from .position import Position
from .exchange import Exchange
from .backtest import backtest_loop
from .backtest import collect_data_loop
from .utils import CommonInfrastructure
from .decision import Order
from ..utils import init_instance_by_config
from ..log import get_module_logger
from ..config import C
# make import more user-friendly by adding `from qlib.backtest import STH`
logger = get_module_logger("backtest caller")
def get_exchange(
exchange=None,
freq="day",
start_time=None,
end_time=None,
codes="all",
subscribe_fields=[],
open_cost=0.0015,
close_cost=0.0025,
min_cost=5.0,
limit_threshold=None,
deal_price: Union[str, Tuple[str], List[str]] = None,
**kwargs,
):
"""get_exchange
Parameters
----------
# exchange related arguments
exchange: Exchange().
subscribe_fields: list
subscribe fields.
open_cost : float
open transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
close_cost : float
close transaction cost. It is a ratio. The cost is proportional to your order's deal amount.
min_cost : float
min transaction cost. It is an absolute amount of cost instead of a ratio of your order's deal amount.
e.g. You must pay at least 5 yuan of commission regardless of your order's deal amount.
trade_unit : int
Included in kwargs. Please refer to the docs of `__init__` of `Exchange`
deal_price: Union[str, Tuple[str], List[str]]
The `deal_price` supports following two types of input
- <deal_price> : str
- (<buy_price>, <sell_price>): Tuple[str] or List[str]
<deal_price>, <buy_price> or <sell_price> := <price>
<price> := str
- for example '$close', '$open', '$vwap' ("close" is OK. `Exchange` will help to prepend
"$" to the expression)
limit_threshold : float
limit move 0.1 (10%) for example, long and short with same limit.
Returns
-------
:class: Exchange
an initialized Exchange object
"""
if limit_threshold is None:
limit_threshold = C.limit_threshold
if exchange is None:
logger.info("Create new exchange")
exchange = Exchange(
freq=freq,
start_time=start_time,
end_time=end_time,
codes=codes,
deal_price=deal_price,
subscribe_fields=subscribe_fields,
limit_threshold=limit_threshold,
open_cost=open_cost,
close_cost=close_cost,
min_cost=min_cost,
**kwargs,
)
return exchange
else:
return init_instance_by_config(exchange, accept_types=Exchange)
def create_account_instance(
start_time, end_time, benchmark: str, account: Union[float, int, dict], pos_type: str = "Position"
) -> Account:
"""
# TODO: is very strange pass benchmark_config in the account(maybe for report)
# There should be a post-step to process the report.
Parameters
----------
start_time
start time of the benchmark
end_time
end time of the benchmark
benchmark : str
the benchmark for reporting
account : Union[
float,
{
"cash": float,
"stock1": Union[
int, # it is equal to {"amount": int}
{"amount": int, "price"(optional): float},
]
},
]
information for describing how to creating the account
For `float`:
Using Account with only initial cash
For `dict`:
key "cash" means initial cash.
key "stock1" means the information of first stock with amount and price(optional).
...
"""
if isinstance(account, (int, float)):
pos_kwargs = {"init_cash": account}
elif isinstance(account, dict):
init_cash = account["cash"]
del account["cash"]
pos_kwargs = {
"init_cash": init_cash,
"position_dict": account,
}
else:
raise ValueError("account must be in (int, float, Position)")
kwargs = {
"init_cash": account,
"benchmark_config": {
"benchmark": benchmark,
"start_time": start_time,
"end_time": end_time,
},
"pos_type": pos_type,
}
kwargs.update(pos_kwargs)
return Account(**kwargs)
def get_strategy_executor(
start_time,
end_time,
strategy: BaseStrategy,
executor: BaseExecutor,
benchmark: str = "SH000300",
account: Union[float, int, Position] = 1e9,
exchange_kwargs: dict = {},
pos_type: str = "Position",
):
# NOTE:
# - for avoiding recursive import
# - typing annotations is not reliable
from ..strategy.base import BaseStrategy
from .executor import BaseExecutor
trade_account = create_account_instance(
start_time=start_time, end_time=end_time, benchmark=benchmark, account=account, pos_type=pos_type
)
exchange_kwargs = copy.copy(exchange_kwargs)
if "start_time" not in exchange_kwargs:
exchange_kwargs["start_time"] = start_time
if "end_time" not in exchange_kwargs:
exchange_kwargs["end_time"] = end_time
trade_exchange = get_exchange(**exchange_kwargs)
common_infra = CommonInfrastructure(trade_account=trade_account, trade_exchange=trade_exchange)
trade_strategy = init_instance_by_config(strategy, accept_types=BaseStrategy, common_infra=common_infra)
trade_executor = init_instance_by_config(executor, accept_types=BaseExecutor, common_infra=common_infra)
return trade_strategy, trade_executor
def backtest(
start_time,
end_time,
strategy,
executor,
benchmark="SH000300",
account=1e9,
exchange_kwargs={},
pos_type: str = "Position",
):
"""initialize the strategy and executor, then backtest function for the interaction of the outermost strategy and executor in the nested decision execution
Parameters
----------
start_time : pd.Timestamp|str
closed start time for backtest
**NOTE**: This will be applied to the outmost executor's calendar.
end_time : pd.Timestamp|str
closed end time for backtest
**NOTE**: This will be applied to the outmost executor's calendar.
E.g. Executor[day](Executor[1min]), setting `end_time == 20XX0301` will include all the minutes on 20XX0301
strategy : Union[str, dict, BaseStrategy]
for initializing outermost portfolio strategy. Please refer to the docs of init_instance_by_config for more information.
executor : Union[str, dict, BaseExecutor]
for initializing the outermost executor.
benchmark: str
the benchmark for reporting.
account : Union[float, int, Position]
information for describing how to creating the account
For `float` or `int`:
Using Account with only initial cash
For `Position`:
Using Account with a Position
exchange_kwargs : dict
the kwargs for initializing Exchange
pos_type : str
the type of Position.
Returns
-------
portfolio_metrics_dict: Dict[PortfolioMetrics]
it records the trading portfolio_metrics information
indicator_dict: Dict[Indicator]
it computes the trading indicator
It is organized in a dict format
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
portfolio_metrics, indicator = backtest_loop(start_time, end_time, trade_strategy, trade_executor)
return portfolio_metrics, indicator
def collect_data(
start_time,
end_time,
strategy,
executor,
benchmark="SH000300",
account=1e9,
exchange_kwargs={},
pos_type: str = "Position",
return_value: dict = None,
):
"""initialize the strategy and executor, then collect the trade decision data for rl training
please refer to the docs of the backtest for the explanation of the parameters
Yields
-------
object
trade decision
"""
trade_strategy, trade_executor = get_strategy_executor(
start_time,
end_time,
strategy,
executor,
benchmark,
account,
exchange_kwargs,
pos_type=pos_type,
)
yield from collect_data_loop(start_time, end_time, trade_strategy, trade_executor, return_value=return_value)
def format_decisions(
decisions: List[BaseTradeDecision],
) -> Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
"""
format the decisions collected by `qlib.backtest.collect_data`
The decisions will be organized into a tree-like structure.
Parameters
----------
decisions : List[BaseTradeDecision]
decisions collected by `qlib.backtest.collect_data`
Returns
-------
Tuple[str, List[Tuple[BaseTradeDecision, Union[Tuple, None]]]]:
reformat the list of decisions into a more user-friendly format
<decisions> := Tuple[<freq>, List[Tuple[<decision>, <sub decisions>]]]
- <sub decisions> := `<decisions> in lower level` | None
- <freq> := "day" | "30min" | "1min" | ...
- <decision> := <instance of BaseTradeDecision>
"""
if len(decisions) == 0:
return None
cur_freq = decisions[0].strategy.trade_calendar.get_freq()
res = (cur_freq, [])
last_dec_idx = 0
for i, dec in enumerate(decisions[1:], 1):
if dec.strategy.trade_calendar.get_freq() == cur_freq:
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 : i])))
last_dec_idx = i
res[1].append((decisions[last_dec_idx], format_decisions(decisions[last_dec_idx + 1 :])))
return res | 0.814607 | 0.32306 |
import os
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from data.loader import DataLoader
from model.rnn import RelationModel
from utils import scorer, constant, helper
from utils.vocab import Vocab
# ---------------------------------------------------------------------------
# Command-line interface: model hyper-parameters, training schedule, and
# bookkeeping options for the relation-extraction trainer.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# Data and vocabulary locations.
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--vocab_dir', type=str, default='dataset/vocab')
# Embedding / network sizes.
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
# Regularization.
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')
# NOTE: argparse does not apply `type` to defaults, so the default remains the
# float 1e10 — effectively "fine-tune all embeddings".
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N embeddings.')
# Paired on/off flags sharing one dest, with an explicit default.
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
parser.add_argument('--attn', dest='attn', action='store_true', help='Use attention layer.')
parser.add_argument('--no-attn', dest='attn', action='store_false')
parser.set_defaults(attn=True)
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--pe_dim', type=int, default=30, help='Position encoding dimension.')
# Optimization schedule.
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--optim', type=str, default='sgd', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
# Logging / checkpointing.
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=5, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')
parser.add_argument('--seed', type=int, default=1234)
# NOTE(review): `type=bool` is an argparse footgun — any non-empty string
# (including "False") parses as True. Interface kept as-is for compatibility;
# use the --cpu flag to disable CUDA reliably.
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()

# Seed every RNG in play so runs are reproducible.
# Bug fix: the `random` module was previously seeded with a hard-coded 1234
# even when --seed was given; it now honors args.seed like torch and numpy.
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)
if args.cpu:
    args.cuda = False
elif args.cuda:
    # Only seed CUDA when we will actually use it.
    torch.cuda.manual_seed(args.seed)
# Build the option dict from the parsed CLI args; downstream components
# (DataLoader, RelationModel, helpers) all consume this single dict.
opt = vars(args)
opt['num_class'] = len(constant.LABEL_TO_ID)
# Load the vocabulary and the pre-trained embedding matrix, and sanity-check
# that their shapes agree with each other and with --emb_dim.
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']
# Load train/dev data; the dev loader is marked evaluation=True (presumably
# disables shuffling/word dropout — confirm against data.loader).
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)
# Zero-pad single-character model IDs so save dirs sort consistently.
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)
# Persist config and vocab alongside the checkpoints so the run can be
# reproduced/evaluated later, and open the per-epoch metrics log.
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_f1")
# Echo the full config to stdout for the run log.
helper.print_config(opt)
# Build the model and the inverse label mapping used to decode predictions.
model = RelationModel(opt, emb_matrix=emb_matrix)
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
# Training-state bookkeeping.
dev_f1_history = []
current_lr = opt['lr']
global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# Main training loop: one pass over train_batch per epoch, followed by a full
# dev-set evaluation, checkpointing, and an LR-decay check.
for epoch in range(1, opt['num_epoch']+1):
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        # model.update performs forward + backward + optimizer step and
        # returns the batch loss (see model.rnn.RelationModel).
        loss = model.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            # NOTE: duration measures only the most recent batch, not the
            # whole log interval.
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                    opt['num_epoch'], loss, duration, current_lr))
    # Evaluate on the dev set: collect label predictions and accumulate loss.
    print("Evaluating on dev set...")
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = model.predict(batch)
        predictions += preds
        dev_loss += loss
    # Decode label ids back to strings before scoring.
    predictions = [id2label[p] for p in predictions]
    dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
    # Normalize accumulated losses to an average loss per batch.
    train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
            train_loss, dev_loss, dev_f1))
    file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_f1))
    # Checkpoint every epoch; keep a copy as best_model.pt when dev F1 improves,
    # then delete the epoch checkpoint unless it falls on a --save_epoch multiple.
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    model.save(model_file, epoch)
    if epoch == 1 or dev_f1 > max(dev_f1_history):
        copyfile(model_file, model_save_dir + '/best_model.pt')
        print("new best model saved.")
    if epoch % opt['save_epoch'] != 0:
        os.remove(model_file)
    # LR decay: after a 10-epoch warm-up window, decay when dev F1 failed to
    # improve over the immediately preceding epoch (SGD/Adagrad only).
    if len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1] and \
            opt['optim'] in ['sgd', 'adagrad']:
        current_lr *= opt['lr_decay']
        model.update_lr(current_lr)
    dev_f1_history += [dev_f1]
    print("")
print("Training ended with {} epochs.".format(epoch)) | train.py | import os
from datetime import datetime
import time
import numpy as np
import random
import argparse
from shutil import copyfile
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from data.loader import DataLoader
from model.rnn import RelationModel
from utils import scorer, constant, helper
from utils.vocab import Vocab
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='dataset/tacred')
parser.add_argument('--vocab_dir', type=str, default='dataset/vocab')
parser.add_argument('--emb_dim', type=int, default=300, help='Word embedding dimension.')
parser.add_argument('--ner_dim', type=int, default=30, help='NER embedding dimension.')
parser.add_argument('--pos_dim', type=int, default=30, help='POS embedding dimension.')
parser.add_argument('--hidden_dim', type=int, default=200, help='RNN hidden state size.')
parser.add_argument('--num_layers', type=int, default=2, help='Num of RNN layers.')
parser.add_argument('--dropout', type=float, default=0.5, help='Input and RNN dropout rate.')
parser.add_argument('--word_dropout', type=float, default=0.04, help='The rate at which randomly set a word to UNK.')
parser.add_argument('--topn', type=int, default=1e10, help='Only finetune top N embeddings.')
parser.add_argument('--lower', dest='lower', action='store_true', help='Lowercase all words.')
parser.add_argument('--no-lower', dest='lower', action='store_false')
parser.set_defaults(lower=False)
parser.add_argument('--attn', dest='attn', action='store_true', help='Use attention layer.')
parser.add_argument('--no-attn', dest='attn', action='store_false')
parser.set_defaults(attn=True)
parser.add_argument('--attn_dim', type=int, default=200, help='Attention size.')
parser.add_argument('--pe_dim', type=int, default=30, help='Position encoding dimension.')
parser.add_argument('--lr', type=float, default=1.0, help='Applies to SGD and Adagrad.')
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--optim', type=str, default='sgd', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--num_epoch', type=int, default=30)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--log', type=str, default='logs.txt', help='Write training log to file.')
parser.add_argument('--save_epoch', type=int, default=5, help='Save model checkpoints every k epochs.')
parser.add_argument('--save_dir', type=str, default='./saved_models', help='Root dir for saving models.')
parser.add_argument('--id', type=str, default='00', help='Model ID under which to save models.')
parser.add_argument('--info', type=str, default='', help='Optional info for the experiment.')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
args = parser.parse_args()
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(1234)
if args.cpu:
args.cuda = False
elif args.cuda:
torch.cuda.manual_seed(args.seed)
# make opt
opt = vars(args)
opt['num_class'] = len(constant.LABEL_TO_ID)
# load vocab
vocab_file = opt['vocab_dir'] + '/vocab.pkl'
vocab = Vocab(vocab_file, load=True)
opt['vocab_size'] = vocab.size
emb_file = opt['vocab_dir'] + '/embedding.npy'
emb_matrix = np.load(emb_file)
assert emb_matrix.shape[0] == vocab.size
assert emb_matrix.shape[1] == opt['emb_dim']
# load data
print("Loading data from {} with batch size {}...".format(opt['data_dir'], opt['batch_size']))
train_batch = DataLoader(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab, evaluation=False)
dev_batch = DataLoader(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab, evaluation=True)
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
helper.ensure_dir(model_save_dir, verbose=True)
# save config
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
vocab.save(model_save_dir + '/vocab.pkl')
file_logger = helper.FileLogger(model_save_dir + '/' + opt['log'], header="# epoch\ttrain_loss\tdev_loss\tdev_f1")
# print model info
helper.print_config(opt)
# model
model = RelationModel(opt, emb_matrix=emb_matrix)
id2label = dict([(v,k) for k,v in constant.LABEL_TO_ID.items()])
dev_f1_history = []
current_lr = opt['lr']
global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# start training
for epoch in range(1, opt['num_epoch']+1):
train_loss = 0
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = model.update(batch)
train_loss += loss
if global_step % opt['log_step'] == 0:
duration = time.time() - start_time
print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
opt['num_epoch'], loss, duration, current_lr))
# eval on dev
print("Evaluating on dev set...")
predictions = []
dev_loss = 0
for i, batch in enumerate(dev_batch):
preds, _, loss = model.predict(batch)
predictions += preds
dev_loss += loss
predictions = [id2label[p] for p in predictions]
dev_p, dev_r, dev_f1 = scorer.score(dev_batch.gold(), predictions)
train_loss = train_loss / train_batch.num_examples * opt['batch_size'] # avg loss per batch
dev_loss = dev_loss / dev_batch.num_examples * opt['batch_size']
print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
train_loss, dev_loss, dev_f1))
file_logger.log("{}\t{:.6f}\t{:.6f}\t{:.4f}".format(epoch, train_loss, dev_loss, dev_f1))
# save
model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
model.save(model_file, epoch)
if epoch == 1 or dev_f1 > max(dev_f1_history):
copyfile(model_file, model_save_dir + '/best_model.pt')
print("new best model saved.")
if epoch % opt['save_epoch'] != 0:
os.remove(model_file)
# lr schedule
if len(dev_f1_history) > 10 and dev_f1 <= dev_f1_history[-1] and \
opt['optim'] in ['sgd', 'adagrad']:
current_lr *= opt['lr_decay']
model.update_lr(current_lr)
dev_f1_history += [dev_f1]
print("")
print("Training ended with {} epochs.".format(epoch)) | 0.526586 | 0.084758 |
from ikomia import utils, core, dataprocess
from ikomia.utils import pyqtutils, qtconversion
from train_torchvision_mask_rcnn.train_torchvision_mask_rcnn_process import TrainMaskRcnnParam
# PyQt GUI framework
from PyQt5.QtWidgets import *
# --------------------
# - Class which implements widget associated with the process
# - Inherits core.CProtocolTaskWidget from Ikomia API
# --------------------
class TrainMaskRcnnWidget(core.CWorkflowTaskWidget):
def __init__(self, param, parent):
core.CWorkflowTaskWidget.__init__(self, parent)
if param is None:
self.parameters = TrainMaskRcnnParam()
else:
self.parameters = param
# Create layout : QGridLayout by default
self.grid_layout = QGridLayout()
self.spin_workers = pyqtutils.append_spin(self.grid_layout, label="Data loader workers",
value=self.parameters.cfg["num_workers"], min=0, max=8, step=2)
self.spin_batch = pyqtutils.append_spin(self.grid_layout, label="Batch size",
value=self.parameters.cfg["batch_size"], min=1, max=1024, step=1)
self.spin_epoch = pyqtutils.append_spin(self.grid_layout, label="Epochs",
value=self.parameters.cfg["epochs"], min=1)
self.spin_size = pyqtutils.append_spin(self.grid_layout, label="Input size",
value=self.parameters.cfg["input_size"])
self.spin_lr = pyqtutils.append_double_spin(self.grid_layout, label="Learning rate",
value=self.parameters.cfg["learning_rate"], step=0.001)
label_model_format = QLabel("Model format")
row = self.grid_layout.rowCount()
self.grid_layout.addWidget(label_model_format, row, 0)
self.check_pth = QCheckBox("pth")
self.check_pth.setChecked(self.parameters.cfg["export_pth"])
self.grid_layout.addWidget(self.check_pth, row, 1)
self.check_onnx = QCheckBox("onnx")
self.check_onnx.setChecked(self.parameters.cfg["export_onnx"])
self.grid_layout.addWidget(self.check_onnx, row+1, 1)
self.browse_folder = pyqtutils.append_browse_file(self.grid_layout, label="Output folder",
path=self.parameters.cfg["output_folder"],
tooltip="Select output folder",
mode=QFileDialog.Directory)
# PyQt -> Qt wrapping
layout_ptr = qtconversion.PyQtToQt(self.grid_layout)
# Set widget layout
self.setLayout(layout_ptr)
def onApply(self):
# Apply button clicked slot
# Get parameters from widget
self.parameters.cfg["num_workers"] = self.spin_workers.value()
self.parameters.cfg["batch_size"] = self.spin_batch.value()
self.parameters.cfg["epochs"] = self.spin_epoch.value()
self.parameters.cfg["input_size"] = self.spin_size.value()
self.parameters.cfg["learning_rate"] = self.spin_lr.value()
self.parameters.cfg["export_pth"] = self.check_pth.isChecked()
self.parameters.cfg["export_onnx"] = self.check_onnx.isChecked()
self.parameters.cfg["output_folder"] = self.browse_folder.path
# Send signal to launch the process
self.emitApply(self.parameters)
# --------------------
# - Factory class to build process widget object
# - Inherits dataprocess.CWidgetFactory from Ikomia API
# --------------------
class TrainMaskRcnnWidgetFactory(dataprocess.CWidgetFactory):
def __init__(self):
dataprocess.CWidgetFactory.__init__(self)
# Set the name of the process -> it must be the same as the one declared in the process factory class
self.name = "train_torchvision_mask_rcnn"
def create(self, param):
# Create widget object
return TrainMaskRcnnWidget(param, None) | train_torchvision_mask_rcnn_widget.py | from ikomia import utils, core, dataprocess
from ikomia.utils import pyqtutils, qtconversion
from train_torchvision_mask_rcnn.train_torchvision_mask_rcnn_process import TrainMaskRcnnParam
# PyQt GUI framework
from PyQt5.QtWidgets import *
# --------------------
# - Class which implements widget associated with the process
# - Inherits core.CProtocolTaskWidget from Ikomia API
# --------------------
class TrainMaskRcnnWidget(core.CWorkflowTaskWidget):
def __init__(self, param, parent):
core.CWorkflowTaskWidget.__init__(self, parent)
if param is None:
self.parameters = TrainMaskRcnnParam()
else:
self.parameters = param
# Create layout : QGridLayout by default
self.grid_layout = QGridLayout()
self.spin_workers = pyqtutils.append_spin(self.grid_layout, label="Data loader workers",
value=self.parameters.cfg["num_workers"], min=0, max=8, step=2)
self.spin_batch = pyqtutils.append_spin(self.grid_layout, label="Batch size",
value=self.parameters.cfg["batch_size"], min=1, max=1024, step=1)
self.spin_epoch = pyqtutils.append_spin(self.grid_layout, label="Epochs",
value=self.parameters.cfg["epochs"], min=1)
self.spin_size = pyqtutils.append_spin(self.grid_layout, label="Input size",
value=self.parameters.cfg["input_size"])
self.spin_lr = pyqtutils.append_double_spin(self.grid_layout, label="Learning rate",
value=self.parameters.cfg["learning_rate"], step=0.001)
label_model_format = QLabel("Model format")
row = self.grid_layout.rowCount()
self.grid_layout.addWidget(label_model_format, row, 0)
self.check_pth = QCheckBox("pth")
self.check_pth.setChecked(self.parameters.cfg["export_pth"])
self.grid_layout.addWidget(self.check_pth, row, 1)
self.check_onnx = QCheckBox("onnx")
self.check_onnx.setChecked(self.parameters.cfg["export_onnx"])
self.grid_layout.addWidget(self.check_onnx, row+1, 1)
self.browse_folder = pyqtutils.append_browse_file(self.grid_layout, label="Output folder",
path=self.parameters.cfg["output_folder"],
tooltip="Select output folder",
mode=QFileDialog.Directory)
# PyQt -> Qt wrapping
layout_ptr = qtconversion.PyQtToQt(self.grid_layout)
# Set widget layout
self.setLayout(layout_ptr)
def onApply(self):
# Apply button clicked slot
# Get parameters from widget
self.parameters.cfg["num_workers"] = self.spin_workers.value()
self.parameters.cfg["batch_size"] = self.spin_batch.value()
self.parameters.cfg["epochs"] = self.spin_epoch.value()
self.parameters.cfg["input_size"] = self.spin_size.value()
self.parameters.cfg["learning_rate"] = self.spin_lr.value()
self.parameters.cfg["export_pth"] = self.check_pth.isChecked()
self.parameters.cfg["export_onnx"] = self.check_onnx.isChecked()
self.parameters.cfg["output_folder"] = self.browse_folder.path
# Send signal to launch the process
self.emitApply(self.parameters)
# --------------------
# - Factory class to build process widget object
# - Inherits dataprocess.CWidgetFactory from Ikomia API
# --------------------
class TrainMaskRcnnWidgetFactory(dataprocess.CWidgetFactory):
def __init__(self):
dataprocess.CWidgetFactory.__init__(self)
# Set the name of the process -> it must be the same as the one declared in the process factory class
self.name = "train_torchvision_mask_rcnn"
def create(self, param):
# Create widget object
return TrainMaskRcnnWidget(param, None) | 0.549278 | 0.155142 |
import copy
import uuid
import jsonschema
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.compiler import assemble
from qiskit.providers.basicaer import basicaerjob
from qiskit.qobj import (QasmQobj, PulseQobj, QobjHeader,
PulseQobjInstruction, PulseQobjExperiment,
PulseQobjConfig, QobjMeasurementOption,
PulseLibraryItem, QasmQobjInstruction,
QasmQobjExperiment, QasmQobjConfig)
from qiskit.qobj import validate_qobj_against_schema
from qiskit.validation.jsonschema.exceptions import SchemaValidationError
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeRueschlikon
class TestQASMQobj(QiskitTestCase):
"""Tests for QasmQobj."""
def setUp(self):
self.valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]}
]}
],
}
self.bad_qobj = copy.deepcopy(self.valid_qobj)
self.bad_qobj.experiments = []
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test Qobj and its subclass representations given a dictionary."""
test_parameters = {
QasmQobj: (
self.valid_qobj,
self.valid_dict
),
QasmQobjConfig: (
QasmQobjConfig(shots=1, memory_slots=2),
{'shots': 1, 'memory_slots': 2}
),
QasmQobjExperiment: (
QasmQobjExperiment(
instructions=[QasmQobjInstruction(name='u1', qubits=[1], params=[0.4])]),
{'instructions': [{'name': 'u1', 'qubits': [1], 'params': [0.4]}]}
),
QasmQobjInstruction: (
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
{'name': 'u1', 'qubits': [1], 'params': [0.4]}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_snapshot_instruction_to_dict(self):
"""Test snapshot instruction to dict."""
valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
res = valid_qobj.to_dict(validate=True)
expected_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
],
'config': {},
'header': {}}
],
}
self.assertEqual(expected_dict, res)
def test_snapshot_instruction_from_dict(self):
"""Test snapshot instruction from dict."""
expected_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
qobj_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
]}
],
}
self.assertEqual(expected_qobj, QasmQobj.from_dict(qobj_dict))
def test_simjob_raises_error_when_sending_bad_qobj(self):
"""Test SimulatorJob is denied resource request access when given an invalid Qobj instance.
"""
job_id = str(uuid.uuid4())
backend = FakeRueschlikon()
self.bad_qobj.header = QobjHeader(backend_name=backend.name())
with self.assertRaises(SchemaValidationError):
job = basicaerjob.BasicAerJob(backend, job_id, _nop, self.bad_qobj)
job.submit()
def test_change_qobj_after_compile(self):
"""Test modifying Qobj parameters after compile."""
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
qc1 = QuantumCircuit(qr, cr)
qc2 = QuantumCircuit(qr, cr)
qc1.h(qr[0])
qc1.cx(qr[0], qr[1])
qc1.cx(qr[0], qr[2])
qc2.h(qr)
qc1.measure(qr, cr)
qc2.measure(qr, cr)
circuits = [qc1, qc2]
qobj1 = assemble(circuits, shots=1024, seed=88)
qobj1.experiments[0].config.shots = 50
qobj1.experiments[1].config.shots = 1
self.assertTrue(qobj1.experiments[0].config.shots == 50)
self.assertTrue(qobj1.experiments[1].config.shots == 1)
self.assertTrue(qobj1.config.shots == 1024)
class TestPulseQobj(QiskitTestCase):
"""Tests for PulseQobj."""
def setUp(self):
self.valid_qobj = PulseQobj(
qobj_id='12345',
header=QobjHeader(),
config=PulseQobjConfig(shots=1024, memory_slots=2, max_credits=10,
meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0',
samples=[0.0 + 0.0j,
0.5 + 0.0j,
0.0 + 0.0j])
],
qubit_lo_freq=[4.9],
meas_lo_freq=[6.9],
rep_time=1000),
experiments=[
PulseQobjExperiment(instructions=[
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=1.57),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=0.),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase='P1'),
PulseQobjInstruction(name='pv', t0=10, ch='d0', val=0.1 + 0.0j),
PulseQobjInstruction(name='pv', t0=10, ch='d0', val='P1'),
PulseQobjInstruction(name='setp', t0=10, ch='d0', phase=3.14),
PulseQobjInstruction(name='setf', t0=10, ch='d0', frequency=8.0),
PulseQobjInstruction(name='shiftf', t0=10, ch='d0', frequency=4.0),
PulseQobjInstruction(name='acquire', t0=15, duration=5,
qubits=[0], memory_slot=[0],
kernels=[
QobjMeasurementOption(name='boxcar',
params={"start_window": 0,
"stop_window": 5})
])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'PULSE',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024,
'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0',
'samples': [0, 0.5, 0]}
],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
'experiments': [
{'instructions': [
{'name': 'pulse0', 't0': 0, 'ch': 'd0'},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 1.57},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 0},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 'P1'},
{'name': 'pv', 't0': 10, 'ch': 'd0', 'val': 0.1+0j},
{'name': 'pv', 't0': 10, 'ch': 'd0', 'val': 'P1'},
{'name': 'setp', 't0': 10, 'ch': 'd0', 'phase': 3.14},
{'name': 'setf', 't0': 10, 'ch': 'd0', 'frequency': 8.0},
{'name': 'shiftf', 't0': 10, 'ch': 'd0', 'frequency': 4.0},
{'name': 'acquire', 't0': 15, 'duration': 5,
'qubits': [0], 'memory_slot': [0],
'kernels': [{'name': 'boxcar',
'params': {'start_window': 0,
'stop_window': 5}}
]
}
]}
]
}
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test converting to Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1 + 0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_to_dict_per_class(self):
"""Test converting from Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1+0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item.to_dict(), expected_dict)
def _nop():
pass | test/python/qobj/test_qobj.py | import copy
import uuid
import jsonschema
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.compiler import assemble
from qiskit.providers.basicaer import basicaerjob
from qiskit.qobj import (QasmQobj, PulseQobj, QobjHeader,
PulseQobjInstruction, PulseQobjExperiment,
PulseQobjConfig, QobjMeasurementOption,
PulseLibraryItem, QasmQobjInstruction,
QasmQobjExperiment, QasmQobjConfig)
from qiskit.qobj import validate_qobj_against_schema
from qiskit.validation.jsonschema.exceptions import SchemaValidationError
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeRueschlikon
class TestQASMQobj(QiskitTestCase):
"""Tests for QasmQobj."""
def setUp(self):
self.valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]}
]}
],
}
self.bad_qobj = copy.deepcopy(self.valid_qobj)
self.bad_qobj.experiments = []
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test Qobj and its subclass representations given a dictionary."""
test_parameters = {
QasmQobj: (
self.valid_qobj,
self.valid_dict
),
QasmQobjConfig: (
QasmQobjConfig(shots=1, memory_slots=2),
{'shots': 1, 'memory_slots': 2}
),
QasmQobjExperiment: (
QasmQobjExperiment(
instructions=[QasmQobjInstruction(name='u1', qubits=[1], params=[0.4])]),
{'instructions': [{'name': 'u1', 'qubits': [1], 'params': [0.4]}]}
),
QasmQobjInstruction: (
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
{'name': 'u1', 'qubits': [1], 'params': [0.4]}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_snapshot_instruction_to_dict(self):
"""Test snapshot instruction to dict."""
valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
res = valid_qobj.to_dict(validate=True)
expected_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
],
'config': {},
'header': {}}
],
}
self.assertEqual(expected_dict, res)
def test_snapshot_instruction_from_dict(self):
"""Test snapshot instruction from dict."""
expected_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
qobj_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
]}
],
}
self.assertEqual(expected_qobj, QasmQobj.from_dict(qobj_dict))
def test_simjob_raises_error_when_sending_bad_qobj(self):
"""Test SimulatorJob is denied resource request access when given an invalid Qobj instance.
"""
job_id = str(uuid.uuid4())
backend = FakeRueschlikon()
self.bad_qobj.header = QobjHeader(backend_name=backend.name())
with self.assertRaises(SchemaValidationError):
job = basicaerjob.BasicAerJob(backend, job_id, _nop, self.bad_qobj)
job.submit()
def test_change_qobj_after_compile(self):
"""Test modifying Qobj parameters after compile."""
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
qc1 = QuantumCircuit(qr, cr)
qc2 = QuantumCircuit(qr, cr)
qc1.h(qr[0])
qc1.cx(qr[0], qr[1])
qc1.cx(qr[0], qr[2])
qc2.h(qr)
qc1.measure(qr, cr)
qc2.measure(qr, cr)
circuits = [qc1, qc2]
qobj1 = assemble(circuits, shots=1024, seed=88)
qobj1.experiments[0].config.shots = 50
qobj1.experiments[1].config.shots = 1
self.assertTrue(qobj1.experiments[0].config.shots == 50)
self.assertTrue(qobj1.experiments[1].config.shots == 1)
self.assertTrue(qobj1.config.shots == 1024)
class TestPulseQobj(QiskitTestCase):
"""Tests for PulseQobj."""
def setUp(self):
self.valid_qobj = PulseQobj(
qobj_id='12345',
header=QobjHeader(),
config=PulseQobjConfig(shots=1024, memory_slots=2, max_credits=10,
meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0',
samples=[0.0 + 0.0j,
0.5 + 0.0j,
0.0 + 0.0j])
],
qubit_lo_freq=[4.9],
meas_lo_freq=[6.9],
rep_time=1000),
experiments=[
PulseQobjExperiment(instructions=[
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=1.57),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=0.),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase='P1'),
PulseQobjInstruction(name='pv', t0=10, ch='d0', val=0.1 + 0.0j),
PulseQobjInstruction(name='pv', t0=10, ch='d0', val='P1'),
PulseQobjInstruction(name='setp', t0=10, ch='d0', phase=3.14),
PulseQobjInstruction(name='setf', t0=10, ch='d0', frequency=8.0),
PulseQobjInstruction(name='shiftf', t0=10, ch='d0', frequency=4.0),
PulseQobjInstruction(name='acquire', t0=15, duration=5,
qubits=[0], memory_slot=[0],
kernels=[
QobjMeasurementOption(name='boxcar',
params={"start_window": 0,
"stop_window": 5})
])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'PULSE',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024,
'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0',
'samples': [0, 0.5, 0]}
],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
'experiments': [
{'instructions': [
{'name': 'pulse0', 't0': 0, 'ch': 'd0'},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 1.57},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 0},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 'P1'},
{'name': 'pv', 't0': 10, 'ch': 'd0', 'val': 0.1+0j},
{'name': 'pv', 't0': 10, 'ch': 'd0', 'val': 'P1'},
{'name': 'setp', 't0': 10, 'ch': 'd0', 'phase': 3.14},
{'name': 'setf', 't0': 10, 'ch': 'd0', 'frequency': 8.0},
{'name': 'shiftf', 't0': 10, 'ch': 'd0', 'frequency': 4.0},
{'name': 'acquire', 't0': 15, 'duration': 5,
'qubits': [0], 'memory_slot': [0],
'kernels': [{'name': 'boxcar',
'params': {'start_window': 0,
'stop_window': 5}}
]
}
]}
]
}
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test converting to Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1 + 0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_to_dict_per_class(self):
"""Test converting from Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1+0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item.to_dict(), expected_dict)
def _nop():
pass | 0.721645 | 0.333775 |
from __future__ import division
import os.path
import numpy as np
from skimage._shared.testing import assert_equal, assert_almost_equal
from skimage._shared.testing import assert_array_almost_equal
from skimage._shared.testing import TestCase
from skimage import img_as_float, img_as_ubyte
from skimage.io import imread
from skimage.color import (rgb2hsv, hsv2rgb,
rgb2xyz, xyz2rgb,
rgb2hed, hed2rgb,
separate_stains,
combine_stains,
rgb2rgbcie, rgbcie2rgb,
convert_colorspace,
rgb2grey, gray2rgb,
xyz2lab, lab2xyz,
lab2rgb, rgb2lab,
xyz2luv, luv2xyz,
luv2rgb, rgb2luv,
lab2lch, lch2lab,
rgb2yuv, yuv2rgb,
rgb2yiq, yiq2rgb,
rgb2ypbpr, ypbpr2rgb,
rgb2ycbcr, ycbcr2rgb,
rgba2rgb,
guess_spatial_dimensions)
from skimage import data_dir
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
import colorsys
def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
assert_equal(guess_spatial_dimensions(im1), 2)
assert_equal(guess_spatial_dimensions(im2), 3)
assert_equal(guess_spatial_dimensions(im3), None)
assert_equal(guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
class TestColorconv(TestCase):
img_rgb = imread(os.path.join(data_dir, 'color.png'))
img_grayscale = imread(os.path.join(data_dir, 'camera.png'))
img_rgba = np.array([[[0, 0.5, 1, 0],
[0, 0.5, 1, 1],
[0, 0.5, 1, 0.5]]]).astype(np.float)
colbars = np.array([[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0]]).astype(np.float)
colbars_array = np.swapaxes(colbars.reshape(3, 4, 2), 0, 2)
colbars_point75 = colbars * 0.75
colbars_point75_array = np.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2)
xyz_array = np.array([[[0.4124, 0.21260, 0.01930]], # red
[[0, 0, 0]], # black
[[.9505, 1., 1.089]], # white
[[.1805, .0722, .9505]], # blue
[[.07719, .15438, .02573]], # green
])
lab_array = np.array([[[53.233, 80.109, 67.220]], # red
[[0., 0., 0.]], # black
[[100.0, 0.005, -0.010]], # white
[[32.303, 79.197, -107.864]], # blue
[[46.229, -51.7, 49.898]], # green
])
luv_array = np.array([[[53.233, 175.053, 37.751]], # red
[[0., 0., 0.]], # black
[[100., 0.001, -0.017]], # white
[[32.303, -9.400, -130.358]], # blue
[[46.228, -43.774, 56.589]], # green
])
# RGBA to RGB
def test_rgba2rgb_conversion(self):
rgba = self.img_rgba
rgb = rgba2rgb(rgba)
expected = np.array([[[1, 1, 1],
[0, 0.5, 1],
[0.5, 0.75, 1]]]).astype(np.float)
self.assertEqual(rgb.shape, expected.shape)
assert_almost_equal(rgb, expected)
def test_rgba2rgb_error_grayscale(self):
self.assertRaises(ValueError, rgba2rgb, self.img_grayscale)
def test_rgba2rgb_error_rgb(self):
self.assertRaises(ValueError, rgba2rgb, self.img_rgb)
# RGB to HSV
def test_rgb2hsv_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
hsv = rgb2hsv(rgb).reshape(-1, 3)
# ground truth from colorsys
gt = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]
)
assert_almost_equal(hsv, gt)
def test_rgb2hsv_error_grayscale(self):
self.assertRaises(ValueError, rgb2hsv, self.img_grayscale)
def test_rgb2hsv_error_one_element(self):
self.assertRaises(ValueError, rgb2hsv, self.img_rgb[0, 0])
# HSV to RGB
def test_hsv2rgb_conversion(self):
rgb = self.img_rgb.astype("float32")[::16, ::16]
# create HSV image with colorsys
hsv = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]).reshape(rgb.shape)
# convert back to RGB and compare with original.
# relative precision for RGB -> HSV roundtrip is about 1e-6
assert_almost_equal(rgb, hsv2rgb(hsv), decimal=4)
def test_hsv2rgb_error_grayscale(self):
self.assertRaises(ValueError, hsv2rgb, self.img_grayscale)
def test_hsv2rgb_error_one_element(self):
self.assertRaises(ValueError, hsv2rgb, self.img_rgb[0, 0])
# RGB to XYZ
def test_rgb2xyz_conversion(self):
gt = np.array([[[0.950456, 1. , 1.088754],
[0.538003, 0.787329, 1.06942 ],
[0.592876, 0.28484 , 0.969561],
[0.180423, 0.072169, 0.950227]],
[[0.770033, 0.927831, 0.138527],
[0.35758 , 0.71516 , 0.119193],
[0.412453, 0.212671, 0.019334],
[0. , 0. , 0. ]]])
assert_almost_equal(rgb2xyz(self.colbars_array), gt)
# stop repeating the "raises" checks for all other functions that are
# implemented with color._convert()
def test_rgb2xyz_error_grayscale(self):
self.assertRaises(ValueError, rgb2xyz, self.img_grayscale)
def test_rgb2xyz_error_one_element(self):
self.assertRaises(ValueError, rgb2xyz, self.img_rgb[0, 0])
# XYZ to RGB
def test_xyz2rgb_conversion(self):
assert_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)),
self.colbars_array)
# RGB<->XYZ roundtrip on another image
def test_xyz_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(xyz2rgb(rgb2xyz(img_rgb)), img_rgb)
# RGB<->HED roundtrip with ubyte image
def test_hed_rgb_roundtrip(self):
img_rgb = img_as_ubyte(self.img_rgb)
with expected_warnings(['precision loss']):
new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
assert_equal(new, img_rgb)
# RGB<->HED roundtrip with float image
def test_hed_rgb_float_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = self.img_rgb
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_equal(img_as_ubyte(conv), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = img_as_float(self.img_rgb)
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_array_almost_equal(conv, img_rgb)
# RGB to RGB CIE
def test_rgb2rgbcie_conversion(self):
gt = np.array([[[ 0.1488856 , 0.18288098, 0.19277574],
[ 0.01163224, 0.16649536, 0.18948516],
[ 0.12259182, 0.03308008, 0.17298223],
[-0.01466154, 0.01669446, 0.16969164]],
[[ 0.16354714, 0.16618652, 0.0230841 ],
[ 0.02629378, 0.1498009 , 0.01979351],
[ 0.13725336, 0.01638562, 0.00329059],
[ 0. , 0. , 0. ]]])
assert_almost_equal(rgb2rgbcie(self.colbars_array), gt)
# RGB CIE to RGB
def test_rgbcie2rgb_conversion(self):
# only roundtrip test, we checked rgb2rgbcie above already
assert_almost_equal(rgbcie2rgb(rgb2rgbcie(self.colbars_array)),
self.colbars_array)
def test_convert_colorspace(self):
colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr']
colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb, ycbcr2rgb, ypbpr2rgb]
colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz, rgb2ycbcr, rgb2ypbpr]
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', 'RGB'),
self.colbars_array)
for i, space in enumerate(colspaces):
gt = colfuncs_from[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, space, 'RGB'), gt)
gt = colfuncs_to[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', space), gt)
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'nokey', 'XYZ')
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'RGB', 'nokey')
def test_rgb2grey(self):
x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(np.float)
g = rgb2grey(x)
assert_array_almost_equal(g, 1)
assert_equal(g.shape, (1, 1))
def test_rgb2grey_contiguous(self):
x = np.random.rand(10, 10, 3)
assert rgb2grey(x).flags["C_CONTIGUOUS"]
assert rgb2grey(x[:5, :5]).flags["C_CONTIGUOUS"]
def test_rgb2grey_alpha(self):
x = np.random.rand(10, 10, 4)
assert rgb2grey(x).ndim == 2
def test_rgb2grey_on_grey(self):
rgb2grey(np.random.rand(5, 5))
# test matrices for xyz2lab and lab2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2lab(self):
assert_array_almost_equal(xyz2lab(self.xyz_array),
self.lab_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "lab_array_{0}_{1}.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab_array_I_obs,
xyz2lab(self.xyz_array, I, obs),
decimal=2)
for I in ["a", "e"]:
fname = "lab_array_{0}_2.npy".format(I)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab_array_I_obs,
xyz2lab(self.xyz_array, I, "2"),
decimal=2)
def test_lab2xyz(self):
assert_array_almost_equal(lab2xyz(self.lab_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "lab_array_{0}_{1}.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "lab_array_{0}_2.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
# And we include a call to test the exception handling in the code.
try:
xs = lab2xyz(lab_array_I_obs, "NaI", "2") # Not an illuminant
except ValueError:
pass
try:
xs = lab2xyz(lab_array_I_obs, "d50", "42") # Not a degree
except ValueError:
pass
def test_rgb2lab_brucelindbloom(self):
"""
Test the RGB->Lab conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
gt_for_colbars = np.array([
[100,0,0],
[97.1393, -21.5537, 94.4780],
[91.1132, -48.0875, -14.1312],
[87.7347, -86.1827, 83.1793],
[60.3242, 98.2343, -60.8249],
[53.2408, 80.0925, 67.2032],
[32.2970, 79.1875, -107.8602],
[0,0,0]]).T
gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2)
def test_lab_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(lab2rgb(rgb2lab(img_rgb)), img_rgb)
# test matrices for xyz2luv and luv2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2luv(self):
assert_array_almost_equal(xyz2luv(self.xyz_array),
self.luv_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "luv_array_{0}_{1}.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv_array_I_obs,
xyz2luv(self.xyz_array, I, obs),
decimal=2)
for I in ["a", "e"]:
fname = "luv_array_{0}_2.npy".format(I)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv_array_I_obs,
xyz2luv(self.xyz_array, I, "2"),
decimal=2)
def test_luv2xyz(self):
assert_array_almost_equal(luv2xyz(self.luv_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "luv_array_{0}_{1}.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "luv_array_{0}_2.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
def test_rgb2luv_brucelindbloom(self):
"""
Test the RGB->Lab conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
gt_for_colbars = np.array([
[100, 0, 0],
[97.1393, 7.7056, 106.7866],
[91.1132, -70.4773, -15.2042],
[87.7347, -83.0776, 107.3985],
[60.3242, 84.0714, -108.6834],
[53.2408, 175.0151, 37.7564],
[32.2970, -9.4054, -130.3423],
[0, 0, 0]]).T
gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(rgb2luv(self.colbars_array),
gt_array, decimal=2)
def test_luv_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
def test_lab_rgb_outlier(self):
lab_array = np.ones((3, 1, 3))
lab_array[0] = [50, -12, 85]
lab_array[1] = [50, 12, -85]
lab_array[2] = [90, -4, -47]
rgb_array = np.array([[[0.501, 0.481, 0]],
[[0, 0.482, 1.]],
[[0.578, 0.914, 1.]],
])
assert_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3)
def test_lab_full_gamut(self):
a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100))
L = np.ones(a.shape)
lab = np.dstack((L, a, b))
for value in [0, 10, 20]:
lab[:, :, 0] = value
with expected_warnings(['Color data out of range']):
lab2xyz(lab)
def test_lab_lch_roundtrip(self):
rgb = img_as_float(self.img_rgb)
lab = rgb2lab(rgb)
lab2 = lch2lab(lab2lch(lab))
assert_array_almost_equal(lab2, lab)
def test_rgb_lch_roundtrip(self):
rgb = img_as_float(self.img_rgb)
lab = rgb2lab(rgb)
lch = lab2lch(lab)
lab2 = lch2lab(lch)
rgb2 = lab2rgb(lab2)
assert_array_almost_equal(rgb, rgb2)
def test_lab_lch_0d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch2 = lab2lch(lab0[None, None, :])
assert_array_almost_equal(lch0, lch2[0, 0, :])
def test_lab_lch_1d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch1 = lab2lch(lab0[None, :])
assert_array_almost_equal(lch0, lch1[0, :])
def test_lab_lch_3d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch3 = lab2lch(lab0[None, None, None, :])
assert_array_almost_equal(lch0, lch3[0, 0, 0, :])
def _get_lab0(self):
rgb = img_as_float(self.img_rgb[:1, :1, :])
return rgb2lab(rgb)[0, 0, :]
def test_yuv(self):
rgb = np.array([[[1.0, 1.0, 1.0]]])
assert_array_almost_equal(rgb2yuv(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2yiq(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[235, 128, 128]]]))
rgb = np.array([[[0.0, 1.0, 0.0]]])
assert_array_almost_equal(rgb2yuv(rgb), np.array([[[0.587, -0.28886916, -0.51496512]]]))
assert_array_almost_equal(rgb2yiq(rgb), np.array([[[0.587, -0.27455667, -0.52273617]]]))
assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[0.587, -0.331264, -0.418688]]]))
assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[144.553, 53.797, 34.214]]]))
def test_yuv_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)[::16, ::16]
assert_array_almost_equal(yuv2rgb(rgb2yuv(img_rgb)), img_rgb)
assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb)
assert_array_almost_equal(ypbpr2rgb(rgb2ypbpr(img_rgb)), img_rgb)
assert_array_almost_equal(ycbcr2rgb(rgb2ycbcr(img_rgb)), img_rgb)
def test_rgb2yiq_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
yiq = rgb2yiq(rgb).reshape(-1, 3)
gt = np.array([colorsys.rgb_to_yiq(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]
)
assert_almost_equal(yiq, gt, decimal=2)
def test_gray2rgb():
x = np.array([0, 0.5, 1])
w = gray2rgb(x)
expected_output = np.array([[ 0, 0, 0 ],
[ 0.5, 0.5, 0.5, ],
[ 1, 1, 1 ]])
assert_equal(w, expected_output)
x = x.reshape((3, 1))
y = gray2rgb(x)
assert_equal(y.shape, (3, 1, 3))
assert_equal(y.dtype, x.dtype)
assert_equal(y[..., 0], x)
assert_equal(y[0, 0, :], [0, 0, 0])
x = np.array([[0, 128, 255]], dtype=np.uint8)
z = gray2rgb(x)
assert_equal(z.shape, (1, 3, 3))
assert_equal(z[..., 0], x)
assert_equal(z[0, 1, :], [128, 128, 128])
def test_gray2rgb_rgb():
x = np.random.rand(5, 5, 4)
y = gray2rgb(x)
assert_equal(x, y)
def test_gray2rgb_alpha():
x = np.random.random((5, 5, 4))
assert_equal(gray2rgb(x, alpha=None).shape, (5, 5, 4))
assert_equal(gray2rgb(x, alpha=False).shape, (5, 5, 3))
assert_equal(gray2rgb(x, alpha=True).shape, (5, 5, 4))
x = np.random.random((5, 5, 3))
assert_equal(gray2rgb(x, alpha=None).shape, (5, 5, 3))
assert_equal(gray2rgb(x, alpha=False).shape, (5, 5, 3))
assert_equal(gray2rgb(x, alpha=True).shape, (5, 5, 4))
assert_equal(gray2rgb(np.array([[1, 2], [3, 4.]]),
alpha=True)[0, 0, 3], 1)
assert_equal(gray2rgb(np.array([[1, 2], [3, 4]], dtype=np.uint8),
alpha=True)[0, 0, 3], 255) | skimage/color/tests/test_colorconv.py | from __future__ import division
import os.path
import numpy as np
from skimage._shared.testing import assert_equal, assert_almost_equal
from skimage._shared.testing import assert_array_almost_equal
from skimage._shared.testing import TestCase
from skimage import img_as_float, img_as_ubyte
from skimage.io import imread
from skimage.color import (rgb2hsv, hsv2rgb,
rgb2xyz, xyz2rgb,
rgb2hed, hed2rgb,
separate_stains,
combine_stains,
rgb2rgbcie, rgbcie2rgb,
convert_colorspace,
rgb2grey, gray2rgb,
xyz2lab, lab2xyz,
lab2rgb, rgb2lab,
xyz2luv, luv2xyz,
luv2rgb, rgb2luv,
lab2lch, lch2lab,
rgb2yuv, yuv2rgb,
rgb2yiq, yiq2rgb,
rgb2ypbpr, ypbpr2rgb,
rgb2ycbcr, ycbcr2rgb,
rgba2rgb,
guess_spatial_dimensions)
from skimage import data_dir
from skimage._shared._warnings import expected_warnings
from skimage._shared import testing
import colorsys
def test_guess_spatial_dimensions():
im1 = np.zeros((5, 5))
im2 = np.zeros((5, 5, 5))
im3 = np.zeros((5, 5, 3))
im4 = np.zeros((5, 5, 5, 3))
im5 = np.zeros((5,))
assert_equal(guess_spatial_dimensions(im1), 2)
assert_equal(guess_spatial_dimensions(im2), 3)
assert_equal(guess_spatial_dimensions(im3), None)
assert_equal(guess_spatial_dimensions(im4), 3)
with testing.raises(ValueError):
guess_spatial_dimensions(im5)
class TestColorconv(TestCase):
img_rgb = imread(os.path.join(data_dir, 'color.png'))
img_grayscale = imread(os.path.join(data_dir, 'camera.png'))
img_rgba = np.array([[[0, 0.5, 1, 0],
[0, 0.5, 1, 1],
[0, 0.5, 1, 0.5]]]).astype(np.float)
colbars = np.array([[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0]]).astype(np.float)
colbars_array = np.swapaxes(colbars.reshape(3, 4, 2), 0, 2)
colbars_point75 = colbars * 0.75
colbars_point75_array = np.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2)
xyz_array = np.array([[[0.4124, 0.21260, 0.01930]], # red
[[0, 0, 0]], # black
[[.9505, 1., 1.089]], # white
[[.1805, .0722, .9505]], # blue
[[.07719, .15438, .02573]], # green
])
lab_array = np.array([[[53.233, 80.109, 67.220]], # red
[[0., 0., 0.]], # black
[[100.0, 0.005, -0.010]], # white
[[32.303, 79.197, -107.864]], # blue
[[46.229, -51.7, 49.898]], # green
])
luv_array = np.array([[[53.233, 175.053, 37.751]], # red
[[0., 0., 0.]], # black
[[100., 0.001, -0.017]], # white
[[32.303, -9.400, -130.358]], # blue
[[46.228, -43.774, 56.589]], # green
])
# RGBA to RGB
def test_rgba2rgb_conversion(self):
rgba = self.img_rgba
rgb = rgba2rgb(rgba)
expected = np.array([[[1, 1, 1],
[0, 0.5, 1],
[0.5, 0.75, 1]]]).astype(np.float)
self.assertEqual(rgb.shape, expected.shape)
assert_almost_equal(rgb, expected)
def test_rgba2rgb_error_grayscale(self):
self.assertRaises(ValueError, rgba2rgb, self.img_grayscale)
def test_rgba2rgb_error_rgb(self):
self.assertRaises(ValueError, rgba2rgb, self.img_rgb)
# RGB to HSV
def test_rgb2hsv_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
hsv = rgb2hsv(rgb).reshape(-1, 3)
# ground truth from colorsys
gt = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]
)
assert_almost_equal(hsv, gt)
def test_rgb2hsv_error_grayscale(self):
self.assertRaises(ValueError, rgb2hsv, self.img_grayscale)
def test_rgb2hsv_error_one_element(self):
self.assertRaises(ValueError, rgb2hsv, self.img_rgb[0, 0])
# HSV to RGB
def test_hsv2rgb_conversion(self):
rgb = self.img_rgb.astype("float32")[::16, ::16]
# create HSV image with colorsys
hsv = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]).reshape(rgb.shape)
# convert back to RGB and compare with original.
# relative precision for RGB -> HSV roundtrip is about 1e-6
assert_almost_equal(rgb, hsv2rgb(hsv), decimal=4)
def test_hsv2rgb_error_grayscale(self):
self.assertRaises(ValueError, hsv2rgb, self.img_grayscale)
def test_hsv2rgb_error_one_element(self):
self.assertRaises(ValueError, hsv2rgb, self.img_rgb[0, 0])
# RGB to XYZ
def test_rgb2xyz_conversion(self):
gt = np.array([[[0.950456, 1. , 1.088754],
[0.538003, 0.787329, 1.06942 ],
[0.592876, 0.28484 , 0.969561],
[0.180423, 0.072169, 0.950227]],
[[0.770033, 0.927831, 0.138527],
[0.35758 , 0.71516 , 0.119193],
[0.412453, 0.212671, 0.019334],
[0. , 0. , 0. ]]])
assert_almost_equal(rgb2xyz(self.colbars_array), gt)
# stop repeating the "raises" checks for all other functions that are
# implemented with color._convert()
def test_rgb2xyz_error_grayscale(self):
self.assertRaises(ValueError, rgb2xyz, self.img_grayscale)
def test_rgb2xyz_error_one_element(self):
self.assertRaises(ValueError, rgb2xyz, self.img_rgb[0, 0])
# XYZ to RGB
def test_xyz2rgb_conversion(self):
assert_almost_equal(xyz2rgb(rgb2xyz(self.colbars_array)),
self.colbars_array)
# RGB<->XYZ roundtrip on another image
def test_xyz_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(xyz2rgb(rgb2xyz(img_rgb)), img_rgb)
# RGB<->HED roundtrip with ubyte image
def test_hed_rgb_roundtrip(self):
img_rgb = img_as_ubyte(self.img_rgb)
with expected_warnings(['precision loss']):
new = img_as_ubyte(hed2rgb(rgb2hed(img_rgb)))
assert_equal(new, img_rgb)
# RGB<->HED roundtrip with float image
def test_hed_rgb_float_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(hed2rgb(rgb2hed(img_rgb)), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = self.img_rgb
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_equal(img_as_ubyte(conv), img_rgb)
# RGB<->HDX roundtrip with ubyte image
def test_hdx_rgb_roundtrip(self):
from skimage.color.colorconv import hdx_from_rgb, rgb_from_hdx
img_rgb = img_as_float(self.img_rgb)
conv = combine_stains(separate_stains(img_rgb, hdx_from_rgb),
rgb_from_hdx)
assert_array_almost_equal(conv, img_rgb)
# RGB to RGB CIE
def test_rgb2rgbcie_conversion(self):
gt = np.array([[[ 0.1488856 , 0.18288098, 0.19277574],
[ 0.01163224, 0.16649536, 0.18948516],
[ 0.12259182, 0.03308008, 0.17298223],
[-0.01466154, 0.01669446, 0.16969164]],
[[ 0.16354714, 0.16618652, 0.0230841 ],
[ 0.02629378, 0.1498009 , 0.01979351],
[ 0.13725336, 0.01638562, 0.00329059],
[ 0. , 0. , 0. ]]])
assert_almost_equal(rgb2rgbcie(self.colbars_array), gt)
# RGB CIE to RGB
def test_rgbcie2rgb_conversion(self):
# only roundtrip test, we checked rgb2rgbcie above already
assert_almost_equal(rgbcie2rgb(rgb2rgbcie(self.colbars_array)),
self.colbars_array)
def test_convert_colorspace(self):
colspaces = ['HSV', 'RGB CIE', 'XYZ', 'YCbCr', 'YPbPr']
colfuncs_from = [hsv2rgb, rgbcie2rgb, xyz2rgb, ycbcr2rgb, ypbpr2rgb]
colfuncs_to = [rgb2hsv, rgb2rgbcie, rgb2xyz, rgb2ycbcr, rgb2ypbpr]
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', 'RGB'),
self.colbars_array)
for i, space in enumerate(colspaces):
gt = colfuncs_from[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, space, 'RGB'), gt)
gt = colfuncs_to[i](self.colbars_array)
assert_almost_equal(
convert_colorspace(self.colbars_array, 'RGB', space), gt)
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'nokey', 'XYZ')
self.assertRaises(ValueError, convert_colorspace,
self.colbars_array, 'RGB', 'nokey')
def test_rgb2grey(self):
x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(np.float)
g = rgb2grey(x)
assert_array_almost_equal(g, 1)
assert_equal(g.shape, (1, 1))
def test_rgb2grey_contiguous(self):
x = np.random.rand(10, 10, 3)
assert rgb2grey(x).flags["C_CONTIGUOUS"]
assert rgb2grey(x[:5, :5]).flags["C_CONTIGUOUS"]
def test_rgb2grey_alpha(self):
x = np.random.rand(10, 10, 4)
assert rgb2grey(x).ndim == 2
def test_rgb2grey_on_grey(self):
rgb2grey(np.random.rand(5, 5))
# test matrices for xyz2lab and lab2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2lab(self):
assert_array_almost_equal(xyz2lab(self.xyz_array),
self.lab_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "lab_array_{0}_{1}.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab_array_I_obs,
xyz2lab(self.xyz_array, I, obs),
decimal=2)
for I in ["a", "e"]:
fname = "lab_array_{0}_2.npy".format(I)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab_array_I_obs,
xyz2lab(self.xyz_array, I, "2"),
decimal=2)
def test_lab2xyz(self):
assert_array_almost_equal(lab2xyz(self.lab_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "lab_array_{0}_{1}.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "lab_array_{0}_2.npy".format(I, obs)
lab_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(lab2xyz(lab_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
# And we include a call to test the exception handling in the code.
try:
xs = lab2xyz(lab_array_I_obs, "NaI", "2") # Not an illuminant
except ValueError:
pass
try:
xs = lab2xyz(lab_array_I_obs, "d50", "42") # Not a degree
except ValueError:
pass
def test_rgb2lab_brucelindbloom(self):
"""
Test the RGB->Lab conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
gt_for_colbars = np.array([
[100,0,0],
[97.1393, -21.5537, 94.4780],
[91.1132, -48.0875, -14.1312],
[87.7347, -86.1827, 83.1793],
[60.3242, 98.2343, -60.8249],
[53.2408, 80.0925, 67.2032],
[32.2970, 79.1875, -107.8602],
[0,0,0]]).T
gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2)
def test_lab_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(lab2rgb(rgb2lab(img_rgb)), img_rgb)
# test matrices for xyz2luv and luv2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2luv(self):
assert_array_almost_equal(xyz2luv(self.xyz_array),
self.luv_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "luv_array_{0}_{1}.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv_array_I_obs,
xyz2luv(self.xyz_array, I, obs),
decimal=2)
for I in ["a", "e"]:
fname = "luv_array_{0}_2.npy".format(I)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv_array_I_obs,
xyz2luv(self.xyz_array, I, "2"),
decimal=2)
def test_luv2xyz(self):
assert_array_almost_equal(luv2xyz(self.luv_array),
self.xyz_array, decimal=3)
# Test the conversion with the rest of the illuminants.
for I in ["d50", "d55", "d65", "d75"]:
for obs in ["2", "10"]:
fname = "luv_array_{0}_{1}.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, obs),
self.xyz_array, decimal=3)
for I in ["a", "e"]:
fname = "luv_array_{0}_2.npy".format(I, obs)
luv_array_I_obs = np.load(
os.path.join(os.path.dirname(__file__), 'data', fname))
assert_array_almost_equal(luv2xyz(luv_array_I_obs, I, "2"),
self.xyz_array, decimal=3)
def test_rgb2luv_brucelindbloom(self):
"""
Test the RGB->Lab conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
gt_for_colbars = np.array([
[100, 0, 0],
[97.1393, 7.7056, 106.7866],
[91.1132, -70.4773, -15.2042],
[87.7347, -83.0776, 107.3985],
[60.3242, 84.0714, -108.6834],
[53.2408, 175.0151, 37.7564],
[32.2970, -9.4054, -130.3423],
[0, 0, 0]]).T
gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(rgb2luv(self.colbars_array),
gt_array, decimal=2)
def test_luv_rgb_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)
assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
def test_lab_rgb_outlier(self):
lab_array = np.ones((3, 1, 3))
lab_array[0] = [50, -12, 85]
lab_array[1] = [50, 12, -85]
lab_array[2] = [90, -4, -47]
rgb_array = np.array([[[0.501, 0.481, 0]],
[[0, 0.482, 1.]],
[[0.578, 0.914, 1.]],
])
assert_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3)
def test_lab_full_gamut(self):
a, b = np.meshgrid(np.arange(-100, 100), np.arange(-100, 100))
L = np.ones(a.shape)
lab = np.dstack((L, a, b))
for value in [0, 10, 20]:
lab[:, :, 0] = value
with expected_warnings(['Color data out of range']):
lab2xyz(lab)
def test_lab_lch_roundtrip(self):
rgb = img_as_float(self.img_rgb)
lab = rgb2lab(rgb)
lab2 = lch2lab(lab2lch(lab))
assert_array_almost_equal(lab2, lab)
def test_rgb_lch_roundtrip(self):
rgb = img_as_float(self.img_rgb)
lab = rgb2lab(rgb)
lch = lab2lch(lab)
lab2 = lch2lab(lch)
rgb2 = lab2rgb(lab2)
assert_array_almost_equal(rgb, rgb2)
def test_lab_lch_0d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch2 = lab2lch(lab0[None, None, :])
assert_array_almost_equal(lch0, lch2[0, 0, :])
def test_lab_lch_1d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch1 = lab2lch(lab0[None, :])
assert_array_almost_equal(lch0, lch1[0, :])
def test_lab_lch_3d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch3 = lab2lch(lab0[None, None, None, :])
assert_array_almost_equal(lch0, lch3[0, 0, 0, :])
def _get_lab0(self):
rgb = img_as_float(self.img_rgb[:1, :1, :])
return rgb2lab(rgb)[0, 0, :]
def test_yuv(self):
rgb = np.array([[[1.0, 1.0, 1.0]]])
assert_array_almost_equal(rgb2yuv(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2yiq(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[235, 128, 128]]]))
rgb = np.array([[[0.0, 1.0, 0.0]]])
assert_array_almost_equal(rgb2yuv(rgb), np.array([[[0.587, -0.28886916, -0.51496512]]]))
assert_array_almost_equal(rgb2yiq(rgb), np.array([[[0.587, -0.27455667, -0.52273617]]]))
assert_array_almost_equal(rgb2ypbpr(rgb), np.array([[[0.587, -0.331264, -0.418688]]]))
assert_array_almost_equal(rgb2ycbcr(rgb), np.array([[[144.553, 53.797, 34.214]]]))
def test_yuv_roundtrip(self):
img_rgb = img_as_float(self.img_rgb)[::16, ::16]
assert_array_almost_equal(yuv2rgb(rgb2yuv(img_rgb)), img_rgb)
assert_array_almost_equal(yiq2rgb(rgb2yiq(img_rgb)), img_rgb)
assert_array_almost_equal(ypbpr2rgb(rgb2ypbpr(img_rgb)), img_rgb)
assert_array_almost_equal(ycbcr2rgb(rgb2ycbcr(img_rgb)), img_rgb)
def test_rgb2yiq_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
yiq = rgb2yiq(rgb).reshape(-1, 3)
gt = np.array([colorsys.rgb_to_yiq(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3)]
)
assert_almost_equal(yiq, gt, decimal=2)
def test_gray2rgb():
    """gray2rgb replicates the gray channel into three identical channels,
    preserving shape layout and dtype."""
    gray = np.array([0, 0.5, 1])
    assert_equal(gray2rgb(gray),
                 np.array([[0, 0, 0],
                           [0.5, 0.5, 0.5],
                           [1, 1, 1]]))
    column = gray.reshape((3, 1))
    stacked = gray2rgb(column)
    assert_equal(stacked.shape, (3, 1, 3))
    assert_equal(stacked.dtype, column.dtype)
    assert_equal(stacked[..., 0], column)
    assert_equal(stacked[0, 0, :], [0, 0, 0])
    ints = np.array([[0, 128, 255]], dtype=np.uint8)
    rgb_ints = gray2rgb(ints)
    assert_equal(rgb_ints.shape, (1, 3, 3))
    assert_equal(rgb_ints[..., 0], ints)
    assert_equal(rgb_ints[0, 1, :], [128, 128, 128])
def test_gray2rgb_rgb():
    """An image that already has color channels passes through unchanged."""
    image = np.random.rand(5, 5, 4)
    assert_equal(image, gray2rgb(image))
def test_gray2rgb_alpha():
    """The alpha flag controls whether the output keeps/gains a 4th channel."""
    four_chan = np.random.random((5, 5, 4))
    for alpha, channels in ((None, 4), (False, 3), (True, 4)):
        assert_equal(gray2rgb(four_chan, alpha=alpha).shape, (5, 5, channels))
    three_chan = np.random.random((5, 5, 3))
    for alpha, channels in ((None, 3), (False, 3), (True, 4)):
        assert_equal(gray2rgb(three_chan, alpha=alpha).shape, (5, 5, channels))
    # A synthesized alpha channel is fully opaque: 1 for floats, 255 for uint8.
    assert_equal(gray2rgb(np.array([[1, 2], [3, 4.]]),
                          alpha=True)[0, 0, 3], 1)
    assert_equal(gray2rgb(np.array([[1, 2], [3, 4]], dtype=np.uint8),
                          alpha=True)[0, 0, 3], 255)
# Package context -- the full module name for package imports.  Mirrors
# CPython's _imp package-context: it is set for the duration of a native
# module's initialization (see create_dynamic below).
_py_package_context = None
@__builtin__
def get_magic():
    # Fixed magic bytes identifying this implementation's bytecode format.
    return b'\x0c\xaf\xaf\xe1'
@__builtin__
def create_dynamic(module_spec, filename=None):
    """Create a native extension module from *module_spec*.

    The module's full name is published in the package-context global while
    the native initialization runs, and the previous value is restored
    afterwards even if initialization fails.
    """
    global _py_package_context
    saved_context = _py_package_context
    _py_package_context = str(module_spec.name)
    try:
        return __create_dynamic__(module_spec, filename)
    finally:
        _py_package_context = saved_context
@__builtin__
def exec_builtin(mod):
    # Built-in modules need no additional execution step; always None.
    return None
@__builtin__
def init_frozen(name):
    # Frozen modules are not supported by this implementation.
    return None
@__builtin__
def is_frozen(name):
    # No module is ever frozen here.
    return False
@__builtin__
def get_frozen_object(name):
    # Frozen objects are unsupported, so any lookup fails.
    raise ImportError("No such frozen object named %s" % name)
# NOTE(review): aliased to the always-raising accessor above, so calling
# is_frozen_package() raises ImportError rather than returning a bool --
# presumably intentional since frozen modules are unsupported; verify.
is_frozen_package = get_frozen_object
@__builtin__
def cache_all_file_modules():
    """
    Cache all modules loaded during initialization through the normal import
    mechanism on the language, so that additional contexts created in the same
    engine can re-use the cached CallTargets.  See the _imp module for details
    on the module caching.
    """
    import sys
    for name, module in sys.modules.items():
        if hasattr(module, "__file__") and not graal_python_has_cached_code(name):
            freeze_module(module, name)
@__builtin__
def _patch_package_paths(paths):
    # Replace the concrete stdlib location with the '!stdlib!' placeholder so
    # cached paths do not leak this context's filesystem layout.
    import sys
    return _sub_package_paths(paths, sys.graal_python_stdlib_home, "!stdlib!")
@__builtin__
def _unpatch_package_paths(paths):
    # Inverse of _patch_package_paths: expand the '!stdlib!' placeholder back
    # to this context's actual stdlib location.
    import sys
    return _sub_package_paths(paths, "!stdlib!", sys.graal_python_stdlib_home)
@__builtin__
def _sub_package_paths(paths, fro, to):
    """Return *paths* with every occurrence of *fro* replaced by *to*.

    A ``None`` input passes through as ``None``.
    """
    if paths is None:
        return None
    return [entry.replace(fro, to) for entry in paths]
@__builtin__
def freeze_module(mod, key=None):
    """
    Freeze a module under the optional key in the language cache so that it can
    be shared across multiple contexts.  If the module is a package in the
    standard library path, its __path__ is substituted so as not to leak the
    standard library path to other contexts.
    """
    import sys
    path = _patch_package_paths(getattr(mod, "__path__", None))
    name = key or mod.__name__
    # Cache under the resolved name: the previous code passed `key` directly,
    # which is None whenever no explicit key was supplied, so such modules
    # were cached under a None key instead of their own name.
    graal_python_cache_module_code(name, mod.__file__, path)
class CachedImportFinder:
    """Meta-path finder that resolves modules from the language code cache."""
    # Bound as a class attribute (same pattern as CachedLoader.sys below).
    # The previous code referenced CachedImportFinder.ModuleSpec without ever
    # defining it, which raised AttributeError on the first cache hit.
    from importlib.machinery import ModuleSpec

    @staticmethod
    def find_spec(fullname, path, target=None):
        # A non-None (possibly empty) path list means the module is cached;
        # a non-empty list additionally marks it as a package.
        path = _unpatch_package_paths(graal_python_get_cached_code_path(fullname))
        if path is not None:
            if len(path) > 0:
                submodule_search_locations = path
                is_package = True
            else:
                submodule_search_locations = None
                is_package = False
            spec = CachedImportFinder.ModuleSpec(fullname, CachedLoader, is_package=is_package)
            # we're not setting origin, so the module won't have a __file__
            # attribute and will show up as built-in
            spec.submodule_search_locations = submodule_search_locations
            return spec
class CachedLoader:
    """Loader that executes a module's body from the language code cache."""
    # Bound as a class attribute so the loader keeps a handle on the sys
    # module of the context it was defined in.
    import sys
    @staticmethod
    def create_module(spec):
        # Returning None lets the import machinery build a default module.
        pass
    @staticmethod
    def exec_module(module):
        modulename = module.__name__
        # Run the cached code object in the fresh module's namespace, then
        # register the module so subsequent imports find it.
        exec(graal_python_get_cached_code(modulename), module.__dict__)
        CachedLoader.sys.modules[modulename] = module
# Package context -- the full module name for package imports
_py_package_context = None
@__builtin__
def get_magic():
return b'\x0c\xaf\xaf\xe1'
@__builtin__
def create_dynamic(module_spec, filename=None):
global _py_package_context
old_package_context = _py_package_context
_py_package_context = str(module_spec.name)
try:
return __create_dynamic__(module_spec, filename)
finally:
_py_package_context = old_package_context
@__builtin__
def exec_builtin(mod):
return None
@__builtin__
def init_frozen(name):
return None
@__builtin__
def is_frozen(name):
return False
@__builtin__
def get_frozen_object(name):
raise ImportError("No such frozen object named %s" % name)
is_frozen_package = get_frozen_object
@__builtin__
def cache_all_file_modules():
"""
Caches all modules loaded during initialization through the normal import
mechanism on the language, so that any additional contexts created in the
same engine can re-use the cached CallTargets. See the _imp module for
details on the module caching.
"""
import sys
for k,v in sys.modules.items():
if hasattr(v, "__file__"):
if not graal_python_has_cached_code(k):
freeze_module(v, k)
@__builtin__
def _patch_package_paths(paths):
import sys
return _sub_package_paths(paths, sys.graal_python_stdlib_home, "!stdlib!")
@__builtin__
def _unpatch_package_paths(paths):
import sys
return _sub_package_paths(paths, "!stdlib!", sys.graal_python_stdlib_home)
@__builtin__
def _sub_package_paths(paths, fro, to):
if paths is not None:
return [p.replace(fro, to) for p in paths]
@__builtin__
def freeze_module(mod, key=None):
"""
Freeze a module under the optional key in the language cache so that it can
be shared across multiple contexts. If the module is a package in the
standard library path, it's __path__ is substituted to not leak the standard
library path to other contexts.
"""
import sys
path = _patch_package_paths(getattr(mod, "__path__", None))
name = key or mod.__name__
graal_python_cache_module_code(key, mod.__file__, path)
class CachedImportFinder:
@staticmethod
def find_spec(fullname, path, target=None):
path = _unpatch_package_paths(graal_python_get_cached_code_path(fullname))
if path is not None:
if len(path) > 0:
submodule_search_locations = path
is_package = True
else:
submodule_search_locations = None
is_package = False
spec = CachedImportFinder.ModuleSpec(fullname, CachedLoader, is_package=is_package)
# we're not setting origin, so the module won't have a __file__
# attribute and will show up as built-in
spec.submodule_search_locations = submodule_search_locations
return spec
class CachedLoader:
import sys
@staticmethod
def create_module(spec):
pass
@staticmethod
def exec_module(module):
modulename = module.__name__
exec(graal_python_get_cached_code(modulename), module.__dict__)
CachedLoader.sys.modules[modulename] = module | 0.570212 | 0.08438 |
# Vive tracker logging/broadcast script: polls tracker poses as fast as
# possible, accumulates them in a protobuf GlobalCollection, broadcasts the
# latest frame over UDP, and dumps the collection to disk on Ctrl-C.
from vive_pb2 import *
from datetime import datetime
import socket
import time
from utils import *
import sys
from vive_provider import *
collection = GlobalCollection()
try:
    vp = Vive_provider()
    # Destination for UDP frames; None disables sending entirely.
    # addr = None
    # addr = '<broadcast>'
    addr = '192.168.0.255'
    server = socket.socket(
        socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.settimeout(0.2)
    server.bind(("", 44444))
    pb_msg = GlobalMsg()
    last = time.time()
    i = 0
    while True:
        # Collecting messages at maximum speed
        trackers = vp.getTrackersInfos()
        pb_msg.Clear()
        pb_msg = trackersInfos_to_GlobalMsg(trackers, i)
        collection.messages.extend([pb_msg])
        i += 1
        # Can be removed to have full logs, to test
        time.sleep(0.01);
        # Only sending network messages at ~100Hz
        if time.time()-last > 0.01:
            last = time.time()
            # Converting message to bytes for network
            trackersInfos = pb_msg.SerializeToString()
            # Output debug infos
            print('---')
            print('* Tracking %d devices' % len(trackers['trackers']))
            for id in trackers['trackers']:
                p = trackers['trackers'][id]['pose']
                # Orientation as roll/pitch/yaw in degrees.
                rpy = np.array(convert_to_euler(
                    trackers['trackers'][id]['pose_matrix']))*180.0/math.pi
                print('- %s (%s)' %
                      (id, trackers['trackers'][id]['device_type']))
                print(' - x: %g, y: %g, z: %g' % (p[0], p[1], p[2]))
                print(' - roll: %g, pitch: %f, yaw: %g' % tuple(rpy))
                print()
            if addr is not None:
                bytes_sent = server.sendto(trackersInfos, (addr, 37020))
except KeyboardInterrupt:
    # Ctrl-C: flush everything recorded so far to a timestamped binary log.
    fname = datetime.now().strftime('%Y_%m_%d-%Hh%Mm%Ss')+'_vive.bin'
    print('Interrupted, saving the collection to %s ...' % fname)
    f = open('logs/'+fname, 'wb')
    s = collection.SerializeToString()
    f.write(s)
    f.close()
from vive_pb2 import *
from datetime import datetime
import socket
import time
from utils import *
import sys
from vive_provider import *
collection = GlobalCollection()
try:
vp = Vive_provider()
# addr = None
# addr = '<broadcast>'
addr = '192.168.0.255'
server = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
server.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.settimeout(0.2)
server.bind(("", 44444))
pb_msg = GlobalMsg()
last = time.time()
i = 0
while True:
# Collecting messages at maximum speed
trackers = vp.getTrackersInfos()
pb_msg.Clear()
pb_msg = trackersInfos_to_GlobalMsg(trackers, i)
collection.messages.extend([pb_msg])
i += 1
# Can be removed to have full logs, to test
time.sleep(0.01);
# Only sending network messages at ~100Hz
if time.time()-last > 0.01:
last = time.time()
# Converting message to bytes for network
trackersInfos = pb_msg.SerializeToString()
# Output debug infos
print('---')
print('* Tracking %d devices' % len(trackers['trackers']))
for id in trackers['trackers']:
p = trackers['trackers'][id]['pose']
rpy = np.array(convert_to_euler(
trackers['trackers'][id]['pose_matrix']))*180.0/math.pi
print('- %s (%s)' %
(id, trackers['trackers'][id]['device_type']))
print(' - x: %g, y: %g, z: %g' % (p[0], p[1], p[2]))
print(' - roll: %g, pitch: %f, yaw: %g' % tuple(rpy))
print()
if addr is not None:
bytes_sent = server.sendto(trackersInfos, (addr, 37020))
except KeyboardInterrupt:
fname = datetime.now().strftime('%Y_%m_%d-%Hh%Mm%Ss')+'_vive.bin'
print('Interrupted, saving the collection to %s ...' % fname)
f = open('logs/'+fname, 'wb')
s = collection.SerializeToString()
f.write(s)
f.close() | 0.268654 | 0.076718 |
import unittest
try:
from normatrix.test.unittests.header.tests import TestHeader
from normatrix.test.unittests.function_line.tests import TestFunctionLine
from normatrix.test.unittests.columns.tests import TestColumns
from normatrix.test.unittests.comma.tests import TestComma
from normatrix.test.unittests.indent.tests import TestIndent
from normatrix.test.unittests.libc_func.tests import TestLibCFunc
from normatrix.test.unittests.nb_params.tests import TestNbParams
from normatrix.test.unittests.nested_branches.tests import TestNestedBranches
from normatrix.test.unittests.newline_at_end_of_file.tests import TestNewlineAtEndOfFile
from normatrix.test.unittests.number_function.tests import TestNumberFunction
from normatrix.test.unittests.operators.tests import TestOperators
from normatrix.test.unittests.parenthesis.tests import TestParenthesis
from normatrix.test.unittests.preprocessor.tests import TestPreprocessor
from normatrix.test.unittests.snake_case.tests import TestSnakeCase
from normatrix.test.unittests.solo_space.tests import TestSoloSpace
from normatrix.test.unittests.statements.tests import TestStatements
from normatrix.test.unittests.trailing_newline.tests import TestTrailingNewline
from normatrix.test.unittests.two_space.tests import TestTwoSpace
exe = "normatrix.test.tests"
except ModuleNotFoundError:
from src.normatrix.test.unittests.header.tests import TestHeader
from src.normatrix.test.unittests.function_line.tests import TestFunctionLine
from src.normatrix.test.unittests.columns.tests import TestColumns
from src.normatrix.test.unittests.comma.tests import TestComma
from src.normatrix.test.unittests.indent.tests import TestIndent
from src.normatrix.test.unittests.libc_func.tests import TestLibCFunc
from src.normatrix.test.unittests.nb_params.tests import TestNbParams
from src.normatrix.test.unittests.nested_branches.tests import TestNestedBranches
from src.normatrix.test.unittests.newline_at_end_of_file.tests import TestNewlineAtEndOfFile
from src.normatrix.test.unittests.number_function.tests import TestNumberFunction
from src.normatrix.test.unittests.operators.tests import TestOperators
from src.normatrix.test.unittests.parenthesis.tests import TestParenthesis
from src.normatrix.test.unittests.preprocessor.tests import TestPreprocessor
from src.normatrix.test.unittests.snake_case.tests import TestSnakeCase
from src.normatrix.test.unittests.solo_space.tests import TestSoloSpace
from src.normatrix.test.unittests.statements.tests import TestStatements
from src.normatrix.test.unittests.trailing_newline.tests import TestTrailingNewline
from src.normatrix.test.unittests.two_space.tests import TestTwoSpace
exe = "src.normatrix.test.tests"
import subprocess
def run_tests():
    """Run this test module through unittest in a subprocess.

    Returns the subprocess's exit code (0 on success).  Uses the interpreter
    currently running rather than a hard-coded "python3", so the tests run
    under the same virtualenv/installation as the caller.
    """
    import sys
    ret = subprocess.run([sys.executable, "-m", "unittest", exe, "-v"])
    return ret.returncode
if __name__ == "__main__":
    # Discover and run all TestCase classes imported into this namespace.
    unittest.main()
try:
from normatrix.test.unittests.header.tests import TestHeader
from normatrix.test.unittests.function_line.tests import TestFunctionLine
from normatrix.test.unittests.columns.tests import TestColumns
from normatrix.test.unittests.comma.tests import TestComma
from normatrix.test.unittests.indent.tests import TestIndent
from normatrix.test.unittests.libc_func.tests import TestLibCFunc
from normatrix.test.unittests.nb_params.tests import TestNbParams
from normatrix.test.unittests.nested_branches.tests import TestNestedBranches
from normatrix.test.unittests.newline_at_end_of_file.tests import TestNewlineAtEndOfFile
from normatrix.test.unittests.number_function.tests import TestNumberFunction
from normatrix.test.unittests.operators.tests import TestOperators
from normatrix.test.unittests.parenthesis.tests import TestParenthesis
from normatrix.test.unittests.preprocessor.tests import TestPreprocessor
from normatrix.test.unittests.snake_case.tests import TestSnakeCase
from normatrix.test.unittests.solo_space.tests import TestSoloSpace
from normatrix.test.unittests.statements.tests import TestStatements
from normatrix.test.unittests.trailing_newline.tests import TestTrailingNewline
from normatrix.test.unittests.two_space.tests import TestTwoSpace
exe = "normatrix.test.tests"
except ModuleNotFoundError:
from src.normatrix.test.unittests.header.tests import TestHeader
from src.normatrix.test.unittests.function_line.tests import TestFunctionLine
from src.normatrix.test.unittests.columns.tests import TestColumns
from src.normatrix.test.unittests.comma.tests import TestComma
from src.normatrix.test.unittests.indent.tests import TestIndent
from src.normatrix.test.unittests.libc_func.tests import TestLibCFunc
from src.normatrix.test.unittests.nb_params.tests import TestNbParams
from src.normatrix.test.unittests.nested_branches.tests import TestNestedBranches
from src.normatrix.test.unittests.newline_at_end_of_file.tests import TestNewlineAtEndOfFile
from src.normatrix.test.unittests.number_function.tests import TestNumberFunction
from src.normatrix.test.unittests.operators.tests import TestOperators
from src.normatrix.test.unittests.parenthesis.tests import TestParenthesis
from src.normatrix.test.unittests.preprocessor.tests import TestPreprocessor
from src.normatrix.test.unittests.snake_case.tests import TestSnakeCase
from src.normatrix.test.unittests.solo_space.tests import TestSoloSpace
from src.normatrix.test.unittests.statements.tests import TestStatements
from src.normatrix.test.unittests.trailing_newline.tests import TestTrailingNewline
from src.normatrix.test.unittests.two_space.tests import TestTwoSpace
exe = "src.normatrix.test.tests"
import subprocess
def run_tests():
ret = subprocess.run(["python3", "-m", "unittest", exe, "-v"])
return ret.returncode
if __name__ == "__main__":
unittest.main() | 0.369315 | 0.6852 |
import numpy
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def getData(filename):
    """Load a two-column CSV file.

    The first line is treated as a header and echoed to stdout; every
    following line contributes one (x, y) pair.

    Returns a list ``[xvalues, yvalues]`` of two float lists.
    """
    with open(filename) as file:
        data = [[], []]
        titles = [c.strip() for c in file.readline().split(",")]
        # Python 2 print statement replaced with the function form, which
        # behaves the same here and also runs under Python 3.
        print(titles)
        for line in file:
            point = [c.strip() for c in line.split(",")]
            data[0].append(float(point[0]))
            data[1].append(float(point[1]))
    return data
def findPeaks(yvalues, window=None):
    """Find indices of local maxima in *yvalues*.

    A peak is recorded once the signal has risen and then fallen for more
    than *window* consecutive samples.  ``window`` defaults to 1% of the
    signal length.  Returns ``[None]`` when no peak is found.
    """
    max_peak_index = 0
    decrease_counter = 0
    increase_counter = 0
    peak_indices = []
    if window is None:
        # Floor division: this file predates Python 3, where plain `/` would
        # produce a float threshold and subtly change the comparison below.
        window = len(yvalues) // 100
    for i in range(len(yvalues)):
        if yvalues[i] > yvalues[max_peak_index]:
            # Still climbing: remember the highest point seen so far.
            max_peak_index = i
            decrease_counter = 0
            increase_counter += 1
        elif yvalues[i] < yvalues[max_peak_index]:
            decrease_counter += 1
            # Sustained rise followed by a sustained fall -> confirmed peak.
            if decrease_counter > window and increase_counter > window:
                peak_indices.append(max_peak_index)
                max_peak_index = i
                decrease_counter, increase_counter = 0, 0
    if not peak_indices:
        return [None]
    return peak_indices
def gauss(x, *p):
    """Evaluate a Gaussian at *x* with parameters p = (height, center, sigma)."""
    height, center, sigma = p
    return height * numpy.exp(-((x - center) ** 2) / (2.0 * sigma ** 2))
def fitPeak(xvalues, yvalues, peak_index, fit_window=None):
    """Fit a Gaussian to the data around one peak.

    Only the samples within ``fit_window`` points on either side of
    ``peak_index`` are used for the fit.  Returns ``[xvalues, fitted_y]``
    where ``fitted_y`` is the Gaussian evaluated over all of *xvalues*.
    """
    if fit_window is None:
        # Floor division: under Python 3 plain `/` yields a float, and float
        # slice indices below would raise TypeError.
        fit_window = len(xvalues) // 50
    # Initial guess: peak height, peak position, unit width.
    p0 = [yvalues[peak_index], xvalues[peak_index], 1.]
    lo = peak_index - fit_window
    hi = peak_index + fit_window
    xvalues_to_fit = xvalues[lo:hi]
    yvalues_to_fit = yvalues[lo:hi]
    coeff, var_matrix = curve_fit(gauss, xvalues_to_fit, yvalues_to_fit, p0=p0)
    return [xvalues, gauss(xvalues, *coeff)]
def subtractAndZero(curve1, curve2):
    """Pointwise ``curve1 - curve2`` with negative (and zero) results kept at 0.

    Raises if the two curves differ in length.
    """
    if len(curve1) != len(curve2):
        raise Exception("Curves are not the same length")
    return [a - b if a - b > 0 else 0 for a, b in zip(curve1, curve2)]
def initialAllPeakFits(data, expected_peaks=10):
    """Produce an initial Gaussian fit for each peak in the data.

    Repeatedly fits the most prominent remaining peak and subtracts the fit
    from a working copy of the y-values, until either no peak remains or
    *expected_peaks* fits have been collected.  Returns a list of fitted
    y-value curves (one per peak).

    Fixes over the original: ``is not None`` instead of ``!= None`` and
    removal of the dead ``color_index`` counter left over from debug plotting.
    """
    residual = list(data[1])
    peak_index = findPeaks(residual)[0]
    gaussian_peaks = []
    while peak_index is not None and len(gaussian_peaks) < expected_peaks:
        fitted = fitPeak(data[0], residual, peak_index)
        gaussian_peaks.append(fitted[1])
        residual = subtractAndZero(residual, fitted[1])
        peak_index = findPeaks(residual)[0]
    return gaussian_peaks
def refinePeaksOnce(data, peak_fits):
    """One refinement pass over all peak fits.

    Each peak is re-fitted against the data with every *other* current fit
    subtracted, which lets overlapping peaks redistribute their shared area.
    Returns the new list of fitted curves (peaks that can no longer be found
    are dropped).

    Fixes over the original: ``is None`` instead of ``== None`` and removal
    of the dead ``color_index`` counter left over from debug plotting.
    """
    new_fits = []
    for i in range(len(peak_fits)):
        residual = list(data[1])
        for j in range(len(peak_fits)):
            if i != j:
                residual = subtractAndZero(residual, peak_fits[j])
        peak_index = max(findPeaks(residual))
        if peak_index is None:
            continue
        gauss_data = fitPeak(data[0], residual, peak_index)
        new_fits.append(gauss_data[1])
    return new_fits
def fitFitness(exp_yval, fit_yval):
    """Sum of squared residuals between measured and fitted y-values
    (lower is better)."""
    return sum((measured - fitted) ** 2
               for measured, fitted in zip(exp_yval, fit_yval))
def fitPeaksFitness(data, peak_fits):
    """Total squared error of the summed peak fits against the raw data."""
    combined = [sum(fit[i] for fit in peak_fits)
                for i in range(len(data[0]))]
    return fitFitness(data[1], combined)
def peakDeconvolution(data, expected_peaks=10, max_iterations=100):
    """Deconvolve the data into up to *expected_peaks* Gaussian peaks.

    Starts from an initial greedy fit and repeatedly refines it, keeping the
    best-scoring set of fits seen across all iterations.
    """
    peak_fits = initialAllPeakFits(data, expected_peaks)
    best_peaks = peak_fits
    best_fit = fitPeaksFitness(data, peak_fits)
    for _ in range(max_iterations):
        peak_fits = refinePeaksOnce(data, peak_fits)
        score = fitPeaksFitness(data, peak_fits)
        if score < best_fit:
            best_fit = score
            best_peaks = peak_fits
    return best_peaks
def main():
    # Deconvolve the sample data set into 4 peaks, then plot each fitted
    # Gaussian, their sum, and the raw data for visual inspection.
    data = getData("test-deconvolve.csv")
    peak_fits = peakDeconvolution(data, 4)
    # The following is for coloring each fit and the total, along with the data set
    color_index = 0
    colors = 'rgbcm'
    fit_sums = [0 for i in range(len(data[0]))]
    for fit in peak_fits:
        plt.plot(data[0], fit, '{}--'.format(colors[color_index]), label='fit-with-bounds')
        # Accumulate the pointwise total of all fitted peaks.
        fit_sums = [fit_sums[i] + fit[i] for i in range(len(fit))]
        color_index += 1
    plt.plot(data[0], fit_sums, 'black')
    plt.plot(data[0], data[1])
    plt.show()
if __name__ == '__main__':
    main()
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def getData(filename):
with open(filename) as file:
data = [[], []]
titles = [c.strip() for c in file.readline().split(",")]
print titles
for line in file:
point = [c.strip() for c in line.split(",")]
data[0].append(float(point[0]))
data[1].append(float(point[1]))
return data
def findPeaks(yvalues, window=None):
max_peak_index = 0
decrease_counter = 0
increase_counter = 0
peak_indices = []
if window == None:
window = len(yvalues) / 100
for i in range(0, len(yvalues)):
if yvalues[i] > yvalues[max_peak_index]:
max_peak_index = i
decrease_counter = 0
increase_counter += 1
elif yvalues[i] < yvalues[max_peak_index]:
decrease_counter += 1
if decrease_counter > window and increase_counter > window:
peak_indices.append(max_peak_index)
max_peak_index = i
decrease_counter, increase_counter = 0, 0
if len(peak_indices) == 0:
return [None]
return peak_indices
def gauss(x, *p):
# A is height, mu is center, and sigma is sigma
A, mu, sigma = p
return A * numpy.exp(-(x - mu)**2 / (2. * sigma**2))
def fitPeak(xvalues, yvalues, peak_index, fit_window=None):
# returns a array where the 0th index contain the xvalues and the 1 index contains the yvalues
if fit_window == None:
fit_window = len(xvalues) / 50
# This is where the peak initialization is for each peak
p0 = [yvalues[peak_index], xvalues[peak_index], 1.]
xvalues_to_fit = xvalues[peak_index - fit_window:peak_index + fit_window]
yvalues_to_fit = yvalues[peak_index - fit_window:peak_index + fit_window]
coeff, var_matrix = curve_fit(gauss, xvalues_to_fit, yvalues_to_fit, p0=p0)
# print(var_matrix)
return [xvalues, gauss(xvalues, *coeff)]
def subtractAndZero(curve1, curve2):
if len(curve1) != len(curve2):
raise Exception("Curves are not the same length")
final = [0 for i in range(len(curve1))]
for i in range(len(curve1)):
val = curve1[i] - curve2[i]
if val <= 0:
continue
else:
final[i] = val
return final
def initialAllPeakFits(data, expected_peaks=10):
yvalue_copy = [y for y in data[1]]
first_peak_index = findPeaks(yvalue_copy)[0]
gaussian_peaks = []
color_index = 0
# colors = 'rgbcm'
while first_peak_index != None and len(gaussian_peaks) < expected_peaks:
# print(first_peak_index, (data[0][first_peak_index], data[1][first_peak_index]))
gauss_data = fitPeak(data[0], yvalue_copy, first_peak_index)
gaussian_peaks.append(gauss_data[1])
yvalue_copy = subtractAndZero(yvalue_copy, gauss_data[1])
first_peak_index = findPeaks(yvalue_copy)[0]
# plt.plot(data[0][index], data[1][index], marker='o', markersize=3, color='red')
# plt.plot(gauss_data[0], gauss_data[1], '{}--'.format(colors[color_index]), label='fit-with-bounds')
color_index += 1
return gaussian_peaks
def refinePeaksOnce(data, peak_fits):
new_fits = []
color_index = 0
# colors = 'rgbcm'
for i in range(len(peak_fits)):
yvalue_copy = [y for y in data[1]]
for j in range(len(peak_fits)):
if i != j:
yvalue_copy = subtractAndZero(yvalue_copy, peak_fits[j])
peak_index = max(findPeaks(yvalue_copy))
if peak_index == None:
continue
gauss_data = fitPeak(data[0], yvalue_copy, peak_index)
new_fits.append(gauss_data[1])
# plt.plot(gauss_data[0], gauss_data[1], '{}--'.format(colors[color_index]), label='fit-with-bounds')
# plt.plot(data[0], yvalue_copy, '{}--'.format(colors[color_index]), label='fit-with-bounds')
color_index += 1
return new_fits
def fitFitness(exp_yval, fit_yval):
squared_residuals = 0
for i in range(len(exp_yval)):
squared_residuals += (exp_yval[i] - fit_yval[i])**2
return squared_residuals
def fitPeaksFitness(data, peak_fits):
fit_sums = [0 for i in range(len(data[0]))]
for i in range(len(data[0])):
fit_sums[i] = sum([fit[i] for fit in peak_fits])
return fitFitness(data[1], fit_sums)
def peakDeconvolution(data, expected_peaks=10, max_iterations=100):
peak_fits = initialAllPeakFits(data, expected_peaks)
best_fit = fitPeaksFitness(data, peak_fits)
best_peaks = peak_fits
for i in range(max_iterations):
peak_fits = refinePeaksOnce(data, peak_fits)
fitness = fitPeaksFitness(data, peak_fits)
if fitness < best_fit:
best_fit = fitness
best_peaks = peak_fits
return best_peaks
def main():
data = getData("test-deconvolve.csv")
peak_fits = peakDeconvolution(data, 4)
# The following is for coloring each fit and the total, along with the data set
color_index = 0
colors = 'rgbcm'
fit_sums = [0 for i in range(len(data[0]))]
for fit in peak_fits:
plt.plot(data[0], fit, '{}--'.format(colors[color_index]), label='fit-with-bounds')
fit_sums = [fit_sums[i] + fit[i] for i in range(len(fit))]
color_index += 1
plt.plot(data[0], fit_sums, 'black')
plt.plot(data[0], data[1])
plt.show()
if __name__ == '__main__':
main() | 0.38168 | 0.629746 |
import os
import warnings
from importlib import reload
import environ
import requests
import sys
from corsheaders.defaults import default_headers
from app.utils import secret_key_gen
env = environ.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ENV = env('ENVIRONMENT', default='local')
if ENV not in ('local', 'dev', 'staging', 'production'):
warnings.warn('ENVIRONMENT env variable must be one of local, dev, staging or production')
if 'DJANGO_SECRET_KEY' not in os.environ:
secret_key_gen()
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
HOSTED_SEATS_LIMIT = int(os.environ.get('HOSTED_SEATS_LIMIT', 0))
# Google Analytics Configuration
GOOGLE_ANALYTICS_KEY = os.environ.get('GOOGLE_ANALYTICS_KEY', '')
GOOGLE_SERVICE_ACCOUNT = os.environ.get('GOOGLE_SERVICE_ACCOUNT')
if not GOOGLE_SERVICE_ACCOUNT:
warnings.warn("GOOGLE_SERVICE_ACCOUNT not configured, getting organisation usage will not work")
GA_TABLE_ID = os.environ.get('GA_TABLE_ID')
if not GA_TABLE_ID:
warnings.warn("GA_TABLE_ID not configured, getting organisation usage will not work")
INFLUXDB_TOKEN = env.str('INFLUXDB_TOKEN', default='')
INFLUXDB_BUCKET = env.str('INFLUXDB_BUCKET', default='')
INFLUXDB_URL = env.str('INFLUXDB_URL', default='')
INFLUXDB_ORG = env.str('INFLUXDB_ORG', default='')
if 'DJANGO_ALLOWED_HOSTS' in os.environ:
ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split(',')
else:
ALLOWED_HOSTS = []
INTERNAL_IPS = ['127.0.0.1',]
# In order to run a load balanced solution, we need to whitelist the internal ip
try:
    # Query the EC2 instance metadata service for this host's internal IP.
    # The short timeout keeps module import from hanging when not running on
    # EC2: the previous code had no timeout, so an unreachable-but-routable
    # metadata address could block startup indefinitely.
    internal_ip = requests.get('http://instance-data/latest/meta-data/local-ipv4',
                               timeout=2).text
except requests.exceptions.RequestException:
    # Covers ConnectionError as before, plus the Timeout introduced above.
    pass
else:
    ALLOWED_HOSTS.append(internal_ip)
del requests
# Legacy Python 2 shim forcing UTF-8 as the default string encoding.
# Dead code under Python 3: the branch never runs and
# sys.setdefaultencoding does not exist there.
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding("utf-8")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'django.contrib.sites',
'custom_auth',
'api',
'corsheaders',
'users',
'organisations',
'projects',
'environments',
'features',
'segments',
'e2etests',
'simple_history',
'debug_toolbar',
'drf_yasg',
'audit',
'permissions',
# 2FA
'trench',
# health check plugins
'health_check',
'health_check.db',
# Used for ordering models (e.g. FeatureSegment)
'ordered_model',
]
if GOOGLE_ANALYTICS_KEY or INFLUXDB_TOKEN:
INSTALLED_APPS.append('analytics')
SITE_ID = 1
# Initialise empty databases dict to be populated in environment settings
DATABASES = {}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'PAGE_SIZE': 10,
'UNICODE_JSON': False,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'DEFAULT_THROTTLE_RATES': {
'login': '1/s'
}
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
if GOOGLE_ANALYTICS_KEY:
MIDDLEWARE.append('analytics.middleware.GoogleAnalyticsMiddleware')
if INFLUXDB_TOKEN:
MIDDLEWARE.append('analytics.middleware.InfluxDBMiddleware')
ALLOWED_ADMIN_IP_ADDRESSES = env.list('ALLOWED_ADMIN_IP_ADDRESSES', default=list())
if len(ALLOWED_ADMIN_IP_ADDRESSES) > 0:
warnings.warn('Restricting access to the admin site for ip addresses %s' % ', '.join(ALLOWED_ADMIN_IP_ADDRESSES))
MIDDLEWARE.append('app.middleware.AdminWhitelistMiddleware')
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, '../../static/')
# CORS settings
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = default_headers + (
'X-Environment-Key',
'X-E2E-Test-Auth-Token'
)
DEFAULT_FROM_EMAIL = "<EMAIL>"
SENDER_EMAIL = os.environ.get('SENDER_EMAIL', '<EMAIL>')
EMAIL_CONFIGURATION = {
# Invitations with name is anticipated to take two arguments. The persons name and the
# organisation name they are invited to.
'INVITE_SUBJECT_WITH_NAME': '%s has invited you to join the organisation \'%s\' on Bullet '
'Train',
# Invitations without a name is anticipated to take one arguments. The organisation name they
# are invited to.
'INVITE_SUBJECT_WITHOUT_NAME': 'You have been invited to join the organisation \'%s\' on '
'Bullet Train',
# The email address invitations will be sent from.
'INVITE_FROM_EMAIL': SENDER_EMAIL,
}
AWS_SES_REGION_NAME = os.environ.get('AWS_SES_REGION_NAME')
AWS_SES_REGION_ENDPOINT = os.environ.get('AWS_SES_REGION_ENDPOINT')
# Used on init to create admin user for the site, update accordingly before hitting /auth/init
ALLOW_ADMIN_INITIATION_VIA_URL = True
ADMIN_EMAIL = "<EMAIL>"
ADMIN_INITIAL_PASSWORD = "password"
AUTH_USER_MODEL = 'users.FFAdminUser'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'none' # TODO: configure email verification
# SendGrid
EMAIL_BACKEND = os.environ.get('EMAIL_BACKEND', 'sgbackend.SendGridBackend')
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
if EMAIL_BACKEND == 'sgbackend.SendGridBackend' and not SENDGRID_API_KEY:
warnings.warn(
"`SENDGRID_API_KEY` has not been configured. You will not receive emails.")
SWAGGER_SETTINGS = {
'SHOW_REQUEST_HEADERS': True,
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
}
}
LOGIN_URL = "/admin/login/"
LOGOUT_URL = "/admin/logout/"
# Email associated with user that is used by front end for end to end testing purposes
FE_E2E_TEST_USER_EMAIL = "<EMAIL>"
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Chargebee
# Environment variables are strings, so the previous
# os.environ.get('ENABLE_CHARGEBEE', False) evaluated truthy for ANY
# non-empty value -- including "False" or "0".  Parse it as a boolean with
# django-environ, consistent with the env.str/env.list usage elsewhere here.
ENABLE_CHARGEBEE = env.bool('ENABLE_CHARGEBEE', default=False)
CHARGEBEE_API_KEY = os.environ.get('CHARGEBEE_API_KEY')
CHARGEBEE_SITE = os.environ.get('CHARGEBEE_SITE')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console_format': {
'format': '%(name)-12s %(levelname)-8s %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console_format',
},
},
'loggers': {
'django': {
'level': 'INFO',
'handlers': ['console']
},
'': {
'level': 'DEBUG',
'handlers': ['console'],
},
}
}
CACHE_FLAGS_SECONDS = int(os.environ.get('CACHE_FLAGS_SECONDS', 0))
FLAGS_CACHE_LOCATION = 'environment-flags'
ENVIRONMENT_CACHE_LOCATION = 'environment-objects'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
},
ENVIRONMENT_CACHE_LOCATION: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ENVIRONMENT_CACHE_LOCATION
},
FLAGS_CACHE_LOCATION: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': FLAGS_CACHE_LOCATION,
}
}
LOG_LEVEL = env.str('LOG_LEVEL', 'WARNING')
TRENCH_AUTH = {
'FROM_EMAIL': DEFAULT_FROM_EMAIL,
'BACKUP_CODES_QUANTITY': 5,
'BACKUP_CODES_LENGTH': 10, # keep (quantity * length) under 200
'BACKUP_CODES_CHARACTERS': (
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
),
'DEFAULT_VALIDITY_PERIOD': 30,
'CONFIRM_BACKUP_CODES_REGENERATION_WITH_CODE': True,
'APPLICATION_ISSUER_NAME': 'app.bullet-train.io',
'MFA_METHODS': {
'app': {
'VERBOSE_NAME': 'TOTP App',
'VALIDITY_PERIOD': 60 * 10,
'USES_THIRD_PARTY_CLIENT': True,
'HANDLER': 'custom_auth.mfa.backends.application.CustomApplicationBackend',
},
},
}
USER_CREATE_PERMISSIONS = env.list('USER_CREATE_PERMISSIONS', default=['rest_framework.permissions.AllowAny'])
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': 'password-reset/confirm/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': False,
'SERIALIZERS': {
'token': 'custom_auth.serializers.CustomTokenSerializer',
'user_create': 'custom_auth.serializers.CustomUserCreateSerializer',
'current_user': 'users.serializers.CustomCurrentUserSerializer',
},
'SET_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'HIDE_USERS': True,
'PERMISSIONS': {
'user': ['custom_auth.permissions.CurrentUser'],
'user_list': ['custom_auth.permissions.CurrentUser'],
'user_create': USER_CREATE_PERMISSIONS,
}
}
# Github OAuth credentials
GITHUB_CLIENT_ID = env.str('GITHUB_CLIENT_ID', '')
GITHUB_CLIENT_SECRET = env.str('GITHUB_CLIENT_SECRET', '') | src/app/settings/common.py | import os
import warnings
from importlib import reload
import environ
import requests
import sys
from corsheaders.defaults import default_headers
from app.utils import secret_key_gen
env = environ.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ENV = env('ENVIRONMENT', default='local')
if ENV not in ('local', 'dev', 'staging', 'production'):
warnings.warn('ENVIRONMENT env variable must be one of local, dev, staging or production')
if 'DJANGO_SECRET_KEY' not in os.environ:
secret_key_gen()
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
HOSTED_SEATS_LIMIT = int(os.environ.get('HOSTED_SEATS_LIMIT', 0))
# Google Analytics Configuration
GOOGLE_ANALYTICS_KEY = os.environ.get('GOOGLE_ANALYTICS_KEY', '')
GOOGLE_SERVICE_ACCOUNT = os.environ.get('GOOGLE_SERVICE_ACCOUNT')
if not GOOGLE_SERVICE_ACCOUNT:
warnings.warn("GOOGLE_SERVICE_ACCOUNT not configured, getting organisation usage will not work")
GA_TABLE_ID = os.environ.get('GA_TABLE_ID')
if not GA_TABLE_ID:
warnings.warn("GA_TABLE_ID not configured, getting organisation usage will not work")
INFLUXDB_TOKEN = env.str('INFLUXDB_TOKEN', default='')
INFLUXDB_BUCKET = env.str('INFLUXDB_BUCKET', default='')
INFLUXDB_URL = env.str('INFLUXDB_URL', default='')
INFLUXDB_ORG = env.str('INFLUXDB_ORG', default='')
if 'DJANGO_ALLOWED_HOSTS' in os.environ:
ALLOWED_HOSTS = os.environ['DJANGO_ALLOWED_HOSTS'].split(',')
else:
ALLOWED_HOSTS = []
INTERNAL_IPS = ['127.0.0.1',]
# In order to run a load balanced solution, we need to whitelist the internal ip
try:
internal_ip = requests.get('http://instance-data/latest/meta-data/local-ipv4').text
except requests.exceptions.ConnectionError:
pass
else:
ALLOWED_HOSTS.append(internal_ip)
del requests
if sys.version[0] == '2':
reload(sys)
sys.setdefaultencoding("utf-8")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'djoser',
'django.contrib.sites',
'custom_auth',
'api',
'corsheaders',
'users',
'organisations',
'projects',
'environments',
'features',
'segments',
'e2etests',
'simple_history',
'debug_toolbar',
'drf_yasg',
'audit',
'permissions',
# 2FA
'trench',
# health check plugins
'health_check',
'health_check.db',
# Used for ordering models (e.g. FeatureSegment)
'ordered_model',
]
if GOOGLE_ANALYTICS_KEY or INFLUXDB_TOKEN:
INSTALLED_APPS.append('analytics')
SITE_ID = 1
# Initialise empty databases dict to be populated in environment settings
DATABASES = {}
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAuthenticated'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
'PAGE_SIZE': 10,
'UNICODE_JSON': False,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'DEFAULT_THROTTLE_RATES': {
'login': '1/s'
}
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
if GOOGLE_ANALYTICS_KEY:
MIDDLEWARE.append('analytics.middleware.GoogleAnalyticsMiddleware')
if INFLUXDB_TOKEN:
MIDDLEWARE.append('analytics.middleware.InfluxDBMiddleware')
ALLOWED_ADMIN_IP_ADDRESSES = env.list('ALLOWED_ADMIN_IP_ADDRESSES', default=list())
if len(ALLOWED_ADMIN_IP_ADDRESSES) > 0:
warnings.warn('Restricting access to the admin site for ip addresses %s' % ', '.join(ALLOWED_ADMIN_IP_ADDRESSES))
MIDDLEWARE.append('app.middleware.AdminWhitelistMiddleware')
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, '../../static/')
# CORS settings
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_HEADERS = default_headers + (
'X-Environment-Key',
'X-E2E-Test-Auth-Token'
)
DEFAULT_FROM_EMAIL = "<EMAIL>"
SENDER_EMAIL = os.environ.get('SENDER_EMAIL', '<EMAIL>')
EMAIL_CONFIGURATION = {
# Invitations with name is anticipated to take two arguments. The persons name and the
# organisation name they are invited to.
'INVITE_SUBJECT_WITH_NAME': '%s has invited you to join the organisation \'%s\' on Bullet '
'Train',
# Invitations without a name is anticipated to take one arguments. The organisation name they
# are invited to.
'INVITE_SUBJECT_WITHOUT_NAME': 'You have been invited to join the organisation \'%s\' on '
'Bullet Train',
# The email address invitations will be sent from.
'INVITE_FROM_EMAIL': SENDER_EMAIL,
}
AWS_SES_REGION_NAME = os.environ.get('AWS_SES_REGION_NAME')
AWS_SES_REGION_ENDPOINT = os.environ.get('AWS_SES_REGION_ENDPOINT')
# Used on init to create admin user for the site, update accordingly before hitting /auth/init
ALLOW_ADMIN_INITIATION_VIA_URL = True
ADMIN_EMAIL = "<EMAIL>"
ADMIN_INITIAL_PASSWORD = "password"
AUTH_USER_MODEL = 'users.FFAdminUser'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'none' # TODO: configure email verification
# SendGrid
EMAIL_BACKEND = os.environ.get('EMAIL_BACKEND', 'sgbackend.SendGridBackend')
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
if EMAIL_BACKEND == 'sgbackend.SendGridBackend' and not SENDGRID_API_KEY:
warnings.warn(
"`SENDGRID_API_KEY` has not been configured. You will not receive emails.")
SWAGGER_SETTINGS = {
'SHOW_REQUEST_HEADERS': True,
'SECURITY_DEFINITIONS': {
'api_key': {
'type': 'apiKey',
'in': 'header',
'name': 'Authorization'
}
}
}
LOGIN_URL = "/admin/login/"
LOGOUT_URL = "/admin/logout/"
# Email associated with user that is used by front end for end to end testing purposes
FE_E2E_TEST_USER_EMAIL = "<EMAIL>"
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Chargebee
ENABLE_CHARGEBEE = os.environ.get('ENABLE_CHARGEBEE', False)
CHARGEBEE_API_KEY = os.environ.get('CHARGEBEE_API_KEY')
CHARGEBEE_SITE = os.environ.get('CHARGEBEE_SITE')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console_format': {
'format': '%(name)-12s %(levelname)-8s %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'console_format',
},
},
'loggers': {
'django': {
'level': 'INFO',
'handlers': ['console']
},
'': {
'level': 'DEBUG',
'handlers': ['console'],
},
}
}
CACHE_FLAGS_SECONDS = int(os.environ.get('CACHE_FLAGS_SECONDS', 0))
FLAGS_CACHE_LOCATION = 'environment-flags'
ENVIRONMENT_CACHE_LOCATION = 'environment-objects'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
},
ENVIRONMENT_CACHE_LOCATION: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ENVIRONMENT_CACHE_LOCATION
},
FLAGS_CACHE_LOCATION: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': FLAGS_CACHE_LOCATION,
}
}
LOG_LEVEL = env.str('LOG_LEVEL', 'WARNING')
TRENCH_AUTH = {
'FROM_EMAIL': DEFAULT_FROM_EMAIL,
'BACKUP_CODES_QUANTITY': 5,
'BACKUP_CODES_LENGTH': 10, # keep (quantity * length) under 200
'BACKUP_CODES_CHARACTERS': (
'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
),
'DEFAULT_VALIDITY_PERIOD': 30,
'CONFIRM_BACKUP_CODES_REGENERATION_WITH_CODE': True,
'APPLICATION_ISSUER_NAME': 'app.bullet-train.io',
'MFA_METHODS': {
'app': {
'VERBOSE_NAME': 'TOTP App',
'VALIDITY_PERIOD': 60 * 10,
'USES_THIRD_PARTY_CLIENT': True,
'HANDLER': 'custom_auth.mfa.backends.application.CustomApplicationBackend',
},
},
}
USER_CREATE_PERMISSIONS = env.list('USER_CREATE_PERMISSIONS', default=['rest_framework.permissions.AllowAny'])
DJOSER = {
'PASSWORD_RESET_CONFIRM_URL': 'password-reset/confirm/{uid}/{token}',
'SEND_ACTIVATION_EMAIL': False,
'SERIALIZERS': {
'token': 'custom_auth.serializers.CustomTokenSerializer',
'user_create': 'custom_auth.serializers.CustomUserCreateSerializer',
'current_user': 'users.serializers.CustomCurrentUserSerializer',
},
'SET_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
'HIDE_USERS': True,
'PERMISSIONS': {
'user': ['custom_auth.permissions.CurrentUser'],
'user_list': ['custom_auth.permissions.CurrentUser'],
'user_create': USER_CREATE_PERMISSIONS,
}
}
# Github OAuth credentials
GITHUB_CLIENT_ID = env.str('GITHUB_CLIENT_ID', '')
GITHUB_CLIENT_SECRET = env.str('GITHUB_CLIENT_SECRET', '') | 0.272702 | 0.050331 |
import time
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
    """Bit-bang one SPI transaction to an MCP3008 and return the conversion
    for channel *adcnum*.

    Returns the 10-bit reading (the chip clocks back one empty bit, one null
    bit and 10 data bits), or -1 if *adcnum* is outside 0-7.
    """
    if ((adcnum > 7) or (adcnum < 0)):
        return -1
    # Idle the bus, then select the chip with CS low while the clock is low.
    GPIO.output(cspin, True)
    GPIO.output(clockpin, False)  # start clock low
    GPIO.output(cspin, False)  # bring CS low
    commandout = adcnum
    commandout |= 0x18  # start bit + single-ended bit
    commandout <<= 3  # we only need to send 5 bits here
    # Shift the 5 command bits out on MOSI, MSB first, one clock pulse each.
    for i in range(5):
        if (commandout & 0x80):
            GPIO.output(mosipin, True)
        else:
            GPIO.output(mosipin, False)
        commandout <<= 1
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
    adcout = 0
    # read in one empty bit, one null bit and 10 ADC bits
    for i in range(12):
        GPIO.output(clockpin, True)
        GPIO.output(clockpin, False)
        adcout <<= 1
        if (GPIO.input(misopin)):
            adcout |= 0x1
    # Deselect the chip to end the transaction.
    GPIO.output(cspin, True)
    adcout >>= 1  # first bit is 'null' so drop it
    return adcout
# change these as desired - they're the pins connected from the
# SPI port on the ADC to the Cobbler (BCM numbering, set at module top).
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25
# set up the SPI interface pins
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)
# Poll all 8 MCP3008 channels forever, printing each batch of readings.
# NOTE(review): there is no time.sleep() in this loop (although `time` is
# imported), so it busy-polls the ADC flat out - confirm that is intended.
while True:
    # read the analog pin
    data_8_channel = [readadc(pin, SPICLK, SPIMOSI, SPIMISO, SPICS) for pin in range(8)]
    print (data_8_channel)
    #Easy way is below
"""
CH0 = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH1 = readadc(1, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH2 = readadc(2, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH3 = readadc(3, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH4 = readadc(4, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH5 = readadc(5, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH6 = readadc(6, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH7 = readadc(7, SPICLK, SPIMOSI, SPIMISO, SPICS)
print("CH0:{0},CH1:{1},CH2:{2},CH3:{3},CH4:{4},CH5:{5},CH6:{6},CH7:{7}".format(CH0,CH1,CH2,CH3,CH4,CH5,CH6,CH7))
""" | 3ADC with Raspberry Pi using MCP3008/adc_1/adc.py | import time
import os
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
# read SPI data from MCP3008 chip, 8 possible adc's (0 thru 7)
def readadc(adcnum, clockpin, mosipin, misopin, cspin):
if ((adcnum > 7) or (adcnum < 0)):
return -1
GPIO.output(cspin, True)
GPIO.output(clockpin, False) # start clock low
GPIO.output(cspin, False) # bring CS low
commandout = adcnum
commandout |= 0x18 # start bit + single-ended bit
commandout <<= 3 # we only need to send 5 bits here
for i in range(5):
if (commandout & 0x80):
GPIO.output(mosipin, True)
else:
GPIO.output(mosipin, False)
commandout <<= 1
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout = 0
# read in one empty bit, one null bit and 10 ADC bits
for i in range(12):
GPIO.output(clockpin, True)
GPIO.output(clockpin, False)
adcout <<= 1
if (GPIO.input(misopin)):
adcout |= 0x1
GPIO.output(cspin, True)
adcout >>= 1 # first bit is 'null' so drop it
return adcout
# change these as desired - they're the pins connected from the
# SPI port on the ADC to the Cobbler
SPICLK = 18
SPIMISO = 23
SPIMOSI = 24
SPICS = 25
# set up the SPI interface pins
GPIO.setup(SPIMOSI, GPIO.OUT)
GPIO.setup(SPIMISO, GPIO.IN)
GPIO.setup(SPICLK, GPIO.OUT)
GPIO.setup(SPICS, GPIO.OUT)
while True:
# read the analog pin
data_8_channel = [readadc(pin, SPICLK, SPIMOSI, SPIMISO, SPICS) for pin in range(8)]
print (data_8_channel)
#Easy way is below
"""
CH0 = readadc(0, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH1 = readadc(1, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH2 = readadc(2, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH3 = readadc(3, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH4 = readadc(4, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH5 = readadc(5, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH6 = readadc(6, SPICLK, SPIMOSI, SPIMISO, SPICS)
CH7 = readadc(7, SPICLK, SPIMOSI, SPIMISO, SPICS)
print("CH0:{0},CH1:{1},CH2:{2},CH3:{3},CH4:{4},CH5:{5},CH6:{6},CH7:{7}".format(CH0,CH1,CH2,CH3,CH4,CH5,CH6,CH7))
""" | 0.247987 | 0.120439 |
"""Tests for tensorflow_hub.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
# pylint: enable=g-direct-tensorflow-import
class TextEmbedding(tf.train.Checkpoint):
    """Toy text-embedding module for tests: maps a batch of strings to fixed
    4-d embeddings via a static hash table.

    Strings not present in the table fall back to index 0 (the HashTable
    default value), i.e. the all-zeros embedding.
    """

    def __init__(self, returns_dict=False):
        # (string, embedding) pairs; the empty string at index 0 doubles as
        # the out-of-vocabulary bucket.
        embeddings = [
            ("", [0, 0, 0, 0]),  # OOV items are mapped to this embedding.
            ("hello world", [1, 2, 3, 4]),
            ("pair-programming", [5, 5, 5, 5]),
        ]
        keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
        indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
        tbl_init = KeyValueTensorInitializer(keys, indices)
        self.table = HashTable(tbl_init, 0)
        self.weights = tf.Variable(
            list([item[1] for item in embeddings]), dtype=tf.float32)
        self.variables = [self.weights]
        self.trainable_variables = self.variables
        # When True, __call__ returns {"outputs": tensor} instead of the
        # bare tensor (exercises the output_key code path in the column).
        self._returns_dict = returns_dict

    @tf.function(input_signature=[
        tf.TensorSpec(dtype=tf.string, name="text", shape=[None])
    ])
    def __call__(self, text_tensor):
        """Look up each input string and gather its embedding row."""
        indices_tensor = self.table.lookup(text_tensor)
        embedding_tensor = tf.gather(self.weights, indices_tensor)
        return dict(
            outputs=embedding_tensor) if self._returns_dict else embedding_tensor
class TextEmbeddingColumnTest(tf.test.TestCase):
    """Tests for hub.text_embedding_column_v2, backed by the toy
    TextEmbedding SavedModel exported to a temp directory in setUp()."""

    def setUp(self):
        super(TextEmbeddingColumnTest, self).setUp()
        # Export two module variants: one whose __call__ returns a tensor and
        # one that returns a dict (exercising the output_key path).
        self.model = os.path.join(self.get_temp_dir(), "model")
        tf.saved_model.save(TextEmbedding(), self.model)
        self.model_returning_dicts = os.path.join(self.get_temp_dir(),
                                                  "model_returning_dicts")
        tf.saved_model.save(
            TextEmbedding(returns_dict=True), self.model_returning_dicts)

    def testParents(self):
        """The column reports its source feature key as its only parent."""
        text_column = hub.text_embedding_column_v2(
            "text", self.model, trainable=False)
        self.assertEqual(["text"], text_column.parents)

    def testMakeParseExampleSpec(self):
        """The column parses as a single fixed-length string feature."""
        text_column = hub.text_embedding_column_v2(
            "text", self.model, trainable=False)
        parsing_spec = tf.feature_column.make_parse_example_spec([text_column])
        self.assertEqual(parsing_spec,
                         {"text": tf.io.FixedLenFeature([1], dtype=tf.string)})

    def testFeatureColumnsIsV2(self):
        """The column implements the v2 FeatureColumn protocol."""
        feature_column = hub.text_embedding_column_v2("text_a", self.model)
        self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))

    def testConfig(self):
        """get_config()/from_config() round-trips the module path."""
        text_column = hub.text_embedding_column_v2(
            "text", self.model, trainable=True)
        config = text_column.get_config()
        cloned_column = hub.feature_column_v2._TextEmbeddingColumnV2.from_config(
            config)
        self.assertEqual(cloned_column.module_path, text_column.module_path)

    def testDenseFeaturesDirectly(self):
        """DenseFeatures concatenates per-column lookups; OOV tokens map to
        the zero embedding."""
        features = {
            "text_a": ["hello world", "pair-programming"],
            "text_b": ["hello world", "oov token"],
        }
        feature_columns = [
            hub.text_embedding_column_v2("text_a", self.model, trainable=False),
            hub.text_embedding_column_v2("text_b", self.model, trainable=False),
        ]
        feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
        feature_layer_out = feature_layer(features)
        self.assertAllEqual(feature_layer_out,
                            [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])

    def testDenseFeaturesInKeras(self):
        """The column trains end-to-end inside a small Keras model."""
        features = {
            "text": np.array(["hello world", "pair-programming"]),
        }
        label = np.int64([0, 1])
        feature_columns = [
            hub.text_embedding_column_v2("text", self.model, trainable=True),
        ]
        input_features = dict(
            text=tf.keras.layers.Input(name="text", shape=[None], dtype=tf.string))
        dense_features = tf.keras.layers.DenseFeatures(feature_columns)
        x = dense_features(input_features)
        x = tf.keras.layers.Dense(16, activation="relu")(x)
        logits = tf.keras.layers.Dense(1, activation="linear")(x)
        model = tf.keras.Model(inputs=input_features, outputs=logits)
        model.compile(
            optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
        model.fit(x=features, y=label, epochs=10)
        self.assertAllEqual(model.predict(features["text"]).shape, [2, 1])

    def testLoadingDifferentFeatureColumnsFails(self):
        """A checkpoint saved from a model built with one column name must
        refuse to fully load into a model built with a different name."""
        features = [
            np.array(["hello world", "pair-programming"]),
            np.array(["hello world", "pair-programming"]),
        ]
        label = np.int64([0, 1])
        feature_columns = [
            hub.text_embedding_column_v2("text_1", self.model, trainable=True),
        ]
        # Build the first model.
        input_features = dict(
            text_1=tf.keras.layers.Input(
                name="text_1", shape=[None], dtype=tf.string))
        dense_features = tf.keras.layers.DenseFeatures(feature_columns)
        x = dense_features(input_features)
        x = tf.keras.layers.Dense(16, activation="relu")(x)
        logits = tf.keras.layers.Dense(1, activation="linear")(x)
        model_1 = tf.keras.Model(inputs=input_features, outputs=logits)
        model_1.compile(
            optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
        model_1.fit(x=features, y=label, epochs=10)
        checkpoint_path = os.path.join(self.get_temp_dir(), "checkpoints",
                                       "checkpoint-1")
        model_1.save_weights(checkpoint_path)
        # Build the second model with feature columns that have different names.
        feature_columns = [
            hub.text_embedding_column_v2("text_2", self.model, trainable=True),
        ]
        input_features = dict(
            text_2=tf.keras.layers.Input(
                name="text_2", shape=[None], dtype=tf.string))
        dense_features = tf.keras.layers.DenseFeatures(feature_columns)
        x = dense_features(input_features)
        x = tf.keras.layers.Dense(16, activation="relu")(x)
        logits = tf.keras.layers.Dense(1, activation="linear")(x)
        model_2 = tf.keras.Model(inputs=input_features, outputs=logits)
        model_2.compile(
            optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
        # Loading of checkpoints from the first model into the second model
        # should fail.
        # Fixed: assertRaisesRegexp is a deprecated alias (removed in
        # Python 3.12) - use assertRaisesRegex.
        with self.assertRaisesRegex(AssertionError,
                                    ".*Some Python objects were not bound.*"):
            model_2.load_weights(checkpoint_path).assert_consumed()

    def testWorksWithTF2DnnClassifier(self):
        """Smoke test: the column works with a canned DNNClassifier."""
        comment_embedding_column = hub.text_embedding_column_v2(
            "comment", self.model, trainable=False)
        upvotes = tf.feature_column.numeric_column("upvotes")
        feature_columns = [comment_embedding_column, upvotes]
        estimator = tf.estimator.DNNClassifier(
            hidden_units=[10],
            feature_columns=feature_columns,
            model_dir=self.get_temp_dir())

        # This only tests that estimator apis are working with the feature
        # column without throwing exceptions.
        def input_fn():
            features = {
                "comment": np.array([
                    ["the quick brown fox"],
                    ["spam spam spam"],
                ]),
                "upvotes": np.array([
                    [20],
                    [1],
                ]),
            }
            labels = np.array([[1], [0]])
            return features, labels

        estimator.train(input_fn, max_steps=1)
        estimator.evaluate(input_fn, steps=1)
        estimator.predict(input_fn)

    def testWorksWithDNNEstimatorAndDataset(self):
        """Smoke test: the dict-returning module feeds a DNNEstimator via a
        tf.data pipeline (currently skipped, see bug below)."""
        self.skipTest("b/154115879 - needs more investigation for timeout.")
        description_embeddings = hub.text_embedding_column_v2(
            "descriptions", self.model_returning_dicts, output_key="outputs")

        def input_fn():
            features = dict(descriptions=tf.constant([["sentence"]]))
            labels = tf.constant([[1]])
            dataset = tf.data.Dataset.from_tensor_slices((features, labels))
            data_batches = dataset.repeat().take(30).batch(5)
            return data_batches

        estimator = tf.estimator.DNNEstimator(
            model_dir=os.path.join(self.get_temp_dir(), "estimator_export"),
            hidden_units=[10],
            head=tf.estimator.BinaryClassHead(),
            feature_columns=[description_embeddings])
        estimator.train(input_fn=input_fn, max_steps=1)
if __name__ == "__main__":
    # This test is only supported in TF2 (eager) mode and only in a
    # TensorFlow version that has the following symbol:
    # tensorflow.python.feature_column.feature_column_v2.StateManager.has_resource
    # Otherwise the whole suite is skipped with a warning.
    if tf.executing_eagerly() and hasattr(feature_column_v2.StateManager,
                                          "has_resource"):
        logging.info("Using TF version: %s", tf.__version__)
        tf.test.main()
    else:
        logging.warning("Skipping running tests for TF Version: %s", tf.__version__)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import os
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_hub as hub
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
# pylint: enable=g-direct-tensorflow-import
class TextEmbedding(tf.train.Checkpoint):
def __init__(self, returns_dict=False):
embeddings = [
("", [0, 0, 0, 0]), # OOV items are mapped to this embedding.
("hello world", [1, 2, 3, 4]),
("pair-programming", [5, 5, 5, 5]),
]
keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
tbl_init = KeyValueTensorInitializer(keys, indices)
self.table = HashTable(tbl_init, 0)
self.weights = tf.Variable(
list([item[1] for item in embeddings]), dtype=tf.float32)
self.variables = [self.weights]
self.trainable_variables = self.variables
self._returns_dict = returns_dict
@tf.function(input_signature=[
tf.TensorSpec(dtype=tf.string, name="text", shape=[None])
])
def __call__(self, text_tensor):
indices_tensor = self.table.lookup(text_tensor)
embedding_tensor = tf.gather(self.weights, indices_tensor)
return dict(
outputs=embedding_tensor) if self._returns_dict else embedding_tensor
class TextEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
super(TextEmbeddingColumnTest, self).setUp()
self.model = os.path.join(self.get_temp_dir(), "model")
tf.saved_model.save(TextEmbedding(), self.model)
self.model_returning_dicts = os.path.join(self.get_temp_dir(),
"model_returning_dicts")
tf.saved_model.save(
TextEmbedding(returns_dict=True), self.model_returning_dicts)
def testParents(self):
text_column = hub.text_embedding_column_v2(
"text", self.model, trainable=False)
self.assertEqual(["text"], text_column.parents)
def testMakeParseExampleSpec(self):
text_column = hub.text_embedding_column_v2(
"text", self.model, trainable=False)
parsing_spec = tf.feature_column.make_parse_example_spec([text_column])
self.assertEqual(parsing_spec,
{"text": tf.io.FixedLenFeature([1], dtype=tf.string)})
def testFeatureColumnsIsV2(self):
feature_column = hub.text_embedding_column_v2("text_a", self.model)
self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))
def testConfig(self):
text_column = hub.text_embedding_column_v2(
"text", self.model, trainable=True)
config = text_column.get_config()
cloned_column = hub.feature_column_v2._TextEmbeddingColumnV2.from_config(
config)
self.assertEqual(cloned_column.module_path, text_column.module_path)
def testDenseFeaturesDirectly(self):
features = {
"text_a": ["hello world", "pair-programming"],
"text_b": ["hello world", "oov token"],
}
feature_columns = [
hub.text_embedding_column_v2("text_a", self.model, trainable=False),
hub.text_embedding_column_v2("text_b", self.model, trainable=False),
]
feature_layer = tf.keras.layers.DenseFeatures(feature_columns)
feature_layer_out = feature_layer(features)
self.assertAllEqual(feature_layer_out,
[[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])
def testDenseFeaturesInKeras(self):
features = {
"text": np.array(["hello world", "pair-programming"]),
}
label = np.int64([0, 1])
feature_columns = [
hub.text_embedding_column_v2("text", self.model, trainable=True),
]
input_features = dict(
text=tf.keras.layers.Input(name="text", shape=[None], dtype=tf.string))
dense_features = tf.keras.layers.DenseFeatures(feature_columns)
x = dense_features(input_features)
x = tf.keras.layers.Dense(16, activation="relu")(x)
logits = tf.keras.layers.Dense(1, activation="linear")(x)
model = tf.keras.Model(inputs=input_features, outputs=logits)
model.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x=features, y=label, epochs=10)
self.assertAllEqual(model.predict(features["text"]).shape, [2, 1])
def testLoadingDifferentFeatureColumnsFails(self):
features = [
np.array(["hello world", "pair-programming"]),
np.array(["hello world", "pair-programming"]),
]
label = np.int64([0, 1])
feature_columns = [
hub.text_embedding_column_v2("text_1", self.model, trainable=True),
]
# Build the first model.
input_features = dict(
text_1=tf.keras.layers.Input(
name="text_1", shape=[None], dtype=tf.string))
dense_features = tf.keras.layers.DenseFeatures(feature_columns)
x = dense_features(input_features)
x = tf.keras.layers.Dense(16, activation="relu")(x)
logits = tf.keras.layers.Dense(1, activation="linear")(x)
model_1 = tf.keras.Model(inputs=input_features, outputs=logits)
model_1.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
model_1.fit(x=features, y=label, epochs=10)
checkpoint_path = os.path.join(self.get_temp_dir(), "checkpoints",
"checkpoint-1")
model_1.save_weights(checkpoint_path)
# Build the second model with feature columns that have different names.
feature_columns = [
hub.text_embedding_column_v2("text_2", self.model, trainable=True),
]
input_features = dict(
text_2=tf.keras.layers.Input(
name="text_2", shape=[None], dtype=tf.string))
dense_features = tf.keras.layers.DenseFeatures(feature_columns)
x = dense_features(input_features)
x = tf.keras.layers.Dense(16, activation="relu")(x)
logits = tf.keras.layers.Dense(1, activation="linear")(x)
model_2 = tf.keras.Model(inputs=input_features, outputs=logits)
model_2.compile(
optimizer="rmsprop", loss="binary_crossentropy", metrics=["accuracy"])
# Loading of checkpoints from the first model into the second model should
# fail.
with self.assertRaisesRegexp(AssertionError,
".*Some Python objects were not bound.*"):
model_2.load_weights(checkpoint_path).assert_consumed()
def testWorksWithTF2DnnClassifier(self):
comment_embedding_column = hub.text_embedding_column_v2(
"comment", self.model, trainable=False)
upvotes = tf.feature_column.numeric_column("upvotes")
feature_columns = [comment_embedding_column, upvotes]
estimator = tf.estimator.DNNClassifier(
hidden_units=[10],
feature_columns=feature_columns,
model_dir=self.get_temp_dir())
# This only tests that estimator apis are working with the feature
# column without throwing exceptions.
def input_fn():
features = {
"comment": np.array([
["the quick brown fox"],
["spam spam spam"],
]),
"upvotes": np.array([
[20],
[1],
]),
}
labels = np.array([[1], [0]])
return features, labels
estimator.train(input_fn, max_steps=1)
estimator.evaluate(input_fn, steps=1)
estimator.predict(input_fn)
def testWorksWithDNNEstimatorAndDataset(self):
self.skipTest("b/154115879 - needs more investigation for timeout.")
description_embeddings = hub.text_embedding_column_v2(
"descriptions", self.model_returning_dicts, output_key="outputs")
def input_fn():
features = dict(descriptions=tf.constant([["sentence"]]))
labels = tf.constant([[1]])
dataset = tf.data.Dataset.from_tensor_slices((features, labels))
data_batches = dataset.repeat().take(30).batch(5)
return data_batches
estimator = tf.estimator.DNNEstimator(
model_dir=os.path.join(self.get_temp_dir(), "estimator_export"),
hidden_units=[10],
head=tf.estimator.BinaryClassHead(),
feature_columns=[description_embeddings])
estimator.train(input_fn=input_fn, max_steps=1)
if __name__ == "__main__":
# This test is only supported in TF2 mode and only in TensorFlow version that
# has the following symbol:
# tensorflow.python.feature_column.feature_column_v2.StateManager.has_resource
if tf.executing_eagerly() and hasattr(feature_column_v2.StateManager,
"has_resource"):
logging.info("Using TF version: %s", tf.__version__)
tf.test.main()
else:
logging.warning("Skipping running tests for TF Version: %s", tf.__version__) | 0.926653 | 0.331918 |
import os
import asyncio
import time
import typing
import logging
import binascii
from aiohttp.web import Request, StreamResponse, HTTPRequestRangeNotSatisfiable
from lbry.utils import generate_id
from lbry.error import DownloadSDTimeoutError
from lbry.schema.mime_types import guess_media_type
from lbry.stream.downloader import StreamDownloader
from lbry.stream.descriptor import StreamDescriptor, sanitize_file_name
from lbry.stream.reflector.client import StreamReflectorClient
from lbry.extras.daemon.storage import StoredContentClaim
from lbry.blob import MAX_BLOB_SIZE
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.schema.claim import Claim
from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_info import BlobInfo
from lbry.dht.node import Node
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.wallet.transaction import Transaction
log = logging.getLogger(__name__)
def _get_next_available_file_name(download_directory: str, file_name: str) -> str:
base_name, ext = os.path.splitext(os.path.basename(file_name))
i = 0
while os.path.isfile(os.path.join(download_directory, file_name)):
i += 1
file_name = "%s_%i%s" % (base_name, i, ext)
return file_name
async def get_next_available_file_name(loop: asyncio.AbstractEventLoop, download_directory: str, file_name: str) -> str:
    """Async wrapper around _get_next_available_file_name(): run the blocking
    collision scan in the loop's default executor and await the result."""
    return await loop.run_in_executor(
        None, _get_next_available_file_name, download_directory, file_name
    )
class ManagedStream:
STATUS_RUNNING = "running"
STATUS_STOPPED = "stopped"
STATUS_FINISHED = "finished"
SAVING_ID = 1
STREAMING_ID = 2
__slots__ = [
'loop',
'config',
'blob_manager',
'sd_hash',
'download_directory',
'_file_name',
'_added_on',
'_status',
'stream_claim_info',
'download_id',
'rowid',
'content_fee',
'purchase_receipt',
'downloader',
'analytics_manager',
'fully_reflected',
'file_output_task',
'delayed_stop_task',
'streaming_responses',
'streaming',
'_running',
'saving',
'finished_writing',
'started_writing',
'finished_write_attempt'
]
# Build a stream handle. Most arguments are optional because a stream can be
# constructed either from a local publish (descriptor known, row exists) or
# from a bare sd_hash that still has to be downloaded.
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
sd_hash: str, download_directory: typing.Optional[str] = None, file_name: typing.Optional[str] = None,
status: typing.Optional[str] = STATUS_STOPPED, claim: typing.Optional[StoredContentClaim] = None,
download_id: typing.Optional[str] = None, rowid: typing.Optional[int] = None,
descriptor: typing.Optional[StreamDescriptor] = None,
content_fee: typing.Optional['Transaction'] = None,
analytics_manager: typing.Optional['AnalyticsManager'] = None,
added_on: typing.Optional[int] = None):
self.loop = loop
self.config = config
self.blob_manager = blob_manager
self.sd_hash = sd_hash
self.download_directory = download_directory
self._file_name = file_name
self._status = status
self.stream_claim_info = claim
# Random hex id used to correlate analytics events for this download.
self.download_id = download_id or binascii.hexlify(generate_id()).decode()
self.rowid = rowid
self.content_fee = content_fee
self.purchase_receipt = None
self._added_on = added_on
self.downloader = StreamDownloader(self.loop, self.config, self.blob_manager, sd_hash, descriptor)
self.analytics_manager = analytics_manager
# NOTE(review): the explicit loop= kwarg on asyncio.Event was deprecated in
# Python 3.8 and removed in 3.10 — fine for the interpreter this targets,
# but confirm before upgrading.
self.fully_reflected = asyncio.Event(loop=self.loop)
self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None
# (request, response) pairs for in-flight HTTP range responses; used by
# stop_tasks() to force-close them.
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.streaming = asyncio.Event(loop=self.loop)
self._running = asyncio.Event(loop=self.loop)
self.saving = asyncio.Event(loop=self.loop)
self.finished_writing = asyncio.Event(loop=self.loop)
self.started_writing = asyncio.Event(loop=self.loop)
self.finished_write_attempt = asyncio.Event(loop=self.loop)
@property
def descriptor(self) -> StreamDescriptor:
# The SD descriptor is owned by the downloader (may only become available
# after the downloader has fetched the SD blob).
return self.downloader.descriptor
@property
def stream_hash(self) -> str:
return self.descriptor.stream_hash
@property
def file_name(self) -> typing.Optional[str]:
# Prefer an explicitly chosen file name; fall back to the descriptor's
# suggested name once the descriptor exists.
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
@property
def added_on(self) -> typing.Optional[int]:
return self._added_on
@property
def status(self) -> str:
return self._status
@property
def written_bytes(self) -> int:
# Measured from the file on disk, not from download progress.
return 0 if not self.output_file_exists else os.stat(self.full_path).st_size
@property
def completed(self):
# lower_bound_decrypted_length() under-estimates, so >= is the safe test.
return self.written_bytes >= self.descriptor.lower_bound_decrypted_length()
@property
def stream_url(self):
# Local URL served by the daemon's range-request endpoint (stream_file).
return f"http://{self.config.streaming_host}:{self.config.streaming_port}/stream/{self.sd_hash}"
# Persist a status change both in memory and in the storage layer.
# NOTE(review): assert is stripped under python -O; a raised ValueError would
# be safer, but that changes the exception type callers might catch.
async def update_status(self, status: str):
assert status in [self.STATUS_RUNNING, self.STATUS_STOPPED, self.STATUS_FINISHED]
self._status = status
await self.blob_manager.storage.change_file_status(self.stream_hash, status)
@property
def finished(self) -> bool:
return self.status == self.STATUS_FINISHED
@property
def running(self) -> bool:
return self.status == self.STATUS_RUNNING
# Claim metadata accessors: each returns None until a claim has been
# associated via set_claim()/update_content_claim().
@property
def claim_id(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.claim_id
@property
def txid(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.txid
@property
def nout(self) -> typing.Optional[int]:
return None if not self.stream_claim_info else self.stream_claim_info.nout
@property
def outpoint(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.outpoint
@property
def claim_height(self) -> typing.Optional[int]:
return None if not self.stream_claim_info else self.stream_claim_info.height
@property
def channel_claim_id(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.channel_claim_id
@property
def channel_name(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.channel_name
@property
def claim_name(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.claim_name
@property
def metadata(self) -> typing.Optional[typing.Dict]:
return None if not self.stream_claim_info else self.stream_claim_info.claim.stream.to_dict()
@property
def metadata_protobuf(self) -> bytes:
# Implicitly returns None when no claim is set (falls off the end).
if self.stream_claim_info:
return binascii.hexlify(self.stream_claim_info.claim.to_bytes())
@property
def blobs_completed(self) -> int:
# [:-1] skips the zero-length terminator blob at the end of the blob list.
return sum([1 if b.blob_hash in self.blob_manager.completed_blob_hashes else 0
for b in self.descriptor.blobs[:-1]])
@property
def blobs_in_stream(self) -> int:
# Terminator blob excluded.
return len(self.descriptor.blobs) - 1
@property
def blobs_remaining(self) -> int:
return self.blobs_in_stream - self.blobs_completed
@property
def full_path(self) -> typing.Optional[str]:
# basename() guards against a file name that smuggles in path components.
return os.path.join(self.download_directory, os.path.basename(self.file_name)) \
if self.file_name and self.download_directory else None
@property
def output_file_exists(self):
return os.path.isfile(self.full_path) if self.full_path else False
@property
def mime_type(self):
# Derived from the suggested file name's extension.
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
@classmethod
async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
file_path: str, key: typing.Optional[bytes] = None,
iv_generator: typing.Optional[typing.Generator[bytes, None, None]] = None) -> 'ManagedStream':
"""
Generate a stream from a file and save it to the db
"""
# Encrypt the file into blobs; each completed blob is reported to the
# blob manager via the callback.
descriptor = await StreamDescriptor.create_stream(
loop, blob_manager.blob_dir, file_path, key=key, iv_generator=iv_generator,
blob_completed_callback=blob_manager.blob_completed
)
await blob_manager.storage.store_stream(
blob_manager.get_blob(descriptor.sd_hash), descriptor
)
row_id = await blob_manager.storage.save_published_file(descriptor.stream_hash, os.path.basename(file_path),
os.path.dirname(file_path), 0)
# The source file already exists locally, so the stream starts FINISHED.
return cls(loop, config, blob_manager, descriptor.sd_hash, os.path.dirname(file_path),
os.path.basename(file_path), status=cls.STATUS_FINISHED, rowid=row_id, descriptor=descriptor)
# Start (or resume) downloading the stream. Idempotent while running.
# Raises DownloadSDTimeoutError if the downloader can't get going within
# `timeout` seconds (default: config.download_timeout).
async def start(self, node: typing.Optional['Node'] = None, timeout: typing.Optional[float] = None,
save_now: bool = False):
timeout = timeout or self.config.download_timeout
if self._running.is_set():
return
log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set()
try:
# NOTE(review): loop= kwarg on wait_for is deprecated in newer Pythons.
await asyncio.wait_for(self.downloader.start(node), timeout, loop=self.loop)
except asyncio.TimeoutError:
self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash)
# Restart the inactivity watchdog for this download.
if self.delayed_stop_task and not self.delayed_stop_task.done():
self.delayed_stop_task.cancel()
self.delayed_stop_task = self.loop.create_task(self._delayed_stop())
if not await self.blob_manager.storage.file_exists(self.sd_hash):
# First time we've seen this stream: create its db row. Only record a
# target name/dir when the caller intends to save to disk right away.
if save_now:
file_name, download_dir = self._file_name, self.download_directory
else:
file_name, download_dir = None, None
self._added_on = int(time.time())
self.rowid = await self.blob_manager.storage.save_downloaded_file(
self.stream_hash, file_name, download_dir, 0.0, added_on=self._added_on
)
if self.status != self.STATUS_RUNNING:
await self.update_status(self.STATUS_RUNNING)
async def stop(self, finished: bool = False):
"""
Stop any running save/stream tasks as well as the downloader and update the status in the database
"""
self.stop_tasks()
# Only write to the db when the status actually changes.
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
# Async-iterate (blob_info, decrypted_bytes) pairs starting at blob
# `start_blob_num`, excluding the terminator blob. `connection_id` tags the
# reads; the streaming id uses the downloader's cached read path.
async def _aiter_read_stream(self, start_blob_num: typing.Optional[int] = 0, connection_id: int = 0)\
-> typing.AsyncIterator[typing.Tuple['BlobInfo', bytes]]:
if start_blob_num >= len(self.descriptor.blobs[:-1]):
raise IndexError(start_blob_num)
for i, blob_info in enumerate(self.descriptor.blobs[start_blob_num:-1]):
# Sanity check: descriptor blob numbering must match list order.
assert i + start_blob_num == blob_info.blob_num
if connection_id == self.STREAMING_ID:
decrypted = await self.downloader.cached_read_blob(blob_info)
else:
decrypted = await self.downloader.read_blob(blob_info, connection_id)
yield (blob_info, decrypted)
# Serve the stream as an HTTP 206 partial-content response, decrypting blobs
# on the fly. Defaults to the whole stream when no range header is present.
async def stream_file(self, request: Request, node: typing.Optional['Node'] = None) -> StreamResponse:
log.info("stream file to browser for lbry://%s#%s (sd hash %s...)", self.claim_name, self.claim_id,
self.sd_hash[:6])
headers, size, skip_blobs, first_blob_start_offset = self._prepare_range_response_headers(
request.headers.get('range', 'bytes=0-')
)
await self.start(node)
response = StreamResponse(
status=206,
headers=headers
)
await response.prepare(request)
# Track the live response so stop_tasks() can force-close it.
self.streaming_responses.append((request, response))
self.streaming.set()
wrote = 0
try:
async for blob_info, decrypted in self._aiter_read_stream(skip_blobs, connection_id=self.STREAMING_ID):
if not wrote:
# Trim the first blob to the requested start offset.
decrypted = decrypted[first_blob_start_offset:]
if (blob_info.blob_num == len(self.descriptor.blobs) - 2) or (len(decrypted) + wrote >= size):
# Last data blob (or size reached): pad with zeros up to the
# advertised Content-Length, then end the response.
decrypted += (b'\x00' * (size - len(decrypted) - wrote - (skip_blobs * (MAX_BLOB_SIZE - 1))))
log.debug("sending browser final blob (%i/%i)", blob_info.blob_num + 1,
len(self.descriptor.blobs) - 1)
await response.write_eof(decrypted)
else:
log.debug("sending browser blob (%i/%i)", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
await response.write(decrypted)
wrote += len(decrypted)
# NOTE(review): _eof_sent is a private aiohttp attribute — may break on
# aiohttp upgrades.
log.info("sent browser %sblob %i/%i", "(final) " if response._eof_sent else "",
blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
if response._eof_sent:
break
return response
except ConnectionResetError:
log.warning("connection was reset after sending browser %i blob bytes", wrote)
raise asyncio.CancelledError("range request transport was reset")
finally:
response.force_close()
if (request, response) in self.streaming_responses:
self.streaming_responses.remove((request, response))
# Clear the streaming flag once no responses remain, so the
# inactivity watchdog can stop the downloader.
if not self.streaming_responses:
self.streaming.clear()
@staticmethod
def _write_decrypted_blob(handle: typing.IO, data: bytes):
handle.write(data)
handle.flush()
# Worker task that writes the decrypted stream to `output_path`. Progress is
# signalled via the saving/started_writing/finished_writing/
# finished_write_attempt events.
async def _save_file(self, output_path: str):
log.info("save file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id, self.sd_hash[:6],
output_path)
self.saving.set()
self.finished_write_attempt.clear()
self.finished_writing.clear()
self.started_writing.clear()
try:
with open(output_path, 'wb') as file_write_handle:
async for blob_info, decrypted in self._aiter_read_stream(connection_id=self.SAVING_ID):
log.info("write blob %i/%i", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
# File I/O runs in the default executor to keep the loop responsive.
await self.loop.run_in_executor(None, self._write_decrypted_blob, file_write_handle, decrypted)
if not self.started_writing.is_set():
self.started_writing.set()
await self.update_status(ManagedStream.STATUS_FINISHED)
if self.analytics_manager:
self.loop.create_task(self.analytics_manager.send_download_finished(
self.download_id, self.claim_name, self.sd_hash
))
self.finished_writing.set()
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash)
except Exception as err:
# Never leave a partial file on disk.
if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path)
if isinstance(err, asyncio.TimeoutError):
# Timed out fetching blobs: forget the save target and stop.
self.downloader.stop()
await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, None, None
)
self._file_name, self.download_directory = None, None
await self.blob_manager.storage.clear_saved_file(self.stream_hash)
await self.update_status(self.STATUS_STOPPED)
return
elif not isinstance(err, asyncio.CancelledError):
log.exception("unexpected error encountered writing file for stream %s", self.sd_hash)
# Re-raised for both cancellation and unexpected errors.
raise err
finally:
self.saving.clear()
self.finished_write_attempt.set()
# Public entry point: start the download (if needed) and kick off a
# _save_file task writing to download_directory/file_name. Waits up to
# config.download_timeout for the first byte before giving up.
async def save_file(self, file_name: typing.Optional[str] = None, download_directory: typing.Optional[str] = None,
node: typing.Optional['Node'] = None):
await self.start(node)
if self.file_output_task and not self.file_output_task.done():  # cancel an already running save task
self.file_output_task.cancel()
self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory:
raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.descriptor.suggested_file_name):
raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
# NOTE(review): os.mkdir fails if the parent is also missing —
# os.makedirs would be more robust; confirm intent before changing.
os.mkdir(self.download_directory)
# Avoid clobbering an existing file by picking the next free "_<i>" name.
self._file_name = await get_next_available_file_name(
self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
)
await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name
)
await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED)
# Synchronously tear down all activity: cancel the save task, force-close any
# live HTTP range responses (and their transports), stop the downloader and
# clear the running flag. Does NOT touch the persisted status (see stop()).
def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel()
self.file_output_task = None
while self.streaming_responses:
req, response = self.streaming_responses.pop()
response.force_close()
# NOTE(review): req.transport is aiohttp-internal; confirm on upgrades.
req.transport.close()
self.downloader.stop()
self._running.clear()
# Push this stream's blobs to a reflector server. Returns the list of blob
# hashes actually sent; timeouts, protocol errors and refused connections
# return whatever was sent so far instead of raising.
async def upload_to_reflector(self, host: str, port: int) -> typing.List[str]:
sent = []
protocol = StreamReflectorClient(self.blob_manager, self.descriptor)
try:
await self.loop.create_connection(lambda: protocol, host, port)
await protocol.send_handshake()
sent_sd, needed = await protocol.send_descriptor()
if sent_sd:
sent.append(self.sd_hash)
if not sent_sd and not needed:
# Reflector already has everything: mark fully reflected and bail.
if not self.fully_reflected.is_set():
self.fully_reflected.set()
await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
return []
# Only offer blobs we actually have locally.
we_have = [
blob_hash for blob_hash in needed if blob_hash in self.blob_manager.completed_blob_hashes
]
log.info("we have %i/%i needed blobs needed by reflector for lbry://%s#%s", len(we_have), len(needed),
self.claim_name, self.claim_id)
for blob_hash in we_have:
await protocol.send_blob(blob_hash)
sent.append(blob_hash)
except (asyncio.TimeoutError, ValueError):
return sent
except ConnectionRefusedError:
return sent
finally:
if protocol.transport:
protocol.transport.close()
# Reached only when every needed blob was offered without error.
if not self.fully_reflected.is_set():
self.fully_reflected.set()
await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
return sent
# Associate resolved claim metadata (dict from storage/resolve) and its
# deserialized Claim object with this stream.
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
self.stream_claim_info = StoredContentClaim(
f"{claim_info['txid']}:{claim_info['nout']}", claim_info['claim_id'],
claim_info['name'], claim_info['amount'], claim_info['height'],
binascii.hexlify(claim.to_bytes()).decode(), claim.signing_channel_id, claim_info['address'],
claim_info['claim_sequence'], claim_info.get('channel_name')
)
# Refresh the associated claim, loading it from storage when not supplied.
# claim_info['value'] is passed as the Claim object set_claim expects —
# presumably storage returns it deserialized; confirm against storage layer.
async def update_content_claim(self, claim_info: typing.Optional[typing.Dict] = None):
if not claim_info:
claim_info = await self.blob_manager.storage.get_content_claim(self.stream_hash)
self.set_claim(claim_info, claim_info['value'])
# Inactivity watchdog: polls once a second and stops the download after two
# consecutive checks with neither a save nor a streaming response active.
async def _delayed_stop(self):
stalled_count = 0
while self._running.is_set():
if self.saving.is_set() or self.streaming.is_set():
stalled_count = 0
else:
stalled_count += 1
if stalled_count > 1:
log.info("stopping inactive download for lbry://%s#%s (%s...)", self.claim_name, self.claim_id,
self.sd_hash[:6])
await self.stop()
return
# NOTE(review): loop= kwarg on asyncio.sleep is deprecated in newer Pythons.
await asyncio.sleep(1, loop=self.loop)
def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
    """Parse an HTTP ``Range`` header value for the 206 streaming response.

    Args:
        get_range: the request's range header value, e.g. ``bytes=0-`` or
            ``0-1023``.

    Returns:
        ``(headers, size, skip_blobs, first_blob_start_offset)`` where
        ``headers`` are the partial-content response headers, ``size`` is the
        total decrypted stream size in bytes, ``skip_blobs`` is how many whole
        blobs the range start lets us skip, and ``first_blob_start_offset`` is
        the byte offset into the first transmitted blob.

    Raises:
        HTTPRequestRangeNotSatisfiable: if start/end fall outside the stream.
        ValueError: if the size recorded in the claim metadata is implausible
            versus the size estimated from the blob lengths.
    """
    if '=' in get_range:
        get_range = get_range.split('=')[1]
    start, end = get_range.split('-')
    # Estimate decrypted size from blob lengths (terminator blob excluded);
    # each blob contributes length - 1 bytes here.
    size = 0
    for blob in self.descriptor.blobs[:-1]:
        size += blob.length - 1
    if self.stream_claim_info and self.stream_claim_info.claim.stream.source.size:
        size_from_claim = int(self.stream_claim_info.claim.stream.source.size)
        # The claimed size must sit within a small window of the estimate.
        if not size_from_claim <= size <= size_from_claim + 16:
            raise ValueError("claim contains implausible stream size")
        log.debug("using stream size from claim")
        size = size_from_claim
    elif self.stream_claim_info:
        log.debug("estimating stream size")
    start = int(start)
    if not 0 <= start < size:
        raise HTTPRequestRangeNotSatisfiable()
    end = int(end) if end else size - 1
    if end >= size:
        raise HTTPRequestRangeNotSatisfiable()
    # NOTE(review): the -2 / -1 offsets reproduce the original behavior;
    # presumably they account for per-blob encryption padding overhead —
    # confirm against the blob format before changing.
    skip_blobs = start // (MAX_BLOB_SIZE - 2)
    skip = skip_blobs * (MAX_BLOB_SIZE - 1)
    skip_first_blob = start - skip
    # No-op by construction (start == skip + skip_first_blob); kept to make
    # the relationship explicit.
    start = skip_first_blob + skip
    final_size = end - start + 1
    headers = {
        'Accept-Ranges': 'bytes',
        'Content-Range': f'bytes {start}-{end}/{size}',
        'Content-Length': str(final_size),
        'Content-Type': self.mime_type
    }
    return headers, size, skip_blobs, skip_first_blob
import asyncio
import time
import typing
import logging
import binascii
from aiohttp.web import Request, StreamResponse, HTTPRequestRangeNotSatisfiable
from lbry.utils import generate_id
from lbry.error import DownloadSDTimeoutError
from lbry.schema.mime_types import guess_media_type
from lbry.stream.downloader import StreamDownloader
from lbry.stream.descriptor import StreamDescriptor, sanitize_file_name
from lbry.stream.reflector.client import StreamReflectorClient
from lbry.extras.daemon.storage import StoredContentClaim
from lbry.blob import MAX_BLOB_SIZE
if typing.TYPE_CHECKING:
from lbry.conf import Config
from lbry.schema.claim import Claim
from lbry.blob.blob_manager import BlobManager
from lbry.blob.blob_info import BlobInfo
from lbry.dht.node import Node
from lbry.extras.daemon.analytics import AnalyticsManager
from lbry.wallet.transaction import Transaction
log = logging.getLogger(__name__)
# Return file_name, or the first "<stem>_<i><ext>" variant not already present
# in download_directory. Blocking; use the async wrapper from coroutines.
def _get_next_available_file_name(download_directory: str, file_name: str) -> str:
base_name, ext = os.path.splitext(os.path.basename(file_name))
i = 0
while os.path.isfile(os.path.join(download_directory, file_name)):
i += 1
file_name = "%s_%i%s" % (base_name, i, ext)
return file_name
# Async wrapper: run the blocking name probe in the default executor.
async def get_next_available_file_name(loop: asyncio.AbstractEventLoop, download_directory: str, file_name: str) -> str:
return await loop.run_in_executor(None, _get_next_available_file_name, download_directory, file_name)
class ManagedStream:
STATUS_RUNNING = "running"
STATUS_STOPPED = "stopped"
STATUS_FINISHED = "finished"
SAVING_ID = 1
STREAMING_ID = 2
__slots__ = [
'loop',
'config',
'blob_manager',
'sd_hash',
'download_directory',
'_file_name',
'_added_on',
'_status',
'stream_claim_info',
'download_id',
'rowid',
'content_fee',
'purchase_receipt',
'downloader',
'analytics_manager',
'fully_reflected',
'file_output_task',
'delayed_stop_task',
'streaming_responses',
'streaming',
'_running',
'saving',
'finished_writing',
'started_writing',
'finished_write_attempt'
]
def __init__(self, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
sd_hash: str, download_directory: typing.Optional[str] = None, file_name: typing.Optional[str] = None,
status: typing.Optional[str] = STATUS_STOPPED, claim: typing.Optional[StoredContentClaim] = None,
download_id: typing.Optional[str] = None, rowid: typing.Optional[int] = None,
descriptor: typing.Optional[StreamDescriptor] = None,
content_fee: typing.Optional['Transaction'] = None,
analytics_manager: typing.Optional['AnalyticsManager'] = None,
added_on: typing.Optional[int] = None):
self.loop = loop
self.config = config
self.blob_manager = blob_manager
self.sd_hash = sd_hash
self.download_directory = download_directory
self._file_name = file_name
self._status = status
self.stream_claim_info = claim
self.download_id = download_id or binascii.hexlify(generate_id()).decode()
self.rowid = rowid
self.content_fee = content_fee
self.purchase_receipt = None
self._added_on = added_on
self.downloader = StreamDownloader(self.loop, self.config, self.blob_manager, sd_hash, descriptor)
self.analytics_manager = analytics_manager
self.fully_reflected = asyncio.Event(loop=self.loop)
self.file_output_task: typing.Optional[asyncio.Task] = None
self.delayed_stop_task: typing.Optional[asyncio.Task] = None
self.streaming_responses: typing.List[typing.Tuple[Request, StreamResponse]] = []
self.streaming = asyncio.Event(loop=self.loop)
self._running = asyncio.Event(loop=self.loop)
self.saving = asyncio.Event(loop=self.loop)
self.finished_writing = asyncio.Event(loop=self.loop)
self.started_writing = asyncio.Event(loop=self.loop)
self.finished_write_attempt = asyncio.Event(loop=self.loop)
@property
def descriptor(self) -> StreamDescriptor:
return self.downloader.descriptor
@property
def stream_hash(self) -> str:
return self.descriptor.stream_hash
@property
def file_name(self) -> typing.Optional[str]:
return self._file_name or (self.descriptor.suggested_file_name if self.descriptor else None)
@property
def added_on(self) -> typing.Optional[int]:
return self._added_on
@property
def status(self) -> str:
return self._status
@property
def written_bytes(self) -> int:
return 0 if not self.output_file_exists else os.stat(self.full_path).st_size
@property
def completed(self):
return self.written_bytes >= self.descriptor.lower_bound_decrypted_length()
@property
def stream_url(self):
return f"http://{self.config.streaming_host}:{self.config.streaming_port}/stream/{self.sd_hash}"
async def update_status(self, status: str):
assert status in [self.STATUS_RUNNING, self.STATUS_STOPPED, self.STATUS_FINISHED]
self._status = status
await self.blob_manager.storage.change_file_status(self.stream_hash, status)
@property
def finished(self) -> bool:
return self.status == self.STATUS_FINISHED
@property
def running(self) -> bool:
return self.status == self.STATUS_RUNNING
@property
def claim_id(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.claim_id
@property
def txid(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.txid
@property
def nout(self) -> typing.Optional[int]:
return None if not self.stream_claim_info else self.stream_claim_info.nout
@property
def outpoint(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.outpoint
@property
def claim_height(self) -> typing.Optional[int]:
return None if not self.stream_claim_info else self.stream_claim_info.height
@property
def channel_claim_id(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.channel_claim_id
@property
def channel_name(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.channel_name
@property
def claim_name(self) -> typing.Optional[str]:
return None if not self.stream_claim_info else self.stream_claim_info.claim_name
@property
def metadata(self) -> typing.Optional[typing.Dict]:
return None if not self.stream_claim_info else self.stream_claim_info.claim.stream.to_dict()
@property
def metadata_protobuf(self) -> bytes:
if self.stream_claim_info:
return binascii.hexlify(self.stream_claim_info.claim.to_bytes())
@property
def blobs_completed(self) -> int:
return sum([1 if b.blob_hash in self.blob_manager.completed_blob_hashes else 0
for b in self.descriptor.blobs[:-1]])
@property
def blobs_in_stream(self) -> int:
return len(self.descriptor.blobs) - 1
@property
def blobs_remaining(self) -> int:
return self.blobs_in_stream - self.blobs_completed
@property
def full_path(self) -> typing.Optional[str]:
return os.path.join(self.download_directory, os.path.basename(self.file_name)) \
if self.file_name and self.download_directory else None
@property
def output_file_exists(self):
return os.path.isfile(self.full_path) if self.full_path else False
@property
def mime_type(self):
return guess_media_type(os.path.basename(self.descriptor.suggested_file_name))[0]
@classmethod
async def create(cls, loop: asyncio.AbstractEventLoop, config: 'Config', blob_manager: 'BlobManager',
file_path: str, key: typing.Optional[bytes] = None,
iv_generator: typing.Optional[typing.Generator[bytes, None, None]] = None) -> 'ManagedStream':
"""
Generate a stream from a file and save it to the db
"""
descriptor = await StreamDescriptor.create_stream(
loop, blob_manager.blob_dir, file_path, key=key, iv_generator=iv_generator,
blob_completed_callback=blob_manager.blob_completed
)
await blob_manager.storage.store_stream(
blob_manager.get_blob(descriptor.sd_hash), descriptor
)
row_id = await blob_manager.storage.save_published_file(descriptor.stream_hash, os.path.basename(file_path),
os.path.dirname(file_path), 0)
return cls(loop, config, blob_manager, descriptor.sd_hash, os.path.dirname(file_path),
os.path.basename(file_path), status=cls.STATUS_FINISHED, rowid=row_id, descriptor=descriptor)
async def start(self, node: typing.Optional['Node'] = None, timeout: typing.Optional[float] = None,
save_now: bool = False):
timeout = timeout or self.config.download_timeout
if self._running.is_set():
return
log.info("start downloader for stream (sd hash: %s)", self.sd_hash)
self._running.set()
try:
await asyncio.wait_for(self.downloader.start(node), timeout, loop=self.loop)
except asyncio.TimeoutError:
self._running.clear()
raise DownloadSDTimeoutError(self.sd_hash)
if self.delayed_stop_task and not self.delayed_stop_task.done():
self.delayed_stop_task.cancel()
self.delayed_stop_task = self.loop.create_task(self._delayed_stop())
if not await self.blob_manager.storage.file_exists(self.sd_hash):
if save_now:
file_name, download_dir = self._file_name, self.download_directory
else:
file_name, download_dir = None, None
self._added_on = int(time.time())
self.rowid = await self.blob_manager.storage.save_downloaded_file(
self.stream_hash, file_name, download_dir, 0.0, added_on=self._added_on
)
if self.status != self.STATUS_RUNNING:
await self.update_status(self.STATUS_RUNNING)
async def stop(self, finished: bool = False):
"""
Stop any running save/stream tasks as well as the downloader and update the status in the database
"""
self.stop_tasks()
if (finished and self.status != self.STATUS_FINISHED) or self.status == self.STATUS_RUNNING:
await self.update_status(self.STATUS_FINISHED if finished else self.STATUS_STOPPED)
async def _aiter_read_stream(self, start_blob_num: typing.Optional[int] = 0, connection_id: int = 0)\
-> typing.AsyncIterator[typing.Tuple['BlobInfo', bytes]]:
if start_blob_num >= len(self.descriptor.blobs[:-1]):
raise IndexError(start_blob_num)
for i, blob_info in enumerate(self.descriptor.blobs[start_blob_num:-1]):
assert i + start_blob_num == blob_info.blob_num
if connection_id == self.STREAMING_ID:
decrypted = await self.downloader.cached_read_blob(blob_info)
else:
decrypted = await self.downloader.read_blob(blob_info, connection_id)
yield (blob_info, decrypted)
async def stream_file(self, request: Request, node: typing.Optional['Node'] = None) -> StreamResponse:
log.info("stream file to browser for lbry://%s#%s (sd hash %s...)", self.claim_name, self.claim_id,
self.sd_hash[:6])
headers, size, skip_blobs, first_blob_start_offset = self._prepare_range_response_headers(
request.headers.get('range', 'bytes=0-')
)
await self.start(node)
response = StreamResponse(
status=206,
headers=headers
)
await response.prepare(request)
self.streaming_responses.append((request, response))
self.streaming.set()
wrote = 0
try:
async for blob_info, decrypted in self._aiter_read_stream(skip_blobs, connection_id=self.STREAMING_ID):
if not wrote:
decrypted = decrypted[first_blob_start_offset:]
if (blob_info.blob_num == len(self.descriptor.blobs) - 2) or (len(decrypted) + wrote >= size):
decrypted += (b'\x00' * (size - len(decrypted) - wrote - (skip_blobs * (MAX_BLOB_SIZE - 1))))
log.debug("sending browser final blob (%i/%i)", blob_info.blob_num + 1,
len(self.descriptor.blobs) - 1)
await response.write_eof(decrypted)
else:
log.debug("sending browser blob (%i/%i)", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
await response.write(decrypted)
wrote += len(decrypted)
log.info("sent browser %sblob %i/%i", "(final) " if response._eof_sent else "",
blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
if response._eof_sent:
break
return response
except ConnectionResetError:
log.warning("connection was reset after sending browser %i blob bytes", wrote)
raise asyncio.CancelledError("range request transport was reset")
finally:
response.force_close()
if (request, response) in self.streaming_responses:
self.streaming_responses.remove((request, response))
if not self.streaming_responses:
self.streaming.clear()
@staticmethod
def _write_decrypted_blob(handle: typing.IO, data: bytes):
handle.write(data)
handle.flush()
async def _save_file(self, output_path: str):
log.info("save file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id, self.sd_hash[:6],
output_path)
self.saving.set()
self.finished_write_attempt.clear()
self.finished_writing.clear()
self.started_writing.clear()
try:
with open(output_path, 'wb') as file_write_handle:
async for blob_info, decrypted in self._aiter_read_stream(connection_id=self.SAVING_ID):
log.info("write blob %i/%i", blob_info.blob_num + 1, len(self.descriptor.blobs) - 1)
await self.loop.run_in_executor(None, self._write_decrypted_blob, file_write_handle, decrypted)
if not self.started_writing.is_set():
self.started_writing.set()
await self.update_status(ManagedStream.STATUS_FINISHED)
if self.analytics_manager:
self.loop.create_task(self.analytics_manager.send_download_finished(
self.download_id, self.claim_name, self.sd_hash
))
self.finished_writing.set()
log.info("finished saving file for lbry://%s#%s (sd hash %s...) -> %s", self.claim_name, self.claim_id,
self.sd_hash[:6], self.full_path)
await self.blob_manager.storage.set_saved_file(self.stream_hash)
except Exception as err:
if os.path.isfile(output_path):
log.warning("removing incomplete download %s for %s", output_path, self.sd_hash)
os.remove(output_path)
if isinstance(err, asyncio.TimeoutError):
self.downloader.stop()
await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, None, None
)
self._file_name, self.download_directory = None, None
await self.blob_manager.storage.clear_saved_file(self.stream_hash)
await self.update_status(self.STATUS_STOPPED)
return
elif not isinstance(err, asyncio.CancelledError):
log.exception("unexpected error encountered writing file for stream %s", self.sd_hash)
raise err
finally:
self.saving.clear()
self.finished_write_attempt.set()
async def save_file(self, file_name: typing.Optional[str] = None, download_directory: typing.Optional[str] = None,
node: typing.Optional['Node'] = None):
await self.start(node)
if self.file_output_task and not self.file_output_task.done(): # cancel an already running save task
self.file_output_task.cancel()
self.download_directory = download_directory or self.download_directory or self.config.download_dir
if not self.download_directory:
raise ValueError("no directory to download to")
if not (file_name or self._file_name or self.descriptor.suggested_file_name):
raise ValueError("no file name to download to")
if not os.path.isdir(self.download_directory):
log.warning("download directory '%s' does not exist, attempting to make it", self.download_directory)
os.mkdir(self.download_directory)
self._file_name = await get_next_available_file_name(
self.loop, self.download_directory,
file_name or self._file_name or sanitize_file_name(self.descriptor.suggested_file_name)
)
await self.blob_manager.storage.change_file_download_dir_and_file_name(
self.stream_hash, self.download_directory, self.file_name
)
await self.update_status(ManagedStream.STATUS_RUNNING)
self.file_output_task = self.loop.create_task(self._save_file(self.full_path))
try:
await asyncio.wait_for(self.started_writing.wait(), self.config.download_timeout, loop=self.loop)
except asyncio.TimeoutError:
log.warning("timeout starting to write data for lbry://%s#%s", self.claim_name, self.claim_id)
self.stop_tasks()
await self.update_status(ManagedStream.STATUS_STOPPED)
def stop_tasks(self):
if self.file_output_task and not self.file_output_task.done():
self.file_output_task.cancel()
self.file_output_task = None
while self.streaming_responses:
req, response = self.streaming_responses.pop()
response.force_close()
req.transport.close()
self.downloader.stop()
self._running.clear()
    async def upload_to_reflector(self, host: str, port: int) -> typing.List[str]:
        """Upload this stream's descriptor and missing blobs to a reflector server.

        Args:
            host: reflector server hostname or IP.
            port: reflector server TCP port.

        Returns:
            List of blob hashes actually sent this call (possibly including the
            sd hash); may be partial if the connection timed out or was refused.
        """
        sent = []
        protocol = StreamReflectorClient(self.blob_manager, self.descriptor)
        try:
            await self.loop.create_connection(lambda: protocol, host, port)
            await protocol.send_handshake()
            sent_sd, needed = await protocol.send_descriptor()
            if sent_sd:
                sent.append(self.sd_hash)
            if not sent_sd and not needed:
                # Server already has the whole stream: record that once and bail.
                if not self.fully_reflected.is_set():
                    self.fully_reflected.set()
                    await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
                return []
            # Only offer blobs we have locally; the server may need more.
            we_have = [
                blob_hash for blob_hash in needed if blob_hash in self.blob_manager.completed_blob_hashes
            ]
            log.info("we have %i/%i needed blobs needed by reflector for lbry://%s#%s", len(we_have), len(needed),
                     self.claim_name, self.claim_id)
            for blob_hash in we_have:
                await protocol.send_blob(blob_hash)
                sent.append(blob_hash)
        except (asyncio.TimeoutError, ValueError):
            # Partial upload: report what made it across.
            return sent
        except ConnectionRefusedError:
            return sent
        finally:
            if protocol.transport:
                protocol.transport.close()
        # Reached only when no exception escaped the try block (the except
        # handlers above return early).
        # NOTE(review): this marks the stream fully reflected even when
        # ``we_have`` covered only a subset of ``needed`` -- confirm intent.
        if not self.fully_reflected.is_set():
            self.fully_reflected.set()
            await self.blob_manager.storage.update_reflected_stream(self.sd_hash, f"{host}:{port}")
        return sent
def set_claim(self, claim_info: typing.Dict, claim: 'Claim'):
self.stream_claim_info = StoredContentClaim(
f"{claim_info['txid']}:{claim_info['nout']}", claim_info['claim_id'],
claim_info['name'], claim_info['amount'], claim_info['height'],
binascii.hexlify(claim.to_bytes()).decode(), claim.signing_channel_id, claim_info['address'],
claim_info['claim_sequence'], claim_info.get('channel_name')
)
async def update_content_claim(self, claim_info: typing.Optional[typing.Dict] = None):
if not claim_info:
claim_info = await self.blob_manager.storage.get_content_claim(self.stream_hash)
self.set_claim(claim_info, claim_info['value'])
async def _delayed_stop(self):
stalled_count = 0
while self._running.is_set():
if self.saving.is_set() or self.streaming.is_set():
stalled_count = 0
else:
stalled_count += 1
if stalled_count > 1:
log.info("stopping inactive download for lbry://%s#%s (%s...)", self.claim_name, self.claim_id,
self.sd_hash[:6])
await self.stop()
return
await asyncio.sleep(1, loop=self.loop)
    def _prepare_range_response_headers(self, get_range: str) -> typing.Tuple[typing.Dict[str, str], int, int, int]:
        """Build HTTP range-response headers for streaming this file.

        Args:
            get_range: value of the ``Range`` request header, e.g.
                ``bytes=0-1023`` or ``0-`` (an empty end means "to the end").

        Returns:
            Tuple of (headers dict, total stream size in bytes, number of whole
            blobs to skip, byte offset into the first decoded blob).

        Raises:
            HTTPRequestRangeNotSatisfiable: if the requested range is outside
                the stream.
            ValueError: if the size stated in the claim contradicts the size
                derived from the blob list.
        """
        # NOTE(review): suffix ranges like "bytes=-500" (empty start) will
        # raise ValueError from int('') instead of a 416 -- confirm callers
        # never send them or handle the ValueError upstream.
        if '=' in get_range:
            get_range = get_range.split('=')[1]  # strip the "bytes=" prefix
        start, end = get_range.split('-')
        # Derive plaintext size from the blob list: the final blob is excluded
        # and one byte per remaining blob is subtracted (presumably encryption
        # padding overhead -- TODO confirm).
        size = 0
        for blob in self.descriptor.blobs[:-1]:
            size += blob.length - 1
        if self.stream_claim_info and self.stream_claim_info.claim.stream.source.size:
            size_from_claim = int(self.stream_claim_info.claim.stream.source.size)
            # The claimed size may be up to 16 bytes smaller than the blob
            # estimate (presumably one AES block of padding); anything else is
            # implausible and rejected.
            if not size_from_claim <= size <= size_from_claim + 16:
                raise ValueError("claim contains implausible stream size")
            log.debug("using stream size from claim")
            size = size_from_claim
        elif self.stream_claim_info:
            log.debug("estimating stream size")
        start = int(start)
        if not 0 <= start < size:
            raise HTTPRequestRangeNotSatisfiable()
        end = int(end) if end else size - 1
        if end >= size:
            raise HTTPRequestRangeNotSatisfiable()
        # Map the byte offset to a blob index and an offset within that blob.
        # NOTE(review): the -2 / -1 adjustments are undocumented in the
        # original ("dont remember"); presumably they account for per-blob
        # padding/terminator bytes -- confirm against the blob format.
        skip_blobs = start // (MAX_BLOB_SIZE - 2)  # -2 because ... dont remember
        skip = skip_blobs * (MAX_BLOB_SIZE - 1)  # -1 because
        skip_first_blob = start - skip
        start = skip_first_blob + skip  # round-trip: leaves start unchanged
        final_size = end - start + 1
        headers = {
            'Accept-Ranges': 'bytes',
            'Content-Range': f'bytes {start}-{end}/{size}',
            'Content-Length': str(final_size),
            'Content-Type': self.mime_type
        }
        return headers, size, skip_blobs, skip_first_blob
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``membership`` app.

    Auto-generated by South's ``schemamigration``; creates the Contact,
    Membership, Fee, BillingCycle, Bill, Payment and ApplicationPoll tables.
    The field definitions must stay in sync with the frozen ORM state in
    ``models`` below, so avoid editing them by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create all membership tables."""
        # Adding model 'Contact'
        db.create_table('membership_contact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('given_names', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('organization_name', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('street_address', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('postal_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('post_office', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('sms', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('homepage', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('membership', ['Contact'])

        # Adding model 'Membership'
        db.create_table('membership_membership', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('status', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('approved', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('public_memberlist', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('municipality', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('nationality', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('birth_year', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('organization_registration_number', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_set', null=True, to=orm['membership.Contact'])),
            ('billing_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='billing_set', null=True, to=orm['membership.Contact'])),
            ('tech_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='tech_contact_set', null=True, to=orm['membership.Contact'])),
            ('organization', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='organization_set', null=True, to=orm['membership.Contact'])),
            ('extra_info', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('locked', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('membership', ['Membership'])

        # Adding model 'Fee'
        db.create_table('membership_fee', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('start', self.gf('django.db.models.fields.DateTimeField')()),
            ('sum', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
        ))
        db.send_create_signal('membership', ['Fee'])

        # Adding model 'BillingCycle'
        db.create_table('membership_billingcycle', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('membership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Membership'])),
            # NOTE: the 'start' default below is frozen at migration-generation
            # time (a fixed datetime), not evaluated when the migration runs.
            ('start', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 9, 28, 0, 0))),
            ('end', self.gf('django.db.models.fields.DateTimeField')()),
            ('sum', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
            ('is_paid', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('reference_number', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal('membership', ['BillingCycle'])

        # Adding model 'Bill'
        db.create_table('membership_bill', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('billingcycle', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.BillingCycle'])),
            ('reminder_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('due_date', self.gf('django.db.models.fields.DateTimeField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('type', self.gf('django.db.models.fields.CharField')(default='E', max_length=1)),
        ))
        db.send_create_signal('membership', ['Bill'])

        # Adding model 'Payment'
        db.create_table('membership_payment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('billingcycle', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.BillingCycle'], null=True)),
            ('ignore', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)),
            ('reference_number', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('message', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('transaction_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('payment_day', self.gf('django.db.models.fields.DateTimeField')()),
            ('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('payer_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('duplicate', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('membership', ['Payment'])

        # Adding model 'ApplicationPoll'
        db.create_table('membership_applicationpoll', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('membership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Membership'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('answer', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal('membership', ['ApplicationPoll'])

    def backwards(self, orm):
        """Revert the migration: drop all membership tables."""
        # Deleting model 'Contact'
        db.delete_table('membership_contact')

        # Deleting model 'Membership'
        db.delete_table('membership_membership')

        # Deleting model 'Fee'
        db.delete_table('membership_fee')

        # Deleting model 'BillingCycle'
        db.delete_table('membership_billingcycle')

        # Deleting model 'Bill'
        db.delete_table('membership_bill')

        # Deleting model 'Payment'
        db.delete_table('membership_payment')

        # Deleting model 'ApplicationPoll'
        db.delete_table('membership_applicationpoll')

    # Frozen ORM state captured when this migration was generated; South
    # builds the ``orm`` argument passed to forwards()/backwards() from it.
    models = {
        'membership.applicationpoll': {
            'Meta': {'object_name': 'ApplicationPoll'},
            'answer': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'membership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Membership']"})
        },
        'membership.bill': {
            'Meta': {'object_name': 'Bill'},
            'billingcycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.BillingCycle']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'due_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'reminder_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'E'", 'max_length': '1'})
        },
        'membership.billingcycle': {
            'Meta': {'object_name': 'BillingCycle'},
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'membership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Membership']"}),
            'reference_number': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 28, 0, 0)'}),
            'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'})
        },
        'membership.contact': {
            'Meta': {'object_name': 'Contact'},
            'country': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'given_names': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'post_office': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'sms': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'street_address': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'membership.fee': {
            'Meta': {'object_name': 'Fee'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
        },
        'membership.membership': {
            'Meta': {'object_name': 'Membership'},
            'approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'billing_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'birth_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'municipality': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'nationality': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'organization_registration_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'public_memberlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
            'tech_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tech_contact_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
        },
        'membership.payment': {
            'Meta': {'object_name': 'Payment'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'billingcycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.BillingCycle']", 'null': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'payer_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'payment_day': ('django.db.models.fields.DateTimeField', [], {}),
            'reference_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'transaction_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }

    complete_apps = ['membership']
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for the ``membership`` app.

    Auto-generated by South's ``schemamigration``; creates the Contact,
    Membership, Fee, BillingCycle, Bill, Payment and ApplicationPoll tables.
    The field definitions must stay in sync with the frozen ORM state in
    ``models`` below, so avoid editing them by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create all membership tables."""
        # Adding model 'Contact'
        db.create_table('membership_contact', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('given_names', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('organization_name', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('street_address', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('postal_code', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('post_office', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('country', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('phone', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('sms', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('homepage', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)),
        ))
        db.send_create_signal('membership', ['Contact'])

        # Adding model 'Membership'
        db.create_table('membership_membership', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('status', self.gf('django.db.models.fields.CharField')(default='N', max_length=1)),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('approved', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('public_memberlist', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('municipality', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('nationality', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('birth_year', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
            ('organization_registration_number', self.gf('django.db.models.fields.CharField')(max_length=15, null=True, blank=True)),
            ('person', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='person_set', null=True, to=orm['membership.Contact'])),
            ('billing_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='billing_set', null=True, to=orm['membership.Contact'])),
            ('tech_contact', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='tech_contact_set', null=True, to=orm['membership.Contact'])),
            ('organization', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='organization_set', null=True, to=orm['membership.Contact'])),
            ('extra_info', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('locked', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('membership', ['Membership'])

        # Adding model 'Fee'
        db.create_table('membership_fee', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=1)),
            ('start', self.gf('django.db.models.fields.DateTimeField')()),
            ('sum', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
        ))
        db.send_create_signal('membership', ['Fee'])

        # Adding model 'BillingCycle'
        db.create_table('membership_billingcycle', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('membership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Membership'])),
            # NOTE: the 'start' default below is frozen at migration-generation
            # time (a fixed datetime), not evaluated when the migration runs.
            ('start', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 9, 28, 0, 0))),
            ('end', self.gf('django.db.models.fields.DateTimeField')()),
            ('sum', self.gf('django.db.models.fields.DecimalField')(max_digits=6, decimal_places=2)),
            ('is_paid', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('reference_number', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal('membership', ['BillingCycle'])

        # Adding model 'Bill'
        db.create_table('membership_bill', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('billingcycle', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.BillingCycle'])),
            ('reminder_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('due_date', self.gf('django.db.models.fields.DateTimeField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('last_changed', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('type', self.gf('django.db.models.fields.CharField')(default='E', max_length=1)),
        ))
        db.send_create_signal('membership', ['Bill'])

        # Adding model 'Payment'
        db.create_table('membership_payment', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('billingcycle', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.BillingCycle'], null=True)),
            ('ignore', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('comment', self.gf('django.db.models.fields.CharField')(max_length=64, null=True)),
            ('reference_number', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('message', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)),
            ('transaction_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('payment_day', self.gf('django.db.models.fields.DateTimeField')()),
            ('amount', self.gf('django.db.models.fields.DecimalField')(max_digits=9, decimal_places=2)),
            ('type', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('payer_name', self.gf('django.db.models.fields.CharField')(max_length=64)),
            ('duplicate', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('membership', ['Payment'])

        # Adding model 'ApplicationPoll'
        db.create_table('membership_applicationpoll', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('membership', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['membership.Membership'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
            ('answer', self.gf('django.db.models.fields.CharField')(max_length=512)),
        ))
        db.send_create_signal('membership', ['ApplicationPoll'])

    def backwards(self, orm):
        """Revert the migration: drop all membership tables."""
        # Deleting model 'Contact'
        db.delete_table('membership_contact')

        # Deleting model 'Membership'
        db.delete_table('membership_membership')

        # Deleting model 'Fee'
        db.delete_table('membership_fee')

        # Deleting model 'BillingCycle'
        db.delete_table('membership_billingcycle')

        # Deleting model 'Bill'
        db.delete_table('membership_bill')

        # Deleting model 'Payment'
        db.delete_table('membership_payment')

        # Deleting model 'ApplicationPoll'
        db.delete_table('membership_applicationpoll')

    # Frozen ORM state captured when this migration was generated; South
    # builds the ``orm`` argument passed to forwards()/backwards() from it.
    models = {
        'membership.applicationpoll': {
            'Meta': {'object_name': 'ApplicationPoll'},
            'answer': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'membership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Membership']"})
        },
        'membership.bill': {
            'Meta': {'object_name': 'Bill'},
            'billingcycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.BillingCycle']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'due_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'reminder_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'E'", 'max_length': '1'})
        },
        'membership.billingcycle': {
            'Meta': {'object_name': 'BillingCycle'},
            'end': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'membership': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.Membership']"}),
            'reference_number': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 9, 28, 0, 0)'}),
            'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'})
        },
        'membership.contact': {
            'Meta': {'object_name': 'Contact'},
            'country': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'given_names': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'organization_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'post_office': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'sms': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'street_address': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'membership.fee': {
            'Meta': {'object_name': 'Fee'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {}),
            'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
        },
        'membership.membership': {
            'Meta': {'object_name': 'Membership'},
            'approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'billing_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'billing_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'birth_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'extra_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_changed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'locked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'municipality': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'nationality': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'organization_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'organization_registration_number': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'public_memberlist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'N'", 'max_length': '1'}),
            'tech_contact': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tech_contact_set'", 'null': 'True', 'to': "orm['membership.Contact']"}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '1'})
        },
        'membership.payment': {
            'Meta': {'object_name': 'Payment'},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '9', 'decimal_places': '2'}),
            'billingcycle': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['membership.BillingCycle']", 'null': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'duplicate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ignore': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'message': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'payer_name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'payment_day': ('django.db.models.fields.DateTimeField', [], {}),
            'reference_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'transaction_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        }
    }

    complete_apps = ['membership']
from config import Config
import time
import re
import asyncio
# Matches the "key=value" progress tokens ffmpeg prints on stderr.
progress_pattern = re.compile(
    r'(frame|fps|size|time|bitrate|speed)\s*\=\s*(\S+)'
)

def parse_progress(line):
    """Return a dict of ffmpeg progress fields found in *line*, or None."""
    found = dict(progress_pattern.findall(line))
    return found or None
async def readlines(stream):
    """Yield complete CR/LF-terminated lines read from *stream*.

    NOTE(review): bytes received on the final read (after at_eof()
    becomes true) are never split or yielded — quirk preserved.
    """
    newline_run = re.compile(br'[\r\n]+')
    buffer = bytearray()
    while not stream.at_eof():
        *complete, tail = newline_run.split(buffer)
        buffer[:] = tail
        for complete_line in complete:
            yield complete_line
        buffer.extend(await stream.read(1024))
async def read_stderr(start, msg, process):
    """Stream ffmpeg stderr and periodically edit *msg* with progress.

    Args:
        start: time.time() timestamp taken when the job began.
        msg: message object supporting ``await msg.edit(text)``
             (presumably a pyrogram Message — TODO confirm).
        process: asyncio subprocess whose stderr is being consumed.
    """
    async for line in readlines(process.stderr):
        line = line.decode('utf-8')
        progress = parse_progress(line)
        if progress:
            # Progress bar logic
            now = time.time()
            # BUGFIX: elapsed time is now - start (was start - now, which is
            # negative and made the 5-second throttle below fire erratically).
            elapsed = now - start
            text = 'PROGRESS\n'
            text += 'Size : {}\n'.format(progress['size'])
            text += 'Time : {}\n'.format(progress['time'])
            text += 'Speed : {}\n'.format(progress['speed'])
            # Edit roughly once per 5s window to avoid message-edit flood limits.
            if round(elapsed % 5) == 0:
                try:
                    await msg.edit(text)
                except Exception:
                    # Best-effort: ignore edit failures (e.g. message unchanged).
                    pass
async def softmux_vid(vid_filename, sub_filename, msg):
    """Mux *sub_filename* into *vid_filename* as a soft (selectable) subtitle track.

    Streams are copied (no re-encode); output is <name>1.mkv in
    Config.DOWNLOAD_DIR. Returns the output filename on success,
    False on ffmpeg failure. Progress is streamed to *msg*.
    """
    start = time.time()
    vid = Config.DOWNLOAD_DIR + '/' + vid_filename
    sub = Config.DOWNLOAD_DIR + '/' + sub_filename

    out_file = '.'.join(vid_filename.split('.')[:-1])
    output = out_file + '1.mkv'
    out_location = Config.DOWNLOAD_DIR + '/' + output
    sub_ext = sub_filename.split('.').pop()

    command = [
        'ffmpeg', '-hide_banner',
        '-i', vid,
        '-i', sub,
        '-map', '1:0', '-map', '0',
        '-disposition:s:0', 'default',
        '-c:v', 'copy',
        '-c:a', 'copy',
        '-c:s', sub_ext,
        '-y', out_location
    ]

    process = await asyncio.create_subprocess_exec(
        *command,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Consume stderr for progress updates while waiting for ffmpeg to exit.
    # BUGFIX: asyncio.wait() no longer accepts bare coroutines (Python 3.11+);
    # gather() also guarantees both finished before returncode is inspected.
    await asyncio.gather(
        read_stderr(start, msg, process),
        process.wait(),
    )
    if process.returncode == 0:
        # BUGFIX: elapsed time is time.time() - start (was negated).
        await msg.edit('Muxing Completed Successfully!\n\nTime taken : {} seconds'.format(
            round(time.time() - start)))
    else:
        await msg.edit('An Error occured while Muxing!')
        return False
    # BUGFIX: never block the event loop — use asyncio.sleep, not time.sleep.
    await asyncio.sleep(2)
    return output
async def hardmux_vid(vid_filename, sub_filename, msg):
    """Burn *sub_filename* into the video stream of *vid_filename* (hard sub).

    Re-encodes video through the subtitles filter; output is <name>1.mp4 in
    Config.DOWNLOAD_DIR. Returns the output filename on success,
    False on ffmpeg failure. Progress is streamed to *msg*.
    """
    start = time.time()
    vid = Config.DOWNLOAD_DIR + '/' + vid_filename
    sub = Config.DOWNLOAD_DIR + '/' + sub_filename

    out_file = '.'.join(vid_filename.split('.')[:-1])
    output = out_file + '1.mp4'
    out_location = Config.DOWNLOAD_DIR + '/' + output

    command = [
        'ffmpeg', '-hide_banner',
        '-i', vid,
        '-vf', "subtitles="+sub+":fontsdir=fonts/font:force_style='Fontname=B Titr,Fontsize=28,PrimaryColour=&H0000FFFF'", #,SecondaryColour=&H0300FFFF'", #,OutlineColour=&H00000000,BackColour=&H02000000,ScaleX=100,ScaleY=100,BorderStyle=1,Outline=1,Alignment=2,MarginL=10,MarginR=10,MarginV=10,Encoding=1'",
        '-c:v', 'h264',
        '-map', '0:v:0',
        '-map', '0:a:0?',
        #'-preset','ultrafast',
        '-y', out_location
    ]

    process = await asyncio.create_subprocess_exec(
        *command,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Consume stderr for progress updates while waiting for ffmpeg to exit.
    # BUGFIX: asyncio.wait() no longer accepts bare coroutines (Python 3.11+);
    # gather() also guarantees both finished before returncode is inspected.
    await asyncio.gather(
        read_stderr(start, msg, process),
        process.wait(),
    )
    if process.returncode == 0:
        # BUGFIX: elapsed time is time.time() - start (was negated).
        await msg.edit('Muxing Completed Successfully!\n\nTime taken : {} seconds'.format(
            round(time.time() - start)))
    else:
        await msg.edit('An Error occured while Muxing!')
        return False
    # BUGFIX: never block the event loop — use asyncio.sleep, not time.sleep.
    await asyncio.sleep(2)
    return output
import time
import re
import asyncio
# Matches the "key=value" progress tokens ffmpeg prints on stderr.
progress_pattern = re.compile(
    r'(frame|fps|size|time|bitrate|speed)\s*\=\s*(\S+)'
)

def parse_progress(line):
    """Return a dict of ffmpeg progress fields found in *line*, or None."""
    found = dict(progress_pattern.findall(line))
    return found or None
async def readlines(stream):
    """Yield complete CR/LF-terminated lines read from *stream*.

    NOTE(review): bytes received on the final read (after at_eof()
    becomes true) are never split or yielded — quirk preserved.
    """
    newline_run = re.compile(br'[\r\n]+')
    buffer = bytearray()
    while not stream.at_eof():
        *complete, tail = newline_run.split(buffer)
        buffer[:] = tail
        for complete_line in complete:
            yield complete_line
        buffer.extend(await stream.read(1024))
async def read_stderr(start, msg, process):
    """Stream ffmpeg stderr and periodically edit *msg* with progress.

    Args:
        start: time.time() timestamp taken when the job began.
        msg: message object supporting ``await msg.edit(text)``
             (presumably a pyrogram Message — TODO confirm).
        process: asyncio subprocess whose stderr is being consumed.
    """
    async for line in readlines(process.stderr):
        line = line.decode('utf-8')
        progress = parse_progress(line)
        if progress:
            # Progress bar logic
            now = time.time()
            # BUGFIX: elapsed time is now - start (was start - now, which is
            # negative and made the 5-second throttle below fire erratically).
            elapsed = now - start
            text = 'PROGRESS\n'
            text += 'Size : {}\n'.format(progress['size'])
            text += 'Time : {}\n'.format(progress['time'])
            text += 'Speed : {}\n'.format(progress['speed'])
            # Edit roughly once per 5s window to avoid message-edit flood limits.
            if round(elapsed % 5) == 0:
                try:
                    await msg.edit(text)
                except Exception:
                    # Best-effort: ignore edit failures (e.g. message unchanged).
                    pass
async def softmux_vid(vid_filename, sub_filename, msg):
    """Mux *sub_filename* into *vid_filename* as a soft (selectable) subtitle track.

    Streams are copied (no re-encode); output is <name>1.mkv in
    Config.DOWNLOAD_DIR. Returns the output filename on success,
    False on ffmpeg failure. Progress is streamed to *msg*.
    """
    start = time.time()
    vid = Config.DOWNLOAD_DIR + '/' + vid_filename
    sub = Config.DOWNLOAD_DIR + '/' + sub_filename

    out_file = '.'.join(vid_filename.split('.')[:-1])
    output = out_file + '1.mkv'
    out_location = Config.DOWNLOAD_DIR + '/' + output
    sub_ext = sub_filename.split('.').pop()

    command = [
        'ffmpeg', '-hide_banner',
        '-i', vid,
        '-i', sub,
        '-map', '1:0', '-map', '0',
        '-disposition:s:0', 'default',
        '-c:v', 'copy',
        '-c:a', 'copy',
        '-c:s', sub_ext,
        '-y', out_location
    ]

    process = await asyncio.create_subprocess_exec(
        *command,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Consume stderr for progress updates while waiting for ffmpeg to exit.
    # BUGFIX: asyncio.wait() no longer accepts bare coroutines (Python 3.11+);
    # gather() also guarantees both finished before returncode is inspected.
    await asyncio.gather(
        read_stderr(start, msg, process),
        process.wait(),
    )
    if process.returncode == 0:
        # BUGFIX: elapsed time is time.time() - start (was negated).
        await msg.edit('Muxing Completed Successfully!\n\nTime taken : {} seconds'.format(
            round(time.time() - start)))
    else:
        await msg.edit('An Error occured while Muxing!')
        return False
    # BUGFIX: never block the event loop — use asyncio.sleep, not time.sleep.
    await asyncio.sleep(2)
    return output
async def hardmux_vid(vid_filename, sub_filename, msg):
    """Burn *sub_filename* into the video stream of *vid_filename* (hard sub).

    Re-encodes video through the subtitles filter; output is <name>1.mp4 in
    Config.DOWNLOAD_DIR. Returns the output filename on success,
    False on ffmpeg failure. Progress is streamed to *msg*.
    """
    start = time.time()
    vid = Config.DOWNLOAD_DIR + '/' + vid_filename
    sub = Config.DOWNLOAD_DIR + '/' + sub_filename

    out_file = '.'.join(vid_filename.split('.')[:-1])
    output = out_file + '1.mp4'
    out_location = Config.DOWNLOAD_DIR + '/' + output

    command = [
        'ffmpeg', '-hide_banner',
        '-i', vid,
        '-vf', "subtitles="+sub+":fontsdir=fonts/font:force_style='Fontname=B Titr,Fontsize=28,PrimaryColour=&H0000FFFF'", #,SecondaryColour=&H0300FFFF'", #,OutlineColour=&H00000000,BackColour=&H02000000,ScaleX=100,ScaleY=100,BorderStyle=1,Outline=1,Alignment=2,MarginL=10,MarginR=10,MarginV=10,Encoding=1'",
        '-c:v', 'h264',
        '-map', '0:v:0',
        '-map', '0:a:0?',
        #'-preset','ultrafast',
        '-y', out_location
    ]

    process = await asyncio.create_subprocess_exec(
        *command,
        # stdout must be a pipe to be accessible as process.stdout
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # Consume stderr for progress updates while waiting for ffmpeg to exit.
    # BUGFIX: asyncio.wait() no longer accepts bare coroutines (Python 3.11+);
    # gather() also guarantees both finished before returncode is inspected.
    await asyncio.gather(
        read_stderr(start, msg, process),
        process.wait(),
    )
    if process.returncode == 0:
        # BUGFIX: elapsed time is time.time() - start (was negated).
        await msg.edit('Muxing Completed Successfully!\n\nTime taken : {} seconds'.format(
            round(time.time() - start)))
    else:
        await msg.edit('An Error occured while Muxing!')
        return False
    # BUGFIX: never block the event loop — use asyncio.sleep, not time.sleep.
    await asyncio.sleep(2)
    return output
"""Transformer evaluation script."""
import os
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.engine as de
import mindspore.dataset.transforms.c_transforms as deC
from mindspore import context
from src.transformer_model import TransformerModel
from src.eval_config import cfg, transformer_net_cfg
def load_test_data(batch_size=1, data_file=None):
    """
    Build the evaluation MindDataset: cast every column to int32, then batch.
    """
    columns = ["source_eos_ids", "source_eos_mask",
               "target_sos_ids", "target_sos_mask",
               "target_eos_ids", "target_eos_mask"]
    ds = de.MindDataset(data_file, columns_list=columns, shuffle=False)
    type_cast_op = deC.TypeCast(mstype.int32)
    # Same cast applied column-by-column, in the original column order.
    for column in columns:
        ds = ds.map(operations=type_cast_op, input_columns=column)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    ds.channel_name = 'transformer'
    return ds
class TransformerInferCell(nn.Cell):
    """Thin inference wrapper forwarding ids/mask to the Transformer network."""

    def __init__(self, network):
        super(TransformerInferCell, self).__init__(auto_prefix=False)
        self.network = network

    def construct(self, source_ids, source_mask):
        # Delegate directly; the wrapped network produces the predicted ids.
        return self.network(source_ids, source_mask)
def load_weights(model_path):
    """
    Load checkpoint as parameter dict, support both npz file and mindspore checkpoint file.
    """
    is_npz = model_path.endswith(".npz")
    ms_ckpt = np.load(model_path) if is_npz else load_checkpoint(model_path)

    weights = {}
    for msname in ms_ckpt:
        # Decoder weights are re-homed under the inference decoder prefix.
        if "tfm_decoder" in msname:
            infer_name = "tfm_decoder.decoder." + msname
        else:
            infer_name = msname
        weights[infer_name] = ms_ckpt[msname] if is_npz else ms_ckpt[msname].data.asnumpy()

    # The decoder shares the encoder's embedding-lookup table.
    weights["tfm_decoder.decoder.tfm_embedding_lookup.embedding_table"] = \
        weights["tfm_embedding_lookup.embedding_table"]

    return {name: Parameter(Tensor(value), name=name)
            for name, value in weights.items()}
def run_transformer_eval():
    """
    Transformer evaluation.

    Runs inference over the evaluation MindDataset and writes one line of
    space-separated predicted token ids per sample to cfg.output_file.
    """
    # BUGFIX: default to device 0 instead of raising TypeError when
    # the DEVICE_ID environment variable is unset.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False,
                        device_id=device_id)

    dataset = load_test_data(batch_size=transformer_net_cfg.batch_size, data_file=cfg.data_file)
    tfm_model = TransformerModel(config=transformer_net_cfg, is_training=False, use_one_hot_embeddings=False)

    parameter_dict = load_weights(cfg.model_file)
    load_param_into_net(tfm_model, parameter_dict)

    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    predictions = []
    source_sents = []
    target_sents = []
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        source_sents.append(batch["source_eos_ids"])
        target_sents.append(batch["target_eos_ids"])
        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        predicted_ids = model.predict(source_ids, source_mask)
        predictions.append(predicted_ids.asnumpy())

    # decode and write to file
    # BUGFIX: context manager closes the file even if a write fails.
    with open(cfg.output_file, 'w') as f:
        for batch_out in predictions:
            # 3-D output presumably is (batch, beam, seq) — keep the first
            # beam. Hoisted out of the per-sample loop (was re-checked per i).
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]
            for i in range(transformer_net_cfg.batch_size):
                token_ids = [str(x) for x in batch_out[i].tolist()]
                f.write(" ".join(token_ids) + "\n")
# Script entry point: run the full evaluation when invoked directly.
if __name__ == "__main__":
    run_transformer_eval()
import os
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
import mindspore.dataset.engine as de
import mindspore.dataset.transforms.c_transforms as deC
from mindspore import context
from src.transformer_model import TransformerModel
from src.eval_config import cfg, transformer_net_cfg
def load_test_data(batch_size=1, data_file=None):
    """
    Build the evaluation MindDataset: cast every column to int32, then batch.
    """
    columns = ["source_eos_ids", "source_eos_mask",
               "target_sos_ids", "target_sos_mask",
               "target_eos_ids", "target_eos_mask"]
    ds = de.MindDataset(data_file, columns_list=columns, shuffle=False)
    type_cast_op = deC.TypeCast(mstype.int32)
    # Same cast applied column-by-column, in the original column order.
    for column in columns:
        ds = ds.map(operations=type_cast_op, input_columns=column)
    # apply batch operations
    ds = ds.batch(batch_size, drop_remainder=True)
    ds.channel_name = 'transformer'
    return ds
class TransformerInferCell(nn.Cell):
    """Thin inference wrapper forwarding ids/mask to the Transformer network."""

    def __init__(self, network):
        super(TransformerInferCell, self).__init__(auto_prefix=False)
        self.network = network

    def construct(self, source_ids, source_mask):
        # Delegate directly; the wrapped network produces the predicted ids.
        return self.network(source_ids, source_mask)
def load_weights(model_path):
    """
    Load checkpoint as parameter dict, support both npz file and mindspore checkpoint file.
    """
    is_npz = model_path.endswith(".npz")
    ms_ckpt = np.load(model_path) if is_npz else load_checkpoint(model_path)

    weights = {}
    for msname in ms_ckpt:
        # Decoder weights are re-homed under the inference decoder prefix.
        if "tfm_decoder" in msname:
            infer_name = "tfm_decoder.decoder." + msname
        else:
            infer_name = msname
        weights[infer_name] = ms_ckpt[msname] if is_npz else ms_ckpt[msname].data.asnumpy()

    # The decoder shares the encoder's embedding-lookup table.
    weights["tfm_decoder.decoder.tfm_embedding_lookup.embedding_table"] = \
        weights["tfm_embedding_lookup.embedding_table"]

    return {name: Parameter(Tensor(value), name=name)
            for name, value in weights.items()}
def run_transformer_eval():
    """
    Transformer evaluation.

    Runs inference over the evaluation MindDataset and writes one line of
    space-separated predicted token ids per sample to cfg.output_file.
    """
    # BUGFIX: default to device 0 instead of raising TypeError when
    # the DEVICE_ID environment variable is unset.
    device_id = int(os.getenv('DEVICE_ID', '0'))
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False,
                        device_id=device_id)

    dataset = load_test_data(batch_size=transformer_net_cfg.batch_size, data_file=cfg.data_file)
    tfm_model = TransformerModel(config=transformer_net_cfg, is_training=False, use_one_hot_embeddings=False)

    parameter_dict = load_weights(cfg.model_file)
    load_param_into_net(tfm_model, parameter_dict)

    tfm_infer = TransformerInferCell(tfm_model)
    model = Model(tfm_infer)

    predictions = []
    source_sents = []
    target_sents = []
    for batch in dataset.create_dict_iterator(output_numpy=True, num_epochs=1):
        source_sents.append(batch["source_eos_ids"])
        target_sents.append(batch["target_eos_ids"])
        source_ids = Tensor(batch["source_eos_ids"], mstype.int32)
        source_mask = Tensor(batch["source_eos_mask"], mstype.int32)
        predicted_ids = model.predict(source_ids, source_mask)
        predictions.append(predicted_ids.asnumpy())

    # decode and write to file
    # BUGFIX: context manager closes the file even if a write fails.
    with open(cfg.output_file, 'w') as f:
        for batch_out in predictions:
            # 3-D output presumably is (batch, beam, seq) — keep the first
            # beam. Hoisted out of the per-sample loop (was re-checked per i).
            if batch_out.ndim == 3:
                batch_out = batch_out[:, 0]
            for i in range(transformer_net_cfg.batch_size):
                token_ids = [str(x) for x in batch_out[i].tolist()]
                f.write(" ".join(token_ids) + "\n")
# Script entry point: run the full evaluation when invoked directly.
if __name__ == "__main__":
    run_transformer_eval()
import yaml
import unittest
import sys
from rhos_bootstrap import exceptions
from unittest import mock
# Stub out dnf/libdnf BEFORE importing rhos_bootstrap.distribution: these RPM
# bindings exist only on Red Hat family systems and are irrelevant to the tests.
sys.modules["dnf"] = mock.MagicMock()
sys.modules["dnf.cli.cli"] = mock.MagicMock()
sys.modules["dnf.exceptions"] = mock.MagicMock()
sys.modules["dnf.logging"] = mock.MagicMock()
sys.modules["libdnf"] = mock.MagicMock()
from rhos_bootstrap import distribution
DUMMY_CENTOS_DATA = """
---
distros:
centos:
mirror:
- http://mirror.centos.org
versions:
- 8-stream
versions:
master: &master_branch
distros: &distros_centos8
- centos8-stream
repos:
centos8-stream:
- highavailability
- powertools
ceph:
- octopus
delorean:
- current-tripleo
- deps
modules:
container-tools: rhel8
virt: rhel
python36: 3.6
wallaby: *master_branch
"""
DUMMY_RHEL_DATA = """
---
distros:
redhat:
versions:
- 8.2
versions:
"16.1":
distros:
- rhel8.2
repos:
rhel8.2:
- rhel-8-for-x86_64-baseos-eus-rpms
- rhel-8-for-x86_64-appstream-eus-rpms
- rhel-8-for-x86_64-highavailability-eus-rpms
ansible:
- ansible-2.9-for-rhel-8-x86_64-rpms
virt:
- advanced-virt-for-rhel-8-x86_64-rpms
ceph:
- rhceph-4-tools-for-rhel-8-x86_64-rpms
openstack:
- openstack-16.1-for-rhel-8-x86_64-rpms
satellite:
- satellite-tools-6.5-for-rhel-8-x86_64-rpms
openvswitch:
- fast-datapath-for-rhel-8-x86_64-rpms
modules:
container-tools: 2.0
virt: rhel
python36: 3.6
"""
class TestDistributionInfo(unittest.TestCase):
    """Unit tests for rhos_bootstrap.distribution.DistributionInfo."""

    @mock.patch("os.path.exists")
    def test_data(self, exists_mock):
        # YAML loading, attribute parsing, normalized ids, os-release fallback
        # via subprocess, and the DistroNotSupported path when no data file exists.
        exists_mock.return_value = True
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        with mock.patch(
            "builtins.open", mock.mock_open(read_data=DUMMY_CENTOS_DATA)
        ) as open_mock:
            obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
            open_mock.assert_called_with(
                "/usr/share/rhos-bootstrap/centos.yaml", "r", encoding="utf-8"
            )
            self.assertEqual(obj.distro_data, dummy_data)
            self.assertEqual(obj.distro_id, "centos")
            self.assertEqual(obj.distro_version_id, "8")
            self.assertEqual(obj.distro_major_version_id, "8")
            self.assertTrue(obj.is_stream)
            self.assertEqual(obj.distro_minor_version_id, "")
            self.assertEqual(obj.distro_name, "CentOS Stream")
            self.assertEqual(obj.distros, dummy_data["distros"])
            self.assertEqual(obj.versions, dummy_data["versions"])
            self.assertEqual(obj.distro_normalized_id, "centos8-stream")
            self.assertEqual(str(obj), "centos8-stream")
        # RHEL-style version string is split into major/minor parts.
        with mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data"):
            obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
            self.assertEqual(obj.distro_id, "rhel")
            self.assertEqual(obj.distro_version_id, "8.2")
            self.assertEqual(obj.distro_major_version_id, "8")
            self.assertFalse(obj.is_stream)
            self.assertEqual(obj.distro_minor_version_id, "2")
            self.assertEqual(obj.distro_normalized_id, "rhel8.2")
            # No-args construction detects the distro by shelling out
            # (presumably to an os-release helper — see Popen stub below).
            with mock.patch("subprocess.Popen") as popen_mock:
                proc_mock = mock.MagicMock()
                comm_mock = mock.MagicMock()
                comm_mock.return_value = ["rhel\n8.2\nRed Hat Enterprise Linux"]
                proc_mock.__enter__.return_value.communicate = comm_mock
                popen_mock.return_value = proc_mock
                obj = distribution.DistributionInfo()
                self.assertEqual(obj.distro_id, "rhel")
                self.assertEqual(obj.distro_version_id, "8.2")
                self.assertEqual(obj.distro_major_version_id, "8")
                self.assertFalse(obj.is_stream)
                self.assertEqual(obj.distro_minor_version_id, "2")
        # Unknown distro data file -> DistroNotSupported at construction time.
        exists_mock.return_value = False
        self.assertRaises(
            exceptions.DistroNotSupported,
            distribution.DistributionInfo,
            "foo",
            "bar",
            "baz",
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_validate_distro(self, load_mock):
        # A version validates only when this distro appears in its distros list.
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        self.assertFalse(obj.validate_distro("doesnotexist"))
        self.assertTrue(obj.validate_distro("master"))
        obj._distro_version_id = "9"
        self.assertFalse(obj.validate_distro("master"))

    @mock.patch("rhos_bootstrap.utils.rhsm.SubscriptionManager.instance")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_validate_distro_rhel(self, load_mock, submgr_mock):
        # On RHEL, validation also checks subscription-manager's pinned release;
        # a mismatch raises SubscriptionManagerConfigError.
        inst_mock = mock.MagicMock()
        status_mock = mock.MagicMock()
        release_mock = mock.MagicMock()
        inst_mock.status = status_mock
        inst_mock.release = release_mock
        submgr_mock.return_value = inst_mock
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = yaml.safe_load(DUMMY_RHEL_DATA)
        self.assertFalse(obj.validate_distro("doesnotexist"))
        release_mock.return_value = (0, "Release: 8.2", "")
        self.assertTrue(obj.validate_distro("16.1"))
        release_mock.return_value = (0, "Release: 8.3", "")
        self.assertRaises(
            exceptions.SubscriptionManagerConfigError, obj.validate_distro, "16.1"
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_version(self, lock_mock):
        # get_version returns the raw version mapping or raises for unknown ones.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        self.assertRaises(exceptions.VersionNotSupported, obj.get_version, "999")
        self.assertEqual(obj.get_version("16.1"), dummy_data["versions"]["16.1"])

    @mock.patch("rhos_bootstrap.utils.repos.TripleoDeloreanRepos")
    @mock.patch("rhos_bootstrap.utils.repos.TripleoCephRepo")
    @mock.patch("rhos_bootstrap.utils.repos.TripleoCentosRepo")
    @mock.patch("rhos_bootstrap.utils.repos.RhsmRepo")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_contruct_repos(
        self, load_mock, rhsm_mock, centos_mock, ceph_mock, dlrn_mock
    ):
        # construct_repo dispatches to the right repo class per distro family
        # and repo name; unknown names raise RepositoryNotSupported.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        obj.construct_repo("rhel8.2", "16.1", "rhel-repo")
        rhsm_mock.assert_called_once_with("rhel-repo")
        obj.construct_repo("ceph", "16.1", "ceph-repo")
        rhsm_mock.assert_called_with("ceph-repo")
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = dummy_data
        obj.construct_repo("centos8-stream", "master", "centos-repo")
        centos_mock.assert_called_once_with("centos8-stream", "centos-repo")
        obj.construct_repo("ceph", "master", "ceph-repo")
        ceph_mock.assert_called_once_with("centos8-stream", "ceph-repo")
        obj.construct_repo("delorean", "master", "dlrn-repo")
        dlrn_mock.assert_called_once_with("centos8", "master", "dlrn-repo")
        self.assertRaises(
            exceptions.RepositoryNotSupported, obj.construct_repo, "nope", "foo", "bar"
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo.construct_repo")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_repos(self, load_mock, const_repo):
        # get_repos flattens the per-version repo lists; counts come straight
        # from the fixture data for each distro/version combination.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        res = obj.get_repos("16.1")
        self.assertEqual(len(res), 8)
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = dummy_data
        res = obj.get_repos("master")
        self.assertEqual(len(res), 5)
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8.2", "CentOS Stream")
        obj._distro_data = dummy_data
        res = obj.get_repos("master")
        self.assertEqual(len(res), 3)

    @mock.patch("rhos_bootstrap.utils.dnf.DnfModule")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_modules(self, load_mock, mod_mock):
        # get_modules builds one DnfModule per (name, stream) pair in the data.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        self.assertEqual(
            len(obj.get_modules("16.1")), len(dummy_data["versions"]["16.1"]["modules"])
        )
        mod_calls = [
            mock.call(*i) for i in dummy_data["versions"]["16.1"]["modules"].items()
        ]
        self.assertEqual(mod_mock.mock_calls, mod_calls)
import yaml
import unittest
import sys
from rhos_bootstrap import exceptions
from unittest import mock
# Stub out dnf/libdnf BEFORE importing rhos_bootstrap.distribution: these RPM
# bindings exist only on Red Hat family systems and are irrelevant to the tests.
sys.modules["dnf"] = mock.MagicMock()
sys.modules["dnf.cli.cli"] = mock.MagicMock()
sys.modules["dnf.exceptions"] = mock.MagicMock()
sys.modules["dnf.logging"] = mock.MagicMock()
sys.modules["libdnf"] = mock.MagicMock()
from rhos_bootstrap import distribution
DUMMY_CENTOS_DATA = """
---
distros:
centos:
mirror:
- http://mirror.centos.org
versions:
- 8-stream
versions:
master: &master_branch
distros: &distros_centos8
- centos8-stream
repos:
centos8-stream:
- highavailability
- powertools
ceph:
- octopus
delorean:
- current-tripleo
- deps
modules:
container-tools: rhel8
virt: rhel
python36: 3.6
wallaby: *master_branch
"""
DUMMY_RHEL_DATA = """
---
distros:
redhat:
versions:
- 8.2
versions:
"16.1":
distros:
- rhel8.2
repos:
rhel8.2:
- rhel-8-for-x86_64-baseos-eus-rpms
- rhel-8-for-x86_64-appstream-eus-rpms
- rhel-8-for-x86_64-highavailability-eus-rpms
ansible:
- ansible-2.9-for-rhel-8-x86_64-rpms
virt:
- advanced-virt-for-rhel-8-x86_64-rpms
ceph:
- rhceph-4-tools-for-rhel-8-x86_64-rpms
openstack:
- openstack-16.1-for-rhel-8-x86_64-rpms
satellite:
- satellite-tools-6.5-for-rhel-8-x86_64-rpms
openvswitch:
- fast-datapath-for-rhel-8-x86_64-rpms
modules:
container-tools: 2.0
virt: rhel
python36: 3.6
"""
class TestDistributionInfo(unittest.TestCase):
    """Unit tests for rhos_bootstrap.distribution.DistributionInfo."""

    @mock.patch("os.path.exists")
    def test_data(self, exists_mock):
        # YAML loading, attribute parsing, normalized ids, os-release fallback
        # via subprocess, and the DistroNotSupported path when no data file exists.
        exists_mock.return_value = True
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        with mock.patch(
            "builtins.open", mock.mock_open(read_data=DUMMY_CENTOS_DATA)
        ) as open_mock:
            obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
            open_mock.assert_called_with(
                "/usr/share/rhos-bootstrap/centos.yaml", "r", encoding="utf-8"
            )
            self.assertEqual(obj.distro_data, dummy_data)
            self.assertEqual(obj.distro_id, "centos")
            self.assertEqual(obj.distro_version_id, "8")
            self.assertEqual(obj.distro_major_version_id, "8")
            self.assertTrue(obj.is_stream)
            self.assertEqual(obj.distro_minor_version_id, "")
            self.assertEqual(obj.distro_name, "CentOS Stream")
            self.assertEqual(obj.distros, dummy_data["distros"])
            self.assertEqual(obj.versions, dummy_data["versions"])
            self.assertEqual(obj.distro_normalized_id, "centos8-stream")
            self.assertEqual(str(obj), "centos8-stream")
        # RHEL-style version string is split into major/minor parts.
        with mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data"):
            obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
            self.assertEqual(obj.distro_id, "rhel")
            self.assertEqual(obj.distro_version_id, "8.2")
            self.assertEqual(obj.distro_major_version_id, "8")
            self.assertFalse(obj.is_stream)
            self.assertEqual(obj.distro_minor_version_id, "2")
            self.assertEqual(obj.distro_normalized_id, "rhel8.2")
            # No-args construction detects the distro by shelling out
            # (presumably to an os-release helper — see Popen stub below).
            with mock.patch("subprocess.Popen") as popen_mock:
                proc_mock = mock.MagicMock()
                comm_mock = mock.MagicMock()
                comm_mock.return_value = ["rhel\n8.2\nRed Hat Enterprise Linux"]
                proc_mock.__enter__.return_value.communicate = comm_mock
                popen_mock.return_value = proc_mock
                obj = distribution.DistributionInfo()
                self.assertEqual(obj.distro_id, "rhel")
                self.assertEqual(obj.distro_version_id, "8.2")
                self.assertEqual(obj.distro_major_version_id, "8")
                self.assertFalse(obj.is_stream)
                self.assertEqual(obj.distro_minor_version_id, "2")
        # Unknown distro data file -> DistroNotSupported at construction time.
        exists_mock.return_value = False
        self.assertRaises(
            exceptions.DistroNotSupported,
            distribution.DistributionInfo,
            "foo",
            "bar",
            "baz",
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_validate_distro(self, load_mock):
        # A version validates only when this distro appears in its distros list.
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        self.assertFalse(obj.validate_distro("doesnotexist"))
        self.assertTrue(obj.validate_distro("master"))
        obj._distro_version_id = "9"
        self.assertFalse(obj.validate_distro("master"))

    @mock.patch("rhos_bootstrap.utils.rhsm.SubscriptionManager.instance")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_validate_distro_rhel(self, load_mock, submgr_mock):
        # On RHEL, validation also checks subscription-manager's pinned release;
        # a mismatch raises SubscriptionManagerConfigError.
        inst_mock = mock.MagicMock()
        status_mock = mock.MagicMock()
        release_mock = mock.MagicMock()
        inst_mock.status = status_mock
        inst_mock.release = release_mock
        submgr_mock.return_value = inst_mock
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = yaml.safe_load(DUMMY_RHEL_DATA)
        self.assertFalse(obj.validate_distro("doesnotexist"))
        release_mock.return_value = (0, "Release: 8.2", "")
        self.assertTrue(obj.validate_distro("16.1"))
        release_mock.return_value = (0, "Release: 8.3", "")
        self.assertRaises(
            exceptions.SubscriptionManagerConfigError, obj.validate_distro, "16.1"
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_version(self, lock_mock):
        # get_version returns the raw version mapping or raises for unknown ones.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        self.assertRaises(exceptions.VersionNotSupported, obj.get_version, "999")
        self.assertEqual(obj.get_version("16.1"), dummy_data["versions"]["16.1"])

    @mock.patch("rhos_bootstrap.utils.repos.TripleoDeloreanRepos")
    @mock.patch("rhos_bootstrap.utils.repos.TripleoCephRepo")
    @mock.patch("rhos_bootstrap.utils.repos.TripleoCentosRepo")
    @mock.patch("rhos_bootstrap.utils.repos.RhsmRepo")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_contruct_repos(
        self, load_mock, rhsm_mock, centos_mock, ceph_mock, dlrn_mock
    ):
        # construct_repo dispatches to the right repo class per distro family
        # and repo name; unknown names raise RepositoryNotSupported.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        obj.construct_repo("rhel8.2", "16.1", "rhel-repo")
        rhsm_mock.assert_called_once_with("rhel-repo")
        obj.construct_repo("ceph", "16.1", "ceph-repo")
        rhsm_mock.assert_called_with("ceph-repo")
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = dummy_data
        obj.construct_repo("centos8-stream", "master", "centos-repo")
        centos_mock.assert_called_once_with("centos8-stream", "centos-repo")
        obj.construct_repo("ceph", "master", "ceph-repo")
        ceph_mock.assert_called_once_with("centos8-stream", "ceph-repo")
        obj.construct_repo("delorean", "master", "dlrn-repo")
        dlrn_mock.assert_called_once_with("centos8", "master", "dlrn-repo")
        self.assertRaises(
            exceptions.RepositoryNotSupported, obj.construct_repo, "nope", "foo", "bar"
        )

    @mock.patch("rhos_bootstrap.distribution.DistributionInfo.construct_repo")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_repos(self, load_mock, const_repo):
        # get_repos flattens the per-version repo lists; counts come straight
        # from the fixture data for each distro/version combination.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        res = obj.get_repos("16.1")
        self.assertEqual(len(res), 8)
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8", "CentOS Stream")
        obj._distro_data = dummy_data
        res = obj.get_repos("master")
        self.assertEqual(len(res), 5)
        dummy_data = yaml.safe_load(DUMMY_CENTOS_DATA)
        obj = distribution.DistributionInfo("centos", "8.2", "CentOS Stream")
        obj._distro_data = dummy_data
        res = obj.get_repos("master")
        self.assertEqual(len(res), 3)

    @mock.patch("rhos_bootstrap.utils.dnf.DnfModule")
    @mock.patch("rhos_bootstrap.distribution.DistributionInfo._load_data")
    def test_modules(self, load_mock, mod_mock):
        # get_modules builds one DnfModule per (name, stream) pair in the data.
        dummy_data = yaml.safe_load(DUMMY_RHEL_DATA)
        obj = distribution.DistributionInfo("rhel", "8.2", "Red Hat")
        obj._distro_data = dummy_data
        self.assertEqual(
            len(obj.get_modules("16.1")), len(dummy_data["versions"]["16.1"]["modules"])
        )
        mod_calls = [
            mock.call(*i) for i in dummy_data["versions"]["16.1"]["modules"].items()
        ]
        self.assertEqual(mod_mock.mock_calls, mod_calls)
from .fhirbase import fhirbase
class DiagnosticReport(fhirbase):
    """
    The findings and interpretation of diagnostic tests performed on
    patients, groups of patients, devices, and locations, and/or specimens
    derived from these. The report includes clinical context such as
    requesting and provider information, and some mix of atomic results,
    images, textual and coded interpretations, and formatted
    representation of diagnostic reports.

    Args:
        resourceType: This is a DiagnosticReport resource
        identifier: Identifiers assigned to this report by the performer or
            other systems.
        basedOn: Details concerning a test or procedure requested.
        status: The status of the diagnostic report as a whole.
        category: A code that classifies the clinical discipline, department
            or diagnostic service that created the report (e.g. cardiology,
            biochemistry, hematology, MRI). This is used for searching, sorting
            and display purposes.
        code: A code or name that describes this diagnostic report.
        subject: The subject of the report. Usually, but not always, this is a
            patient. However diagnostic services also perform analyses on
            specimens collected from a variety of other sources.
        context: The healthcare event (e.g. a patient and healthcare provider
            interaction) which this DiagnosticReport per is about.
        effectiveDateTime: The time or time-period the observed values are
            related to. When the subject of the report is a patient, this is
            usually either the time of the procedure or of specimen
            collection(s), but very often the source of the date/time is not
            known, only the date/time itself.
        effectivePeriod: The time or time-period the observed values are
            related to. When the subject of the report is a patient, this is
            usually either the time of the procedure or of specimen
            collection(s), but very often the source of the date/time is not
            known, only the date/time itself.
        issued: The date and time that this version of the report was released
            from the source diagnostic service.
        performer: Indicates who or what participated in producing the report.
        specimen: Details about the specimens on which this diagnostic report
            is based.
        result: Observations that are part of this diagnostic report.
            Observations can be simple name/value pairs (e.g. "atomic"
            results), or they can be grouping observations that include
            references to other members of the group (e.g. "panels").
        imagingStudy: One or more links to full details of any imaging
            performed during the diagnostic investigation. Typically, this is
            imaging performed by DICOM enabled modalities, but this is not
            required. A fully enabled PACS viewer can use this information to
            provide views of the source images.
        image: A list of key images associated with this report. The images
            are generally created during the diagnostic process, and may be
            directly of the patient, or of treated specimens (i.e. slides of
            interest).
        conclusion: Concise and clinically contextualized impression / summary
            of the diagnostic report.
        codedDiagnosis: Codes for the conclusion.
        presentedForm: Rich text representation of the entire result as issued
            by the diagnostic service. Multiple formats are allowed but they
            SHALL be semantically equivalent.
    """

    __name__ = 'DiagnosticReport'

    # Allowed codes for `status`, matching the validation message below.
    _STATUS_VALUES = [
        'registered', 'partial', 'preliminary', 'final', 'amended',
        'corrected', 'appended', 'cancelled', 'entered-in-error', 'unknown']

    def __init__(self, dict_values=None):
        self.resourceType = 'DiagnosticReport'
        # type: str
        # possible values: DiagnosticReport

        self.basedOn = None
        # type: list
        # reference to Reference: identifier

        self.status = None
        # type: str
        # possible values: registered, partial, preliminary, final,
        # amended, corrected, appended, cancelled, entered-in-error, unknown

        self.category = None
        # reference to CodeableConcept

        self.code = None
        # reference to CodeableConcept

        self.subject = None
        # reference to Reference: identifier

        self.context = None
        # reference to Reference: identifier

        self.effectiveDateTime = None
        # type: str

        self.effectivePeriod = None
        # reference to Period

        self.issued = None
        # type: str

        self.performer = None
        # type: list
        # reference to DiagnosticReport_Performer

        self.specimen = None
        # type: list
        # reference to Reference: identifier

        self.result = None
        # type: list
        # reference to Reference: identifier

        self.imagingStudy = None
        # type: list
        # reference to Reference: identifier

        self.image = None
        # type: list
        # reference to DiagnosticReport_Image

        self.conclusion = None
        # type: str

        self.codedDiagnosis = None
        # type: list
        # reference to CodeableConcept

        self.presentedForm = None
        # type: list
        # reference to Attachment

        self.identifier = None
        # type: list
        # reference to Identifier

        if dict_values:
            self.set_attributes(dict_values)
            self.assert_type()

    def assert_type(self):
        """Validate that `status`, if set, is one of the allowed codes.

        Fix: the previous implementation looped over the status string
        (`for value in self.status`) and validated each *character*, so any
        non-None status - even a valid one like 'final' - raised ValueError.
        The whole string is validated instead.
        """
        if self.status is not None and \
                self.status.lower() not in self._STATUS_VALUES:
            raise ValueError('"{}" does not match possible values: {}'.format(
                self.status,
                'registered, partial, preliminary, final, amended, corrected, '
                'appended, cancelled, entered-in-error, unknown'))

    def get_relationships(self):
        # Each entry links an attribute of this entity to the parent entity
        # (and key) it references; consumed by the schema builder.
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'imagingStudy'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'basedOn'},

            {'parent_entity': 'CodeableConcept',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'code'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'subject'},

            {'parent_entity': 'CodeableConcept',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'codedDiagnosis'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'context'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'result'},

            {'parent_entity': 'DiagnosticReport_Performer',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'performer'},

            {'parent_entity': 'Attachment',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'presentedForm'},

            {'parent_entity': 'Identifier',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'identifier'},

            {'parent_entity': 'DiagnosticReport_Image',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'image'},

            {'parent_entity': 'Period',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'effectivePeriod'},

            {'parent_entity': 'CodeableConcept',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'category'},

            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport',
             'child_variable': 'specimen'},
        ]
class DiagnosticReport_Performer(fhirbase):
    """
    A participant in the production of a DiagnosticReport.

    Args:
        role: Describes the type of participation (e.g. a responsible party,
            author, or verifier).
        actor: The reference to the practitioner or organization involved in
            producing the report. For example, the diagnostic service that is
            responsible for issuing the report.
    """

    __name__ = 'DiagnosticReport_Performer'

    def __init__(self, dict_values=None):
        # CodeableConcept describing the kind of participation.
        self.role = None
        # Reference (by identifier) to the participating practitioner or
        # organization.
        self.actor = None
        # Unique identifier for this object instance.
        self.object_id = None

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        # Links from this entity's attributes to the entities they reference.
        relationships = [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport_Performer',
             'child_variable': 'actor'},

            {'parent_entity': 'CodeableConcept',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport_Performer',
             'child_variable': 'role'},
        ]
        return relationships
class DiagnosticReport_Image(fhirbase):
    """
    A key image associated with a DiagnosticReport. Images are generally
    created during the diagnostic process, and may be directly of the
    patient, or of treated specimens (i.e. slides of interest).

    Args:
        comment: A comment about the image. Typically, this is used to provide
            an explanation for why the image is included, or to draw the
            viewer's attention to important features.
        link: Reference to the image source.
    """

    __name__ = 'DiagnosticReport_Image'

    def __init__(self, dict_values=None):
        # Free-text note explaining why the image is part of the report.
        self.comment = None
        # type: str

        # Reference (by identifier) to the image source.
        self.link = None
        # reference to Reference: identifier

        self.object_id = None
        # unique identifier for object class

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        # Links this entity's `link` attribute to the Reference it points at.
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport_Image',
             'child_variable': 'link'},
] | cardea/fhir/DiagnosticReport.py | from .fhirbase import fhirbase
class DiagnosticReport(fhirbase):
"""
The findings and interpretation of diagnostic tests performed on
patients, groups of patients, devices, and locations, and/or specimens
derived from these. The report includes clinical context such as
requesting and provider information, and some mix of atomic results,
images, textual and coded interpretations, and formatted
representation of diagnostic reports.
Args:
resourceType: This is a DiagnosticReport resource
identifier: Identifiers assigned to this report by the performer or
other systems.
basedOn: Details concerning a test or procedure requested.
status: The status of the diagnostic report as a whole.
category: A code that classifies the clinical discipline, department
or diagnostic service that created the report (e.g. cardiology,
biochemistry, hematology, MRI). This is used for searching, sorting
and display purposes.
code: A code or name that describes this diagnostic report.
subject: The subject of the report. Usually, but not always, this is a
patient. However diagnostic services also perform analyses on
specimens collected from a variety of other sources.
context: The healthcare event (e.g. a patient and healthcare provider
interaction) which this DiagnosticReport per is about.
effectiveDateTime: The time or time-period the observed values are
related to. When the subject of the report is a patient, this is
usually either the time of the procedure or of specimen collection(s),
but very often the source of the date/time is not known, only the
date/time itself.
effectivePeriod: The time or time-period the observed values are
related to. When the subject of the report is a patient, this is
usually either the time of the procedure or of specimen collection(s),
but very often the source of the date/time is not known, only the
date/time itself.
issued: The date and time that this version of the report was released
from the source diagnostic service.
performer: Indicates who or what participated in producing the report.
specimen: Details about the specimens on which this diagnostic report
is based.
result: Observations that are part of this diagnostic report.
Observations can be simple name/value pairs (e.g. "atomic" results),
or they can be grouping observations that include references to other
members of the group (e.g. "panels").
imagingStudy: One or more links to full details of any imaging
performed during the diagnostic investigation. Typically, this is
imaging performed by DICOM enabled modalities, but this is not
required. A fully enabled PACS viewer can use this information to
provide views of the source images.
image: A list of key images associated with this report. The images
are generally created during the diagnostic process, and may be
directly of the patient, or of treated specimens (i.e. slides of
interest).
conclusion: Concise and clinically contextualized impression / summary
of the diagnostic report.
codedDiagnosis: Codes for the conclusion.
presentedForm: Rich text representation of the entire result as issued
by the diagnostic service. Multiple formats are allowed but they SHALL
be semantically equivalent.
"""
__name__ = 'DiagnosticReport'
def __init__(self, dict_values=None):
self.resourceType = 'DiagnosticReport'
# type: str
# possible values: DiagnosticReport
self.basedOn = None
# type: list
# reference to Reference: identifier
self.status = None
# type: str
# possible values: registered, partial, preliminary, final,
# amended, corrected, appended, cancelled, entered-in-error, unknown
self.category = None
# reference to CodeableConcept
self.code = None
# reference to CodeableConcept
self.subject = None
# reference to Reference: identifier
self.context = None
# reference to Reference: identifier
self.effectiveDateTime = None
# type: str
self.effectivePeriod = None
# reference to Period
self.issued = None
# type: str
self.performer = None
# type: list
# reference to DiagnosticReport_Performer
self.specimen = None
# type: list
# reference to Reference: identifier
self.result = None
# type: list
# reference to Reference: identifier
self.imagingStudy = None
# type: list
# reference to Reference: identifier
self.image = None
# type: list
# reference to DiagnosticReport_Image
self.conclusion = None
# type: str
self.codedDiagnosis = None
# type: list
# reference to CodeableConcept
self.presentedForm = None
# type: list
# reference to Attachment
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
if self.status is not None:
for value in self.status:
if value is not None and value.lower() not in [
'registered', 'partial', 'preliminary', 'final', 'amended',
'corrected', 'appended', 'cancelled', 'entered-in-error', 'unknown']:
raise ValueError('"{}" does not match possible values: {}'.format(
value, 'registered, partial, preliminary, final, amended, corrected, '
'appended, cancelled, entered-in-error, unknown'))
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'imagingStudy'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'basedOn'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'code'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'subject'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'codedDiagnosis'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'context'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'result'},
{'parent_entity': 'DiagnosticReport_Performer',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'performer'},
{'parent_entity': 'Attachment',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'presentedForm'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'identifier'},
{'parent_entity': 'DiagnosticReport_Image',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'image'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'effectivePeriod'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'DiagnosticReport',
'child_variable': 'category'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'DiagnosticReport',
'child_variable': 'specimen'},
]
class DiagnosticReport_Performer(fhirbase):
    """
    A participant in the production of a DiagnosticReport.

    Args:
        role: Describes the type of participation (e.g. a responsible party,
            author, or verifier).
        actor: The reference to the practitioner or organization involved in
            producing the report. For example, the diagnostic service that is
            responsible for issuing the report.
    """

    __name__ = 'DiagnosticReport_Performer'

    def __init__(self, dict_values=None):
        # CodeableConcept describing the kind of participation.
        self.role = None
        # Reference (by identifier) to the participating practitioner or
        # organization.
        self.actor = None
        # Unique identifier for this object instance.
        self.object_id = None

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        # Links from this entity's attributes to the entities they reference.
        relationships = [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport_Performer',
             'child_variable': 'actor'},

            {'parent_entity': 'CodeableConcept',
             'parent_variable': 'object_id',
             'child_entity': 'DiagnosticReport_Performer',
             'child_variable': 'role'},
        ]
        return relationships
class DiagnosticReport_Image(fhirbase):
    """
    A key image associated with a DiagnosticReport. Images are generally
    created during the diagnostic process, and may be directly of the
    patient, or of treated specimens (i.e. slides of interest).

    Args:
        comment: A comment about the image. Typically, this is used to provide
            an explanation for why the image is included, or to draw the
            viewer's attention to important features.
        link: Reference to the image source.
    """

    __name__ = 'DiagnosticReport_Image'

    def __init__(self, dict_values=None):
        # Free-text note explaining why the image is part of the report.
        self.comment = None
        # type: str

        # Reference (by identifier) to the image source.
        self.link = None
        # reference to Reference: identifier

        self.object_id = None
        # unique identifier for object class

        if dict_values:
            self.set_attributes(dict_values)

    def get_relationships(self):
        # Links this entity's `link` attribute to the Reference it points at.
        return [
            {'parent_entity': 'Reference',
             'parent_variable': 'identifier',
             'child_entity': 'DiagnosticReport_Image',
             'child_variable': 'link'},
] | 0.877391 | 0.583203 |
import ast
import collections
import math
from radon.raw import analyze
from radon.visitors import ComplexityVisitor, HalsteadVisitor
# Halstead metrics
HalsteadReport = collections.namedtuple(
    'HalsteadReport',
    'h1 h2 N1 N2 vocabulary length '
    'calculated_length volume '
    'difficulty effort time bugs',
)

# `total` is a HalsteadReport for the entire scanned file, while `functions`
# holds one (name, HalsteadReport) pair per top-level function.
Halstead = collections.namedtuple("Halstead", "total functions")


def h_visit(code):
    '''Parse *code* into an AST and delegate to
    :func:`~radon.metrics.h_visit_ast`.
    '''
    tree = ast.parse(code)
    return h_visit_ast(tree)


def h_visit_ast(ast_node):
    '''
    Visit the AST node with :class:`~radon.visitors.HalsteadVisitor` and
    return a ``Halstead`` namedtuple with:

    * total: a `HalsteadReport` for the entire scanned file
    * functions: a list of (name, `HalsteadReport`) pairs, one per toplevel
      function (nested functions are not tracked)

    Each `HalsteadReport` carries: h1/h2 (distinct operators/operands),
    N1/N2 (total operators/operands), vocabulary (h1 + h2), length (N1 + N2),
    calculated_length (h1*log2(h1) + h2*log2(h2)), volume (N*log2(h)),
    difficulty (h1/2 * N2/h2), effort (D*V), time (E/18 seconds) and
    bugs (V/3000, an estimate of implementation errors).
    '''
    visitor = HalsteadVisitor.from_ast(ast_node)
    per_function = []
    for func_visitor in visitor.function_visitors:
        per_function.append(
            (func_visitor.context, halstead_visitor_report(func_visitor))
        )
    return Halstead(halstead_visitor_report(visitor), per_function)


def halstead_visitor_report(visitor):
    """Build a HalsteadReport from a HalsteadVisitor instance."""
    distinct_ops = visitor.distinct_operators
    distinct_opnds = visitor.distinct_operands
    total_ops = visitor.operators
    total_opnds = visitor.operands

    vocabulary = distinct_ops + distinct_opnds
    prog_length = total_ops + total_opnds
    # Guard the logarithms: a report over empty code has no vocabulary.
    if distinct_ops and distinct_opnds:
        calculated = (distinct_ops * math.log(distinct_ops, 2)
                      + distinct_opnds * math.log(distinct_opnds, 2))
    else:
        calculated = 0
    volume = prog_length * math.log(vocabulary, 2) if vocabulary != 0 else 0
    difficulty = ((distinct_ops * total_opnds) / float(2 * distinct_opnds)
                  if distinct_opnds != 0 else 0)
    effort = difficulty * volume
    return HalsteadReport(
        h1=distinct_ops,
        h2=distinct_opnds,
        N1=total_ops,
        N2=total_opnds,
        vocabulary=vocabulary,
        length=prog_length,
        calculated_length=calculated,
        volume=volume,
        difficulty=difficulty,
        effort=effort,
        time=effort / 18.0,
        bugs=volume / 3000.0,
    )
def mi_compute(halstead_volume, complexity, sloc, comments):
    '''Compute the Maintainability Index (MI) from the Halstead Volume, the
    Cyclomatic Complexity, the SLOC count and the percent of comment lines.
    Usually this is not called directly;
    :func:`~radon.metrics.mi_visit` is preferred.
    '''
    # Degenerate modules (no volume or no source lines) are treated as
    # fully maintainable.
    if halstead_volume <= 0 or sloc <= 0:
        return 100.0
    # Non-normalized MI on the classic 0..171 scale.
    nn_mi = (
        171
        - 5.2 * math.log(halstead_volume)
        - 0.23 * complexity
        - 16.2 * math.log(sloc)
        + 50 * math.sin(math.sqrt(2.46 * math.radians(comments)))
    )
    # Rescale to 0..100 and clamp to that range.
    return min(max(0.0, nn_mi * 100 / 171.0), 100.0)
def mi_parameters(code, count_multi=True):
    '''Return the four inputs needed to compute the Maintainability Index of
    a source snippet: the Halstead Volume, the Cyclomatic Complexity, the
    LLOC (Logical Lines of Code) count and the percent of comment lines.

    :param count_multi: if True, multiline strings are counted as comment
        lines as well. This is not always safe because Python multiline
        strings are not always docstrings.
    '''
    tree = ast.parse(code)
    raw = analyze(code)
    comments_lines = raw.comments + (raw.multi if count_multi else 0)
    if raw.sloc != 0:
        comments = comments_lines / float(raw.sloc) * 100
    else:
        comments = 0
    volume = h_visit_ast(tree).total.volume
    complexity = ComplexityVisitor.from_ast(tree).total_complexity
    return (volume, complexity, raw.lloc, comments)
def mi_visit(code, multi):
    '''Visit the code and compute its Maintainability Index (MI).'''
    params = mi_parameters(code, multi)
    return mi_compute(*params)
def mi_rank(score):
    r'''Rank the score with a letter:

    * A if :math:`\text{score} > 19`;
    * B if :math:`9 < \text{score} \le 19`;
    * C if :math:`\text{score} \le 9`.

    Implementation note: each satisfied threshold test adds 1 to the offset
    from ``chr(65)`` (``'A'``) - no threshold met gives A, one gives B,
    both give C.
    '''
return chr(65 + (9 - score >= 0) + (19 - score >= 0)) | radon/metrics.py | import ast
import collections
import math
from radon.raw import analyze
from radon.visitors import ComplexityVisitor, HalsteadVisitor
# Halstead metrics
HalsteadReport = collections.namedtuple(
    'HalsteadReport',
    'h1 h2 N1 N2 vocabulary length '
    'calculated_length volume '
    'difficulty effort time bugs',
)

# `total` is a HalsteadReport for the entire scanned file, while `functions`
# holds one (name, HalsteadReport) pair per top-level function.
Halstead = collections.namedtuple("Halstead", "total functions")


def h_visit(code):
    '''Parse *code* into an AST and delegate to
    :func:`~radon.metrics.h_visit_ast`.
    '''
    tree = ast.parse(code)
    return h_visit_ast(tree)


def h_visit_ast(ast_node):
    '''
    Visit the AST node with :class:`~radon.visitors.HalsteadVisitor` and
    return a ``Halstead`` namedtuple with:

    * total: a `HalsteadReport` for the entire scanned file
    * functions: a list of (name, `HalsteadReport`) pairs, one per toplevel
      function (nested functions are not tracked)

    Each `HalsteadReport` carries: h1/h2 (distinct operators/operands),
    N1/N2 (total operators/operands), vocabulary (h1 + h2), length (N1 + N2),
    calculated_length (h1*log2(h1) + h2*log2(h2)), volume (N*log2(h)),
    difficulty (h1/2 * N2/h2), effort (D*V), time (E/18 seconds) and
    bugs (V/3000, an estimate of implementation errors).
    '''
    visitor = HalsteadVisitor.from_ast(ast_node)
    per_function = []
    for func_visitor in visitor.function_visitors:
        per_function.append(
            (func_visitor.context, halstead_visitor_report(func_visitor))
        )
    return Halstead(halstead_visitor_report(visitor), per_function)


def halstead_visitor_report(visitor):
    """Build a HalsteadReport from a HalsteadVisitor instance."""
    distinct_ops = visitor.distinct_operators
    distinct_opnds = visitor.distinct_operands
    total_ops = visitor.operators
    total_opnds = visitor.operands

    vocabulary = distinct_ops + distinct_opnds
    prog_length = total_ops + total_opnds
    # Guard the logarithms: a report over empty code has no vocabulary.
    if distinct_ops and distinct_opnds:
        calculated = (distinct_ops * math.log(distinct_ops, 2)
                      + distinct_opnds * math.log(distinct_opnds, 2))
    else:
        calculated = 0
    volume = prog_length * math.log(vocabulary, 2) if vocabulary != 0 else 0
    difficulty = ((distinct_ops * total_opnds) / float(2 * distinct_opnds)
                  if distinct_opnds != 0 else 0)
    effort = difficulty * volume
    return HalsteadReport(
        h1=distinct_ops,
        h2=distinct_opnds,
        N1=total_ops,
        N2=total_opnds,
        vocabulary=vocabulary,
        length=prog_length,
        calculated_length=calculated,
        volume=volume,
        difficulty=difficulty,
        effort=effort,
        time=effort / 18.0,
        bugs=volume / 3000.0,
    )
def mi_compute(halstead_volume, complexity, sloc, comments):
    '''Compute the Maintainability Index (MI) from the Halstead Volume, the
    Cyclomatic Complexity, the SLOC count and the percent of comment lines.
    Usually this is not called directly;
    :func:`~radon.metrics.mi_visit` is preferred.
    '''
    # Degenerate modules (no volume or no source lines) are treated as
    # fully maintainable.
    if halstead_volume <= 0 or sloc <= 0:
        return 100.0
    # Non-normalized MI on the classic 0..171 scale.
    nn_mi = (
        171
        - 5.2 * math.log(halstead_volume)
        - 0.23 * complexity
        - 16.2 * math.log(sloc)
        + 50 * math.sin(math.sqrt(2.46 * math.radians(comments)))
    )
    # Rescale to 0..100 and clamp to that range.
    return min(max(0.0, nn_mi * 100 / 171.0), 100.0)
def mi_parameters(code, count_multi=True):
    '''Return the four inputs needed to compute the Maintainability Index of
    a source snippet: the Halstead Volume, the Cyclomatic Complexity, the
    LLOC (Logical Lines of Code) count and the percent of comment lines.

    :param count_multi: if True, multiline strings are counted as comment
        lines as well. This is not always safe because Python multiline
        strings are not always docstrings.
    '''
    tree = ast.parse(code)
    raw = analyze(code)
    comments_lines = raw.comments + (raw.multi if count_multi else 0)
    if raw.sloc != 0:
        comments = comments_lines / float(raw.sloc) * 100
    else:
        comments = 0
    volume = h_visit_ast(tree).total.volume
    complexity = ComplexityVisitor.from_ast(tree).total_complexity
    return (volume, complexity, raw.lloc, comments)
def mi_visit(code, multi):
    '''Visit the code and compute its Maintainability Index (MI).'''
    params = mi_parameters(code, multi)
    return mi_compute(*params)
def mi_rank(score):
    r'''Rank the score with a letter:

    * A if :math:`\text{score} > 19`;
    * B if :math:`9 < \text{score} \le 19`;
    * C if :math:`\text{score} \le 9`.

    Implementation note: each satisfied threshold test adds 1 to the offset
    from ``chr(65)`` (``'A'``) - no threshold met gives A, one gives B,
    both give C.
    '''
return chr(65 + (9 - score >= 0) + (19 - score >= 0)) | 0.901477 | 0.608507 |
import typing
import json
import logging
import numpy as np
import os
import pickle
import warnings
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
# noinspection PyProtectedMember
from sklearn.utils import shuffle as sklearn_shuffle
from typing import Optional, Any, List, Text, Dict, Callable
import rasa.utils.io
from rasa.core import utils
from rasa.core.domain import Domain
from rasa.core.featurizers import TrackerFeaturizer, MaxHistoryTrackerFeaturizer
from rasa.core.policies.policy import Policy
from rasa.core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
import sklearn
class SklearnPolicy(Policy):
"""Use an sklearn classifier to train a policy."""
def __init__(
self,
featurizer: Optional[MaxHistoryTrackerFeaturizer] = None,
priority: int = 1,
model: Optional["sklearn.base.BaseEstimator"] = None,
param_grid: Optional[Dict[Text, List] or List[Dict]] = None,
cv: Optional[int] = None,
scoring: Optional[Text or List or Dict or Callable] = "accuracy",
label_encoder: LabelEncoder = LabelEncoder(),
shuffle: bool = True,
**kwargs: Any
) -> None:
"""Create a new sklearn policy.
Args:
featurizer: Featurizer used to convert the training data into
vector format.
model: The sklearn model or model pipeline.
param_grid: If *param_grid* is not None and *cv* is given,
a grid search on the given *param_grid* is performed
(e.g. *param_grid={'n_estimators': [50, 100]}*).
cv: If *cv* is not None, perform a cross validation on
the training data. *cv* should then conform to the
sklearn standard (e.g. *cv=5* for a 5-fold cross-validation).
scoring: Scoring strategy, using the sklearn standard.
label_encoder: Encoder for the labels. Must implement an
*inverse_transform* method.
shuffle: Whether to shuffle training data.
"""
if featurizer:
if not isinstance(featurizer, MaxHistoryTrackerFeaturizer):
raise TypeError(
"Passed featurizer of type {}, should be "
"MaxHistoryTrackerFeaturizer."
"".format(type(featurizer).__name__)
)
super(SklearnPolicy, self).__init__(featurizer, priority)
self.model = model or self._default_model()
self.cv = cv
self.param_grid = param_grid
self.scoring = scoring
self.label_encoder = label_encoder
self.shuffle = shuffle
# attributes that need to be restored after loading
self._pickle_params = ["model", "cv", "param_grid", "scoring", "label_encoder"]
self._train_params = kwargs
@staticmethod
def _default_model():
return LogisticRegression(solver="liblinear", multi_class="auto")
@property
def _state(self):
return {attr: getattr(self, attr) for attr in self._pickle_params}
def model_architecture(self, **kwargs):
# filter out kwargs that cannot be passed to model
train_params = self._get_valid_params(self.model.__init__, **kwargs)
return self.model.set_params(**train_params)
def _extract_training_data(self, training_data):
# transform y from one-hot to num_classes
X, y = training_data.X, training_data.y.argmax(axis=-1)
if self.shuffle:
X, y = sklearn_shuffle(X, y)
return X, y
def _preprocess_data(self, X, y=None):
Xt = X.reshape(X.shape[0], -1)
if y is None:
return Xt
else:
yt = self.label_encoder.transform(y)
return Xt, yt
def _search_and_score(self, model, X, y, param_grid):
search = GridSearchCV(
model, param_grid=param_grid, cv=self.cv, scoring="accuracy", verbose=1
)
search.fit(X, y)
print ("Best params:", search.best_params_)
return search.best_estimator_, search.best_score_
def train(
self,
training_trackers: List[DialogueStateTracker],
domain: Domain,
**kwargs: Any
) -> None:
training_data = self.featurize_for_training(training_trackers, domain, **kwargs)
X, y = self._extract_training_data(training_data)
self._train_params.update(kwargs)
model = self.model_architecture(**self._train_params)
score = None
# Note: clone is called throughout to avoid mutating default
# arguments.
self.label_encoder = clone(self.label_encoder).fit(y)
Xt, yt = self._preprocess_data(X, y)
if self.cv is None:
model = clone(model).fit(Xt, yt)
else:
param_grid = self.param_grid or {}
model, score = self._search_and_score(model, Xt, yt, param_grid)
self.model = model
logger.info("Done fitting sklearn policy model")
if score is not None:
logger.info("Cross validation score: {:.5f}".format(score))
def _postprocess_prediction(self, y_proba, domain):
yp = y_proba[0].tolist()
# Some classes might not be part of the training labels. Since
# sklearn does not predict labels it has never encountered
# during training, it is necessary to insert missing classes.
indices = self.label_encoder.inverse_transform(np.arange(len(yp)))
y_filled = [0.0 for _ in range(domain.num_actions)]
for i, pred in zip(indices, yp):
y_filled[i] = pred
return y_filled
def predict_action_probabilities(
self, tracker: DialogueStateTracker, domain: Domain
) -> List[float]:
X = self.featurizer.create_X([tracker], domain)
Xt = self._preprocess_data(X)
y_proba = self.model.predict_proba(Xt)
return self._postprocess_prediction(y_proba, domain)
def persist(self, path: Text) -> None:
if self.model:
self.featurizer.persist(path)
meta = {"priority": self.priority}
meta_file = os.path.join(path, "sklearn_policy.json")
utils.dump_obj_as_json_to_file(meta_file, meta)
filename = os.path.join(path, "sklearn_model.pkl")
with open(filename, "wb") as f:
pickle.dump(self._state, f)
else:
warnings.warn(
"Persist called without a trained model present. "
"Nothing to persist then!"
)
    @classmethod
    def load(cls, path: Text) -> Policy:
        # Restore a persisted policy: featurizer + JSON metadata + pickled
        # model state written by `persist`.
        filename = os.path.join(path, "sklearn_model.pkl")
        # NOTE(review): this checks the directory `path` but the message
        # reports `filename` - confirm whether the pickle file itself
        # should be tested instead.
        if not os.path.exists(path):
            raise OSError(
                "Failed to load dialogue model. Path {} "
                "doesn't exist".format(os.path.abspath(filename))
            )
        featurizer = TrackerFeaturizer.load(path)
        # The sklearn policy only supports max-history featurization.
        assert isinstance(featurizer, MaxHistoryTrackerFeaturizer), (
            "Loaded featurizer of type {}, should be "
            "MaxHistoryTrackerFeaturizer.".format(type(featurizer).__name__)
        )
        meta_file = os.path.join(path, "sklearn_policy.json")
        meta = json.loads(rasa.utils.io.read_file(meta_file))
        policy = cls(featurizer=featurizer, priority=meta["priority"])
        with open(filename, "rb") as f:
            state = pickle.load(f)
        # Re-attach the pickled attributes (model, cv, param_grid, ...).
        vars(policy).update(state)
        logger.info("Loaded sklearn model")
return policy | rasa/core/policies/sklearn_policy.py | import typing
import json
import logging
import numpy as np
import os
import pickle
import warnings
from sklearn.base import clone
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
# noinspection PyProtectedMember
from sklearn.utils import shuffle as sklearn_shuffle
from typing import Optional, Any, List, Text, Dict, Callable
import rasa.utils.io
from rasa.core import utils
from rasa.core.domain import Domain
from rasa.core.featurizers import TrackerFeaturizer, MaxHistoryTrackerFeaturizer
from rasa.core.policies.policy import Policy
from rasa.core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
import sklearn
class SklearnPolicy(Policy):
"""Use an sklearn classifier to train a policy."""
def __init__(
self,
featurizer: Optional[MaxHistoryTrackerFeaturizer] = None,
priority: int = 1,
model: Optional["sklearn.base.BaseEstimator"] = None,
param_grid: Optional[Dict[Text, List] or List[Dict]] = None,
cv: Optional[int] = None,
scoring: Optional[Text or List or Dict or Callable] = "accuracy",
label_encoder: LabelEncoder = LabelEncoder(),
shuffle: bool = True,
**kwargs: Any
) -> None:
"""Create a new sklearn policy.
Args:
featurizer: Featurizer used to convert the training data into
vector format.
model: The sklearn model or model pipeline.
param_grid: If *param_grid* is not None and *cv* is given,
a grid search on the given *param_grid* is performed
(e.g. *param_grid={'n_estimators': [50, 100]}*).
cv: If *cv* is not None, perform a cross validation on
the training data. *cv* should then conform to the
sklearn standard (e.g. *cv=5* for a 5-fold cross-validation).
scoring: Scoring strategy, using the sklearn standard.
label_encoder: Encoder for the labels. Must implement an
*inverse_transform* method.
shuffle: Whether to shuffle training data.
"""
if featurizer:
if not isinstance(featurizer, MaxHistoryTrackerFeaturizer):
raise TypeError(
"Passed featurizer of type {}, should be "
"MaxHistoryTrackerFeaturizer."
"".format(type(featurizer).__name__)
)
super(SklearnPolicy, self).__init__(featurizer, priority)
self.model = model or self._default_model()
self.cv = cv
self.param_grid = param_grid
self.scoring = scoring
self.label_encoder = label_encoder
self.shuffle = shuffle
# attributes that need to be restored after loading
self._pickle_params = ["model", "cv", "param_grid", "scoring", "label_encoder"]
self._train_params = kwargs
@staticmethod
def _default_model():
return LogisticRegression(solver="liblinear", multi_class="auto")
@property
def _state(self):
return {attr: getattr(self, attr) for attr in self._pickle_params}
def model_architecture(self, **kwargs):
# filter out kwargs that cannot be passed to model
train_params = self._get_valid_params(self.model.__init__, **kwargs)
return self.model.set_params(**train_params)
def _extract_training_data(self, training_data):
# transform y from one-hot to num_classes
X, y = training_data.X, training_data.y.argmax(axis=-1)
if self.shuffle:
X, y = sklearn_shuffle(X, y)
return X, y
def _preprocess_data(self, X, y=None):
Xt = X.reshape(X.shape[0], -1)
if y is None:
return Xt
else:
yt = self.label_encoder.transform(y)
return Xt, yt
def _search_and_score(self, model, X, y, param_grid):
search = GridSearchCV(
model, param_grid=param_grid, cv=self.cv, scoring="accuracy", verbose=1
)
search.fit(X, y)
print ("Best params:", search.best_params_)
return search.best_estimator_, search.best_score_
def train(
self,
training_trackers: List[DialogueStateTracker],
domain: Domain,
**kwargs: Any
) -> None:
training_data = self.featurize_for_training(training_trackers, domain, **kwargs)
X, y = self._extract_training_data(training_data)
self._train_params.update(kwargs)
model = self.model_architecture(**self._train_params)
score = None
# Note: clone is called throughout to avoid mutating default
# arguments.
self.label_encoder = clone(self.label_encoder).fit(y)
Xt, yt = self._preprocess_data(X, y)
if self.cv is None:
model = clone(model).fit(Xt, yt)
else:
param_grid = self.param_grid or {}
model, score = self._search_and_score(model, Xt, yt, param_grid)
self.model = model
logger.info("Done fitting sklearn policy model")
if score is not None:
logger.info("Cross validation score: {:.5f}".format(score))
def _postprocess_prediction(self, y_proba, domain):
yp = y_proba[0].tolist()
# Some classes might not be part of the training labels. Since
# sklearn does not predict labels it has never encountered
# during training, it is necessary to insert missing classes.
indices = self.label_encoder.inverse_transform(np.arange(len(yp)))
y_filled = [0.0 for _ in range(domain.num_actions)]
for i, pred in zip(indices, yp):
y_filled[i] = pred
return y_filled
def predict_action_probabilities(
self, tracker: DialogueStateTracker, domain: Domain
) -> List[float]:
X = self.featurizer.create_X([tracker], domain)
Xt = self._preprocess_data(X)
y_proba = self.model.predict_proba(Xt)
return self._postprocess_prediction(y_proba, domain)
def persist(self, path: Text) -> None:
if self.model:
self.featurizer.persist(path)
meta = {"priority": self.priority}
meta_file = os.path.join(path, "sklearn_policy.json")
utils.dump_obj_as_json_to_file(meta_file, meta)
filename = os.path.join(path, "sklearn_model.pkl")
with open(filename, "wb") as f:
pickle.dump(self._state, f)
else:
warnings.warn(
"Persist called without a trained model present. "
"Nothing to persist then!"
)
@classmethod
def load(cls, path: Text) -> Policy:
filename = os.path.join(path, "sklearn_model.pkl")
if not os.path.exists(path):
raise OSError(
"Failed to load dialogue model. Path {} "
"doesn't exist".format(os.path.abspath(filename))
)
featurizer = TrackerFeaturizer.load(path)
assert isinstance(featurizer, MaxHistoryTrackerFeaturizer), (
"Loaded featurizer of type {}, should be "
"MaxHistoryTrackerFeaturizer.".format(type(featurizer).__name__)
)
meta_file = os.path.join(path, "sklearn_policy.json")
meta = json.loads(rasa.utils.io.read_file(meta_file))
policy = cls(featurizer=featurizer, priority=meta["priority"])
with open(filename, "rb") as f:
state = pickle.load(f)
vars(policy).update(state)
logger.info("Loaded sklearn model")
return policy | 0.84367 | 0.356867 |
"""Test cases for Zinnia's admin"""
from __future__ import unicode_literals
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import RequestFactory
from django.test import TestCase
from django.utils import timezone
from django.utils.translation import activate
from django.utils.translation import deactivate
from zinnia import settings
from zinnia.admin import entry as entry_admin
from zinnia.admin.category import CategoryAdmin
from zinnia.admin.entry import EntryAdmin
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
from zinnia.url_shortener.backends.default import base36
class BaseAdminTestCase(TestCase):
rich_urls = 'zinnia.tests.implementations.urls.default'
poor_urls = 'zinnia.tests.implementations.urls.poor'
model_class = None
admin_class = None
def setUp(self):
disconnect_entry_signals()
activate('en')
self.site = AdminSite()
self.admin = self.admin_class(
self.model_class, self.site)
def tearDown(self):
"""
Deactivate the translation system.
"""
deactivate()
def check_with_rich_and_poor_urls(self, func, args,
result_rich, result_poor):
with self.settings(ROOT_URLCONF=self.rich_urls):
self.assertEqual(func(*args), result_rich)
with self.settings(ROOT_URLCONF=self.poor_urls):
self.assertEqual(func(*args), result_poor)
class TestMessageBackend(object):
"""Message backend for testing"""
def __init__(self, *ka, **kw):
self.messages = []
def add(self, *ka, **kw):
self.messages.append(ka)
@skip_if_custom_user
class EntryAdminTestCase(BaseAdminTestCase):
"""Test case for Entry Admin"""
model_class = Entry
admin_class = EntryAdmin
def setUp(self):
super(EntryAdminTestCase, self).setUp()
params = {'title': 'My title',
'content': 'My content',
'slug': 'my-title'}
self.entry = Entry.objects.create(**params)
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_get_title(self):
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words)')
self.entry.comment_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (1 reaction)')
self.entry.pingback_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (2 reactions)')
def test_get_authors(self):
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'', '')
author_1 = Author.objects.create_user(
'author-1', '<EMAIL>')
author_2 = Author.objects.create_user(
'author<2>', '<EMAIL>')
self.entry.authors.add(author_1)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>',
'author-1')
self.entry.authors.add(author_2)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>, '
'<a href="/authors/author%3C2%3E/" target="blank">'
'author<2></a>',
'author-1, author<2>')
def test_get_authors_non_ascii(self):
author = Author.objects.create_user(
'тест', '<EMAIL>')
self.entry.authors.add(author)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_categories(self):
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'', '')
category_1 = Category.objects.create(title='Category <b>1</b>',
slug='category-1')
category_2 = Category.objects.create(title='Category <b>2</b>',
slug='category-2')
self.entry.categories.add(category_1)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>',
'Category <b>1</b>')
self.entry.categories.add(category_2)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>, '
'<a href="/categories/category-2/" target="blank">Category '
'<b>2</b></a>',
'Category <b>1</b>, Category <b>2</b>')
def test_get_categories_non_ascii(self):
category = Category.objects.create(title='Category тест',
slug='category')
self.entry.categories.add(category)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category/" target="blank">'
'Category тест</a>',
'Category тест')
def test_get_tags(self):
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'', '')
self.entry.tags = 'zinnia'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia')
self.entry.tags = 'zinnia, t<e>st'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/t%3Ce%3Est/" target="blank">t<e>st</a>, '
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia, t<e>st') # Yes, this is not the same order...
def test_get_tags_non_ascii(self):
self.entry.tags = 'тест'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_sites(self):
self.assertEqual(self.admin.get_sites(self.entry), '')
self.entry.sites.add(Site.objects.get_current())
self.check_with_rich_and_poor_urls(
self.admin.get_sites, (self.entry,),
'<a href="http://example.com/" target="blank">example.com</a>',
'<a href="http://example.com" target="blank">example.com</a>')
def test_get_short_url(self):
with self.settings(ROOT_URLCONF=self.poor_urls):
entry_url = self.entry.get_absolute_url()
self.check_with_rich_and_poor_urls(
self.admin.get_short_url, (self.entry,),
'<a href="http://example.com/%(hash)s/" target="blank">'
'http://example.com/%(hash)s/</a>' % {
'hash': base36(self.entry.pk)},
'<a href="%(url)s" target="blank">%(url)s</a>' % {
'url': entry_url})
def test_get_is_visible(self):
self.assertEqual(self.admin.get_is_visible(self.entry),
self.entry.is_visible)
def test_queryset(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.entry.authors.add(user)
root = Author.objects.create_superuser(
'root', '<EMAIL>', 'toor')
params = {'title': 'My root title',
'content': 'My root content',
'slug': 'my-root-titile'}
root_entry = Entry.objects.create(**params)
root_entry.authors.add(root)
self.request.user = User.objects.get(pk=user.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 1)
self.request.user = User.objects.get(pk=root.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 2)
def test_get_changeform_initial_data(self):
user = User.objects.create_user(
'user', '<EMAIL>')
site = Site.objects.get_current()
self.request.user = user
data = self.admin.get_changeform_initial_data(self.request)
self.assertEqual(data, {'authors': [user.pk],
'sites': [site.pk]})
request = self.request_factory.get('/?title=data')
request.user = user
data = self.admin.get_changeform_initial_data(request)
self.assertEqual(data, {'title': 'data'})
def test_formfield_for_manytomany(self):
staff = User.objects.create_user(
'staff', '<EMAIL>')
author = User.objects.create_user(
'author', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = root
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 1)
staff.is_staff = True
staff.save()
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 2)
self.entry.authors.add(Author.objects.get(pk=author.pk))
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 3)
def test_get_readonly_fields(self):
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
self.assertEqual(self.admin.get_readonly_fields(self.request),
['status', 'authors'])
self.request.user = root
self.assertEqual(self.admin.get_readonly_fields(self.request),
[])
def test_get_actions(self):
original_ping_directories = settings.PING_DIRECTORIES
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
settings.PING_DIRECTORIES = True
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'ping_directories',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = False
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
self.request.user = root
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'make_mine',
'make_published',
'make_hidden',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = original_ping_directories
def test_get_actions_in_popup_mode_issue_291(self):
user = User.objects.create_user(
'user', '<EMAIL>')
request = self.request_factory.get('/?_popup=1')
request.user = user
self.assertEqual(
list(self.admin.get_actions(request).keys()),
[])
def test_make_mine(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.request.user = User.objects.get(pk=user.pk)
self.request._messages = TestMessageBackend()
self.assertEqual(user.entries.count(), 0)
self.admin.make_mine(self.request, Entry.objects.all())
self.assertEqual(user.entries.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
def test_make_published(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 0)
self.admin.make_published(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_make_hidden(self):
self.request._messages = TestMessageBackend()
self.entry.status = PUBLISHED
self.entry.save()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 1)
self.admin.make_hidden(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_comments(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 1)
self.admin.close_comments(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_pingbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 1)
self.admin.close_pingbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_trackbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 1)
self.admin.close_trackbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_put_on_top(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.publication_date = datetime(2011, 1, 1, 12, 0)
self.admin.put_on_top(self.request, Entry.objects.all())
self.assertEqual(
Entry.objects.get(pk=self.entry.pk).creation_date.date(),
timezone.now().date())
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_mark_unmark_featured(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
featured=True).count(), 0)
self.admin.mark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
self.admin.unmark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 2)
def test_ping_directories(self):
class FakePinger(object):
def __init__(self, *ka, **kw):
self.results = [{'flerror': False, 'message': 'OK'},
{'flerror': True, 'message': 'KO'}]
def join(self):
pass
original_pinger = entry_admin.DirectoryPinger
entry_admin.DirectoryPinger = FakePinger
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = ['http://ping.com/ping']
self.request._messages = TestMessageBackend()
self.admin.ping_directories(self.request, Entry.objects.all(), False)
self.assertEqual(len(self.request._messages.messages), 0)
self.admin.ping_directories(self.request, Entry.objects.all())
self.assertEqual(len(self.request._messages.messages), 2)
self.assertEqual(self.request._messages.messages,
[(20, 'http://ping.com/ping : KO', ''),
(20, 'http://ping.com/ping directory succesfully '
'pinged 1 entries.', '')])
entry_admin.DirectoryPinger = original_pinger
settings.PING_DIRECTORIES = original_ping_directories
class CategoryAdminTestCase(BaseAdminTestCase):
"""Test cases for Category Admin"""
model_class = Category
admin_class = CategoryAdmin
def test_get_tree_path(self):
category = Category.objects.create(title='Category', slug='cat')
self.check_with_rich_and_poor_urls(
self.admin.get_tree_path, (category,),
'<a href="/categories/cat/" target="blank">/cat/</a>',
'/cat/') | zinnia/tests/test_admin.py | """Test cases for Zinnia's admin"""
from __future__ import unicode_literals
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import RequestFactory
from django.test import TestCase
from django.utils import timezone
from django.utils.translation import activate
from django.utils.translation import deactivate
from zinnia import settings
from zinnia.admin import entry as entry_admin
from zinnia.admin.category import CategoryAdmin
from zinnia.admin.entry import EntryAdmin
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
from zinnia.url_shortener.backends.default import base36
class BaseAdminTestCase(TestCase):
rich_urls = 'zinnia.tests.implementations.urls.default'
poor_urls = 'zinnia.tests.implementations.urls.poor'
model_class = None
admin_class = None
def setUp(self):
disconnect_entry_signals()
activate('en')
self.site = AdminSite()
self.admin = self.admin_class(
self.model_class, self.site)
def tearDown(self):
"""
Deactivate the translation system.
"""
deactivate()
def check_with_rich_and_poor_urls(self, func, args,
result_rich, result_poor):
with self.settings(ROOT_URLCONF=self.rich_urls):
self.assertEqual(func(*args), result_rich)
with self.settings(ROOT_URLCONF=self.poor_urls):
self.assertEqual(func(*args), result_poor)
class TestMessageBackend(object):
"""Message backend for testing"""
def __init__(self, *ka, **kw):
self.messages = []
def add(self, *ka, **kw):
self.messages.append(ka)
@skip_if_custom_user
class EntryAdminTestCase(BaseAdminTestCase):
"""Test case for Entry Admin"""
model_class = Entry
admin_class = EntryAdmin
def setUp(self):
super(EntryAdminTestCase, self).setUp()
params = {'title': 'My title',
'content': 'My content',
'slug': 'my-title'}
self.entry = Entry.objects.create(**params)
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_get_title(self):
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words)')
self.entry.comment_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (1 reaction)')
self.entry.pingback_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (2 reactions)')
def test_get_authors(self):
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'', '')
author_1 = Author.objects.create_user(
'author-1', '<EMAIL>')
author_2 = Author.objects.create_user(
'author<2>', '<EMAIL>')
self.entry.authors.add(author_1)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>',
'author-1')
self.entry.authors.add(author_2)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>, '
'<a href="/authors/author%3C2%3E/" target="blank">'
'author<2></a>',
'author-1, author<2>')
def test_get_authors_non_ascii(self):
author = Author.objects.create_user(
'тест', '<EMAIL>')
self.entry.authors.add(author)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_categories(self):
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'', '')
category_1 = Category.objects.create(title='Category <b>1</b>',
slug='category-1')
category_2 = Category.objects.create(title='Category <b>2</b>',
slug='category-2')
self.entry.categories.add(category_1)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>',
'Category <b>1</b>')
self.entry.categories.add(category_2)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>, '
'<a href="/categories/category-2/" target="blank">Category '
'<b>2</b></a>',
'Category <b>1</b>, Category <b>2</b>')
def test_get_categories_non_ascii(self):
category = Category.objects.create(title='Category тест',
slug='category')
self.entry.categories.add(category)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category/" target="blank">'
'Category тест</a>',
'Category тест')
def test_get_tags(self):
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'', '')
self.entry.tags = 'zinnia'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia')
self.entry.tags = 'zinnia, t<e>st'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/t%3Ce%3Est/" target="blank">t<e>st</a>, '
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia, t<e>st') # Yes, this is not the same order...
def test_get_tags_non_ascii(self):
self.entry.tags = 'тест'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_sites(self):
self.assertEqual(self.admin.get_sites(self.entry), '')
self.entry.sites.add(Site.objects.get_current())
self.check_with_rich_and_poor_urls(
self.admin.get_sites, (self.entry,),
'<a href="http://example.com/" target="blank">example.com</a>',
'<a href="http://example.com" target="blank">example.com</a>')
def test_get_short_url(self):
with self.settings(ROOT_URLCONF=self.poor_urls):
entry_url = self.entry.get_absolute_url()
self.check_with_rich_and_poor_urls(
self.admin.get_short_url, (self.entry,),
'<a href="http://example.com/%(hash)s/" target="blank">'
'http://example.com/%(hash)s/</a>' % {
'hash': base36(self.entry.pk)},
'<a href="%(url)s" target="blank">%(url)s</a>' % {
'url': entry_url})
def test_get_is_visible(self):
self.assertEqual(self.admin.get_is_visible(self.entry),
self.entry.is_visible)
def test_queryset(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.entry.authors.add(user)
root = Author.objects.create_superuser(
'root', '<EMAIL>', 'toor')
params = {'title': 'My root title',
'content': 'My root content',
'slug': 'my-root-titile'}
root_entry = Entry.objects.create(**params)
root_entry.authors.add(root)
self.request.user = User.objects.get(pk=user.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 1)
self.request.user = User.objects.get(pk=root.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 2)
def test_get_changeform_initial_data(self):
user = User.objects.create_user(
'user', '<EMAIL>')
site = Site.objects.get_current()
self.request.user = user
data = self.admin.get_changeform_initial_data(self.request)
self.assertEqual(data, {'authors': [user.pk],
'sites': [site.pk]})
request = self.request_factory.get('/?title=data')
request.user = user
data = self.admin.get_changeform_initial_data(request)
self.assertEqual(data, {'title': 'data'})
def test_formfield_for_manytomany(self):
staff = User.objects.create_user(
'staff', '<EMAIL>')
author = User.objects.create_user(
'author', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = root
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 1)
staff.is_staff = True
staff.save()
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 2)
self.entry.authors.add(Author.objects.get(pk=author.pk))
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 3)
def test_get_readonly_fields(self):
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
self.assertEqual(self.admin.get_readonly_fields(self.request),
['status', 'authors'])
self.request.user = root
self.assertEqual(self.admin.get_readonly_fields(self.request),
[])
def test_get_actions(self):
original_ping_directories = settings.PING_DIRECTORIES
user = User.objects.create_user(
'user', '<EMAIL>')
root = User.objects.create_superuser(
'root', '<EMAIL>', 'toor')
self.request.user = user
settings.PING_DIRECTORIES = True
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'ping_directories',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = False
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
self.request.user = root
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'make_mine',
'make_published',
'make_hidden',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = original_ping_directories
def test_get_actions_in_popup_mode_issue_291(self):
user = User.objects.create_user(
'user', '<EMAIL>')
request = self.request_factory.get('/?_popup=1')
request.user = user
self.assertEqual(
list(self.admin.get_actions(request).keys()),
[])
def test_make_mine(self):
user = Author.objects.create_user(
'user', '<EMAIL>')
self.request.user = User.objects.get(pk=user.pk)
self.request._messages = TestMessageBackend()
self.assertEqual(user.entries.count(), 0)
self.admin.make_mine(self.request, Entry.objects.all())
self.assertEqual(user.entries.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
def test_make_published(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 0)
self.admin.make_published(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_make_hidden(self):
self.request._messages = TestMessageBackend()
self.entry.status = PUBLISHED
self.entry.save()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 1)
self.admin.make_hidden(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_comments(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 1)
self.admin.close_comments(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_pingbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 1)
self.admin.close_pingbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_trackbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 1)
self.admin.close_trackbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_put_on_top(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.publication_date = datetime(2011, 1, 1, 12, 0)
self.admin.put_on_top(self.request, Entry.objects.all())
self.assertEqual(
Entry.objects.get(pk=self.entry.pk).creation_date.date(),
timezone.now().date())
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_mark_unmark_featured(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
featured=True).count(), 0)
self.admin.mark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
self.admin.unmark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 2)
def test_ping_directories(self):
class FakePinger(object):
def __init__(self, *ka, **kw):
self.results = [{'flerror': False, 'message': 'OK'},
{'flerror': True, 'message': 'KO'}]
def join(self):
pass
original_pinger = entry_admin.DirectoryPinger
entry_admin.DirectoryPinger = FakePinger
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = ['http://ping.com/ping']
self.request._messages = TestMessageBackend()
self.admin.ping_directories(self.request, Entry.objects.all(), False)
self.assertEqual(len(self.request._messages.messages), 0)
self.admin.ping_directories(self.request, Entry.objects.all())
self.assertEqual(len(self.request._messages.messages), 2)
self.assertEqual(self.request._messages.messages,
[(20, 'http://ping.com/ping : KO', ''),
(20, 'http://ping.com/ping directory succesfully '
'pinged 1 entries.', '')])
entry_admin.DirectoryPinger = original_pinger
settings.PING_DIRECTORIES = original_ping_directories
class CategoryAdminTestCase(BaseAdminTestCase):
"""Test cases for Category Admin"""
model_class = Category
admin_class = CategoryAdmin
def test_get_tree_path(self):
category = Category.objects.create(title='Category', slug='cat')
self.check_with_rich_and_poor_urls(
self.admin.get_tree_path, (category,),
'<a href="/categories/cat/" target="blank">/cat/</a>',
'/cat/') | 0.568296 | 0.172363 |
class MIPSNode:
pass
class MIPSProgramNode(MIPSNode):
def __init__(self, dotdata, dottext):
self.dotdata = dotdata
self.dottext = dottext
class MIPSDataNode(MIPSNode):
pass
class MIPSProcedureNode(MIPSNode):
def __init__(self, label):
self.label = label
self.instructions = []
class MIPSInstructionNode(MIPSNode):
pass
class MIPSDataTypedNode(MIPSDataNode):
def __init__(self, vname, data_type, values):
self.vname = vname
self.data_type = data_type
self.values = values
def __str__(self):
values = ""
for value in self.values:
values += f", {value}"
return f"{self.vname} : {self.data_type}{values}"
class MIPSConstantNode(MIPSDataNode):
def __init__(self, vname, value):
self.vname = vname
self.value = value
class MIPSArithmeticAndLogicNode(MIPSInstructionNode) :
def __init__(self, destination, left, right):
self.destination = destination
self.left = left
self.right = right
class MIPSAddNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f"add {self.destination}, {self.left}, {self.right}"
class MIPSSubstractNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f"sub {self.destination}, {self.left}, {self.right}"
class MIPSAddInmediateNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f'addi {self.destination}, {self.left}, {self.right}'
class MIPSAddUnsigned(MIPSArithmeticAndLogicNode):
pass
class MIPSSubstractUnsignedNode(MIPSArithmeticAndLogicNode):
pass
class MIPSAddInmediateUnsignedNode(MIPSArithmeticAndLogicNode):
pass
class MIPSMultiplyWithoutOverflow(MIPSArithmeticAndLogicNode):
pass
class MIPSAndNode(MIPSArithmeticAndLogicNode):
pass
class MIPSOrNode(MIPSArithmeticAndLogicNode):
pass
class MIPSAndInmediateNode(MIPSArithmeticAndLogicNode):
pass
class MIPSOrInmediateNode(MIPSArithmeticAndLogicNode):
pass
class MIPSShiftLeftNode(MIPSArithmeticAndLogicNode):
pass
class MIPSShiftRightNode(MIPSArithmeticAndLogicNode):
pass
class MIPSHiLoOperationNode(MIPSInstructionNode):
def __init__(self,left,right):
self.left = left
self.right = right
class MIPSMultiplyNode(MIPSHiLoOperationNode):
def __str__(self):
return f'mult {self.left}, {self.right}'
class MIPSDivideNode(MIPSHiLoOperationNode):
def __str__(self):
return f'div {self.left}, {self.right}'
class MIPSDataTransferNode(MIPSInstructionNode):
pass
class MIPSDataTransferWithOffsetNode(MIPSDataTransferNode):
def __init__(self, source, offset, destination):
self.source = source
self.offset = offset
self.destination = destination
class MIPSLoadWordNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'lw {self.source}, {str(self.offset)}({self.destination})'
class MIPSLoadByteNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'lb {self.source}, {str(self.offset)}({self.destination})'
class MIPSStoreWordNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'sw {self.source}, {str(self.offset)}({self.destination})'
class MIPSStoreByteNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'sb {self.source}, {str(self.offset)}({self.destination})'
class MIPSLoadNode(MIPSDataTransferNode):
def __init__(self, destination, source):
self.destination = destination
self.source = source
class MIPSLoadUpperInmediateNode(MIPSLoadNode):
pass
class MIPSLoadAdressNode(MIPSLoadNode):
def __str__(self):
return f'la {self.destination}, {self.source}'
class MIPSLoadInmediateNode(MIPSLoadNode):
def __str__(self):
return f'li {self.destination}, {str(self.source)}'
class MIPSMoveFromNode(MIPSDataTransferNode):
def __init__(self, destination):
self.destination = destination
class MIPSMoveNode(MIPSDataTransferNode):
def __init__(self, destination, source):
self.destination = destination
self.source = source
def __str__(self):
return f"move {self.destination} {self.source}"
class MIPSConditionalBranchNode(MIPSInstructionNode):
def __init__(self, r1, r2, jump):
self.r1 = r1
self.r2 = r2
self.jump = jump
class MIPSBranchOnEqualNode(MIPSConditionalBranchNode):
def __str__(self):
return f"beq {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchNeqZero(MIPSInstructionNode):
def __init__(self, r, label):
self.r = r
self.label = label
def __str__(self):
return f"bnez {self.r}, {self.label}"
class MIPSBranchOnNotEqualNode(MIPSConditionalBranchNode):
def __str__(self):
return f"bne {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnGTNode(MIPSConditionalBranchNode):
def __str__(self):
return f"bgt {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnGTENode(MIPSConditionalBranchNode):
pass
class MIPSBranchOnLTNode(MIPSConditionalBranchNode):
def __str__(self):
return f"blt {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnLTENode(MIPSConditionalBranchNode):
pass
class MIPSComparissonNode(MIPSInstructionNode):
def __init__(self, result_register, value1, value2):
self.result_register = result_register
self.value1 = value1
self.value2 = value2
class MIPSSetOnLTNode(MIPSComparissonNode):
def __str__(self):
return f'slt {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnLTENode(MIPSComparissonNode):
def __str__(self):
return f'sleu {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnENode(MIPSComparissonNode):
def __str__(self):
return f'seq {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnLTInmediateNode(MIPSComparissonNode):
def __str__(self):
return f'slti {self.result_register}, {self.value1}, {self.value2}'
class MIPSUnconditionalJumpNode(MIPSInstructionNode):
def __init__(self, jump):
self.jump = jump
class MIPSJumpNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"j {self.jump}"
class MIPSJumpRegisterNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"jr {self.jump}"
class MIPSJumpAndLinkNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"jal {self.jump}"
class MIPSJumpAndLinkRegNode(MIPSInstructionNode):
def __init__(self, r):
self.r = r
def __str__(self):
return f"jalr {self.r}"
class MIPSLabelNode(MIPSInstructionNode):
def __init__(self, label):
self.label = label
def __str__(self):
return f"{self.label}:"
class MIPSEmptyInstruction(MIPSInstructionNode):
def __str__(self):
return ""
class MIPSCommentNode(MIPSNode):
def __init__(self, comment):
self.comment = comment
def __str__(self):
return f"#{self.comment}"
class MIPSSyscallNode(MIPSInstructionNode):
def __str__(self):
return "syscall"
class MIPSMLONode(MIPSInstructionNode):
def __init__(self,destiny):
self.destiny = destiny
def __str__(self):
return f"mflo {self.destiny}" | src/compiler/components/generation/MIPS_defintions.py | class MIPSNode:
pass
class MIPSProgramNode(MIPSNode):
def __init__(self, dotdata, dottext):
self.dotdata = dotdata
self.dottext = dottext
class MIPSDataNode(MIPSNode):
pass
class MIPSProcedureNode(MIPSNode):
def __init__(self, label):
self.label = label
self.instructions = []
class MIPSInstructionNode(MIPSNode):
pass
class MIPSDataTypedNode(MIPSDataNode):
def __init__(self, vname, data_type, values):
self.vname = vname
self.data_type = data_type
self.values = values
def __str__(self):
values = ""
for value in self.values:
values += f", {value}"
return f"{self.vname} : {self.data_type}{values}"
class MIPSConstantNode(MIPSDataNode):
def __init__(self, vname, value):
self.vname = vname
self.value = value
class MIPSArithmeticAndLogicNode(MIPSInstructionNode) :
def __init__(self, destination, left, right):
self.destination = destination
self.left = left
self.right = right
class MIPSAddNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f"add {self.destination}, {self.left}, {self.right}"
class MIPSSubstractNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f"sub {self.destination}, {self.left}, {self.right}"
class MIPSAddInmediateNode(MIPSArithmeticAndLogicNode):
def __str__(self):
return f'addi {self.destination}, {self.left}, {self.right}'
class MIPSAddUnsigned(MIPSArithmeticAndLogicNode):
pass
class MIPSSubstractUnsignedNode(MIPSArithmeticAndLogicNode):
pass
class MIPSAddInmediateUnsignedNode(MIPSArithmeticAndLogicNode):
pass
class MIPSMultiplyWithoutOverflow(MIPSArithmeticAndLogicNode):
pass
class MIPSAndNode(MIPSArithmeticAndLogicNode):
pass
class MIPSOrNode(MIPSArithmeticAndLogicNode):
pass
class MIPSAndInmediateNode(MIPSArithmeticAndLogicNode):
pass
class MIPSOrInmediateNode(MIPSArithmeticAndLogicNode):
pass
class MIPSShiftLeftNode(MIPSArithmeticAndLogicNode):
pass
class MIPSShiftRightNode(MIPSArithmeticAndLogicNode):
pass
class MIPSHiLoOperationNode(MIPSInstructionNode):
def __init__(self,left,right):
self.left = left
self.right = right
class MIPSMultiplyNode(MIPSHiLoOperationNode):
def __str__(self):
return f'mult {self.left}, {self.right}'
class MIPSDivideNode(MIPSHiLoOperationNode):
def __str__(self):
return f'div {self.left}, {self.right}'
class MIPSDataTransferNode(MIPSInstructionNode):
pass
class MIPSDataTransferWithOffsetNode(MIPSDataTransferNode):
def __init__(self, source, offset, destination):
self.source = source
self.offset = offset
self.destination = destination
class MIPSLoadWordNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'lw {self.source}, {str(self.offset)}({self.destination})'
class MIPSLoadByteNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'lb {self.source}, {str(self.offset)}({self.destination})'
class MIPSStoreWordNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'sw {self.source}, {str(self.offset)}({self.destination})'
class MIPSStoreByteNode(MIPSDataTransferWithOffsetNode):
def __str__(self):
return f'sb {self.source}, {str(self.offset)}({self.destination})'
class MIPSLoadNode(MIPSDataTransferNode):
def __init__(self, destination, source):
self.destination = destination
self.source = source
class MIPSLoadUpperInmediateNode(MIPSLoadNode):
pass
class MIPSLoadAdressNode(MIPSLoadNode):
def __str__(self):
return f'la {self.destination}, {self.source}'
class MIPSLoadInmediateNode(MIPSLoadNode):
def __str__(self):
return f'li {self.destination}, {str(self.source)}'
class MIPSMoveFromNode(MIPSDataTransferNode):
def __init__(self, destination):
self.destination = destination
class MIPSMoveNode(MIPSDataTransferNode):
def __init__(self, destination, source):
self.destination = destination
self.source = source
def __str__(self):
return f"move {self.destination} {self.source}"
class MIPSConditionalBranchNode(MIPSInstructionNode):
def __init__(self, r1, r2, jump):
self.r1 = r1
self.r2 = r2
self.jump = jump
class MIPSBranchOnEqualNode(MIPSConditionalBranchNode):
def __str__(self):
return f"beq {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchNeqZero(MIPSInstructionNode):
def __init__(self, r, label):
self.r = r
self.label = label
def __str__(self):
return f"bnez {self.r}, {self.label}"
class MIPSBranchOnNotEqualNode(MIPSConditionalBranchNode):
def __str__(self):
return f"bne {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnGTNode(MIPSConditionalBranchNode):
def __str__(self):
return f"bgt {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnGTENode(MIPSConditionalBranchNode):
pass
class MIPSBranchOnLTNode(MIPSConditionalBranchNode):
def __str__(self):
return f"blt {self.r1}, {self.r2}, {self.jump}"
class MIPSBranchOnLTENode(MIPSConditionalBranchNode):
pass
class MIPSComparissonNode(MIPSInstructionNode):
def __init__(self, result_register, value1, value2):
self.result_register = result_register
self.value1 = value1
self.value2 = value2
class MIPSSetOnLTNode(MIPSComparissonNode):
def __str__(self):
return f'slt {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnLTENode(MIPSComparissonNode):
def __str__(self):
return f'sleu {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnENode(MIPSComparissonNode):
def __str__(self):
return f'seq {self.result_register}, {self.value1}, {self.value2}'
class MIPSSetOnLTInmediateNode(MIPSComparissonNode):
def __str__(self):
return f'slti {self.result_register}, {self.value1}, {self.value2}'
class MIPSUnconditionalJumpNode(MIPSInstructionNode):
def __init__(self, jump):
self.jump = jump
class MIPSJumpNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"j {self.jump}"
class MIPSJumpRegisterNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"jr {self.jump}"
class MIPSJumpAndLinkNode(MIPSUnconditionalJumpNode):
def __str__(self):
return f"jal {self.jump}"
class MIPSJumpAndLinkRegNode(MIPSInstructionNode):
def __init__(self, r):
self.r = r
def __str__(self):
return f"jalr {self.r}"
class MIPSLabelNode(MIPSInstructionNode):
def __init__(self, label):
self.label = label
def __str__(self):
return f"{self.label}:"
class MIPSEmptyInstruction(MIPSInstructionNode):
def __str__(self):
return ""
class MIPSCommentNode(MIPSNode):
def __init__(self, comment):
self.comment = comment
def __str__(self):
return f"#{self.comment}"
class MIPSSyscallNode(MIPSInstructionNode):
def __str__(self):
return "syscall"
class MIPSMLONode(MIPSInstructionNode):
def __init__(self,destiny):
self.destiny = destiny
def __str__(self):
return f"mflo {self.destiny}" | 0.76921 | 0.568925 |
from io import BytesIO
import mutagen
from kivy.core.audio import SoundLoader, Sound
from kivy.core.image import Image as CoreImage
from kivy.properties import ObjectProperty, NumericProperty, BoundedNumericProperty, Clock
from kivy.uix.screenmanager import Screen
from playlist.orm import DirectoryPlaylist
from playlist.orm.static_playlist import StaticPlaylist
class MainScreen(Screen):
"""
:var image_texture:
:type image_texture: kivy.graphics.texture.Texture
:var sound: the sound
:type sound: Sound
:var playlist:
:type playlist: playlist.orm.playlist.Playlist
:var playlist_current_index: The current index
:type playlist_current_index: int
:var last_sound_position: The last position in the song
:type last_sound_position: float
:var volume: The volume of the song
:type volume: float
:var __cached_playlist: The playlist loaded from the playlist
:type __cached_playlist: list[Media]
"""
image_texture = ObjectProperty(None)
sound = ObjectProperty(None)
playlist = ObjectProperty()
playlist_current_index = NumericProperty(0)
last_sound_position = NumericProperty(0)
volume = BoundedNumericProperty(1., min=0, max=1)
def __init__(self, **kw):
super().__init__(**kw)
self.playlist = StaticPlaylist([
])
self.__cached_playlist = list(self.playlist)
self.load_audio()
def update_position(_):
if self.sound and self.sound.state == 'play':
self.last_sound_position = self.sound.get_pos()
Clock.schedule_interval(update_position, 1.5)
def play(self):
if self.sound.state == 'stop':
self.sound.play()
self.sound.seek(self.last_sound_position)
else:
self.last_sound_position = self.sound.get_pos()
self.sound.stop()
def back(self):
is_played = self.sound.state == 'play'
self.sound.stop()
self.last_sound_position = 0
if self.last_sound_position < 1:
self.playlist_current_index -= 1
if -1 == self.playlist_current_index:
self.__cached_playlist = list(self.playlist)
self.playlist_current_index = len(self.__cached_playlist) - 1
self.load_audio()
if is_played:
self.play()
else:
self.play()
def next(self):
is_played = self.sound.state == 'play'
self.sound.stop()
self.last_sound_position = 0
self.playlist_current_index += 1
if len(self.__cached_playlist) == self.playlist_current_index:
self.__cached_playlist = list(self.playlist)
self.playlist_current_index = 0
self.load_audio()
if is_played:
self.play()
def load_audio(self):
if self.sound:
self.sound.unload()
if len(self.__cached_playlist) == 0:
return
self.sound = SoundLoader.load(self.__cached_playlist[self.playlist_current_index])
self.sound.volume = self.volume
audio_path = self.__cached_playlist[self.playlist_current_index]
music_file = mutagen.File(audio_path)
for k, v in music_file.items():
if k.startswith('APIC'):
ext = v.mime[6:]
data = BytesIO(v.data)
self.image_texture = CoreImage(data, ext=ext).texture
break | src/playlist/gui/main_screen.py | from io import BytesIO
import mutagen
from kivy.core.audio import SoundLoader, Sound
from kivy.core.image import Image as CoreImage
from kivy.properties import ObjectProperty, NumericProperty, BoundedNumericProperty, Clock
from kivy.uix.screenmanager import Screen
from playlist.orm import DirectoryPlaylist
from playlist.orm.static_playlist import StaticPlaylist
class MainScreen(Screen):
"""
:var image_texture:
:type image_texture: kivy.graphics.texture.Texture
:var sound: the sound
:type sound: Sound
:var playlist:
:type playlist: playlist.orm.playlist.Playlist
:var playlist_current_index: The current index
:type playlist_current_index: int
:var last_sound_position: The last position in the song
:type last_sound_position: float
:var volume: The volume of the song
:type volume: float
:var __cached_playlist: The playlist loaded from the playlist
:type __cached_playlist: list[Media]
"""
image_texture = ObjectProperty(None)
sound = ObjectProperty(None)
playlist = ObjectProperty()
playlist_current_index = NumericProperty(0)
last_sound_position = NumericProperty(0)
volume = BoundedNumericProperty(1., min=0, max=1)
def __init__(self, **kw):
super().__init__(**kw)
self.playlist = StaticPlaylist([
])
self.__cached_playlist = list(self.playlist)
self.load_audio()
def update_position(_):
if self.sound and self.sound.state == 'play':
self.last_sound_position = self.sound.get_pos()
Clock.schedule_interval(update_position, 1.5)
def play(self):
if self.sound.state == 'stop':
self.sound.play()
self.sound.seek(self.last_sound_position)
else:
self.last_sound_position = self.sound.get_pos()
self.sound.stop()
def back(self):
is_played = self.sound.state == 'play'
self.sound.stop()
self.last_sound_position = 0
if self.last_sound_position < 1:
self.playlist_current_index -= 1
if -1 == self.playlist_current_index:
self.__cached_playlist = list(self.playlist)
self.playlist_current_index = len(self.__cached_playlist) - 1
self.load_audio()
if is_played:
self.play()
else:
self.play()
def next(self):
is_played = self.sound.state == 'play'
self.sound.stop()
self.last_sound_position = 0
self.playlist_current_index += 1
if len(self.__cached_playlist) == self.playlist_current_index:
self.__cached_playlist = list(self.playlist)
self.playlist_current_index = 0
self.load_audio()
if is_played:
self.play()
def load_audio(self):
if self.sound:
self.sound.unload()
if len(self.__cached_playlist) == 0:
return
self.sound = SoundLoader.load(self.__cached_playlist[self.playlist_current_index])
self.sound.volume = self.volume
audio_path = self.__cached_playlist[self.playlist_current_index]
music_file = mutagen.File(audio_path)
for k, v in music_file.items():
if k.startswith('APIC'):
ext = v.mime[6:]
data = BytesIO(v.data)
self.image_texture = CoreImage(data, ext=ext).texture
break | 0.586286 | 0.127598 |
import discord
from discord.ext import commands
from _Util import BLUE, Checks
class Rules(commands.Cog):
"""
Main class
"""
def __init__(self, bot):
self.bot = bot
@commands.group()
@commands.check(Checks.admin)
async def rules(self, ctx):
"""
Modifies rules
"""
@rules.command(aliases=["add", "create"])
@commands.check(Checks.admin)
async def set(self, ctx, name, punishment, *, description):
"""
makes a rule
"""
await self.bot.db.execute("INSERT INTO rules (name, guild_id, punishment, description) VALUES ($2, $1, $3, $4)",
ctx.guild.id, name, punishment, description)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@rules.command(aliases=["del", "remove"])
@commands.check(Checks.admin)
async def delete(self, ctx, name):
"""
deletes a rule
"""
await self.bot.db.execute("DELETE FROM rules WHERE guild_id = $1 AND name = $2",
ctx.guild.id, name)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@rules.command()
@commands.check(Checks.admin)
async def clear(self, ctx):
"""
clears all rules
"""
await self.bot.db.execute("DELETE FROM rules WHERE guild_id = $1", ctx.guild.id)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command()
@commands.check(Checks.admin)
async def sendrules(self, ctx, channel: discord.TextChannel, inline: bool = False):
"""
Sends rules to the specified channel
"""
punishments = [punishment["punishment"] for punishment in await self.bot.db.fetch("SELECT DISTINCT punishment FROM rules WHERE guild_id = $1 AND punishment != 'faq'", ctx.guild.id)]
embed = discord.Embed(title="Rules")
for punishment in punishments:
embed.add_field(name=punishment, value="\n\n".join([rule["description"] for rule in await self.bot.db.fetch("SELECT description FROM rules WHERE guild_id = $1 AND punishment = $2", ctx.guild.id, punishment)]), inline=inline)
for faq in [(faq["name"], faq["description"]) for faq in await self.bot.db.fetch("SELECT name, description FROM rules WHERE guild_id = $1 AND punishment = 'faq'", ctx.guild.id)]:
embed.add_field(name=faq[0], value=faq[1], inline=inline)
await ctx.message.delete()
await channel.send(embed=embed)
embed = discord.Embed(title=f"Rules sent to #{channel.name}")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command()
async def rule(self, ctx, name):
"""
Shows info of a rule
"""
rule = await self.bot.db.fetchrow("SELECT name, punishment, description FROM rules WHERE guild_id = $1 AND name = $2 AND punishment != 'faq'",
ctx.guild.id, name)
embed = discord.Embed(title=name, description=rule["description"], colour=BLUE)
embed.add_field(name="Punishment:", value=rule["punishment"])
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.message.delete()
await ctx.send(embed=embed)
def setup(bot):
"""
Initialize cog
"""
bot.add_cog(Rules(bot)) | bot/Rules.py | import discord
from discord.ext import commands
from _Util import BLUE, Checks
class Rules(commands.Cog):
"""
Main class
"""
def __init__(self, bot):
self.bot = bot
@commands.group()
@commands.check(Checks.admin)
async def rules(self, ctx):
"""
Modifies rules
"""
@rules.command(aliases=["add", "create"])
@commands.check(Checks.admin)
async def set(self, ctx, name, punishment, *, description):
"""
makes a rule
"""
await self.bot.db.execute("INSERT INTO rules (name, guild_id, punishment, description) VALUES ($2, $1, $3, $4)",
ctx.guild.id, name, punishment, description)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@rules.command(aliases=["del", "remove"])
@commands.check(Checks.admin)
async def delete(self, ctx, name):
"""
deletes a rule
"""
await self.bot.db.execute("DELETE FROM rules WHERE guild_id = $1 AND name = $2",
ctx.guild.id, name)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@rules.command()
@commands.check(Checks.admin)
async def clear(self, ctx):
"""
clears all rules
"""
await self.bot.db.execute("DELETE FROM rules WHERE guild_id = $1", ctx.guild.id)
await ctx.message.delete()
embed = discord.Embed(title="Rules updated")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command()
@commands.check(Checks.admin)
async def sendrules(self, ctx, channel: discord.TextChannel, inline: bool = False):
"""
Sends rules to the specified channel
"""
punishments = [punishment["punishment"] for punishment in await self.bot.db.fetch("SELECT DISTINCT punishment FROM rules WHERE guild_id = $1 AND punishment != 'faq'", ctx.guild.id)]
embed = discord.Embed(title="Rules")
for punishment in punishments:
embed.add_field(name=punishment, value="\n\n".join([rule["description"] for rule in await self.bot.db.fetch("SELECT description FROM rules WHERE guild_id = $1 AND punishment = $2", ctx.guild.id, punishment)]), inline=inline)
for faq in [(faq["name"], faq["description"]) for faq in await self.bot.db.fetch("SELECT name, description FROM rules WHERE guild_id = $1 AND punishment = 'faq'", ctx.guild.id)]:
embed.add_field(name=faq[0], value=faq[1], inline=inline)
await ctx.message.delete()
await channel.send(embed=embed)
embed = discord.Embed(title=f"Rules sent to #{channel.name}")
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.send(embed=embed)
@commands.command()
async def rule(self, ctx, name):
"""
Shows info of a rule
"""
rule = await self.bot.db.fetchrow("SELECT name, punishment, description FROM rules WHERE guild_id = $1 AND name = $2 AND punishment != 'faq'",
ctx.guild.id, name)
embed = discord.Embed(title=name, description=rule["description"], colour=BLUE)
embed.add_field(name="Punishment:", value=rule["punishment"])
embed.set_author(name=ctx.author.nick if ctx.author.nick else ctx.author.name, icon_url=ctx.author.avatar_url)
await ctx.message.delete()
await ctx.send(embed=embed)
def setup(bot):
"""
Initialize cog
"""
bot.add_cog(Rules(bot)) | 0.47171 | 0.059565 |
from collections import OrderedDict
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, Union
# Avoid spurious X windows errors, see:
# https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
import GPy
import matplotlib.pyplot as plt # noqa: E402
import numpy as np
import pandas as pd
import scipy.stats
import seaborn as sns
from abex.constants import FILE
from abex.dataset import Dataset
from abex.plotting.composite_core import plot_multidimensional_function_slices
from abex.plotting.core import (
PLOT_SCALE,
calc_2d_slice,
make_lower_triangular_axis_grid_with_colorbar_axes,
plot_2d_slice_from_arrays,
)
from azureml.core import Run
from matplotlib.axes import Axes
from psbutils.type_annotations import PathOrString
if TYPE_CHECKING:
# Imports BayesOptModel for static type-checks only to get around circular import problem
from abex.bayesopt import BayesOptModel # pragma: no cover
# Label text shared by plotting functions in this module (presumably for axes
# showing outputs normalized against a baseline — confirm against callers).
RELATIVE_VALUE = "Relative value"
# Default matplotlib color cycle, captured once so related plots share a palette.
# noinspection PyUnresolvedReferences
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"]  # type: ignore
def loss_convergence(losses: List[List[float]], fname: Optional[str] = None) -> None:  # pragma: no cover
    """Scatter-plot the per-iteration marginal log-likelihood trace of each fold.

    Args:
        losses: One list of per-iteration loss values per cross-validation fold.
        fname: If given, the figure is saved to this path (tight bounding box).
    """
    fig = plt.figure(figsize=(6, 4))
    for fold_idx, fold_losses in enumerate(losses):
        plt.scatter(range(len(fold_losses)), fold_losses, label=f"Fold {fold_idx}", s=3)
    plt.legend()
    plt.xlabel("Iteration")
    plt.ylabel("Marginal log-likelihood")
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def opt_distance(X, optx, j):  # pragma: no cover
    """Squared Euclidean distance from each row of ``X`` to ``optx``, ignoring dimension ``j``.

    Equivalent to ``(X[i] - optx) @ Ij @ (X[i] - optx)`` with ``Ij`` the identity
    matrix whose ``(j, j)`` entry is zeroed, but computed vectorized in O(N * D)
    instead of materializing the matrix and looping over the N rows.

    Args:
        X: Array of shape (N, D) of input points.
        optx: Array of shape (D,) — the reference (optimum) point.
        j: Index of the dimension excluded from the distance.

    Returns:
        np.ndarray of shape (N,) with the per-row masked squared distances.
    """
    diff = np.asarray(X) - np.asarray(optx)
    sq = diff ** 2
    # Full squared distance minus the contribution of the excluded dimension.
    return sq.sum(axis=1) - sq[:, j]
def simulation_panel1d(
    ax: Axes,
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    slice_dim: int,
    bounds: Tuple[float, float],
    slice_x: np.ndarray,
    slice_y: Optional[float] = None,
    resolution: int = 101,
    color="b",
) -> Axes:  # pragma: no cover
    """Plot a 1D slice of the model's predictive distribution through ``slice_x``.

    The input dimension ``slice_dim`` is swept across ``bounds`` while all other
    dimensions are held fixed at the values in ``slice_x``. The predicted mean is
    drawn with a +/- one standard deviation band. If ``slice_y`` is given, the
    point ``(slice_x[slice_dim], slice_y)`` is marked (e.g. the model optimum);
    otherwise a dashed vertical line marks the slice location.
    """
    lo, hi = bounds
    grid = np.linspace(lo, hi, resolution)
    # Replicate the slice location per grid point, then vary only slice_dim.
    inputs = np.tile(slice_x, (resolution, 1))
    inputs[:, slice_dim] = grid
    mean, variance = predict_func(inputs)
    std = np.sqrt(variance)
    ax.plot(grid, mean, "-", label="Prediction", c=color)
    ax.fill_between(
        grid,
        mean - std,
        mean + std,
        alpha=0.25,
        fc=color,
        ec="None",
        label="68% confidence interval",
    )
    ax.set_xlim(lo, hi)
    if slice_y is None:
        ax.axvline(slice_x[slice_dim], alpha=0.2, linestyle="--")
    else:
        ax.plot(slice_x[slice_dim], slice_y, "o", markeredgecolor=color, markerfacecolor="w", label="Optimum")
    return ax
def get_logged_img_title(title: Optional[str] = None, fname: Optional[PathOrString] = None) -> str:
    """Build a title for logging plots on AML.

    Args:
        title: Base title; defaults to "plot" when None (or empty).
        fname: Optional path of the saved figure; its stem (which encodes
            information such as iteration and seed number) is appended.

    Returns:
        The base title, with "_<fname stem>" appended when ``fname`` is given.
    """
    title = title or "plot"
    if fname is not None:
        # Accept both str and Path, as the PathOrString annotation promises.
        # (Previously this asserted isinstance(fname, Path), crashing on str paths.)
        title += f"_{Path(fname).stem}"
    return title
def plot_prediction_slices1d(
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    parameter_space: "OrderedDict[str, Tuple[float, float]]",
    slice_loc: np.ndarray,
    slice_y: Optional[float] = None,
    scatter_x: Optional[np.ndarray] = None,
    scatter_y: Optional[np.ndarray] = None,
    output_label: str = "Objective",
    resolution: int = 100,
    size: int = 3,
    num_cols: Optional[int] = None,
    title: Optional[str] = None,
    fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]:  # pragma: no cover
    """
    Plot slices of the predictions from the model crossing a given location.

    Args:
        predict_func (Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]]): A function taking an input and returning
            mean and variance of the predictive distribution at those points.
        parameter_space (OrderedDict[str, Tuple[float, float]]): An ordered dictionary mapping input names to bounds.
        slice_loc (np.ndarray): The point through which to plot the slices of the predictive distribution.
        slice_y (Optional[float], optional): The output value at the slice location. Defaults to None.
        scatter_x (Optional[np.ndarray], optional): Points which to scatter on the plot (project onto the slices).
            If given, scatter_y must be specified as well. Defaults to None.
        scatter_y (Optional[np.ndarray], optional): Output values corresponding to scatter_x. Defaults to None.
        output_label (str, optional): Label for the output axis. Defaults to "Objective".
        resolution (int, optional): Resolution (num. points) for the grid of input points along each slice.
            Defaults to 100.
        size (int, optional): Size of each axis with one slice in inches. Defaults to 3.
        num_cols (Optional[int], optional): Maximum number of columns. If more slices, the axes will wrap.
            Defaults to num_cols = ceil(sqrt(num_input_dims)).
        title (Optional[str], optional): Title for the plot. Defaults to None.
        fname (Optional[PathOrString], optional): File-name where to save the plot. Defaults to None.

    Returns:
        The figure and the 2d array of axes (one panel per input dimension; unused grid cells are hidden).
    """
    parameters = list(parameter_space.items())
    n_inputs = len(parameter_space)
    # Default to a roughly square grid of sub-plots
    num_cols = num_cols if num_cols else int(np.ceil(np.sqrt(n_inputs)))
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        nrows=num_rows,
        ncols=num_cols,
        sharey=True,
        figsize=(size * num_cols, size * num_rows),
    )
    # Ensure axs is a 2d grid of axes even when num_rows == 1
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i, j]
            slice_dim = i * num_cols + j
            if slice_dim < n_inputs:
                param_name, bounds = parameters[slice_dim]
                simulation_panel1d(
                    ax=ax,
                    predict_func=predict_func,
                    slice_x=slice_loc,
                    slice_y=slice_y,
                    bounds=bounds,
                    slice_dim=slice_dim,
                    color=colors[0],
                    resolution=resolution,
                )
                # Scatter-plot data points if points to scatter given
                if scatter_x is not None and scatter_y is not None:
                    ax.scatter(scatter_x[:, slice_dim], scatter_y, s=3, c=colors[1])
                ax.set_xlabel(param_name)
            else:
                # Hide grid cells beyond the number of input dimensions
                ax.set_visible(False)
        axs[i, 0].set_ylabel(output_label)
    if title is not None:
        fig.suptitle(title)
    # noinspection PyUnresolvedReferences
    plt.tight_layout()
    sns.despine()
    # Log the figure image to the AzureML run context
    run = Run.get_context()
    logged_img_title = get_logged_img_title(title="plot1d", fname=fname)
    run.log_image(name=logged_img_title, plot=plt)
    # If filename given, save
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
    return fig, axs
def plot_prediction_slices2d(
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    parameter_space: "OrderedDict[str, Tuple[float, float]]",
    slice_loc: np.ndarray,
    scatter_x: Optional[np.ndarray] = None,
    scatter_y: Optional[np.ndarray] = None,
    output_label: Optional[str] = None,
    resolution: int = 100,
    size: int = 3,
    title: Optional[str] = None,
    fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]:  # pragma: no cover
    """Plot 2d slices of the model's mean prediction through a given location, one panel per pair of inputs.

    The panels form a lower-triangular grid (one panel for each pair of input dimensions), all sharing a
    single colour-scale and colorbar. The figure is logged to the AzureML run context.

    Args:
        predict_func: Function returning (mean, variance) of the predictive distribution at given inputs;
            only the mean is plotted.
        parameter_space: Ordered mapping of input names to (lower, upper) bounds.
        slice_loc: Point through which every 2d slice passes.
        scatter_x: If given (together with scatter_y), data points to scatter onto each slice.
        scatter_y: Output values for scatter_x; used to scale the scattered marker sizes.
        output_label: Label for the colorbar.
        resolution: Number of grid points per axis in each slice.
        size: Size of each panel in inches.
        title: Optional figure title.
        fname: If given, save the plot to this location.

    Returns:
        The figure and the 2d array of axes.
    """
    parameters = list(parameter_space.items())
    n_inputs = len(parameters)
    assert n_inputs >= 2, "At least two input dimensions are required to plots 2d slice"
    # Keep a running minimum and maximum of function values in 2D slices
    func_values_min, func_values_max = np.inf, -np.inf
    # Keep track of contour sets returned for each axis
    contour_sets = []
    num_cols = n_inputs - 1  # Number of rows of axes equals number of columns
    # Construct axes
    # noinspection PyTypeChecker
    fig = plt.figure(figsize=(size * num_cols, size * num_cols))
    axes, cbar_axes = make_lower_triangular_axis_grid_with_colorbar_axes(fig=fig, num_cols=num_cols, num_colorbars=1)
    for i in range(num_cols):  # i iterates over the rows of the plots
        y_param_dim = i + 1
        y_param_name, y_bounds = parameters[y_param_dim]
        for j in range(num_cols):  # j iterates over the columns of the plots
            ax = axes[i, j]
            if j <= i:
                # Indices of the inputs to plot
                x_param_dim = j
                x_param_name, x_bounds = parameters[x_param_dim]
                # Compute the data for the 2D slice plot
                xx, yy, func_values_slice = calc_2d_slice(
                    func=lambda x: predict_func(x)[0],  # Only interested in the mean of the prediction
                    dim_x=x_param_dim,
                    dim_y=y_param_dim,
                    slice_loc=slice_loc,
                    slice_bounds_x=x_bounds,
                    slice_bounds_y=y_bounds,
                    resolution=resolution,
                )
                # Plot the 2D slice
                _, contour_set = plot_2d_slice_from_arrays(xx, yy, func_values_slice, ax=ax, plot_type="contourf")
                contour_sets.append(contour_set)
                # Keep a running minimum and maximum of function values in slices
                func_values_min = min(func_values_min, func_values_slice.min())  # type: ignore
                func_values_max = max(func_values_max, func_values_slice.max())  # type: ignore
                # Scatter-plot the data
                if scatter_x is not None and scatter_y is not None:
                    if len(scatter_y) > 0:
                        # Marker size grows with the (min-shifted) output value
                        s = (scatter_y - np.min(scatter_y)) / np.max(scatter_y) + 1
                        ax.scatter(scatter_x[:, x_param_dim], scatter_x[:, y_param_dim], s=5 * s, c="yellow")
                ax.set_xlim(x_bounds[0], x_bounds[1])
                ax.set_ylim(y_bounds[0], y_bounds[1])
                if i == num_cols - 1:
                    ax.set_xlabel(x_param_name)
                else:
                    # Remove redundant ticks on inner plots
                    ax.xaxis.set_visible(False)
                if j > 0:
                    ax.yaxis.set_visible(False)
        axes[i, 0].set_ylabel(y_param_name)
    # Update norm limits for colour scaling for each axis:
    for im in contour_sets:
        im.set_clim(vmin=func_values_min, vmax=func_values_max)
    # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
    cb = fig.colorbar(contour_sets[-1], cax=cbar_axes[0])
    cb.set_label(output_label)
    cbar_axes[0].yaxis.set_ticks_position("left")
    if title is not None:
        fig.suptitle(title)
    # Log the figure image to the AzureML run context
    run = Run.get_context()
    logged_img_title = get_logged_img_title(title="plot2d", fname=fname)
    run.log_image(name=logged_img_title, plot=plt)
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
    return fig, axes
def plot_calibration_curve(
    predict_func: Callable, datasets: List[Dataset], labels: List[str]
) -> Tuple[plt.Figure, plt.Axes]:  # pragma: no cover
    """Plot a calibration curve - the curve showing the percentage of points within each confidence interval around
    the mean prediction from the model. This is useful for gauging how reliable uncertainty estimates from the model
    are.

    Args:
        predict_func (Callable): A function taking an array of inputs and returning a tuple of two arrays: mean
            prediction and variance of the predictive distribution (which is assumed to be Gaussian).
        datasets (List[Dataset]): A list of datasets for which to plot calibration curves.
        labels (List[str]): A list of labels for each dataset of the same length as datasets.

    Returns:
        The figure and the axis holding the calibration curves.
    """
    fig, ax = plt.subplots(figsize=(7, 6))
    with sns.axes_style("whitegrid"):
        # Plot the individual calibration curves for each dataset
        for i, dataset in enumerate(datasets):
            _make_single_calibration_curve(predict_func, dataset.inputs_array, dataset.output_array, ax, labels[i])
        # Fix: draw on the axis created above explicitly (previously plt.plot/plt.legend, which target
        # pyplot's global "current axes" and could draw on the wrong figure if one is active elsewhere).
        ax.plot([0, 1], [0, 1], ":", color="gray", alpha=0.3, label="Ideal", zorder=-1)
        ax.set_xlabel("Predictive Confidence Interval")
        ax.set_ylabel("Percentage points within that interval")
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        ax.legend()
    sns.despine()
    return fig, ax
def _make_single_calibration_curve(
    predict_func: Callable, x: np.ndarray, y: np.ndarray, ax: Optional[plt.Axes] = None, label: Optional[str] = None
) -> plt.Axes:  # pragma: no cover
    """Add a single calibration curve (fraction of observations falling within each centred confidence
    interval of the Gaussian predictive distribution) for data (x, y) to axis ax.

    Args:
        predict_func: Function returning (mean, variance) of the predictive distribution at inputs x.
        x: Input points at which to evaluate the model.
        y: Observed outputs corresponding to x.
        ax: Axis to draw on; a new figure/axis is created when None.
        label: Legend label for the curve.

    Returns:
        The axis the curve was drawn on.
    """
    if ax is None:
        # Fix: plt.subplot(figsize=...) was invalid (plt.subplot takes no figsize kwarg and returns a
        # single Axes, not a (figure, axes) pair); plt.subplots is the intended API.
        fig, ax = plt.subplots(figsize=(5, 5))  # type: ignore
    assert ax is not None
    mean, variances = predict_func(x)
    stds = np.sqrt(variances)
    residuals = np.abs(y - mean)
    # Normalised residuals is the number of standard deviations the observed point is away from mean prediction
    normalised_residuals = residuals / stds
    normalised_residuals = np.sort(normalised_residuals, axis=0).ravel()
    # Convert num. of standard deviations from mean to confidence interval (centered around the mean)
    confidence = scipy.stats.norm.cdf(normalised_residuals) - scipy.stats.norm.cdf(  # type: ignore
        -normalised_residuals
    )
    confidence = np.insert(confidence, 0, 0.0)  # Insert a (0% confidence, 0% points) entry at start
    # Percent points observed
    perc_points = np.linspace(0, 1, len(confidence))
    # Append the end-point of the curve (100% confidence, 100% points observed)
    confidence = np.append(confidence, [1.0])
    perc_points = np.append(perc_points, [1.0])
    # Fix: draw on the axis passed in (previously plt.step, which targets the global current axes and
    # could draw on a different figure than the caller intended).
    ax.step(confidence, perc_points, where="post", label=label, alpha=0.75, linewidth=3.0)
    return ax
# noinspection PyTypeChecker
def _decorate_axis_predicted_against_observed(model: "BayesOptModel", ax: Axes) -> None:  # pragma: no cover
    """Label a predicted-vs-observed scatter axis, draw the identity line and shade tolerance bands.

    The x and y limits are made identical (square) so the identity line is the diagonal; the shaded
    bands sit at +-log10(2) and +-log10(3) around it (i.e. factors of 2 and 3 if the output is in
    log10 space — presumably the case here, to be confirmed against the output transform).
    """
    transformed_name = model.train.transformed_output_name
    ax.set_xlabel(f"Observed: {transformed_name}")
    ax.set_ylabel(f"Predicted: {transformed_name}")
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    # Square limits covering both current axes' ranges
    lims = np.array([np.minimum(x_lo, y_lo), np.maximum(x_hi, y_hi)])
    # Identity line: a perfect prediction lies on this diagonal
    ax.plot(lims, lims, "k--")
    # Shaded bands at +-log10(2) (darker) and +-log10(3) (lighter), behind the data points
    for width_factor, shade, z_order in ((2.0, (0.7, 0.7, 0.7), -1), (3.0, (0.85, 0.85, 0.85), -2)):
        ax.fill_between(lims, lims - np.log10(width_factor), lims + np.log10(width_factor), color=shade, zorder=z_order)
    ax.set_xlim(lims)
    ax.set_ylim(lims)
def plot_train_test_predictions(
    model: "BayesOptModel",
    category: Optional[str] = None,
    ms: int = 10,
    alpha: float = 0.25,
    output_path: Optional[PathOrString] = None,
) -> None:  # pragma: no cover
    """Plot predicted outputs against the actual observed outputs for the train-set and the test-set for this model.

    If category is given, plot the points in different colour depending on the category.

    Args:
        model: Model to plot predictions of
        category (optional): Which category to condition on. Points will be plotted with different colour
            depending on the value of that category. Defaults to None (don't condition on any category).
        ms (optional): Marker size. Defaults to 10.
        alpha (optional): Opacity of plotted points. Defaults to 0.25.
        output_path (optional): Path where to save the plot. Defaults to None.

    TODO: There is an argument to be made that these plots should be made in the original,
    rather than dataset, space if possible. One might want to compare the performance of the models from the plots
    when different pre-processing steps are used for instance.
    """
    axs: List[Axes]
    # noinspection PyTypeChecker
    f, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(10, 5))  # type: ignore
    # Add the plot for train points on the first axis.
    # Bug fix: previously np.asarray(...) was applied unconditionally, so with no category the helper
    # received np.asarray(None) — a 0-d object array — which defeats its `labels is None` check.
    # Only convert to an array when a category was actually selected.
    train_labels = np.asarray(model.train.categorical_inputs_df[category]) if category else None
    _add_errorbar_predictions_against_observed(
        ax=axs[0], model=model, dataset=model.train, labels=train_labels, ms=ms, alpha=alpha
    )
    # Add the plot for test points on the second axis
    assert model.test is not None
    test_labels = np.asarray(model.test.categorical_inputs_df[category]) if category else None
    _add_errorbar_predictions_against_observed(
        ax=axs[1], model=model, dataset=model.test, labels=test_labels, ms=ms, alpha=alpha
    )
    if category is not None:
        axs[0].legend()
    for ax in axs:
        _decorate_axis_predicted_against_observed(model, ax)
    axs[0].set_title(f"Train ($r = {model.r_train:.3f}$)")
    axs[1].set_title(f"Test ($r = {model.r_test:.3f}$)")
    sns.despine()
    # noinspection PyUnresolvedReferences
    plt.tight_layout()  # type: ignore
    if output_path is not None:
        f.savefig(output_path, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()  # type: ignore
def _add_errorbar_predictions_against_observed(
    ax: Axes, model: "BayesOptModel", dataset: Dataset, labels: Optional[np.ndarray], ms: float, alpha: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:  # pragma: no cover
    """Add an errorbar plot of model predictions against the observed outputs of a dataset onto axis ax.

    When labels is given, one errorbar series is drawn per unique label value (so a legend can
    distinguish them); otherwise a single unlabelled series is drawn.

    Returns:
        Tuple of (predicted means, predicted standard deviations, observed outputs).
    """
    # Mean and variance of the surrogate model predictions at the dataset inputs
    predicted_mean, predicted_var = model.minus_predict(dataset.inputs_array)
    predicted_std = np.sqrt(predicted_var)
    # The true (observed) outputs
    observed = dataset.output_array
    if labels is None:
        ax.errorbar(observed, predicted_mean, predicted_std, fmt=".", ms=ms, alpha=alpha)  # pragma: no cover
    else:
        for label in np.unique(labels):
            idx = np.where(labels == label)
            ax.errorbar(
                observed[idx], predicted_mean[idx], predicted_std[idx], fmt=".", ms=ms, alpha=alpha, label=label
            )
    return predicted_mean, predicted_std, observed
def plot_predictions_against_observed(
    ax: Axes,
    models: List["BayesOptModel"],
    datasets: List[Dataset],
    title: str,
    category: Optional[str] = None,
    by_file: bool = False,
    ms: float = 10,
    alpha: float = 0.25,
) -> float:  # pragma: no cover
    """
    Plot predicted outputs against the actual observed outputs for the datasets given (which could be the corresponding
    test-sets of each of the cross-validation models).

    If "category" is given, plot the points in different colour depending on the value of the categorical variable
    "category".

    Args:
        ax: Axis to plot on
        models: List of models to corresponding to each cross-validation fold
        datasets: A list of same length as models with the corresponding datasets to evaluate each model on.
        title: Title for the plot (e.g. "Cross-validation")
        category (optional): Which category to condition on. Points will be plotted with different colour
            depending on the value of that category. Defaults to None (don't condition on any category).
        by_file (bool): Points will be plotted with colours reflecting the FILE identifier.
        ms (optional): Marker size. Defaults to 10.
        alpha (optional): Opacity of plotted points. Defaults to 0.25.

    Returns:
        Pearson correlation coefficient (combined for all folds)
    """
    # Accumulate predictions and observations over all folds to compute one combined correlation
    Y_pred, Y_obs = np.empty((0,)), np.empty((0,))
    legend_title: Optional[str] = None
    for model, dataset in zip(models, datasets):
        if by_file:  # pragma: no cover
            labels = dataset.file
            legend_title = FILE
        elif category is not None:
            labels = np.asarray(dataset.categorical_inputs_df[category])
            legend_title = category
        else:  # pragma: no cover
            labels = None
            legend_title = None
        y_mean_test, _, y_obs_test = _add_errorbar_predictions_against_observed(
            ax=ax, model=model, dataset=dataset, labels=labels, ms=ms, alpha=alpha
        )
        Y_pred = np.append(Y_pred, y_mean_test)
        Y_obs = np.append(Y_obs, y_obs_test)
    # NOTE(review): the legend is only drawn when `category` is set; with by_file=True the per-file
    # series receive labels but no legend appears — confirm this is intended.
    if category:
        ax.legend(title=legend_title)
    # Compute Pearson's correlation
    r = float(np.corrcoef(Y_pred, Y_obs)[1, 0])
    _decorate_axis_predicted_against_observed(models[0], ax)
    ax.set_title(f"{title} ($r = {r:.3f}$)")
    # noinspection PyUnresolvedReferences
    plt.tight_layout()  # type: ignore
    sns.despine(ax=ax)
    return r
def hmc_traces(burnin: pd.DataFrame, samples: pd.DataFrame, fname: Optional[str] = None):  # pragma: no cover
    """Plot HMC trace plots (value against sample index) for the burn-in phase and the retained samples.

    Args:
        burnin: Burn-in phase samples with a "Sample" column; plotted via pandas' DataFrame.plot
            (the previous `Axes` annotation looked incorrect given the `.plot(x=..., title=...)` calls below).
        samples: Post burn-in samples in the same format.
        fname: If given, save the figure to this path.
    """
    # noinspection PyTypeChecker
    f, axs = plt.subplots(1, 2, sharey=True, figsize=(10, 4))
    sample = "Sample"
    burnin.plot(x=sample, ax=axs[0], title="Burn-in")
    samples.plot(x=sample, ax=axs[1], title="Samples")
    sns.despine()
    if fname is not None:
        f.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def hmc_samples(samples_dataframe: pd.DataFrame, fname: Optional[PathOrString] = None) -> None:
    """Visualise HMC samples of the model parameters as a grid of pair-wise scatter-plots.

    Args:
        samples_dataframe: One row per HMC sample; columns hold the sampled quantities
            (e.g. parameter values or log-likelihood of that sample).
        fname: If given, the plot is saved to this location. Defaults to None.
    """
    # Pair-wise scatter grid with KDE plots on the diagonal
    pair_grid = sns.pairplot(samples_dataframe, diag_kind="kde")
    sns.despine()
    if fname is not None:
        pair_grid.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def distance(x: np.ndarray) -> np.ndarray:  # pragma: no cover
    """Compute the symmetric matrix of pairwise Euclidean distances between the entries of x.

    Each entry x[i] may be a scalar, vector or matrix; the distance is np.linalg.norm(x[i] - x[j])
    (the Frobenius norm for matrices), exactly as the original per-pair loop computed.

    Args:
        x: Array whose first axis indexes the n points.

    Returns:
        Array of shape (n, n) where entry (i, j) is ||x[i] - x[j]||.
    """
    n = len(x)
    # Flatten each entry so one code path handles scalar/vector/matrix entries: the Frobenius norm
    # of x[i] - x[j] equals the 2-norm of its flattened form. This replaces the original O(n^2)
    # Python double loop with a single vectorized computation.
    flat = np.asarray(x, dtype=float).reshape(n, -1)
    diffs = flat[:, np.newaxis, :] - flat[np.newaxis, :, :]
    # noinspection PyUnresolvedReferences
    return np.linalg.norm(diffs, axis=-1)
# noinspection PyUnresolvedReferences
def experiment_distance(expt: np.ndarray, fname: Optional[str] = None) -> None:  # pragma: no cover
    """Visualise the pairwise Euclidean distances between experiments as a coloured scatter grid.

    Args:
        expt: Array with one row per experiment (distances are computed between rows via distance()).
        fname: If given, save the figure to this path.
    """
    n = np.shape(expt)[0]
    f = plt.figure(figsize=(4.5, 4))
    # One marker per (experiment, experiment) pair, coloured by the distance between them
    xx, yy = np.meshgrid(list(range(n)), list(range(n)))
    d = distance(expt)
    # TODO: d is an array of float, but c should be a list of color names.
    # noinspection PyTypeChecker
    plt.scatter(x=xx, y=yy, c=d, cmap="jet")  # type: ignore
    plt.gca().invert_yaxis()
    experiment_id = "Experiment ID"
    plt.xlabel(experiment_id)
    plt.ylabel(experiment_id)
    plt.colorbar(label="Euclidean distance")
    sns.despine()
    if fname is not None:
        f.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def plot_pred_objective_for_batch(
    batch: pd.DataFrame,
    predict_func: Callable[[pd.DataFrame], Tuple[np.ndarray, np.ndarray, np.ndarray]],
    bounds: "OrderedDict[str, Tuple[float, float]]",
    dataset: Optional[Dataset] = None,
    columns_to_plot: Optional[List[str]] = None,
    units: Optional[List[Union[str, None]]] = None,
    num_cols: int = 4,
    input_scales: Optional[List[str]] = None,
    output_scale: str = "linear",
    fname: Optional[Path] = None,
    output_label: str = "Output",
    subplot_size: float = 3,
) -> None:  # pragma: no cover
    """Plot the batch of points generated from the bayesopt procedure against each input dimension, together
    with the objective value prediction from the model.

    Args:
        batch: DataFrame with the batch of inputs to plot model predictions for (could be in either the model input
            space, or original "pretransform" input space)
        predict_func: Function which returns the mean and lower and upper confidence bounds for the prediction at each
            point. This is helpful if preprocessing needs to be applied to data before passing to a model.
        bounds:
            Constraints on the input-space for the Bayes. Opt. procedure to visualise.
        dataset: Experiment data to plot alongside. Currently only plots in the original input space
        columns_to_plot (optional): Names of the batch columns to plot. Defaults to all columns of batch.
        units (optional): Unit label for each plotted column (or None for a unit-less axis).
            Defaults to None for all columns.
        num_cols: Maximum number of columns in the plot
        input_scales (optional): Scales of each input dimension (log, symlog, linear...). Defaults to linear for all.
        output_scale (optional): Scale of the output dimension. Defaults to "linear".
        fname: Path to where to save the plot. Don't save it if None. Defaults to None.
        output_label: The label for the y-axis. Defaults to 'Output'.
        subplot_size: The size in inches of each individual subplot
    """
    # Get model predictions at the experiment batch locations (mean, lower confidence bound, upper confidence bound)
    mean_pred, lb_pred, ub_pred = predict_func(batch)
    # Asymmetric errorbar extents below/above the mean
    y_error = (mean_pred - lb_pred, ub_pred - mean_pred)
    # If input_scales not specified, default all to 'linear'
    input_scales = input_scales if input_scales else ["linear"] * len(bounds)
    # Get the names of variables to plot
    columns_to_plot = columns_to_plot if columns_to_plot else batch.columns  # type: ignore
    assert columns_to_plot is not None
    units = units if units else [None] * len(columns_to_plot)  # type: ignore
    n_inputs = len(columns_to_plot)
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        nrows=num_rows, ncols=num_cols, sharey=True, figsize=(subplot_size * num_cols, subplot_size * num_rows)
    )
    # Ensure that axs are a 2d grid of axes, even if num_rows=1
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i, j]
            input_idx = i * num_cols + j
            if input_idx < n_inputs:
                col = columns_to_plot[input_idx]
                # Plot model predictions
                ax.errorbar(
                    batch[col].values,
                    mean_pred,
                    yerr=y_error,
                    fmt="o",
                    color=colors[1],
                    markerfacecolor="w",
                    label="Model",
                )
                # Plot data in dataset
                if dataset:
                    # TODO: This bit currently assumes that the plotting happens in pretransformed space.
                    dataset_x = dataset.pretransform_df[col].values
                    dataset_y = dataset.pretransform_df[dataset.pretransform_output_name].values
                    ax.scatter(dataset_x, dataset_y, s=3, c=colors[0], label="Data")
                # Set axis scales
                ax.set_yscale(output_scale)
                ax.set_xscale(input_scales[input_idx])
                axlabel = col + f" ({units[input_idx]})" if units[input_idx] else col
                ax.set_xlabel(axlabel)
                # Mark the optimisation bounds for this input along the bottom of the axis
                ax.axvspan(*bounds[col], ymax=0.1, color=colors[1], alpha=0.3, label="Bounds")
            else:
                # Hide grid cells beyond the number of plotted inputs
                ax.set_visible(False)
        axs[i, 0].set_ylabel(output_label)
    # Build a single figure-level legend from the first axis' handles
    # noinspection PyUnresolvedReferences
    lines, labels = fig.axes[0].get_legend_handles_labels()
    fig.legend(lines, labels, loc=[0.82, 0.2])
    # noinspection PyUnresolvedReferences
    plt.tight_layout()
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def acquisition1d(
    model: "BayesOptModel",
    x0: np.ndarray,
    is_input_normalised: bool = True,
    num_cols: int = 5,
    num_xs: int = 101,
    title: Optional[str] = None,
    fname: Optional[Path] = None,
) -> None:  # pragma: no cover
    """
    Plot the acquisition function in 1d variations around a reference point

    Args:
        model: The model
        x0 (numpy array): The reference point
        is_input_normalised (bool): Whether the input data are normalised
        num_cols (int): Number of columns in subplot grid
        num_xs (int): Number of grid-points to calculate the input axis
        title (str): Figure title
        fname (str): Optional file path to save figure
    """
    # Acquisition value at the reference point itself (marked on every panel)
    y0 = model.acquisition.evaluate(x0[np.newaxis])  # type: ignore
    parameters = list(model.continuous_parameters.items())
    n_inputs = len(parameters)
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        num_rows, num_cols, sharex=is_input_normalised, sharey=True, figsize=(3 * num_cols, 3 * num_rows)
    )
    # Ensure that axs are a 2d grid of axes, even if num_rows is 1
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i][j]
            input_idx = i * num_cols + j
            if input_idx < n_inputs:
                pname, bounds = parameters[input_idx]
                # make a grid of inputs along the input_idx dimension while keeping the other values same as in x0
                # NOTE(review): the grid starts at 0 rather than bounds[0] — presumably fine for normalised
                # inputs whose lower bound is 0, but confirm for parameters with a non-zero lower bound.
                cs = np.linspace(0, bounds[1], num_xs)
                xs = np.tile(x0, (num_xs, 1))
                xs[:, input_idx] = cs
                ax.plot(cs, model.acquisition.evaluate(xs), "-")  # type: ignore
                ax.plot(x0[input_idx], y0.ravel(), marker="o", markerfacecolor="w", label="Model optimum")
                if is_input_normalised:
                    ax.set_title(pname)
                    if i == num_rows - 1:
                        ax.set_xlabel(RELATIVE_VALUE)
                else:
                    ax.set_xlabel(pname)
            else:
                # Hide grid cells beyond the number of input dimensions
                ax.set_visible(False)
        axs[i][0].set_ylabel("Acquisition")
    if title is not None:
        fig.suptitle(title)
    else:
        # tight_layout is only applied when there is no suptitle
        # noinspection PyUnresolvedReferences
        plt.tight_layout()
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    plt.close(fig)
def plot_acquisition_slices(
    model: "BayesOptModel",
    dataset: Dataset,
    slice_loc: np.ndarray,
    input_names: Optional[List[str]] = None,
    input_scales: Optional[List[PLOT_SCALE]] = None,
    onehot_context: Optional[Iterable[float]] = None,
    output_label: str = "Acquisition Value",
    fname: Optional[Path] = None,
) -> None:  # pragma: no cover
    """
    Plot 2d acquisition function slices through the input space. A wrapper
    around plot_multidimensional_function_slices().

    Args:
        model: Model for which to plot slices through acquisition function.
        dataset: Dataset with the preprocessing transform to apply to inputs.
        slice_loc: Location at which to take slices in original space.
        input_names: Names of the input variables in original space.
        input_scales: Plotting scales for the inputs (linear, log, etc.).
        onehot_context: If given, the onehot encoding of the categorical variables on which to condition
            the slices (i.e. in all slices the input for categorical variables will be fixed to that value)
        output_label: Label for the acquisition function output (can be acqusition function name).
        fname: If given, the plot will be saved there.
    """

    def acquisition_with_preprocessing(x: np.ndarray):
        # Map points from the original ("pretransform") space into the model's input space,
        # then evaluate the acquisition function there.
        assert model.acquisition is not None
        x_trans = dataset.preprocessing_transform(pd.DataFrame(x, columns=input_names))
        x_trans = x_trans[dataset.transformed_input_names].values
        if onehot_context:
            # Condition on the categoricals by appending the same one-hot encoding to every row
            x_trans = np.concatenate((x_trans, np.tile(onehot_context, [x_trans.shape[0], 1])), axis=1)
        return model.acquisition.evaluate(x_trans)  # type: ignore # auto

    fig, _ = plot_multidimensional_function_slices(
        func=acquisition_with_preprocessing,
        slice_loc=slice_loc,
        bounds=list(dataset.pretransform_cont_param_bounds.values()),
        input_names=input_names,
        input_scales=input_scales,
        output_scale="linear",
        output_label=output_label,
    )
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    return
def plot_gpy_priors(
    priors: List[GPy.priors.Prior], param_names: List[str], size: float = 3, allow_logspace: bool = False
) -> plt.Figure:  # pragma: no cover
    """Visualise a set of priors on parameters (corresponding, for instance, to model parameters) by plotting
    their density.

    Args:
        priors (List[GPy.priors.Prior]): A list of GPy.priors.Prior objects the PDFs of which will be drawn.
        param_names (List[str]): List of names of parameters corresponding to the priors.
        size (float, optional): Size (height) of each sub-plot in inches. Defaults to 3.
        allow_logspace (bool, optional): Whether to plot a parameter in log-space if the parameter is strictly
            positive. Defaults to False.

    Returns:
        plt.Figure: The figure
    """
    assert len(param_names) == len(priors)
    num_params = len(param_names)
    fig, axes = plt.subplots(ncols=num_params, figsize=(1.3 * size * num_params, size))
    # Fix: plt.subplots returns a bare Axes (not an array) when ncols == 1, so axes[i] would fail
    # for a single prior; wrap in a 1-d array unconditionally.
    axes = np.atleast_1d(axes)
    # Renamed from `colors` to avoid shadowing the module-level colour-cycle list of the same name.
    palette = sns.color_palette("pastel", num_params)
    with sns.axes_style("whitegrid"):
        for i, (param_name, prior) in enumerate(zip(param_names, priors)):
            samples = prior.rvs(1000)  # type: ignore
            xmin, xmax = np.percentile(samples, 0.1), np.percentile(samples, 99.9)  # Remove outliers
            if samples.min() > 0 and allow_logspace:
                # Strictly positive parameter: use logarithmic axis, bins and grid
                axes[i].set_xscale("log")
                bins = np.geomspace(xmin, xmax, 6)
                x_grid = np.geomspace(xmin, xmax, 100)
            else:
                bins = np.linspace(xmin, xmax, 50)
                x_grid = np.linspace(xmin, xmax, 100)
            axes[i].hist(samples, bins=bins, density=True, alpha=0.7, color=palette[i])
            axes[i].plot(x_grid, prior.pdf(x_grid), linewidth=2.0, color=palette[i])
            # Mark mean and +- standard deviation
            samples_mean, samples_std = samples.mean(), samples.std()
            axes[i].axvline(samples_mean, color="black", zorder=-1, label="Mean")
            axes[i].axvspan(
                samples_mean - samples_std,
                samples_mean + samples_std,
                color="gray",
                zorder=-1,
                alpha=0.2,
                hatch="/",
                label="Mean$\\pm$std.",
            )
            axes[i].set_title(param_name, fontsize=12)
            axes[i].set_xlabel("$x$")
            axes[i].set_xlim(xmin, xmax)
        plt.legend()
    axes[0].set_ylabel("$p(x)$")
    plt.subplots_adjust(wspace=0.2)
    return fig
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Iterable, List, Optional, Tuple, Union
# Avoid spurious X windows errors, see:
# https://stackoverflow.com/questions/37604289/tkinter-tclerror-no-display-name-and-no-display-environment-variable
import GPy
import matplotlib.pyplot as plt # noqa: E402
import numpy as np
import pandas as pd
import scipy.stats
import seaborn as sns
from abex.constants import FILE
from abex.dataset import Dataset
from abex.plotting.composite_core import plot_multidimensional_function_slices
from abex.plotting.core import (
PLOT_SCALE,
calc_2d_slice,
make_lower_triangular_axis_grid_with_colorbar_axes,
plot_2d_slice_from_arrays,
)
from azureml.core import Run
from matplotlib.axes import Axes
from psbutils.type_annotations import PathOrString
if TYPE_CHECKING:
# Imports BayesOptModel for static type-checks only to get around circular import problem
from abex.bayesopt import BayesOptModel # pragma: no cover
RELATIVE_VALUE = "Relative value"
# noinspection PyUnresolvedReferences
colors = plt.rcParams["axes.prop_cycle"].by_key()["color"] # type: ignore
def loss_convergence(losses: List[List[float]], fname: Optional[str] = None) -> None:  # pragma: no cover
    """Scatter-plot the optimisation loss trace for each fold against iteration number.

    Args:
        losses: One list of marginal log-likelihood values per fold.
        fname: If given, save the figure to this path.
    """
    fig = plt.figure(figsize=(6, 4))
    for fold_idx, fold_losses in enumerate(losses):
        plt.scatter(list(range(len(fold_losses))), fold_losses, label=f"Fold {fold_idx}", s=3)
    plt.legend()
    plt.xlabel("Iteration")
    plt.ylabel("Marginal log-likelihood")
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def opt_distance(X, optx, j):  # pragma: no cover
    """Return, per row of X, the squared Euclidean distance to optx with input dimension j excluded.

    Computed as offset @ M @ offset per row, where M is the identity with entry (j, j) zeroed so that
    dimension j contributes nothing.
    """
    # noinspection PyUnresolvedReferences
    num_points, num_inputs = np.shape(X)
    mask = np.eye(num_inputs)
    mask[j, j] = 0  # zero weight for the excluded dimension
    distances = np.zeros(num_points)
    for row in range(num_points):
        offset = X[row, :] - optx
        distances[row] = offset @ mask @ offset
    return distances
def simulation_panel1d(
    ax: Axes,
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    slice_dim: int,
    bounds: Tuple[float, float],
    slice_x: np.ndarray,
    slice_y: Optional[float] = None,
    resolution: int = 101,
    color="b",
) -> Axes:  # pragma: no cover
    """Make a plot of the predicted output (and +- one standard deviation) against one input (defined by slice_dim)
    that is a slice through input space at a location defined by slice_x.

    Optionally, mark the point [slice_x, slice_y], which, if the slice is plotted at a maximum of the model's
    predictions, would be the maximum of the model predictions.

    Args:
        ax: Axis to draw the slice on.
        predict_func: Function returning (mean, variance) of the predictive distribution at given inputs.
        slice_dim: Index of the input dimension varied along this slice.
        bounds: (lower, upper) bounds for the varied input dimension.
        slice_x: The point the slice passes through; all dimensions except slice_dim are held at these values.
        slice_y: If given, mark the point (slice_x[slice_dim], slice_y) on the plot; otherwise a dashed
            vertical line marks the slice location instead.
        resolution: Number of grid points along the slice.
        color: Matplotlib colour for the prediction line and confidence band.

    Returns:
        The axis that was drawn on.
    """
    # Get a grid of inputs for the continuous variable being varied across this slice
    x_grid = np.linspace(bounds[0], bounds[1], resolution)
    # Replicate the slice location for every grid point, varying only the slice_dim column
    xs = np.tile(slice_x, (len(x_grid), 1))
    xs[:, slice_dim] = x_grid
    y_pred, y_var = predict_func(xs)
    sigma = np.sqrt(y_var)
    ax.plot(x_grid, y_pred, "-", label="Prediction", c=color)
    # Shade +- one standard deviation (the 68% interval of a Gaussian predictive distribution)
    ax.fill_between(
        x_grid,
        y_pred - sigma,
        y_pred + sigma,
        alpha=0.25,
        fc=color,
        ec="None",
        label="68% confidence interval",
    )
    ax.set_xlim(bounds[0], bounds[1])
    if slice_y is not None:
        ax.plot(slice_x[slice_dim], slice_y, "o", markeredgecolor=color, markerfacecolor="w", label="Optimum")
    else:
        ax.axvline(slice_x[slice_dim], alpha=0.2, linestyle="--")
    return ax
def get_logged_img_title(title: Optional[str] = None, fname: Optional[Union[Path, str]] = None) -> str:
    """
    Creates a title for logging plots on AML. If title is provided, that forms the base, otherwise use default.
    If filename is provided, append the filename, which contains information about iteration and seed number.

    Args:
        title: Base for the returned title. Defaults to "plot" when None or empty.
        fname: Optional path (Path or string, i.e. PathOrString) whose stem is appended to the title.

    Returns:
        The composed title string.
    """
    title = title or "plot"
    if fname is not None:
        # Bug fix: the declared type accepts strings too, but the old `assert isinstance(fname, Path)`
        # crashed on them. Converting with Path() handles both Path and str uniformly.
        title += f"_{Path(fname).stem}"
    return title
def plot_prediction_slices1d(
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    parameter_space: "OrderedDict[str, Tuple[float, float]]",
    slice_loc: np.ndarray,
    slice_y: Optional[float] = None,
    scatter_x: Optional[np.ndarray] = None,
    scatter_y: Optional[np.ndarray] = None,
    output_label: str = "Objective",
    resolution: int = 100,
    size: int = 3,
    num_cols: Optional[int] = None,
    title: Optional[str] = None,
    fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]:  # pragma: no cover
    """
    Plot slices of the predictions from the model crossing a given location.
    Args:
        predict_func (Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]]): A function taking an input and returning
            mean and variance of the predictive distribution at those points.
        parameter_space (OrderedDict[str, Tuple[float, float]]): An ordered dictionary mapping input names to bounds.
        slice_loc (np.ndarray): The point through which to plot the slices of the predictive distribution.
        slice_y (Optional[float], optional): The output value at the slice location. Defaults to None.
        scatter_x (Optional[np.ndarray], optional): Points which to scatter on the plot (project onto the slices).
            If given, scatter_y must be specified as well. Defaults to None.
        scatter_y (Optional[np.ndarray], optional): Output values corresponding to scatter_x. Defaults to None.
        output_label (str, optional): Label for the output axis. Defaults to "Objective".
        resolution (int, optional): Resolution (num. points) for the grid of input points along each slice.
            Defaults to 100.
        size (int, optional): Size of each axis with one slice in inches. Defaults to 3.
        num_cols (Optional[int], optional): Maximum number of columns. If more slices, the axes will wrap.
            Defaults to num_cols = ceil(sqrt(num_input_dims)).
        title (Optional[str], optional): Title for the plot. Defaults to None.
        fname (Optional[PathOrString], optional): File-name where to save the plot. Defaults to None.
    Returns:
        Tuple of the figure and the (at least 2d) array of axes it contains.
    """
    parameters = list(parameter_space.items())
    n_inputs = len(parameter_space)
    # Default to a roughly square grid of subplots.
    num_cols = num_cols if num_cols else int(np.ceil(np.sqrt(n_inputs)))
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        nrows=num_rows,
        ncols=num_cols,
        sharey=True,
        figsize=(size * num_cols, size * num_rows),
    )
    # plt.subplots returns a bare Axes (or 1d array) for small grids; force 2d indexing.
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i, j]
            # Flattened index of the input dimension shown on this subplot.
            slice_dim = i * num_cols + j
            if slice_dim < n_inputs:
                param_name, bounds = parameters[slice_dim]
                simulation_panel1d(
                    ax=ax,
                    predict_func=predict_func,
                    slice_x=slice_loc,
                    slice_y=slice_y,
                    bounds=bounds,
                    slice_dim=slice_dim,
                    color=colors[0],
                    resolution=resolution,
                )
                # Scatter-plot data points if points to scatter given
                if scatter_x is not None and scatter_y is not None:
                    ax.scatter(scatter_x[:, slice_dim], scatter_y, s=3, c=colors[1])
                ax.set_xlabel(param_name)
            else:
                # Hide unused trailing axes in the grid.
                ax.set_visible(False)
        axs[i, 0].set_ylabel(output_label)
    if title is not None:
        fig.suptitle(title)
    # noinspection PyUnresolvedReferences
    plt.tight_layout()
    sns.despine()
    # Log the image to the AzureML run context (works offline as well —
    # presumably Run.get_context returns an offline run there; verify).
    run = Run.get_context()
    logged_img_title = get_logged_img_title(title="plot1d", fname=fname)
    run.log_image(name=logged_img_title, plot=plt)
    # If filename given, save
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
    return fig, axs
def plot_prediction_slices2d(
    predict_func: Callable[[np.ndarray], Tuple[np.ndarray, np.ndarray]],
    parameter_space: "OrderedDict[str, Tuple[float, float]]",
    slice_loc: np.ndarray,
    scatter_x: Optional[np.ndarray] = None,
    scatter_y: Optional[np.ndarray] = None,
    output_label: Optional[str] = None,
    resolution: int = 100,
    size: int = 3,
    title: Optional[str] = None,
    fname: Optional[PathOrString] = None,
) -> Tuple[plt.Figure, np.ndarray]:  # pragma: no cover
    """Plot 2d contour slices of the model's mean prediction through slice_loc
    for every pair of input dimensions, arranged as a lower-triangular grid of
    axes with a single shared colorbar.

    Args:
        predict_func: Function returning (mean, variance) of the predictive distribution at given inputs;
            only the mean is plotted.
        parameter_space: Ordered mapping from input names to (lower, upper) bounds.
        slice_loc: The point through which the 2d slices are taken.
        scatter_x: Optional observed inputs to overlay on each slice.
        scatter_y: Optional observed outputs; used to scale the marker sizes of scatter_x.
        output_label: Label for the colorbar.
        resolution: Grid resolution along each slice axis.
        size: Size in inches of each sub-plot.
        title: Optional figure title.
        fname: Optional path to save the figure to.
    Returns:
        The figure and the 2d array of axes.
    """
    parameters = list(parameter_space.items())
    n_inputs = len(parameters)
    assert n_inputs >= 2, "At least two input dimensions are required to plots 2d slice"
    # Keep a running minimum and maximum of function values in 2D slices
    func_values_min, func_values_max = np.inf, -np.inf
    # Keep track of contour sets returned for each axis
    contour_sets = []
    num_cols = n_inputs - 1  # Number of rows of axes equals number of columns
    # Construct axes
    # noinspection PyTypeChecker
    fig = plt.figure(figsize=(size * num_cols, size * num_cols))
    axes, cbar_axes = make_lower_triangular_axis_grid_with_colorbar_axes(fig=fig, num_cols=num_cols, num_colorbars=1)
    for i in range(num_cols):  # i iterates over the rows of the plots
        y_param_dim = i + 1
        y_param_name, y_bounds = parameters[y_param_dim]
        for j in range(num_cols):  # j iterates over the columns of the plots
            ax = axes[i, j]
            # Only the lower triangle (j <= i) holds plots; the rest stay empty.
            if j <= i:
                # Indices of the inputs to plot
                x_param_dim = j
                x_param_name, x_bounds = parameters[x_param_dim]
                # Compute the data for the 2D slice plot
                xx, yy, func_values_slice = calc_2d_slice(
                    func=lambda x: predict_func(x)[0],  # Only interested in the mean of the prediction
                    dim_x=x_param_dim,
                    dim_y=y_param_dim,
                    slice_loc=slice_loc,
                    slice_bounds_x=x_bounds,
                    slice_bounds_y=y_bounds,
                    resolution=resolution,
                )
                # Plot the 2D slice
                _, contour_set = plot_2d_slice_from_arrays(xx, yy, func_values_slice, ax=ax, plot_type="contourf")
                contour_sets.append(contour_set)
                # Keep a running minimum and maximum of function values in slices
                func_values_min = min(func_values_min, func_values_slice.min())  # type: ignore
                func_values_max = max(func_values_max, func_values_slice.max())  # type: ignore
                # Scatter-plot the data
                if scatter_x is not None and scatter_y is not None:
                    if len(scatter_y) > 0:
                        # Marker size grows with the (min-shifted) output value.
                        s = (scatter_y - np.min(scatter_y)) / np.max(scatter_y) + 1
                        ax.scatter(scatter_x[:, x_param_dim], scatter_x[:, y_param_dim], s=5 * s, c="yellow")
                ax.set_xlim(x_bounds[0], x_bounds[1])
                ax.set_ylim(y_bounds[0], y_bounds[1])
                if i == num_cols - 1:
                    ax.set_xlabel(x_param_name)
                else:
                    # Remove redundant ticks on inner plots
                    ax.xaxis.set_visible(False)
                if j > 0:
                    ax.yaxis.set_visible(False)
        axes[i, 0].set_ylabel(y_param_name)
    # Update norm limits for colour scaling for each axis:
    # all slices share one colour scale so values are comparable across panels.
    for im in contour_sets:
        im.set_clim(vmin=func_values_min, vmax=func_values_max)
    # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
    cb = fig.colorbar(contour_sets[-1], cax=cbar_axes[0])
    cb.set_label(output_label)
    cbar_axes[0].yaxis.set_ticks_position("left")
    if title is not None:
        fig.suptitle(title)
    # Log the figure to the AzureML run context before saving/closing.
    run = Run.get_context()
    logged_img_title = get_logged_img_title(title="plot2d", fname=fname)
    run.log_image(name=logged_img_title, plot=plt)
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
    return fig, axes
def plot_calibration_curve(
    predict_func: Callable, datasets: List[Dataset], labels: List[str]
) -> Tuple[plt.Figure, plt.Axes]:  # pragma: no cover
    """Plot a calibration curve - the curve showing the percentage of points within each confidence interval around
    the mean prediction from the model. This is useful for gauging how reliable uncertainty estimates from the model
    are.
    Args:
        predict_func (Callable): A function taking an array of inputs and returning a tuple of two arrays: mean
            prediction and variance of the predictive distribution (which is assumed to be Gaussian).
        datasets (List[Dataset]): A list of datasets for which to plot calibration curves.
        labels (List[str]): A list of labels for each dataset of the same length as datasets.
    Returns:
        The figure and the axis holding the calibration curves.
    """
    fig, ax = plt.subplots(figsize=(7, 6))
    with sns.axes_style("whitegrid"):
        # Plot the individual calibration curves for each dataset
        for i, dataset in enumerate(datasets):
            _make_single_calibration_curve(predict_func, dataset.inputs_array, dataset.output_array, ax, labels[i])
        # Diagonal marks perfect calibration (observed frequency == predicted confidence).
        plt.plot([0, 1], [0, 1], ":", color="gray", alpha=0.3, label="Ideal", zorder=-1)
        ax.set_xlabel("Predictive Confidence Interval")
        ax.set_ylabel("Percentage points within that interval")
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
        plt.legend()
    sns.despine()
    return fig, ax
def _make_single_calibration_curve(
    predict_func: Callable, x: np.ndarray, y: np.ndarray, ax: Optional[plt.Axes] = None, label: Optional[str] = None
) -> plt.Axes:  # pragma: no cover
    """Add a single calibration curve (confidence interval vs. fraction of points
    observed within it) for the data (x, y) to the axis `ax`.

    Args:
        predict_func: Function returning (mean, variance) of the Gaussian predictive distribution at x.
        x: Input points.
        y: Observed outputs corresponding to x.
        ax: Axis to draw on; a new figure/axis is created when None.
        label: Legend label for the curve.
    Returns:
        The axis the curve was drawn on.
    """
    if ax is None:
        # BUG FIX: was `fig, ax = plt.subplot(figsize=(5, 5))` — plt.subplot
        # returns a single Axes (cannot be unpacked into fig, ax) and does not
        # accept a figsize kwarg; plt.subplots is the intended call.
        fig, ax = plt.subplots(figsize=(5, 5))
    assert ax is not None
    mean, variances = predict_func(x)
    stds = np.sqrt(variances)
    residuals = np.abs(y - mean)
    # Normalised residuals is the number of standard deviations the observed point is away from mean prediction
    normalised_residuals = residuals / stds
    normalised_residuals = np.sort(normalised_residuals, axis=0).ravel()
    # Convert num. of standard deviations from mean to confidence interval (centered around the mean)
    confidence = scipy.stats.norm.cdf(normalised_residuals) - scipy.stats.norm.cdf(  # type: ignore
        -normalised_residuals
    )
    confidence = np.insert(confidence, 0, 0.0)  # Insert a (0% confidence, 0% points) entry at start
    # Percent points observed
    perc_points = np.linspace(0, 1, len(confidence))
    # Append the end-point of the curve (100% confidence, 100% points observed)
    confidence = np.append(confidence, [1.0])
    perc_points = np.append(perc_points, [1.0])
    # BUG FIX: draw on the axis that was passed in — plt.step drew on the
    # current global axes, silently ignoring `ax`.
    ax.step(confidence, perc_points, where="post", label=label, alpha=0.75, linewidth=3.0)
    return ax
# noinspection PyTypeChecker
def _decorate_axis_predicted_against_observed(model: "BayesOptModel", ax: Axes) -> None:  # pragma: no cover
    """Label a predicted-vs-observed scatter axis, equalise both axis limits,
    and draw the identity line with factor-2 and factor-3 (log10) bands."""
    ax.set_xlabel(f"Observed: {model.train.transformed_output_name}")
    ax.set_ylabel(f"Predicted: {model.train.transformed_output_name}")
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Shared limits so the identity line is the diagonal of a square plot.
    lims = np.array([np.minimum(xlim[0], ylim[0]), np.maximum(xlim[1], ylim[1])])
    ax.plot(lims, lims, "k--")
    for factor, shade, layer in ((2.0, 0.7, -1), (3.0, 0.85, -2)):
        half_width = np.log10(factor)
        ax.fill_between(lims, lims - half_width, lims + half_width, color=(shade, shade, shade), zorder=layer)
    ax.set_xlim(lims)
    ax.set_ylim(lims)
def plot_train_test_predictions(
    model: "BayesOptModel",
    category: Optional[str] = None,
    ms: int = 10,
    alpha: float = 0.25,
    output_path: Optional[PathOrString] = None,
) -> None:  # pragma: no cover
    """Plot predicted outputs against the actual observed outputs for the train-set and the test-set for this model.
    If category is given, plot the points in different colour depending on the category.
    Args:
        model: Model to plot predictions of
        category (optional): Which category to condition on. Points will be plotted with different colour
            depending on the value of that category. Defaults to None (don't condition on any category).
        ms (optional): Marker size. Defaults to 10.
        alpha (optional): Opacity of plotted points. Defaults to 0.25.
        output_path (optional): Path where to save the plot. Defaults to None.
    TODO: There is an argument to be made that these plots should be made in the original,
    rather than dataset, space if possible. One might want to compare the performance of the models from the plots
    when different pre-processing steps are used for instance.
    """
    axs: List[Axes]
    # noinspection PyTypeChecker
    f, axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(10, 5))  # type: ignore
    # Add the plot for train points on the first axis
    # NOTE(review): when category is None, np.asarray(None) yields a 0-d
    # object array, which is *not* None inside the helper — verify the
    # unlabelled branch there is actually taken.
    train_labels = model.train.categorical_inputs_df[category] if category else None
    _add_errorbar_predictions_against_observed(
        ax=axs[0], model=model, dataset=model.train, labels=np.asarray(train_labels), ms=ms, alpha=alpha
    )
    # Add the plot for test points on the second axis
    assert model.test is not None
    test_labels = model.test.categorical_inputs_df[category] if category else None
    _add_errorbar_predictions_against_observed(
        ax=axs[1], model=model, dataset=model.test, labels=np.asarray(test_labels), ms=ms, alpha=alpha
    )
    if category is not None:
        axs[0].legend()
    for ax in axs:
        _decorate_axis_predicted_against_observed(model, ax)
    # Annotate each panel with its Pearson correlation.
    axs[0].set_title(f"Train ($r = {model.r_train:.3f}$)")
    axs[1].set_title(f"Test ($r = {model.r_test:.3f}$)")
    sns.despine()
    # noinspection PyUnresolvedReferences
    plt.tight_layout()  # type: ignore
    if output_path is not None:
        f.savefig(output_path, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()  # type: ignore
def _add_errorbar_predictions_against_observed(
    ax: Axes, model: "BayesOptModel", dataset: Dataset, labels: Optional[np.ndarray], ms: float, alpha: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:  # pragma: no cover
    """Helper function to add errorbar plots of predicted outputs against the actual observed outputs on a given
    dataset. The plot is added onto an axis give by argument ax.

    Args:
        ax: Axis to draw the errorbars on.
        model: Model whose (negated) predictions are plotted.
        dataset: Dataset providing inputs and observed outputs.
        labels: Optional per-point labels; when given, each unique label gets its own errorbar series/colour.
        ms: Marker size.
        alpha: Marker opacity.
    Returns:
        Tuple (predicted means, predicted std devs, observed outputs).
    """
    # Get the mean and variance of surrogate model predictions
    y_mean, y_var = model.minus_predict(dataset.inputs_array)
    y_std = np.sqrt(y_var)
    # Get the true (observed) outputs
    y_obs = dataset.output_array
    # NOTE(review): callers pass np.asarray(None) for "no labels", which is a
    # 0-d array and not None — confirm whether the else-branch is reachable.
    if labels is not None:
        unique_labels = np.unique(labels)
        for label in unique_labels:
            locs = np.where(labels == label)
            ax.errorbar(y_obs[locs], y_mean[locs], y_std[locs], fmt=".", ms=ms, alpha=alpha, label=label)
    else:
        ax.errorbar(y_obs, y_mean, y_std, fmt=".", ms=ms, alpha=alpha)  # pragma: no cover
    return y_mean, y_std, y_obs
def plot_predictions_against_observed(
    ax: Axes,
    models: List["BayesOptModel"],
    datasets: List[Dataset],
    title: str,
    category: Optional[str] = None,
    by_file: bool = False,
    ms: float = 10,
    alpha: float = 0.25,
) -> float:  # pragma: no cover
    """
    Plot predicted outputs against the actual observed outputs for the datasets given (which could be the corresponding
    test-sets of each of the cross-validation models).
    If "category" is given, plot the points in different colour depending on the value of the categorical variable
    "category".
    Args:
        ax: Axis to plot on
        models: List of models to corresponding to each cross-validation fold
        datasets: A list of same length as models with the corresponding datasets to evaluate each model on.
        title: Title for the plot (e.g. "Cross-validation")
        category (optional): Which category to condition on. Points will be plotted with different colour
            depending on the value of that category. Defaults to None (don't condition on any category).
        by_file (bool): Points will be plotted with colours reflecting the FILE identifier.
        ms (optional): Marker size. Defaults to 10.
        alpha (optional): Opacity of plotted points. Defaults to 0.25.
    Returns:
        Pearson correlation coefficient (combined for all folds)
    """
    # Accumulate predictions/observations across all folds for a single combined r.
    Y_pred, Y_obs = np.empty((0,)), np.empty((0,))
    legend_title: Optional[str] = None
    for model, dataset in zip(models, datasets):
        # by_file takes precedence over category for colouring.
        if by_file:  # pragma: no cover
            labels = dataset.file
            legend_title = FILE
        elif category is not None:
            labels = np.asarray(dataset.categorical_inputs_df[category])
            legend_title = category
        else:  # pragma: no cover
            labels = None
            legend_title = None
        y_mean_test, _, y_obs_test = _add_errorbar_predictions_against_observed(
            ax=ax, model=model, dataset=dataset, labels=labels, ms=ms, alpha=alpha
        )
        Y_pred = np.append(Y_pred, y_mean_test)
        Y_obs = np.append(Y_obs, y_obs_test)
    if category:
        ax.legend(title=legend_title)
    # Compute Pearson's correlation
    r = float(np.corrcoef(Y_pred, Y_obs)[1, 0])
    # Axis decoration only needs output names, shared across folds — use the first model.
    _decorate_axis_predicted_against_observed(models[0], ax)
    ax.set_title(f"{title} ($r = {r:.3f}$)")
    # noinspection PyUnresolvedReferences
    plt.tight_layout()  # type: ignore
    sns.despine(ax=ax)
    return r
def hmc_traces(burnin: pd.DataFrame, samples: pd.DataFrame, fname: Optional[str] = None) -> None:  # pragma: no cover
    """Plot HMC burn-in and sampling traces side by side against the "Sample" column.

    NOTE(review): the parameters were annotated `Axes`, but both are used via
    the pandas `DataFrame.plot(x=..., ax=..., title=...)` API — annotations
    corrected to pd.DataFrame; confirm against callers.

    Args:
        burnin: DataFrame of burn-in trace values with a "Sample" column.
        samples: DataFrame of post-burn-in samples with a "Sample" column.
        fname: Optional path to save the figure to.
    """
    # noinspection PyTypeChecker
    f, axs = plt.subplots(1, 2, sharey=True, figsize=(10, 4))
    sample = "Sample"
    burnin.plot(x=sample, ax=axs[0], title="Burn-in")
    samples.plot(x=sample, ax=axs[1], title="Samples")
    sns.despine()
    if fname is not None:
        f.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def hmc_samples(samples_dataframe: pd.DataFrame, fname: Optional[PathOrString] = None) -> None:
    """Visualise HMC samples of the model parameters as a pair-plot.

    Each row of `samples_dataframe` is one HMC sample (parameter values,
    log-likelihood, etc.); off-diagonal panels are pair-wise scatter-plots and
    diagonal panels are KDE estimates of each column's marginal.

    Args:
        samples_dataframe: One HMC sample per row.
        fname: If given, where to save the plot. Defaults to None.
    """
    pair_grid = sns.pairplot(samples_dataframe, diag_kind="kde")
    sns.despine()
    if fname is not None:
        pair_grid.savefig(fname, bbox_inches="tight")
    plt.close()
def distance(x: np.ndarray) -> np.ndarray:  # pragma: no cover
    """Symmetric matrix of pairwise Euclidean distances between elements of x."""
    n = len(x)
    result = np.zeros((n, n))
    for row in range(n):
        # Diagonal entries stay 0; fill the strict upper triangle and mirror it.
        for col in range(row + 1, n):
            # noinspection PyUnresolvedReferences
            dist = np.linalg.norm(x[row] - x[col])
            result[row, col] = dist
            result[col, row] = dist
    return result
# noinspection PyUnresolvedReferences
def experiment_distance(expt: np.ndarray, fname: Optional[str] = None) -> None:  # pragma: no cover
    """Scatter-plot the matrix of pairwise Euclidean distances between experiments,
    coloured by distance, with experiment index on both axes."""
    n = np.shape(expt)[0]
    fig = plt.figure(figsize=(4.5, 4))
    grid_x, grid_y = np.meshgrid(list(range(n)), list(range(n)))
    pairwise = distance(expt)
    # TODO: pairwise is an array of float, but c should be a list of color names.
    # noinspection PyTypeChecker
    plt.scatter(x=grid_x, y=grid_y, c=pairwise, cmap="jet")  # type: ignore
    plt.gca().invert_yaxis()
    axis_label = "Experiment ID"
    plt.xlabel(axis_label)
    plt.ylabel(axis_label)
    plt.colorbar(label="Euclidean distance")
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    plt.close()
def plot_pred_objective_for_batch(
    batch: pd.DataFrame,
    predict_func: Callable[[pd.DataFrame], Tuple[np.ndarray, np.ndarray, np.ndarray]],
    bounds: "OrderedDict[str, Tuple[float, float]]",
    dataset: Optional[Dataset] = None,
    columns_to_plot: Optional[List[str]] = None,
    units: Optional[List[Union[str, None]]] = None,
    num_cols: int = 4,
    input_scales: Optional[List[str]] = None,
    output_scale: str = "linear",
    fname: Optional[Path] = None,
    output_label: str = "Output",
    subplot_size: float = 3,
) -> None:  # pragma: no cover
    """Plot the batch of points generated from the bayesopt procedure against each input dimension, together
    with the objective value prediction from the model.
    Args:
        batch: DataFrame with the batch of inputs to plot model predictions for (could be in either the model input
            space, or original "pretransform" input space)
        predict_func: Function which returns the mean and lower and upper confidence bounds for the prediction at each
            point. This is helpful if preprocessing needs to be applied to data before passing to a model.
        bounds:
            Constraints on the input-space for the Bayes. Opt. procedure to visualise.
        dataset: Experiment data to plot alongside. Currently only plots in the original input space
        columns_to_plot (optional): Input columns of `batch` to plot. Defaults to all columns.
        units (optional): Unit label per plotted column (shown in the x-axis label); None entries are omitted.
        num_cols: Maximum number of columns in the plot
        input_scales (optional): Scales of each input dimension (log, symlog, linear...). Defaults to linear for all.
        output_scale (optional): Scale of the output dimension. Defaults to "linear".
        fname: Path to where to save the plot. Don't save it if None. Defaults to None.
        output_label: The label for the y-axis. Defaults to 'Output'.
        subplot_size: The size in inches of each individual subplot
    """
    # Get model predictions at the experiment batch locations (mean, lower confidence bound, upper confidence bound)
    mean_pred, lb_pred, ub_pred = predict_func(batch)
    # errorbar wants (distance below mean, distance above mean), not absolute bounds.
    y_error = (mean_pred - lb_pred, ub_pred - mean_pred)
    # If input_scales not specified, default all to 'linear'
    input_scales = input_scales if input_scales else ["linear"] * len(bounds)
    # Get the names of variables to plot
    columns_to_plot = columns_to_plot if columns_to_plot else batch.columns  # type: ignore
    assert columns_to_plot is not None
    units = units if units else [None] * len(columns_to_plot)  # type: ignore
    n_inputs = len(columns_to_plot)
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        nrows=num_rows, ncols=num_cols, sharey=True, figsize=(subplot_size * num_cols, subplot_size * num_rows)
    )
    # Ensure that axs are a 2d grid of axes, even if num_rows=1
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i, j]
            # Flattened index of the input column shown on this subplot.
            input_idx = i * num_cols + j
            if input_idx < n_inputs:
                col = columns_to_plot[input_idx]
                # Plot model predictions
                ax.errorbar(
                    batch[col].values,
                    mean_pred,
                    yerr=y_error,
                    fmt="o",
                    color=colors[1],
                    markerfacecolor="w",
                    label="Model",
                )
                # Plot data in dataset
                if dataset:
                    # TODO: This bit currently assumes that the plotting happens in pretransformed space.
                    dataset_x = dataset.pretransform_df[col].values
                    dataset_y = dataset.pretransform_df[dataset.pretransform_output_name].values
                    ax.scatter(dataset_x, dataset_y, s=3, c=colors[0], label="Data")
                # Set axis scales
                ax.set_yscale(output_scale)
                ax.set_xscale(input_scales[input_idx])
                axlabel = col + f" ({units[input_idx]})" if units[input_idx] else col
                ax.set_xlabel(axlabel)
                # Shade the optimisation bounds for this input near the bottom of the axis.
                # NOTE(review): assumes every plotted column has an entry in `bounds` — verify.
                ax.axvspan(*bounds[col], ymax=0.1, color=colors[1], alpha=0.3, label="Bounds")
            else:
                # Hide unused trailing axes in the grid.
                ax.set_visible(False)
        axs[i, 0].set_ylabel(output_label)
    # One shared legend for the whole figure, taken from the first axis.
    # noinspection PyUnresolvedReferences
    lines, labels = fig.axes[0].get_legend_handles_labels()
    fig.legend(lines, labels, loc=[0.82, 0.2])
    # noinspection PyUnresolvedReferences
    plt.tight_layout()
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    # noinspection PyArgumentList
    plt.close()
def acquisition1d(
    model: "BayesOptModel",
    x0: np.ndarray,
    is_input_normalised: bool = True,
    num_cols: int = 5,
    num_xs: int = 101,
    title: Optional[str] = None,
    fname: Optional[Path] = None,
) -> None:  # pragma: no cover
    """
    Plot the acquisition function in 1d variations around a reference point
    Args:
        model: The model
        x0 (numpy array): The reference point
        is_input_normalised (bool): Whether the input data are normalised
        num_cols (int): Number of columns in subplot grid
        num_xs (int): Number of grid-points to calculate the input axis
        title (str): Figure title
        fname (str): Optional file path to save figure
    """
    # Acquisition value at the reference point itself (marked on every panel).
    y0 = model.acquisition.evaluate(x0[np.newaxis])  # type: ignore
    parameters = list(model.continuous_parameters.items())
    n_inputs = len(parameters)
    num_rows = int(np.ceil(n_inputs / num_cols))
    # noinspection PyTypeChecker
    fig, axs = plt.subplots(
        num_rows, num_cols, sharex=is_input_normalised, sharey=True, figsize=(3 * num_cols, 3 * num_rows)
    )
    # Ensure that axs are a 2d grid of axes, even if num_rows is 1
    axs = np.atleast_2d(axs)  # type: ignore
    for i in range(num_rows):
        for j in range(num_cols):
            ax = axs[i][j]
            input_idx = i * num_cols + j
            if input_idx < n_inputs:
                pname, bounds = parameters[input_idx]
                # make a grid of inputs along the input_idx dimension while keeping the other values same as in x0
                # NOTE(review): the grid starts at 0, not bounds[0] — presumably
                # because normalised inputs start at 0; verify for the
                # unnormalised case.
                cs = np.linspace(0, bounds[1], num_xs)
                xs = np.tile(x0, (num_xs, 1))
                xs[:, input_idx] = cs
                ax.plot(cs, model.acquisition.evaluate(xs), "-")  # type: ignore
                ax.plot(x0[input_idx], y0.ravel(), marker="o", markerfacecolor="w", label="Model optimum")
                if is_input_normalised:
                    # Parameter name goes in the panel title; shared x-label only on the last row.
                    ax.set_title(pname)
                    if i == num_rows - 1:
                        ax.set_xlabel(RELATIVE_VALUE)
                else:
                    ax.set_xlabel(pname)
            else:
                # Hide unused trailing axes in the grid.
                ax.set_visible(False)
        axs[i][0].set_ylabel("Acquisition")
    if title is not None:
        fig.suptitle(title)
    else:
        # noinspection PyUnresolvedReferences
        plt.tight_layout()
    sns.despine()
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    plt.close(fig)
def plot_acquisition_slices(
    model: "BayesOptModel",
    dataset: Dataset,
    slice_loc: np.ndarray,
    input_names: Optional[List[str]] = None,
    input_scales: Optional[List[PLOT_SCALE]] = None,
    onehot_context: Optional[Iterable[float]] = None,
    output_label: str = "Acquisition Value",
    fname: Optional[Path] = None,
) -> None:  # pragma: no cover
    """
    Plot 2d acquisition function slices through the input space. A wrapper
    around plot_multidimensional_function_slices().
    Args:
        model: Model for which to plot slices through acquisition function.
        dataset: Dataset with the preprocessing transform to apply to inputs.
        slice_loc: Location at which to take slices in original space.
        input_names: Names of the input variables in original space.
        input_scales: Plotting scales for the inputs (linear, log, etc.).
        onehot_context: If given, the onehot encoding of the categorical variables on which to condition
            the slices (i.e. in all slices the input for categorical variables will be fixed to that value)
        output_label: Label for the acquisition function output (can be acqusition function name).
        fname: If given, the plot will be saved there.
    """

    def acquisition_with_preprocessing(x: np.ndarray):
        """Map raw (original-space) inputs through the dataset's preprocessing,
        optionally append the fixed one-hot context, and evaluate the acquisition."""
        assert model.acquisition is not None
        x_trans = dataset.preprocessing_transform(pd.DataFrame(x, columns=input_names))
        x_trans = x_trans[dataset.transformed_input_names].values
        if onehot_context:
            # Same categorical context for every row of the slice grid.
            x_trans = np.concatenate((x_trans, np.tile(onehot_context, [x_trans.shape[0], 1])), axis=1)
        return model.acquisition.evaluate(x_trans)  # type: ignore # auto

    fig, _ = plot_multidimensional_function_slices(
        func=acquisition_with_preprocessing,
        slice_loc=slice_loc,
        bounds=list(dataset.pretransform_cont_param_bounds.values()),
        input_names=input_names,
        input_scales=input_scales,
        output_scale="linear",
        output_label=output_label,
    )
    if fname is not None:
        fig.savefig(fname, bbox_inches="tight")
    return
def plot_gpy_priors(
    priors: List[GPy.priors.Prior], param_names: List[str], size: float = 3, allow_logspace: bool = False
) -> plt.Figure:  # pragma: no cover
    """Visualise a set of priors on parameters (corresponding, for instance, to model parameters) by plotting
    their density.
    Args:
        priors (List[GPy.priors.Prior]): A list of GPy.priors.Prior objects the PDFs of which will be drawn.
        param_names (List[str]): List of names of parameters corresponding to the priors.
        size (float, optional): Size (height) of each sub-plot in inches. Defaults to 3.
        allow_logspace (bool, optional): Whether to plot a parameter in log-space if the parameter is strictly
            positive. Defaults to False.
    Returns:
        plt.Figure: The figure
    """
    assert len(param_names) == len(priors)
    num_params = len(param_names)
    fig, axes = plt.subplots(ncols=num_params, figsize=(1.3 * size * num_params, size))
    # NOTE(review): this local `colors` shadows the module-level palette used
    # by the other plotting helpers in this file — intentional? verify.
    colors = sns.color_palette("pastel", num_params)
    with sns.axes_style("whitegrid"):
        for i, (param_name, prior) in enumerate(zip(param_names, priors)):
            # Empirical samples are used both for the histogram and for
            # choosing robust plotting limits.
            samples = prior.rvs(1000)  # type: ignore
            xmin, xmax = np.percentile(samples, 0.1), np.percentile(samples, 99.9)  # Remove outliers
            if samples.min() > 0 and allow_logspace:
                axes[i].set_xscale("log")
                bins = np.geomspace(xmin, xmax, 6)
                x_grid = np.geomspace(xmin, xmax, 100)
            else:
                bins = np.linspace(xmin, xmax, 50)
                x_grid = np.linspace(xmin, xmax, 100)
            # Histogram of samples overlaid with the analytic PDF.
            axes[i].hist(samples, bins=bins, density=True, alpha=0.7, color=colors[i])
            axes[i].plot(x_grid, prior.pdf(x_grid), linewidth=2.0, color=colors[i])
            # Mark mean and +- standard deviation
            samples_mean, samples_std = samples.mean(), samples.std()
            axes[i].axvline(samples_mean, color="black", zorder=-1, label="Mean")
            axes[i].axvspan(
                samples_mean - samples_std,
                samples_mean + samples_std,
                color="gray",
                zorder=-1,
                alpha=0.2,
                hatch="/",
                label="Mean$\\pm$std.",
            )
            axes[i].set_title(param_name, fontsize=12)
            axes[i].set_xlabel("$x$")
            axes[i].set_xlim(xmin, xmax)
    plt.legend()
    axes[0].set_ylabel("$p(x)$")
    plt.subplots_adjust(wspace=0.2)
    return fig
import ast
from collections.abc import Mapping
from typing import Union
import polyaxon_sdk
from marshmallow import fields, validate, validates_schema
from marshmallow.exceptions import ValidationError
from polyaxon import types
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig, BaseOneOfSchema
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
np = None
# pylint:disable=redefined-outer-name
class PChoice(fields.Field):
    """Marshmallow field for one probabilistic-choice entry: a [value, probability] pair."""

    def _deserialize(self, value, attr, data, **kwargs):
        is_pair = isinstance(value, (list, tuple)) and len(value) == 2
        if is_pair and isinstance(value[1], float) and 0 <= value[1] < 1:
            return value
        raise ValidationError("This field expects a list of [value<Any>, dist<float>].")
class Range(fields.Field):
    """Field parsing a numeric range spec given as a "v0:v1:v2" string, a dict
    with REQUIRED_KEYS (plus optional KEYS), or a list of values.

    Subclasses customise REQUIRED_KEYS/KEYS and CHECK_ORDER to reuse this
    parsing for linspace/logspace and distribution specs.
    """

    REQUIRED_KEYS = ["start", "stop", "step"]
    # NOTE(review): OPTIONAL_KEY is never read; subclasses define
    # OPTIONAL_KEYS (plural) instead — presumably a leftover; confirm.
    OPTIONAL_KEY = None
    KEYS = REQUIRED_KEYS
    CHECK_ORDER = True

    def _deserialize(
        self, value, attr, data, **kwargs
    ):  # pylint:disable=too-many-branches
        # Normalise every accepted input form (str / Mapping / list) into a
        # plain list of values ordered like REQUIRED_KEYS (or KEYS).
        if isinstance(value, str):
            value = value.split(":")
        elif isinstance(value, Mapping):
            if set(self.REQUIRED_KEYS) - set(value.keys()):
                raise ValidationError(
                    "{} dict must have {} keys {}.".format(
                        self.__class__.__name__,
                        len(self.REQUIRED_KEYS),
                        self.REQUIRED_KEYS,
                    )
                )
            if len(value) == len(self.REQUIRED_KEYS):
                value = [value[k] for k in self.REQUIRED_KEYS]
            elif len(value) == len(self.KEYS):
                value = [value[k] for k in self.KEYS]
        elif not isinstance(value, list):
            raise ValidationError(
                "{} accept values formatted as the following:\n"
                " * str: {}\n"
                " * dict: {}\n"
                " * list: {}".format(
                    self.__class__.__name__,
                    ":".join(self.REQUIRED_KEYS),
                    dict(
                        zip(
                            self.REQUIRED_KEYS,
                            ["v{}".format(i) for i in range(len(self.REQUIRED_KEYS))],
                        )
                    ),
                    self.REQUIRED_KEYS,
                )
            )

        # The list must carry exactly the required keys, or required + optional.
        if len(value) != len(self.REQUIRED_KEYS) and len(value) != len(self.KEYS):
            raise ValidationError(
                "{} requires {} or {} elements received {}".format(
                    self.__class__.__name__,
                    len(self.REQUIRED_KEYS),
                    len(self.KEYS),
                    len(value),
                )
            )

        # Every element must be numeric; string elements are parsed via
        # ast.literal_eval after the float() check guarantees they are numbers.
        for i, v in enumerate(value):
            try:
                float(v)
            except (ValueError, TypeError):
                raise ValidationError(
                    "{}: {} must of type int or float, received instead {}".format(
                        self.__class__.__name__, self.REQUIRED_KEYS[i], v
                    )
                )
            if not isinstance(v, (int, float)):
                value[i] = ast.literal_eval(v)

        # Check that lower value is smaller than higher value
        # NOTE: the message typo "higher that" is a runtime string, left as-is.
        if self.CHECK_ORDER and value[0] >= value[1]:
            raise ValidationError(
                "{key2} value must be strictly higher that {key1} value, "
                "received instead {key1}: {val1}, {key2}: {val2}".format(
                    key1=self.REQUIRED_KEYS[0],
                    key2=self.REQUIRED_KEYS[1],
                    val1=value[0],
                    val2=value[1],
                )
            )
        # A zero step/num would produce an empty or degenerate range.
        if len(self.REQUIRED_KEYS) == 3 and value[2] == 0:
            raise ValidationError("{} cannot be 0".format(self.REQUIRED_KEYS[2]))

        value = dict(zip(self.KEYS, value))
        return value
class LinSpace(Range):
    """Range variant for numpy.linspace-style specs: start/stop/num."""

    REQUIRED_KEYS = ["start", "stop", "num"]
    KEYS = REQUIRED_KEYS
class GeomSpace(Range):
    """Range variant for numpy.geomspace-style specs: start/stop/num."""

    REQUIRED_KEYS = ["start", "stop", "num"]
    KEYS = REQUIRED_KEYS
class LogSpace(Range):
    """Range variant for numpy.logspace-style specs: start/stop/num with optional base."""

    REQUIRED_KEYS = ["start", "stop", "num"]
    OPTIONAL_KEYS = ["base"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
def validate_pchoice(values):
    """Raise ValidationError when the truthy probabilities in `values` sum to more than 1."""
    total = sum(v for v in values if v)
    if total > 1:
        raise ValidationError("The distribution of different outcomes should sum to 1.")
def pchoice(values, size=None, rand_generator=None):
    """Sample value(s) from a list of (value, probability) pairs.

    Returns a single value when size is None, otherwise a list of `size` draws.
    """
    generator = rand_generator or np.random
    keys = [pair[0] for pair in values]
    dists = [pair[1] for pair in values]
    validate_pchoice(dists)
    # Each multinomial draw is a one-hot vector selecting one outcome.
    draws = generator.multinomial(1, dists, size=size)
    if size is None:
        return keys[draws.argmax()]
    return [keys[one_hot.argmax()] for one_hot in draws]
class Dist(Range):
    """Base for distribution-parameter fields; bound ordering is not enforced."""

    CHECK_ORDER = False
class Uniform(Dist):
    """uniform(low, high[, size]) distribution spec."""

    REQUIRED_KEYS = ["low", "high"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class QUniform(Dist):
    """Quantised uniform(low, high, q[, size]) distribution spec."""

    REQUIRED_KEYS = ["low", "high", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class LogUniform(Dist):
    """loguniform(low, high[, size]) distribution spec."""

    REQUIRED_KEYS = ["low", "high"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class QLogUniform(Dist):
    """Quantised loguniform(low, high, q[, size]) distribution spec."""

    REQUIRED_KEYS = ["low", "high", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class Normal(Dist):
    """normal(loc, scale[, size]) distribution spec."""

    REQUIRED_KEYS = ["loc", "scale"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class QNormal(Dist):
    """Quantised normal(loc, scale, q[, size]) distribution spec."""

    REQUIRED_KEYS = ["loc", "scale", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class LogNormal(Dist):
    """lognormal(loc, scale[, size]) distribution spec."""

    REQUIRED_KEYS = ["loc", "scale"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
class QLogNormal(Dist):
    """Quantised lognormal(loc, scale, q[, size]) distribution spec."""

    REQUIRED_KEYS = ["loc", "scale", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
def validate_matrix(values):
    """Raise ValidationError unless exactly one of `values` is truthy (set)."""
    num_set = sum(1 for option in values if option)
    if num_set != 1:
        raise ValidationError(
            "Matrix element is not valid, one and only one option is required."
        )
class BaseHpParamConfig(BaseConfig):
    """Base config for hyper-parameter specs, validating the IO they feed into."""

    @staticmethod
    def validate_io(io: "V1IO"):  # noqa
        """Check the referenced IO is numeric (int/float); raise otherwise.

        NOTE: the message grammar ("has a an input type") is a runtime string
        and is left unchanged here.
        """
        if io.iotype not in [types.INT, types.FLOAT]:
            raise ValidationError(
                "Param `{}` has a an input type `{}` "
                "and it does not correspond to hyper-param type `int or float`.".format(
                    io.name,
                    io.iotype,
                )
            )
        return True
class HpChoiceSchema(BaseCamelSchema):
    """Schema for the `choice` hyper-parameter: a list of arbitrary candidate values."""

    kind = fields.Str(allow_none=True, validate=validate.Equal("choice"))
    value = fields.List(fields.Raw(), allow_none=True)

    @staticmethod
    def schema_config():
        # Links this schema to its config class for (de)serialization.
        return V1HpChoice
class V1HpChoice(BaseHpParamConfig, polyaxon_sdk.V1HpChoice):
    """`Choice` picks a value from a list of values.
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: choice
    >>>     value: [1, 2, 3, 4, 5]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpChoice
    >>> param_test = V1HpChoice(value=[1, 2, 3, 4, 5])
    ```
    """

    SCHEMA = HpChoiceSchema
    IDENTIFIER = "choice"

    @staticmethod
    def validate_io(io: "V1IO"):  # noqa
        # Choice accepts values of any type, so no int/float restriction.
        return True

    @property
    def is_distribution(self):
        return False

    @property
    def is_continuous(self):
        return False

    @property
    def is_discrete(self):
        return True

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        # Categorical when any entry is non-numeric. numpy is imported
        # best-effort at module level (np may be None), so only include its
        # scalar types when available — the original raised AttributeError
        # on `np.integer` when numpy was not installed.
        numeric_types = (int, float, complex)
        if np is not None:
            numeric_types = numeric_types + (np.integer, np.floating)
        return any(not isinstance(v, numeric_types) for v in self.value)

    @property
    def is_uniform(self):
        return False
class HpPChoiceSchema(BaseCamelSchema):
    """Schema for the `pchoice` hyper-param: a list of (value, probability) pairs."""

    kind = fields.Str(allow_none=True, validate=validate.Equal("pchoice"))
    value = fields.List(PChoice(), allow_none=True)

    @staticmethod
    def schema_config():
        return V1HpPChoice

    @validates_schema
    def validate_pchoice(self, data, **kwargs):
        # Cross-field check: probabilities (second element of each pair) must
        # not sum to more than 1. NOTE: despite sharing its name, this method
        # calls the module-level `validate_pchoice` function — class scope is
        # not visible from inside a method body, so there is no recursion.
        if data.get("value"):
            validate_pchoice(values=[v[1] for v in data["value"] if v])
class V1HpPChoice(BaseHpParamConfig, polyaxon_sdk.V1HpPChoice):
"""`PChoice` picks a value with a probability from a list of
[(value, probability), (value, probability), ...].
```yaml
>>> params:
>>> paramTest:
>>> kind: pchoice
>>> value: [(1, 0.1), (2, 0.1), (3, 0.8)]
```
```python
>>> from polyaxon.polyflow import V1HpPChoice
>>> param_test = V1HpPChoice(value=[("A", 0.1), ("B", 0.1), ("C", 0.8)])
```
"""
SCHEMA = HpPChoiceSchema
IDENTIFIER = "pchoice"
@staticmethod
def validate_io(io: "V1IO"): # noqa
return True
@property
def is_distribution(self):
return True
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpRangeSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("range"))
value = Range(allow_none=True)
@staticmethod
def schema_config():
return V1HpRange
class V1HpRange(BaseHpParamConfig, polyaxon_sdk.V1HpRange):
"""`Range` picks a value from a generated list of values using `[start, stop, step]`,
you can pass values in these forms:
* [1, 10, 2]
* {start: 1, stop: 10, step: 2}
* '1:10:2'
```yaml
>>> params:
>>> paramTest:
>>> kind: range
>>> value: [1, 10, 2]
```
```python
>>> from polyaxon.polyflow import V1HpRange
>>> param_test = V1HpRange(value=[1, 10, 2])
```
"""
SCHEMA = HpRangeSchema
IDENTIFIER = "range"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpLinSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("linspace"))
value = LinSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpLinSpace
class V1HpLinSpace(BaseHpParamConfig, polyaxon_sdk.V1HpLinSpace):
"""`LinSpace` picks a value from a generated list of steps from start to stop spaced evenly
on a linear scale `[start, stop, step]`, you can pass values in these forms:
* [1, 10, 20]
* {start: 1, stop: 10, num: 20}
* '1:10:20'
```yaml
>>> params:
>>> paramTest:
>>> kind: linspace
>>> value: [1, 10, 20]
```
```python
>>> from polyaxon.polyflow import V1HpLinSpace
>>> param_test = V1HpLinSpace(value=[1, 10, 20])
```
"""
SCHEMA = HpLinSpaceSchema
IDENTIFIER = "linspace"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpLogSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("logspace"))
value = LogSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogSpace
class V1HpLogSpace(BaseHpParamConfig, polyaxon_sdk.V1HpLogSpace):
    """`LogSpace` picks a value from a generated list of steps from start to stop spaced evenly
    on a log scale `[start, stop, step]`, you can pass values in these forms:
    * [1, 10, 20]
    * {start: 1, stop: 10, num: 20}
    * '1:10:20'
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: logspace
    >>>     value: [1, 10, 20]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpLogSpace
    >>> param_test = V1HpLogSpace(value=[1, 10, 20])
    ```
    """
    # Fixed docstring example: it previously instantiated V1HpLinSpace.

    SCHEMA = HpLogSpaceSchema
    IDENTIFIER = "logspace"

    @property
    def is_distribution(self):
        # A generated grid of values, not a random distribution.
        return False

    @property
    def is_continuous(self):
        return False

    @property
    def is_discrete(self):
        return True

    @property
    def is_range(self):
        return True

    @property
    def is_categorical(self):
        return False

    @property
    def is_uniform(self):
        return False
class HpGeomSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("geomspace"))
value = GeomSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpGeomSpace
class V1HpGeomSpace(BaseHpParamConfig, polyaxon_sdk.V1HpGeomSpace):
"""`GeomSpace` picks a value from a generated list of steps from start to stop spaced evenly
on a geometric progression `[start, stop, step]`, you can pass values in these forms:
* [1, 10, 20]
* {start: 1, stop: 10, num: 20}
* '1:10:20'
```yaml
>>> params:
>>> paramTest:
>>> kind: geomspace
>>> value: [1, 10, 20]
```
```python
>>> from polyaxon.polyflow import V1HpGeomSpace
>>> param_test = V1HpGeomSpace(value=[1, 10, 20])
```
"""
SCHEMA = HpGeomSpaceSchema
IDENTIFIER = "geomspace"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("uniform"))
value = Uniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpUniform
class V1HpUniform(BaseHpParamConfig, polyaxon_sdk.V1HpUniform):
"""`Uniform` draws samples from a uniform distribution over the half-open
interval `[low, high)`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'low': 0, 'high': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: uniform
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpUniform
>>> param_test = V1HpUniform(value=[0, 1])
```
"""
SCHEMA = HpUniformSchema
IDENTIFIER = "uniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return True
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("quniform"))
value = QUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpQUniform
class V1HpQUniform(BaseHpParamConfig, polyaxon_sdk.V1HpQUniform):
"""`QUniform` samples from a quantized uniform distribution over `[low, high]`
(`round(uniform(low, high) / q) * q`),
you can pass values in these forms:
* 0:1:0.1
* [0, 1, 0.1]
* {'low': 0, 'high': 1, 'q': 0.1}
```yaml
>>> params:
>>> paramTest:
>>> kind: quniform
>>> value: [0, 1, 0.1]
```
```python
>>> from polyaxon.polyflow import V1HpQUniform
>>> param_test = V1HpQUniform(value=[0, 1, 0.1])
```
"""
SCHEMA = HpQUniformSchema
IDENTIFIER = "quniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpLogUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("loguniform"))
value = LogUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogUniform
class V1HpLogUniform(BaseHpParamConfig, polyaxon_sdk.V1HpLogUniform):
"""`LogUniform` samples from a log uniform distribution over`[low, high]`,
you can pass values in these forms:
* 0:1
* [0, 1]
* {'low': 0, 'high': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: loguniform
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpLogUniform
>>> param_test = V1HpLogUniform(value=[0, 1])
```
"""
SCHEMA = HpLogUniformSchema
IDENTIFIER = "loguniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQLogUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qloguniform"))
value = QLogUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpQLogUniform
class V1HpQLogUniform(BaseHpParamConfig, polyaxon_sdk.V1HpQLogUniform):
    """`QLogUniform` samples from a quantized log uniform distribution over `[low, high]`,
    you can pass values in these forms:
    * 0:1:0.1
    * [0, 1, 0.1]
    * {'low': 0, 'high': 1, 'q': 0.1}
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: qloguniform
    >>>     value: [0, 1, 0.1]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpQLogUniform
    >>> param_test = V1HpQLogUniform(value=[0, 1, 0.1])
    ```
    """
    # Fixed docstring header: it was copy-pasted from `LogUniform`.

    SCHEMA = HpQLogUniformSchema
    IDENTIFIER = "qloguniform"

    @property
    def is_distribution(self):
        return True

    @property
    def is_uniform(self):
        return False

    @property
    def is_continuous(self):
        return True

    @property
    def is_discrete(self):
        return False

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        return False

    @property
    def min(self):
        # NOTE(review): only this hyper-param class exposes `min`, and it is
        # hard-coded to None — confirm whether callers depend on it.
        return None
class HpNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("normal"))
value = Normal(allow_none=True)
@staticmethod
def schema_config():
return V1HpNormal
class V1HpNormal(BaseHpParamConfig, polyaxon_sdk.V1HpNormal):
"""`Normal` draws random samples from a normal (Gaussian) distribution defined by
`[loc, scale]`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'loc': 0, 'scale': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: normal
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpNormal
>>> param_test = V1HpNormal(value=[0, 1])
```
"""
SCHEMA = HpNormalSchema
IDENTIFIER = "normal"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qnormal"))
value = QNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpQNormal
class V1HpQNormal(BaseHpParamConfig, polyaxon_sdk.V1HpQNormal):
    """`QNormal` draws random samples from a quantized normal (Gaussian) distribution defined by
    `[loc, scale]`, you can pass values in these forms:
    * 0:1:0.1
    * [0, 1, 0.1]
    * {'loc': 0, 'scale': 1, 'q': 0.1}
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: qnormal
    >>>     value: [0, 1, 0.1]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpQNormal
    >>> param_test = V1HpQNormal(value=[0, 1, 0.1])
    ```
    """
    # Fixed docstring example: it previously instantiated V1HpNormal.

    SCHEMA = HpQNormalSchema
    IDENTIFIER = "qnormal"

    @property
    def is_distribution(self):
        return True

    @property
    def is_uniform(self):
        return False

    @property
    def is_continuous(self):
        return True

    @property
    def is_discrete(self):
        return False

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        return False
class HpLogNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("lognormal"))
value = LogNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogNormal
class V1HpLogNormal(BaseHpParamConfig, polyaxon_sdk.V1HpLogNormal):
"""`LogNormal` draws random samples from a log normal (Gaussian) distribution defined by
`[loc, scale]`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'loc': 0, 'scale': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: lognormal
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpLogNormal
>>> param_test = V1HpLogNormal(value=[0, 1])
```
"""
SCHEMA = HpLogNormalSchema
IDENTIFIER = "lognormal"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQLogNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qlognormal"))
value = QLogNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpQLogNormal
class V1HpQLogNormal(BaseHpParamConfig, polyaxon_sdk.V1HpQLogNormal):
    """`QLogNormal` draws random samples from a log normal (Gaussian) distribution defined by
    `[loc, scale]`, you can pass values in these forms:
    * 0:1:0.1
    * [0, 1, 0.1]
    * {'loc': 0, 'scale': 1, 'q': 0.1}
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: qlognormal
    >>>     value: [0, 1, 0.1]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpQLogNormal
    >>> param_test = V1HpQLogNormal(value=[0, 1, 0.1])
    ```
    """
    # Fixed docstring example: the quantized form requires three values
    # (loc, scale, q); the example previously passed only [0, 1].

    SCHEMA = HpQLogNormalSchema
    IDENTIFIER = "qlognormal"

    @property
    def is_distribution(self):
        return True

    @property
    def is_uniform(self):
        return False

    @property
    def is_continuous(self):
        return True

    @property
    def is_discrete(self):
        return False

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        return False
class HpParamSchema(BaseOneOfSchema):
    """Polymorphic schema dispatching on `kind` to the matching hyper-param schema."""

    TYPE_FIELD = "kind"
    # Keep the `kind` field in the deserialized payload so configs round-trip.
    TYPE_FIELD_REMOVE = False
    SCHEMAS = {
        V1HpChoice.IDENTIFIER: HpChoiceSchema,
        V1HpPChoice.IDENTIFIER: HpPChoiceSchema,
        V1HpRange.IDENTIFIER: HpRangeSchema,
        V1HpLinSpace.IDENTIFIER: HpLinSpaceSchema,
        V1HpLogSpace.IDENTIFIER: HpLogSpaceSchema,
        V1HpGeomSpace.IDENTIFIER: HpGeomSpaceSchema,
        V1HpUniform.IDENTIFIER: HpUniformSchema,
        V1HpQUniform.IDENTIFIER: HpQUniformSchema,
        V1HpLogUniform.IDENTIFIER: HpLogUniformSchema,
        V1HpQLogUniform.IDENTIFIER: HpQLogUniformSchema,
        V1HpNormal.IDENTIFIER: HpNormalSchema,
        V1HpQNormal.IDENTIFIER: HpQNormalSchema,
        V1HpLogNormal.IDENTIFIER: HpLogNormalSchema,
        V1HpQLogNormal.IDENTIFIER: HpQLogNormalSchema,
    }
# Union of every supported hyper-param definition, used for type annotations.
# Removed extraction residue ("| core/polyaxon/polyflow/matrix/params.py |")
# that was fused onto the closing bracket and broke the syntax.
V1HpParam = Union[
    V1HpChoice,
    V1HpPChoice,
    V1HpRange,
    V1HpLinSpace,
    V1HpLogSpace,
    V1HpGeomSpace,
    V1HpUniform,
    V1HpQUniform,
    V1HpLogUniform,
    V1HpQLogUniform,
    V1HpNormal,
    V1HpQNormal,
    V1HpLogNormal,
    V1HpQLogNormal,
]
import ast
from collections.abc import Mapping
from typing import Union
import polyaxon_sdk
from marshmallow import fields, validate, validates_schema
from marshmallow.exceptions import ValidationError
from polyaxon import types
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig, BaseOneOfSchema
try:
import numpy as np
except (ImportError, ModuleNotFoundError):
np = None
# pylint:disable=redefined-outer-name
class PChoice(fields.Field):
    """Marshmallow field for a single (value, probability) pair."""

    def _deserialize(self, value, attr, data, **kwargs):
        # Accept a 2-element list/tuple whose second element is the
        # probability. NOTE(review): the check requires a float in [0, 1),
        # so an exact probability of 1.0 (or an int such as 0) is rejected —
        # confirm this is intended.
        if isinstance(value, (list, tuple)) and len(value) == 2:
            if isinstance(value[1], float) and 0 <= value[1] < 1:
                return value
        raise ValidationError("This field expects a list of [value<Any>, dist<float>].")
class Range(fields.Field):
    """Marshmallow field that parses a range spec into a dict keyed by KEYS.

    Three input shapes are accepted and normalized:
    * a ':'-separated string, e.g. "1:10:2"
    * a mapping containing the required keys (optionally the optional ones)
    * a positional list, e.g. [1, 10, 2]

    Subclasses customize REQUIRED_KEYS/KEYS and whether the first two values
    must be strictly ordered (CHECK_ORDER).
    """

    REQUIRED_KEYS = ["start", "stop", "step"]
    OPTIONAL_KEY = None
    KEYS = REQUIRED_KEYS
    # Ranges require start < stop; distribution subclasses disable this.
    CHECK_ORDER = True

    def _deserialize(
        self, value, attr, data, **kwargs
    ):  # pylint:disable=too-many-branches
        if isinstance(value, str):
            # "v1:v2[:v3]" string form.
            value = value.split(":")
        elif isinstance(value, Mapping):
            if set(self.REQUIRED_KEYS) - set(value.keys()):
                raise ValidationError(
                    "{} dict must have {} keys {}.".format(
                        self.__class__.__name__,
                        len(self.REQUIRED_KEYS),
                        self.REQUIRED_KEYS,
                    )
                )
            # Flatten the mapping to a positional list in key order.
            # NOTE(review): a dict whose length matches neither REQUIRED_KEYS
            # nor KEYS falls through still as a Mapping and fails later with a
            # confusing element-type error — confirm if worth guarding.
            if len(value) == len(self.REQUIRED_KEYS):
                value = [value[k] for k in self.REQUIRED_KEYS]
            elif len(value) == len(self.KEYS):
                value = [value[k] for k in self.KEYS]
        elif not isinstance(value, list):
            raise ValidationError(
                "{} accept values formatted as the following:\n"
                " * str: {}\n"
                " * dict: {}\n"
                " * list: {}".format(
                    self.__class__.__name__,
                    ":".join(self.REQUIRED_KEYS),
                    dict(
                        zip(
                            self.REQUIRED_KEYS,
                            ["v{}".format(i) for i in range(len(self.REQUIRED_KEYS))],
                        )
                    ),
                    self.REQUIRED_KEYS,
                )
            )
        if len(value) != len(self.REQUIRED_KEYS) and len(value) != len(self.KEYS):
            raise ValidationError(
                "{} requires {} or {} elements received {}".format(
                    self.__class__.__name__,
                    len(self.REQUIRED_KEYS),
                    len(self.KEYS),
                    len(value),
                )
            )
        for i, v in enumerate(value):
            # Every element must be numeric, or a string parseable as numeric.
            try:
                float(v)
            except (ValueError, TypeError):
                raise ValidationError(
                    "{}: {} must of type int or float, received instead {}".format(
                        self.__class__.__name__, self.REQUIRED_KEYS[i], v
                    )
                )
            if not isinstance(v, (int, float)):
                # String element (from the ':' form): parse into int/float.
                value[i] = ast.literal_eval(v)
        # Check that lower value is smaller than higher value
        if self.CHECK_ORDER and value[0] >= value[1]:
            raise ValidationError(
                "{key2} value must be strictly higher that {key1} value, "
                "received instead {key1}: {val1}, {key2}: {val2}".format(
                    key1=self.REQUIRED_KEYS[0],
                    key2=self.REQUIRED_KEYS[1],
                    val1=value[0],
                    val2=value[1],
                )
            )
        # A zero third element (step/q) would make the spec meaningless.
        if len(self.REQUIRED_KEYS) == 3 and value[2] == 0:
            raise ValidationError("{} cannot be 0".format(self.REQUIRED_KEYS[2]))
        value = dict(zip(self.KEYS, value))
        return value
class LinSpace(Range):
    # Evenly spaced steps on a linear scale: start, stop, number of steps.
    REQUIRED_KEYS = ["start", "stop", "num"]
    KEYS = REQUIRED_KEYS


class GeomSpace(Range):
    # Steps along a geometric progression: start, stop, number of steps.
    REQUIRED_KEYS = ["start", "stop", "num"]
    KEYS = REQUIRED_KEYS


class LogSpace(Range):
    # Steps on a log scale; `base` is optional (presumably the logarithm
    # base, as in numpy.logspace — confirm against the consumer).
    REQUIRED_KEYS = ["start", "stop", "num"]
    OPTIONAL_KEYS = ["base"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
def validate_pchoice(values):
    """Raise ``ValidationError`` when the probabilities sum to more than 1."""
    total = sum(v for v in values if v)
    if total > 1:
        raise ValidationError("The distribution of different outcomes should sum to 1.")


def pchoice(values, size=None, rand_generator=None):
    """Sample key(s) from ``values``, a sequence of ``(key, probability)`` pairs.

    Returns a single key when ``size`` is None, otherwise a list of ``size``
    keys. Sampling delegates to ``multinomial`` on ``rand_generator``
    (defaults to ``np.random``); probabilities are validated first.
    """
    generator = rand_generator or np.random
    keys = [pair[0] for pair in values]
    probabilities = [pair[1] for pair in values]
    validate_pchoice(probabilities)
    one_hot = generator.multinomial(1, probabilities, size=size)
    if size is None:
        return keys[one_hot.argmax()]
    return [keys[row.argmax()] for row in one_hot]
# Base for distribution fields: unlike ranges, the two leading values are not
# ordered bounds (e.g. loc/scale), so Range's start<stop check is disabled.
class Dist(Range):
    CHECK_ORDER = False


class Uniform(Dist):
    # uniform(low, high); optional `size` requests multiple samples.
    REQUIRED_KEYS = ["low", "high"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class QUniform(Dist):
    # Quantized uniform: round(uniform(low, high) / q) * q.
    REQUIRED_KEYS = ["low", "high", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class LogUniform(Dist):
    REQUIRED_KEYS = ["low", "high"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class QLogUniform(Dist):
    REQUIRED_KEYS = ["low", "high", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class Normal(Dist):
    # normal(loc, scale): location (mean) and scale (standard deviation).
    REQUIRED_KEYS = ["loc", "scale"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class QNormal(Dist):
    REQUIRED_KEYS = ["loc", "scale", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class LogNormal(Dist):
    REQUIRED_KEYS = ["loc", "scale"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS


class QLogNormal(Dist):
    REQUIRED_KEYS = ["loc", "scale", "q"]
    OPTIONAL_KEYS = ["size"]
    KEYS = REQUIRED_KEYS + OPTIONAL_KEYS
def validate_matrix(values):
    """Ensure exactly one of ``values`` is truthy; raise ``ValidationError`` otherwise."""
    truthy_count = sum(1 for value in values if value)
    if truthy_count != 1:
        raise ValidationError(
            "Matrix element is not valid, one and only one option is required."
        )
class BaseHpParamConfig(BaseConfig):
    """Base config for hyper-param definitions.

    Provides the default IO validation: a hyper-param may only be wired to an
    int/float input. Subclasses that accept arbitrary values (choice, pchoice)
    override `validate_io` with a no-op.
    """

    @staticmethod
    def validate_io(io: "V1IO"):  # noqa
        if io.iotype not in [types.INT, types.FLOAT]:
            # Fixed error-message grammar ("has a an" -> "has an").
            raise ValidationError(
                "Param `{}` has an input type `{}` "
                "and it does not correspond to hyper-param type `int or float`.".format(
                    io.name,
                    io.iotype,
                )
            )
        return True
class HpChoiceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("choice"))
value = fields.List(fields.Raw(), allow_none=True)
@staticmethod
def schema_config():
return V1HpChoice
class V1HpChoice(BaseHpParamConfig, polyaxon_sdk.V1HpChoice):
    """`Choice` picks a value from a list of values.
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: choice
    >>>     value: [1, 2, 3, 4, 5]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpChoice
    >>> param_test = V1HpChoice(value=[1, 2, 3, 4, 5])
    ```
    """

    SCHEMA = HpChoiceSchema
    IDENTIFIER = "choice"

    @staticmethod
    def validate_io(io: "V1IO"):  # noqa
        # Choice accepts values of any type, so no int/float restriction.
        return True

    @property
    def is_distribution(self):
        return False

    @property
    def is_continuous(self):
        return False

    @property
    def is_discrete(self):
        return True

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        # Categorical when any entry is non-numeric. numpy is imported
        # best-effort at module level (np may be None), so only include its
        # scalar types when available — the original raised AttributeError
        # on `np.integer` when numpy was not installed.
        numeric_types = (int, float, complex)
        if np is not None:
            numeric_types = numeric_types + (np.integer, np.floating)
        return any(not isinstance(v, numeric_types) for v in self.value)

    @property
    def is_uniform(self):
        return False
class HpPChoiceSchema(BaseCamelSchema):
    """Schema for the `pchoice` hyper-param: a list of (value, probability) pairs."""

    kind = fields.Str(allow_none=True, validate=validate.Equal("pchoice"))
    value = fields.List(PChoice(), allow_none=True)

    @staticmethod
    def schema_config():
        return V1HpPChoice

    @validates_schema
    def validate_pchoice(self, data, **kwargs):
        # Cross-field check: probabilities (second element of each pair) must
        # not sum to more than 1. NOTE: despite sharing its name, this method
        # calls the module-level `validate_pchoice` function — class scope is
        # not visible from inside a method body, so there is no recursion.
        if data.get("value"):
            validate_pchoice(values=[v[1] for v in data["value"] if v])
class V1HpPChoice(BaseHpParamConfig, polyaxon_sdk.V1HpPChoice):
"""`PChoice` picks a value with a probability from a list of
[(value, probability), (value, probability), ...].
```yaml
>>> params:
>>> paramTest:
>>> kind: pchoice
>>> value: [(1, 0.1), (2, 0.1), (3, 0.8)]
```
```python
>>> from polyaxon.polyflow import V1HpPChoice
>>> param_test = V1HpPChoice(value=[("A", 0.1), ("B", 0.1), ("C", 0.8)])
```
"""
SCHEMA = HpPChoiceSchema
IDENTIFIER = "pchoice"
@staticmethod
def validate_io(io: "V1IO"): # noqa
return True
@property
def is_distribution(self):
return True
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpRangeSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("range"))
value = Range(allow_none=True)
@staticmethod
def schema_config():
return V1HpRange
class V1HpRange(BaseHpParamConfig, polyaxon_sdk.V1HpRange):
"""`Range` picks a value from a generated list of values using `[start, stop, step]`,
you can pass values in these forms:
* [1, 10, 2]
* {start: 1, stop: 10, step: 2}
* '1:10:2'
```yaml
>>> params:
>>> paramTest:
>>> kind: range
>>> value: [1, 10, 2]
```
```python
>>> from polyaxon.polyflow import V1HpRange
>>> param_test = V1HpRange(value=[1, 10, 2])
```
"""
SCHEMA = HpRangeSchema
IDENTIFIER = "range"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpLinSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("linspace"))
value = LinSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpLinSpace
class V1HpLinSpace(BaseHpParamConfig, polyaxon_sdk.V1HpLinSpace):
"""`LinSpace` picks a value from a generated list of steps from start to stop spaced evenly
on a linear scale `[start, stop, step]`, you can pass values in these forms:
* [1, 10, 20]
* {start: 1, stop: 10, num: 20}
* '1:10:20'
```yaml
>>> params:
>>> paramTest:
>>> kind: linspace
>>> value: [1, 10, 20]
```
```python
>>> from polyaxon.polyflow import V1HpLinSpace
>>> param_test = V1HpLinSpace(value=[1, 10, 20])
```
"""
SCHEMA = HpLinSpaceSchema
IDENTIFIER = "linspace"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpLogSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("logspace"))
value = LogSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogSpace
class V1HpLogSpace(BaseHpParamConfig, polyaxon_sdk.V1HpLogSpace):
    """`LogSpace` picks a value from a generated list of steps from start to stop spaced evenly
    on a log scale `[start, stop, step]`, you can pass values in these forms:
    * [1, 10, 20]
    * {start: 1, stop: 10, num: 20}
    * '1:10:20'
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: logspace
    >>>     value: [1, 10, 20]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpLogSpace
    >>> param_test = V1HpLogSpace(value=[1, 10, 20])
    ```
    """
    # Fixed docstring example: it previously instantiated V1HpLinSpace.

    SCHEMA = HpLogSpaceSchema
    IDENTIFIER = "logspace"

    @property
    def is_distribution(self):
        # A generated grid of values, not a random distribution.
        return False

    @property
    def is_continuous(self):
        return False

    @property
    def is_discrete(self):
        return True

    @property
    def is_range(self):
        return True

    @property
    def is_categorical(self):
        return False

    @property
    def is_uniform(self):
        return False
class HpGeomSpaceSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("geomspace"))
value = GeomSpace(allow_none=True)
@staticmethod
def schema_config():
return V1HpGeomSpace
class V1HpGeomSpace(BaseHpParamConfig, polyaxon_sdk.V1HpGeomSpace):
"""`GeomSpace` picks a value from a generated list of steps from start to stop spaced evenly
on a geometric progression `[start, stop, step]`, you can pass values in these forms:
* [1, 10, 20]
* {start: 1, stop: 10, num: 20}
* '1:10:20'
```yaml
>>> params:
>>> paramTest:
>>> kind: geomspace
>>> value: [1, 10, 20]
```
```python
>>> from polyaxon.polyflow import V1HpGeomSpace
>>> param_test = V1HpGeomSpace(value=[1, 10, 20])
```
"""
SCHEMA = HpGeomSpaceSchema
IDENTIFIER = "geomspace"
@property
def is_distribution(self):
return False
@property
def is_continuous(self):
return False
@property
def is_discrete(self):
return True
@property
def is_range(self):
return True
@property
def is_categorical(self):
return False
@property
def is_uniform(self):
return False
class HpUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("uniform"))
value = Uniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpUniform
class V1HpUniform(BaseHpParamConfig, polyaxon_sdk.V1HpUniform):
"""`Uniform` draws samples from a uniform distribution over the half-open
interval `[low, high)`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'low': 0, 'high': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: uniform
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpUniform
>>> param_test = V1HpUniform(value=[0, 1])
```
"""
SCHEMA = HpUniformSchema
IDENTIFIER = "uniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return True
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("quniform"))
value = QUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpQUniform
class V1HpQUniform(BaseHpParamConfig, polyaxon_sdk.V1HpQUniform):
"""`QUniform` samples from a quantized uniform distribution over `[low, high]`
(`round(uniform(low, high) / q) * q`),
you can pass values in these forms:
* 0:1:0.1
* [0, 1, 0.1]
* {'low': 0, 'high': 1, 'q': 0.1}
```yaml
>>> params:
>>> paramTest:
>>> kind: quniform
>>> value: [0, 1, 0.1]
```
```python
>>> from polyaxon.polyflow import V1HpQUniform
>>> param_test = V1HpQUniform(value=[0, 1, 0.1])
```
"""
SCHEMA = HpQUniformSchema
IDENTIFIER = "quniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpLogUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("loguniform"))
value = LogUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogUniform
class V1HpLogUniform(BaseHpParamConfig, polyaxon_sdk.V1HpLogUniform):
"""`LogUniform` samples from a log uniform distribution over`[low, high]`,
you can pass values in these forms:
* 0:1
* [0, 1]
* {'low': 0, 'high': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: loguniform
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpLogUniform
>>> param_test = V1HpLogUniform(value=[0, 1])
```
"""
SCHEMA = HpLogUniformSchema
IDENTIFIER = "loguniform"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQLogUniformSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qloguniform"))
value = QLogUniform(allow_none=True)
@staticmethod
def schema_config():
return V1HpQLogUniform
class V1HpQLogUniform(BaseHpParamConfig, polyaxon_sdk.V1HpQLogUniform):
    """`QLogUniform` samples from a quantized log uniform distribution over `[low, high]`,
    you can pass values in these forms:
    * 0:1:0.1
    * [0, 1, 0.1]
    * {'low': 0, 'high': 1, 'q': 0.1}
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: qloguniform
    >>>     value: [0, 1, 0.1]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpQLogUniform
    >>> param_test = V1HpQLogUniform(value=[0, 1, 0.1])
    ```
    """
    # Fixed docstring header: it was copy-pasted from `LogUniform`.

    SCHEMA = HpQLogUniformSchema
    IDENTIFIER = "qloguniform"

    @property
    def is_distribution(self):
        return True

    @property
    def is_uniform(self):
        return False

    @property
    def is_continuous(self):
        return True

    @property
    def is_discrete(self):
        return False

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        return False

    @property
    def min(self):
        # NOTE(review): only this hyper-param class exposes `min`, and it is
        # hard-coded to None — confirm whether callers depend on it.
        return None
class HpNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("normal"))
value = Normal(allow_none=True)
@staticmethod
def schema_config():
return V1HpNormal
class V1HpNormal(BaseHpParamConfig, polyaxon_sdk.V1HpNormal):
"""`Normal` draws random samples from a normal (Gaussian) distribution defined by
`[loc, scale]`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'loc': 0, 'scale': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: normal
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpNormal
>>> param_test = V1HpNormal(value=[0, 1])
```
"""
SCHEMA = HpNormalSchema
IDENTIFIER = "normal"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qnormal"))
value = QNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpQNormal
class V1HpQNormal(BaseHpParamConfig, polyaxon_sdk.V1HpQNormal):
    """`QNormal` draws random samples from a quantized normal (Gaussian) distribution defined by
    `[loc, scale]`, you can pass values in these forms:
    * 0:1:0.1
    * [0, 1, 0.1]
    * {'loc': 0, 'scale': 1, 'q': 0.1}
    ```yaml
    >>> params:
    >>>   paramTest:
    >>>     kind: qnormal
    >>>     value: [0, 1, 0.1]
    ```
    ```python
    >>> from polyaxon.polyflow import V1HpQNormal
    >>> param_test = V1HpQNormal(value=[0, 1, 0.1])
    ```
    """
    # Fixed docstring example: it previously instantiated V1HpNormal.

    SCHEMA = HpQNormalSchema
    IDENTIFIER = "qnormal"

    @property
    def is_distribution(self):
        return True

    @property
    def is_uniform(self):
        return False

    @property
    def is_continuous(self):
        return True

    @property
    def is_discrete(self):
        return False

    @property
    def is_range(self):
        return False

    @property
    def is_categorical(self):
        return False
class HpLogNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("lognormal"))
value = LogNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpLogNormal
class V1HpLogNormal(BaseHpParamConfig, polyaxon_sdk.V1HpLogNormal):
"""`LogNormal` draws random samples from a log normal (Gaussian) distribution defined by
`[loc, scale]`, you can pass values in these forms:
* 0:1
* [0, 1]
* {'loc': 0, 'scale': 1}
```yaml
>>> params:
>>> paramTest:
>>> kind: lognormal
>>> value: [0, 1]
```
```python
>>> from polyaxon.polyflow import V1HpLogNormal
>>> param_test = V1HpLogNormal(value=[0, 1])
```
"""
SCHEMA = HpLogNormalSchema
IDENTIFIER = "lognormal"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpQLogNormalSchema(BaseCamelSchema):
kind = fields.Str(allow_none=True, validate=validate.Equal("qlognormal"))
value = QLogNormal(allow_none=True)
@staticmethod
def schema_config():
return V1HpQLogNormal
class V1HpQLogNormal(BaseHpParamConfig, polyaxon_sdk.V1HpQLogNormal):
"""`QLogNormal` draws random samples from a log normal (Gaussian) distribution defined by
`[loc, scale]`, you can pass values in these forms:
* 0:1:0.1
* [0, 1, 0.1]
* {'loc': 0, 'scale': 1, 'q': 0.1}
```yaml
>>> params:
>>> paramTest:
>>> kind: qlognormal
>>> value: [0, 1, 0.1]
```
```python
>>> from polyaxon.polyflow import V1HpQLogNormal
>>> param_test = V1HpQLogNormal(value=[0, 1])
```
"""
SCHEMA = HpQLogNormalSchema
IDENTIFIER = "qlognormal"
@property
def is_distribution(self):
return True
@property
def is_uniform(self):
return False
@property
def is_continuous(self):
return True
@property
def is_discrete(self):
return False
@property
def is_range(self):
return False
@property
def is_categorical(self):
return False
class HpParamSchema(BaseOneOfSchema):
TYPE_FIELD = "kind"
TYPE_FIELD_REMOVE = False
SCHEMAS = {
V1HpChoice.IDENTIFIER: HpChoiceSchema,
V1HpPChoice.IDENTIFIER: HpPChoiceSchema,
V1HpRange.IDENTIFIER: HpRangeSchema,
V1HpLinSpace.IDENTIFIER: HpLinSpaceSchema,
V1HpLogSpace.IDENTIFIER: HpLogSpaceSchema,
V1HpGeomSpace.IDENTIFIER: HpGeomSpaceSchema,
V1HpUniform.IDENTIFIER: HpUniformSchema,
V1HpQUniform.IDENTIFIER: HpQUniformSchema,
V1HpLogUniform.IDENTIFIER: HpLogUniformSchema,
V1HpQLogUniform.IDENTIFIER: HpQLogUniformSchema,
V1HpNormal.IDENTIFIER: HpNormalSchema,
V1HpQNormal.IDENTIFIER: HpQNormalSchema,
V1HpLogNormal.IDENTIFIER: HpLogNormalSchema,
V1HpQLogNormal.IDENTIFIER: HpQLogNormalSchema,
}
V1HpParam = Union[
V1HpChoice,
V1HpPChoice,
V1HpRange,
V1HpLinSpace,
V1HpLogSpace,
V1HpGeomSpace,
V1HpUniform,
V1HpQUniform,
V1HpLogUniform,
V1HpQLogUniform,
V1HpNormal,
V1HpQNormal,
V1HpLogNormal,
V1HpQLogNormal,
] | 0.716715 | 0.254202 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Names exported via `from <module> import *` — one entry per output type defined below.
__all__ = [
    'ActorResponseResult',
    'EventContentResponseResult',
    'EventRequestMessageResponseResult',
    'EventResponseResult',
    'EventResponseMessageResponseResult',
    'RegistryPasswordResponseResult',
    'RequestResponseResult',
    'SkuResponse',
    'SourceResponseResult',
    'StatusResponse',
    'StorageAccountPropertiesResponse',
    'TargetResponseResult',
]
@pulumi.output_type
class ActorResponseResult(dict):
    """
    The agent that initiated the event. For most situations, this could be from the authorization context of the request.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        :param str name: The subject or username associated with the request context that generated the event.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The subject or username associated with the request context that generated the event.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class EventContentResponseResult(dict):
    """
    The content of the event request message.
    """
    def __init__(__self__, *,
                 action: Optional[str] = None,
                 actor: Optional['outputs.ActorResponseResult'] = None,
                 id: Optional[str] = None,
                 request: Optional['outputs.RequestResponseResult'] = None,
                 source: Optional['outputs.SourceResponseResult'] = None,
                 target: Optional['outputs.TargetResponseResult'] = None,
                 timestamp: Optional[str] = None):
        """
        The content of the event request message.
        :param str action: The action that encompasses the provided event.
        :param 'ActorResponseArgs' actor: The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        :param str id: The event ID.
        :param 'RequestResponseArgs' request: The request that generated the event.
        :param 'SourceResponseArgs' source: The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        :param 'TargetResponseArgs' target: The target of the event.
        :param str timestamp: The time at which the event occurred.
        """
        # NOTE(review): the docstring names *ResponseArgs types while the annotations
        # use *ResponseResult — a generator quirk; the annotations are authoritative.
        # Persist only explicitly supplied values; unset properties read back as None.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if actor is not None:
            pulumi.set(__self__, "actor", actor)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if request is not None:
            pulumi.set(__self__, "request", request)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if timestamp is not None:
            pulumi.set(__self__, "timestamp", timestamp)

    @property
    @pulumi.getter
    def action(self) -> Optional[str]:
        """
        The action that encompasses the provided event.
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def actor(self) -> Optional['outputs.ActorResponseResult']:
        """
        The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        """
        return pulumi.get(self, "actor")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The event ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def request(self) -> Optional['outputs.RequestResponseResult']:
        """
        The request that generated the event.
        """
        return pulumi.get(self, "request")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.SourceResponseResult']:
        """
        The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def target(self) -> Optional['outputs.TargetResponseResult']:
        """
        The target of the event.
        """
        return pulumi.get(self, "target")

    @property
    @pulumi.getter
    def timestamp(self) -> Optional[str]:
        """
        The time at which the event occurred.
        """
        return pulumi.get(self, "timestamp")
@pulumi.output_type
class EventRequestMessageResponseResult(dict):
    """
    The event request message sent to the service URI.
    """
    def __init__(__self__, *,
                 content: Optional['outputs.EventContentResponseResult'] = None,
                 headers: Optional[Mapping[str, str]] = None,
                 method: Optional[str] = None,
                 request_uri: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The event request message sent to the service URI.
        :param 'EventContentResponseArgs' content: The content of the event request message.
        :param Mapping[str, str] headers: The headers of the event request message.
        :param str method: The HTTP method used to send the event request message.
        :param str request_uri: The URI used to send the event request message.
        :param str version: The HTTP message version.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if content is not None:
            pulumi.set(__self__, "content", content)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if method is not None:
            pulumi.set(__self__, "method", method)
        if request_uri is not None:
            pulumi.set(__self__, "request_uri", request_uri)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def content(self) -> Optional['outputs.EventContentResponseResult']:
        """
        The content of the event request message.
        """
        return pulumi.get(self, "content")

    @property
    @pulumi.getter
    def headers(self) -> Optional[Mapping[str, str]]:
        """
        The headers of the event request message.
        """
        return pulumi.get(self, "headers")

    @property
    @pulumi.getter
    def method(self) -> Optional[str]:
        """
        The HTTP method used to send the event request message.
        """
        return pulumi.get(self, "method")

    # camelCase wire name differs from the snake_case Python attribute.
    @property
    @pulumi.getter(name="requestUri")
    def request_uri(self) -> Optional[str]:
        """
        The URI used to send the event request message.
        """
        return pulumi.get(self, "request_uri")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The HTTP message version.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class EventResponseResult(dict):
    """
    The event for a webhook.
    """
    def __init__(__self__, *,
                 event_request_message: Optional['outputs.EventRequestMessageResponseResult'] = None,
                 event_response_message: Optional['outputs.EventResponseMessageResponseResult'] = None,
                 id: Optional[str] = None):
        """
        The event for a webhook.
        :param 'EventRequestMessageResponseArgs' event_request_message: The event request message sent to the service URI.
        :param 'EventResponseMessageResponseArgs' event_response_message: The event response message received from the service URI.
        :param str id: The event ID.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if event_request_message is not None:
            pulumi.set(__self__, "event_request_message", event_request_message)
        if event_response_message is not None:
            pulumi.set(__self__, "event_response_message", event_response_message)
        if id is not None:
            pulumi.set(__self__, "id", id)

    # camelCase wire names differ from the snake_case Python attributes.
    @property
    @pulumi.getter(name="eventRequestMessage")
    def event_request_message(self) -> Optional['outputs.EventRequestMessageResponseResult']:
        """
        The event request message sent to the service URI.
        """
        return pulumi.get(self, "event_request_message")

    @property
    @pulumi.getter(name="eventResponseMessage")
    def event_response_message(self) -> Optional['outputs.EventResponseMessageResponseResult']:
        """
        The event response message received from the service URI.
        """
        return pulumi.get(self, "event_response_message")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The event ID.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class EventResponseMessageResponseResult(dict):
    """
    The event response message received from the service URI.
    """
    def __init__(__self__, *,
                 content: Optional[str] = None,
                 headers: Optional[Mapping[str, str]] = None,
                 reason_phrase: Optional[str] = None,
                 status_code: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The event response message received from the service URI.
        :param str content: The content of the event response message.
        :param Mapping[str, str] headers: The headers of the event response message.
        :param str reason_phrase: The reason phrase of the event response message.
        :param str status_code: The status code of the event response message.
        :param str version: The HTTP message version.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if content is not None:
            pulumi.set(__self__, "content", content)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if reason_phrase is not None:
            pulumi.set(__self__, "reason_phrase", reason_phrase)
        if status_code is not None:
            pulumi.set(__self__, "status_code", status_code)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def content(self) -> Optional[str]:
        """
        The content of the event response message.
        """
        return pulumi.get(self, "content")

    @property
    @pulumi.getter
    def headers(self) -> Optional[Mapping[str, str]]:
        """
        The headers of the event response message.
        """
        return pulumi.get(self, "headers")

    # camelCase wire names differ from the snake_case Python attributes.
    @property
    @pulumi.getter(name="reasonPhrase")
    def reason_phrase(self) -> Optional[str]:
        """
        The reason phrase of the event response message.
        """
        return pulumi.get(self, "reason_phrase")

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[str]:
        """
        The status code of the event response message.
        """
        return pulumi.get(self, "status_code")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The HTTP message version.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class RegistryPasswordResponseResult(dict):
    """
    The login password for the container registry.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 value: Optional[str] = None):
        """
        The login password for the container registry.
        :param str name: The password name.
        :param str value: The password value.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The password name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        The password value.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class RequestResponseResult(dict):
    """
    The request that generated the event.
    """
    def __init__(__self__, *,
                 addr: Optional[str] = None,
                 host: Optional[str] = None,
                 id: Optional[str] = None,
                 method: Optional[str] = None,
                 useragent: Optional[str] = None):
        """
        The request that generated the event.
        :param str addr: The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request.
        :param str host: The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests.
        :param str id: The ID of the request that initiated the event.
        :param str method: The request method that generated the event.
        :param str useragent: The user agent header of the request.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if addr is not None:
            pulumi.set(__self__, "addr", addr)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if method is not None:
            pulumi.set(__self__, "method", method)
        if useragent is not None:
            pulumi.set(__self__, "useragent", useragent)

    @property
    @pulumi.getter
    def addr(self) -> Optional[str]:
        """
        The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request.
        """
        return pulumi.get(self, "addr")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """
        The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests.
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The ID of the request that initiated the event.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def method(self) -> Optional[str]:
        """
        The request method that generated the event.
        """
        return pulumi.get(self, "method")

    @property
    @pulumi.getter
    def useragent(self) -> Optional[str]:
        """
        The user agent header of the request.
        """
        return pulumi.get(self, "useragent")
@pulumi.output_type
class SkuResponse(dict):
    """
    The SKU of a container registry.
    """
    def __init__(__self__, *,
                 name: str,
                 tier: str):
        """
        The SKU of a container registry.
        :param str name: The SKU name of the container registry. Required for registry creation.
        :param str tier: The SKU tier based on the SKU name.
        """
        # Both properties are required, so they are set unconditionally.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "tier", tier)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The SKU name of the container registry. Required for registry creation.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> str:
        """
        The SKU tier based on the SKU name.
        """
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # Translate a camelCase wire property name to its snake_case Python
        # attribute name, falling back to the original name when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SourceResponseResult(dict):
    """
    The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
    """
    def __init__(__self__, *,
                 addr: Optional[str] = None,
                 instance_id: Optional[str] = None):
        """
        The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        :param str addr: The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port.
        :param str instance_id: The running instance of an application. Changes after each restart.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if addr is not None:
            pulumi.set(__self__, "addr", addr)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)

    @property
    @pulumi.getter
    def addr(self) -> Optional[str]:
        """
        The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port.
        """
        return pulumi.get(self, "addr")

    # Wire name is "instanceID" (not "instanceId") per the service schema.
    @property
    @pulumi.getter(name="instanceID")
    def instance_id(self) -> Optional[str]:
        """
        The running instance of an application. Changes after each restart.
        """
        return pulumi.get(self, "instance_id")
@pulumi.output_type
class StatusResponse(dict):
    """
    The status of an Azure resource at the time the operation was called.
    """
    def __init__(__self__, *,
                 display_status: str,
                 message: str,
                 timestamp: str):
        """
        The status of an Azure resource at the time the operation was called.
        :param str display_status: The short label for the status.
        :param str message: The detailed message for the status, including alerts and error messages.
        :param str timestamp: The timestamp when the status was changed to the current value.
        """
        # All properties are required, so they are set unconditionally.
        pulumi.set(__self__, "display_status", display_status)
        pulumi.set(__self__, "message", message)
        pulumi.set(__self__, "timestamp", timestamp)

    @property
    @pulumi.getter(name="displayStatus")
    def display_status(self) -> str:
        """
        The short label for the status.
        """
        return pulumi.get(self, "display_status")

    @property
    @pulumi.getter
    def message(self) -> str:
        """
        The detailed message for the status, including alerts and error messages.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def timestamp(self) -> str:
        """
        The timestamp when the status was changed to the current value.
        """
        return pulumi.get(self, "timestamp")

    def _translate_property(self, prop):
        # Translate a camelCase wire property name to its snake_case Python
        # attribute name, falling back to the original name when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageAccountPropertiesResponse(dict):
    """
    The properties of a storage account for a container registry. Only applicable to Basic SKU.
    """
    def __init__(__self__, *,
                 id: str):
        """
        The properties of a storage account for a container registry. Only applicable to Basic SKU.
        :param str id: The resource ID of the storage account.
        """
        # Required property, set unconditionally.
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID of the storage account.
        """
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # Translate a camelCase wire property name to its snake_case Python
        # attribute name, falling back to the original name when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class TargetResponseResult(dict):
    """
    The target of the event.
    """
    def __init__(__self__, *,
                 digest: Optional[str] = None,
                 length: Optional[int] = None,
                 media_type: Optional[str] = None,
                 repository: Optional[str] = None,
                 size: Optional[int] = None,
                 tag: Optional[str] = None,
                 url: Optional[str] = None):
        """
        The target of the event.
        :param str digest: The digest of the content, as defined by the Registry V2 HTTP API Specification.
        :param int length: The number of bytes of the content. Same as Size field.
        :param str media_type: The MIME type of the referenced object.
        :param str repository: The repository name.
        :param int size: The number of bytes of the content. Same as Length field.
        :param str tag: The tag name.
        :param str url: The direct URL to the content.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if digest is not None:
            pulumi.set(__self__, "digest", digest)
        if length is not None:
            pulumi.set(__self__, "length", length)
        if media_type is not None:
            pulumi.set(__self__, "media_type", media_type)
        if repository is not None:
            pulumi.set(__self__, "repository", repository)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if tag is not None:
            pulumi.set(__self__, "tag", tag)
        if url is not None:
            pulumi.set(__self__, "url", url)

    @property
    @pulumi.getter
    def digest(self) -> Optional[str]:
        """
        The digest of the content, as defined by the Registry V2 HTTP API Specification.
        """
        return pulumi.get(self, "digest")

    @property
    @pulumi.getter
    def length(self) -> Optional[int]:
        """
        The number of bytes of the content. Same as Size field.
        """
        return pulumi.get(self, "length")

    # camelCase wire name differs from the snake_case Python attribute.
    @property
    @pulumi.getter(name="mediaType")
    def media_type(self) -> Optional[str]:
        """
        The MIME type of the referenced object.
        """
        return pulumi.get(self, "media_type")

    @property
    @pulumi.getter
    def repository(self) -> Optional[str]:
        """
        The repository name.
        """
        return pulumi.get(self, "repository")

    @property
    @pulumi.getter
    def size(self) -> Optional[int]:
        """
        The number of bytes of the content. Same as Length field.
        """
        return pulumi.get(self, "size")

    @property
    @pulumi.getter
    def tag(self) -> Optional[str]:
        """
        The tag name.
        """
        return pulumi.get(self, "tag")

    @property
    @pulumi.getter
    def url(self) -> Optional[str]:
        """
        The direct URL to the content.
        """
        # FIX: removed trailing extraction residue ("| sdk/...outputs.py |") that
        # had been fused onto this return statement, making it a syntax error.
        return pulumi.get(self, "url")
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
# Names exported via `from <module> import *` — one entry per output type defined below.
__all__ = [
    'ActorResponseResult',
    'EventContentResponseResult',
    'EventRequestMessageResponseResult',
    'EventResponseResult',
    'EventResponseMessageResponseResult',
    'RegistryPasswordResponseResult',
    'RequestResponseResult',
    'SkuResponse',
    'SourceResponseResult',
    'StatusResponse',
    'StorageAccountPropertiesResponse',
    'TargetResponseResult',
]
@pulumi.output_type
class ActorResponseResult(dict):
    """
    The agent that initiated the event. For most situations, this could be from the authorization context of the request.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None):
        """
        The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        :param str name: The subject or username associated with the request context that generated the event.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if name is not None:
            pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The subject or username associated with the request context that generated the event.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class EventContentResponseResult(dict):
    """
    The content of the event request message.
    """
    def __init__(__self__, *,
                 action: Optional[str] = None,
                 actor: Optional['outputs.ActorResponseResult'] = None,
                 id: Optional[str] = None,
                 request: Optional['outputs.RequestResponseResult'] = None,
                 source: Optional['outputs.SourceResponseResult'] = None,
                 target: Optional['outputs.TargetResponseResult'] = None,
                 timestamp: Optional[str] = None):
        """
        The content of the event request message.
        :param str action: The action that encompasses the provided event.
        :param 'ActorResponseArgs' actor: The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        :param str id: The event ID.
        :param 'RequestResponseArgs' request: The request that generated the event.
        :param 'SourceResponseArgs' source: The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        :param 'TargetResponseArgs' target: The target of the event.
        :param str timestamp: The time at which the event occurred.
        """
        # NOTE(review): the docstring names *ResponseArgs types while the annotations
        # use *ResponseResult — a generator quirk; the annotations are authoritative.
        # Persist only explicitly supplied values; unset properties read back as None.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if actor is not None:
            pulumi.set(__self__, "actor", actor)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if request is not None:
            pulumi.set(__self__, "request", request)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if target is not None:
            pulumi.set(__self__, "target", target)
        if timestamp is not None:
            pulumi.set(__self__, "timestamp", timestamp)

    @property
    @pulumi.getter
    def action(self) -> Optional[str]:
        """
        The action that encompasses the provided event.
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def actor(self) -> Optional['outputs.ActorResponseResult']:
        """
        The agent that initiated the event. For most situations, this could be from the authorization context of the request.
        """
        return pulumi.get(self, "actor")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The event ID.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def request(self) -> Optional['outputs.RequestResponseResult']:
        """
        The request that generated the event.
        """
        return pulumi.get(self, "request")

    @property
    @pulumi.getter
    def source(self) -> Optional['outputs.SourceResponseResult']:
        """
        The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        """
        return pulumi.get(self, "source")

    @property
    @pulumi.getter
    def target(self) -> Optional['outputs.TargetResponseResult']:
        """
        The target of the event.
        """
        return pulumi.get(self, "target")

    @property
    @pulumi.getter
    def timestamp(self) -> Optional[str]:
        """
        The time at which the event occurred.
        """
        return pulumi.get(self, "timestamp")
@pulumi.output_type
class EventRequestMessageResponseResult(dict):
    """
    The event request message sent to the service URI.
    """
    def __init__(__self__, *,
                 content: Optional['outputs.EventContentResponseResult'] = None,
                 headers: Optional[Mapping[str, str]] = None,
                 method: Optional[str] = None,
                 request_uri: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The event request message sent to the service URI.
        :param 'EventContentResponseArgs' content: The content of the event request message.
        :param Mapping[str, str] headers: The headers of the event request message.
        :param str method: The HTTP method used to send the event request message.
        :param str request_uri: The URI used to send the event request message.
        :param str version: The HTTP message version.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if content is not None:
            pulumi.set(__self__, "content", content)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if method is not None:
            pulumi.set(__self__, "method", method)
        if request_uri is not None:
            pulumi.set(__self__, "request_uri", request_uri)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def content(self) -> Optional['outputs.EventContentResponseResult']:
        """
        The content of the event request message.
        """
        return pulumi.get(self, "content")

    @property
    @pulumi.getter
    def headers(self) -> Optional[Mapping[str, str]]:
        """
        The headers of the event request message.
        """
        return pulumi.get(self, "headers")

    @property
    @pulumi.getter
    def method(self) -> Optional[str]:
        """
        The HTTP method used to send the event request message.
        """
        return pulumi.get(self, "method")

    # camelCase wire name differs from the snake_case Python attribute.
    @property
    @pulumi.getter(name="requestUri")
    def request_uri(self) -> Optional[str]:
        """
        The URI used to send the event request message.
        """
        return pulumi.get(self, "request_uri")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The HTTP message version.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class EventResponseResult(dict):
    """
    The event for a webhook.
    """
    def __init__(__self__, *,
                 event_request_message: Optional['outputs.EventRequestMessageResponseResult'] = None,
                 event_response_message: Optional['outputs.EventResponseMessageResponseResult'] = None,
                 id: Optional[str] = None):
        """
        The event for a webhook.
        :param 'EventRequestMessageResponseArgs' event_request_message: The event request message sent to the service URI.
        :param 'EventResponseMessageResponseArgs' event_response_message: The event response message received from the service URI.
        :param str id: The event ID.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if event_request_message is not None:
            pulumi.set(__self__, "event_request_message", event_request_message)
        if event_response_message is not None:
            pulumi.set(__self__, "event_response_message", event_response_message)
        if id is not None:
            pulumi.set(__self__, "id", id)

    # camelCase wire names differ from the snake_case Python attributes.
    @property
    @pulumi.getter(name="eventRequestMessage")
    def event_request_message(self) -> Optional['outputs.EventRequestMessageResponseResult']:
        """
        The event request message sent to the service URI.
        """
        return pulumi.get(self, "event_request_message")

    @property
    @pulumi.getter(name="eventResponseMessage")
    def event_response_message(self) -> Optional['outputs.EventResponseMessageResponseResult']:
        """
        The event response message received from the service URI.
        """
        return pulumi.get(self, "event_response_message")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The event ID.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class EventResponseMessageResponseResult(dict):
    """
    The event response message received from the service URI.
    """
    def __init__(__self__, *,
                 content: Optional[str] = None,
                 headers: Optional[Mapping[str, str]] = None,
                 reason_phrase: Optional[str] = None,
                 status_code: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The event response message received from the service URI.
        :param str content: The content of the event response message.
        :param Mapping[str, str] headers: The headers of the event response message.
        :param str reason_phrase: The reason phrase of the event response message.
        :param str status_code: The status code of the event response message.
        :param str version: The HTTP message version.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if content is not None:
            pulumi.set(__self__, "content", content)
        if headers is not None:
            pulumi.set(__self__, "headers", headers)
        if reason_phrase is not None:
            pulumi.set(__self__, "reason_phrase", reason_phrase)
        if status_code is not None:
            pulumi.set(__self__, "status_code", status_code)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def content(self) -> Optional[str]:
        """
        The content of the event response message.
        """
        return pulumi.get(self, "content")

    @property
    @pulumi.getter
    def headers(self) -> Optional[Mapping[str, str]]:
        """
        The headers of the event response message.
        """
        return pulumi.get(self, "headers")

    # camelCase wire names differ from the snake_case Python attributes.
    @property
    @pulumi.getter(name="reasonPhrase")
    def reason_phrase(self) -> Optional[str]:
        """
        The reason phrase of the event response message.
        """
        return pulumi.get(self, "reason_phrase")

    @property
    @pulumi.getter(name="statusCode")
    def status_code(self) -> Optional[str]:
        """
        The status code of the event response message.
        """
        return pulumi.get(self, "status_code")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The HTTP message version.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class RegistryPasswordResponseResult(dict):
    """
    The login password for the container registry.
    """
    def __init__(__self__, *,
                 name: Optional[str] = None,
                 value: Optional[str] = None):
        """
        The login password for the container registry.
        :param str name: The password name.
        :param str value: The password value.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if name is not None:
            pulumi.set(__self__, "name", name)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The password name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """
        The password value.
        """
        return pulumi.get(self, "value")
@pulumi.output_type
class RequestResponseResult(dict):
    """
    The request that generated the event.
    """
    def __init__(__self__, *,
                 addr: Optional[str] = None,
                 host: Optional[str] = None,
                 id: Optional[str] = None,
                 method: Optional[str] = None,
                 useragent: Optional[str] = None):
        """
        The request that generated the event.
        :param str addr: The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request.
        :param str host: The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests.
        :param str id: The ID of the request that initiated the event.
        :param str method: The request method that generated the event.
        :param str useragent: The user agent header of the request.
        """
        # Persist only explicitly supplied values; unset properties read back as None.
        if addr is not None:
            pulumi.set(__self__, "addr", addr)
        if host is not None:
            pulumi.set(__self__, "host", host)
        if id is not None:
            pulumi.set(__self__, "id", id)
        if method is not None:
            pulumi.set(__self__, "method", method)
        if useragent is not None:
            pulumi.set(__self__, "useragent", useragent)

    @property
    @pulumi.getter
    def addr(self) -> Optional[str]:
        """
        The IP or hostname and possibly port of the client connection that initiated the event. This is the RemoteAddr from the standard http request.
        """
        return pulumi.get(self, "addr")

    @property
    @pulumi.getter
    def host(self) -> Optional[str]:
        """
        The externally accessible hostname of the registry instance, as specified by the http host header on incoming requests.
        """
        return pulumi.get(self, "host")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        The ID of the request that initiated the event.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def method(self) -> Optional[str]:
        """
        The request method that generated the event.
        """
        return pulumi.get(self, "method")

    @property
    @pulumi.getter
    def useragent(self) -> Optional[str]:
        """
        The user agent header of the request.
        """
        return pulumi.get(self, "useragent")
# NOTE(review): auto-generated Pulumi output type — comments only.
@pulumi.output_type
class SkuResponse(dict):
    """
    The SKU of a container registry.
    """
    def __init__(__self__, *,
                 name: str,
                 tier: str):
        """
        The SKU of a container registry.
        :param str name: The SKU name of the container registry. Required for registry creation.
        :param str tier: The SKU tier based on the SKU name.
        """
        # Both fields are required, so they are stored unconditionally.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "tier", tier)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The SKU name of the container registry. Required for registry creation.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> str:
        """
        The SKU tier based on the SKU name.
        """
        return pulumi.get(self, "tier")

    def _translate_property(self, prop):
        # Maps camelCase wire keys to snake_case attribute names via the
        # module-level translation table; unknown keys pass through as-is.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type — comments only.
@pulumi.output_type
class SourceResponseResult(dict):
    """
    The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
    """
    def __init__(__self__, *,
                 addr: Optional[str] = None,
                 instance_id: Optional[str] = None):
        """
        The registry node that generated the event. Put differently, while the actor initiates the event, the source generates it.
        :param str addr: The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port.
        :param str instance_id: The running instance of an application. Changes after each restart.
        """
        if addr is not None:
            pulumi.set(__self__, "addr", addr)
        if instance_id is not None:
            pulumi.set(__self__, "instance_id", instance_id)

    @property
    @pulumi.getter
    def addr(self) -> Optional[str]:
        """
        The IP or hostname and the port of the registry node that generated the event. Generally, this will be resolved by os.Hostname() along with the running port.
        """
        return pulumi.get(self, "addr")

    @property
    # The wire name "instanceID" differs from the Python attribute name,
    # hence the explicit getter name mapping.
    @pulumi.getter(name="instanceID")
    def instance_id(self) -> Optional[str]:
        """
        The running instance of an application. Changes after each restart.
        """
        return pulumi.get(self, "instance_id")
# NOTE(review): auto-generated Pulumi output type — comments only.
@pulumi.output_type
class StatusResponse(dict):
    """
    The status of an Azure resource at the time the operation was called.
    """
    def __init__(__self__, *,
                 display_status: str,
                 message: str,
                 timestamp: str):
        """
        The status of an Azure resource at the time the operation was called.
        :param str display_status: The short label for the status.
        :param str message: The detailed message for the status, including alerts and error messages.
        :param str timestamp: The timestamp when the status was changed to the current value.
        """
        # All three fields are required; stored unconditionally.
        pulumi.set(__self__, "display_status", display_status)
        pulumi.set(__self__, "message", message)
        pulumi.set(__self__, "timestamp", timestamp)

    @property
    # Wire name "displayStatus" maps to the snake_case attribute.
    @pulumi.getter(name="displayStatus")
    def display_status(self) -> str:
        """
        The short label for the status.
        """
        return pulumi.get(self, "display_status")

    @property
    @pulumi.getter
    def message(self) -> str:
        """
        The detailed message for the status, including alerts and error messages.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def timestamp(self) -> str:
        """
        The timestamp when the status was changed to the current value.
        """
        return pulumi.get(self, "timestamp")

    def _translate_property(self, prop):
        # camelCase wire key -> snake_case attribute; unknown keys unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type — comments only.
@pulumi.output_type
class StorageAccountPropertiesResponse(dict):
    """
    The properties of a storage account for a container registry. Only applicable to Basic SKU.
    """
    def __init__(__self__, *,
                 id: str):
        """
        The properties of a storage account for a container registry. Only applicable to Basic SKU.
        :param str id: The resource ID of the storage account.
        """
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The resource ID of the storage account.
        """
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # camelCase wire key -> snake_case attribute; unknown keys unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): auto-generated Pulumi output type — comments only.
@pulumi.output_type
class TargetResponseResult(dict):
    """
    The target of the event.
    """
    def __init__(__self__, *,
                 digest: Optional[str] = None,
                 length: Optional[int] = None,
                 media_type: Optional[str] = None,
                 repository: Optional[str] = None,
                 size: Optional[int] = None,
                 tag: Optional[str] = None,
                 url: Optional[str] = None):
        """
        The target of the event.
        :param str digest: The digest of the content, as defined by the Registry V2 HTTP API Specification.
        :param int length: The number of bytes of the content. Same as Size field.
        :param str media_type: The MIME type of the referenced object.
        :param str repository: The repository name.
        :param int size: The number of bytes of the content. Same as Length field.
        :param str tag: The tag name.
        :param str url: The direct URL to the content.
        """
        # Each optional field is stored only when the API supplied it.
        if digest is not None:
            pulumi.set(__self__, "digest", digest)
        if length is not None:
            pulumi.set(__self__, "length", length)
        if media_type is not None:
            pulumi.set(__self__, "media_type", media_type)
        if repository is not None:
            pulumi.set(__self__, "repository", repository)
        if size is not None:
            pulumi.set(__self__, "size", size)
        if tag is not None:
            pulumi.set(__self__, "tag", tag)
        if url is not None:
            pulumi.set(__self__, "url", url)

    @property
    @pulumi.getter
    def digest(self) -> Optional[str]:
        """
        The digest of the content, as defined by the Registry V2 HTTP API Specification.
        """
        return pulumi.get(self, "digest")

    @property
    @pulumi.getter
    def length(self) -> Optional[int]:
        """
        The number of bytes of the content. Same as Size field.
        """
        return pulumi.get(self, "length")

    @property
    # Wire name "mediaType" maps to the snake_case attribute.
    @pulumi.getter(name="mediaType")
    def media_type(self) -> Optional[str]:
        """
        The MIME type of the referenced object.
        """
        return pulumi.get(self, "media_type")

    @property
    @pulumi.getter
    def repository(self) -> Optional[str]:
        """
        The repository name.
        """
        return pulumi.get(self, "repository")

    @property
    @pulumi.getter
    def size(self) -> Optional[int]:
        """
        The number of bytes of the content. Same as Length field.
        """
        return pulumi.get(self, "size")

    @property
    @pulumi.getter
    def tag(self) -> Optional[str]:
        """
        The tag name.
        """
        return pulumi.get(self, "tag")

    @property
    @pulumi.getter
    def url(self) -> Optional[str]:
        """
        The direct URL to the content.
        """
        return pulumi.get(self, "url")
from __future__ import unicode_literals
from unittest import TestCase
from alex.applications.PublicTransportInfoCS.hdc_slu import PTICSHDCSLU
from alex.applications.PublicTransportInfoCS.preprocessing import PTICSSLUPreprocessing
from alex.components.asr.utterance import Utterance, UtteranceNBList
from alex.components.slu.base import CategoryLabelDatabase
from alex.components.slu.da import DialogueAct, DialogueActItem
from alex.utils.config import as_project_path
class TestPTICSHDCSLU(TestCase):
    """End-to-end checks of the Czech public-transport handcrafted SLU.

    A single parser instance is built once in ``setUpClass`` against a
    small in-memory category-label database; each test pushes an ASR
    n-best list through ``parse`` and asserts that an expected dialogue
    act item appears in the resulting confusion network.
    """

    def test_parse_with_mutliple_date_rel(self):
        # (The "mutliple" typo in the method name is kept deliberately:
        # renaming would change the externally visible test ID.)
        # A slot value repeated in the utterance ("ZITRA ZITRA") must still
        # yield a single date_rel=tomorrow hypothesis.
        asr_hyp = UtteranceNBList()
        asr_hyp.add(0.1, Utterance("CHTEL BYCH ZITRA ZITRA JET"))
        cn = self.slu.parse(asr_hyp)
        # Fix: TestCase.assert_ is a long-deprecated alias (removed in
        # Python 3.12). assertIn is the supported, idiomatic membership
        # assertion and also reports the confusion network on failure.
        self.assertIn(DialogueActItem(dai="inform(date_rel=tomorrow)"), cn)

    def test_parse_meta(self):
        """Each utterance must parse to (at least) its paired dialogue act."""
        utterances_to_understand = [
            (u"ahoj", "hello()", ),
            (u"sbohem čau", "bye()", ),
            (u"jiné", "reqalts()", ),
            (u"začneme znovu", "restart()", ),
            (u"zopakuj", "repeat()", ),
            (u"promiň", "apology()", ),
            (u"co se zeptat", "help()", ),
            (u"haló", "canthearyou()", ),
            (u"nerozuměl jsem", "notunderstood()", ),
            (u"ano jo", "affirm()", ),
            (u"ne ano nechci", "negate()", ),
            (u"děkuji", "thankyou()", ),
            (u"dobře", "ack()", ),
            (u"chci jet", "inform(task=find_connection)", ),
            (u"jak bude", "inform(task=weather)", ),
            (u"nástupiště", "inform(task=find_platform)", ),
            (u"z jaké jede", "request(from_stop)", ),
            (u"kam to jede", "request(to_stop)", ),
            (u"kdy to jede", "request(departure_time)", ),
            (u"za jak dlouho", "request(departure_time_rel)", ),
            (u"kdy tam budem", "request(arrival_time)", ),
            (u"za jak dlouho tam přijedu", "request(arrival_time_rel)", ),
            (u"jak dlouho bude trvat cesta", "request(duration)", ),
            (u"kolik je hodin", "request(current_time)", ),
            (u"jak dlouho trvá přestup", "request(time_transfers)", ),
            (u"kolik přestupů", "request(num_transfers)", ),
            (u"nechci přestup bez jet přímo", "inform(num_transfers=0)", ),
            (u"jeden přestup", "inform(num_transfers=1)", ),
            (u"dva přestupy", "inform(num_transfers=2)", ),
            (u"tři přestupy", "inform(num_transfers=3)", ),
            (u"čtyři přestupy", "inform(num_transfers=4)", ),
            (u"libovolně přestupů", "inform(num_transfers=dontcare)", ),
            (u"jet přímo", "inform(num_transfers=0)", ),
            (u"alternativa libovolný", "inform(alternative=dontcare)", ),
            (u"alternativa první", "inform(alternative=1)", ),
            (u"alternativa druhá", "inform(alternative=2)", ),
            (u"alternativa třetí", "inform(alternative=3)", ),
            (u"alternativa čtvrtá", "inform(alternative=4)", ),
            (u"alternativa páté", "inform(alternative=5)", ),
            (u"předchozí spoj", "inform(alternative=prev)", ),
            (u"nechci předchozí spoj", "deny(alternative=prev)", ),
            (u"poslední spoj", "inform(alternative=last)", ),
            (u"nechci poslední spoj", "deny(alternative=last)", ),
            (u"další spoj", "inform(alternative=next)", ),
            (u"další", "inform(alternative=next)", ),
            (u"předchozí", "inform(alternative=prev)", ),
            (u"jako ve dne", "inform(ampm=pm)", ),
        ]
        for utt, res in utterances_to_understand:
            asr_hyp = UtteranceNBList()
            asr_hyp.add(0.79, Utterance(utt))
            cn = self.slu.parse(asr_hyp)
            self.assertIn(DialogueActItem(dai=res), cn)

    @classmethod
    def setUpClass(cls):
        """Build one shared SLU parser over a minimal in-memory database."""
        cfg = {
            'SLU': {
                'debug': True,
                'type': PTICSHDCSLU,
                PTICSHDCSLU: {
                    'preprocessing_cls': PTICSSLUPreprocessing
                },
            },
        }
        slu_type = cfg['SLU']['type']
        cldb = CategoryLabelDatabase()

        # Stand-in for a database module: CategoryLabelDatabase.load only
        # reads the `database` attribute of the object it is given.
        class db:
            database = {
                "task": {
                    "find_connection": ["najít spojení", "najít spoj", "zjistit spojení",
                                        "zjistit spoj", "hledám spojení", 'spojení', 'spoj',
                                        ],
                    "find_platform": ["najít nástupiště", "zjistit nástupiště", ],
                    'weather': ['pocasi', ],
                },
                "number": {
                    "1": ["jednu"]
                },
                "time": {
                    "now": ["nyní", "teď", "teďka", "hned", "nejbližší", "v tuto chvíli", "co nejdřív"],
                    "18": ["osmnáct", "osmnact"]
                },
                "date_rel": {
                    "tomorrow": ["zítra", "zitra"],
                }
            }

        cldb.load(db_mod=db)
        preprocessing = cfg['SLU'][slu_type]['preprocessing_cls'](cldb)
        cls.slu = slu_type(preprocessing, cfg)
from unittest import TestCase
from alex.applications.PublicTransportInfoCS.hdc_slu import PTICSHDCSLU
from alex.applications.PublicTransportInfoCS.preprocessing import PTICSSLUPreprocessing
from alex.components.asr.utterance import Utterance, UtteranceNBList
from alex.components.slu.base import CategoryLabelDatabase
from alex.components.slu.da import DialogueAct, DialogueActItem
from alex.utils.config import as_project_path
class TestPTICSHDCSLU(TestCase):
    """End-to-end checks of the Czech public-transport handcrafted SLU.

    A single parser instance is built once in ``setUpClass`` against a
    small in-memory category-label database; each test pushes an ASR
    n-best list through ``parse`` and asserts that an expected dialogue
    act item appears in the resulting confusion network.
    """

    def test_parse_with_mutliple_date_rel(self):
        # (The "mutliple" typo in the method name is kept deliberately:
        # renaming would change the externally visible test ID.)
        # A slot value repeated in the utterance ("ZITRA ZITRA") must still
        # yield a single date_rel=tomorrow hypothesis.
        asr_hyp = UtteranceNBList()
        asr_hyp.add(0.1, Utterance("CHTEL BYCH ZITRA ZITRA JET"))
        cn = self.slu.parse(asr_hyp)
        # Fix: TestCase.assert_ is a long-deprecated alias (removed in
        # Python 3.12). assertIn is the supported, idiomatic membership
        # assertion and also reports the confusion network on failure.
        self.assertIn(DialogueActItem(dai="inform(date_rel=tomorrow)"), cn)

    def test_parse_meta(self):
        """Each utterance must parse to (at least) its paired dialogue act."""
        utterances_to_understand = [
            (u"ahoj", "hello()", ),
            (u"sbohem čau", "bye()", ),
            (u"jiné", "reqalts()", ),
            (u"začneme znovu", "restart()", ),
            (u"zopakuj", "repeat()", ),
            (u"promiň", "apology()", ),
            (u"co se zeptat", "help()", ),
            (u"haló", "canthearyou()", ),
            (u"nerozuměl jsem", "notunderstood()", ),
            (u"ano jo", "affirm()", ),
            (u"ne ano nechci", "negate()", ),
            (u"děkuji", "thankyou()", ),
            (u"dobře", "ack()", ),
            (u"chci jet", "inform(task=find_connection)", ),
            (u"jak bude", "inform(task=weather)", ),
            (u"nástupiště", "inform(task=find_platform)", ),
            (u"z jaké jede", "request(from_stop)", ),
            (u"kam to jede", "request(to_stop)", ),
            (u"kdy to jede", "request(departure_time)", ),
            (u"za jak dlouho", "request(departure_time_rel)", ),
            (u"kdy tam budem", "request(arrival_time)", ),
            (u"za jak dlouho tam přijedu", "request(arrival_time_rel)", ),
            (u"jak dlouho bude trvat cesta", "request(duration)", ),
            (u"kolik je hodin", "request(current_time)", ),
            (u"jak dlouho trvá přestup", "request(time_transfers)", ),
            (u"kolik přestupů", "request(num_transfers)", ),
            (u"nechci přestup bez jet přímo", "inform(num_transfers=0)", ),
            (u"jeden přestup", "inform(num_transfers=1)", ),
            (u"dva přestupy", "inform(num_transfers=2)", ),
            (u"tři přestupy", "inform(num_transfers=3)", ),
            (u"čtyři přestupy", "inform(num_transfers=4)", ),
            (u"libovolně přestupů", "inform(num_transfers=dontcare)", ),
            (u"jet přímo", "inform(num_transfers=0)", ),
            (u"alternativa libovolný", "inform(alternative=dontcare)", ),
            (u"alternativa první", "inform(alternative=1)", ),
            (u"alternativa druhá", "inform(alternative=2)", ),
            (u"alternativa třetí", "inform(alternative=3)", ),
            (u"alternativa čtvrtá", "inform(alternative=4)", ),
            (u"alternativa páté", "inform(alternative=5)", ),
            (u"předchozí spoj", "inform(alternative=prev)", ),
            (u"nechci předchozí spoj", "deny(alternative=prev)", ),
            (u"poslední spoj", "inform(alternative=last)", ),
            (u"nechci poslední spoj", "deny(alternative=last)", ),
            (u"další spoj", "inform(alternative=next)", ),
            (u"další", "inform(alternative=next)", ),
            (u"předchozí", "inform(alternative=prev)", ),
            (u"jako ve dne", "inform(ampm=pm)", ),
        ]
        for utt, res in utterances_to_understand:
            asr_hyp = UtteranceNBList()
            asr_hyp.add(0.79, Utterance(utt))
            cn = self.slu.parse(asr_hyp)
            self.assertIn(DialogueActItem(dai=res), cn)

    @classmethod
    def setUpClass(cls):
        """Build one shared SLU parser over a minimal in-memory database."""
        cfg = {
            'SLU': {
                'debug': True,
                'type': PTICSHDCSLU,
                PTICSHDCSLU: {
                    'preprocessing_cls': PTICSSLUPreprocessing
                },
            },
        }
        slu_type = cfg['SLU']['type']
        cldb = CategoryLabelDatabase()

        # Stand-in for a database module: CategoryLabelDatabase.load only
        # reads the `database` attribute of the object it is given.
        class db:
            database = {
                "task": {
                    "find_connection": ["najít spojení", "najít spoj", "zjistit spojení",
                                        "zjistit spoj", "hledám spojení", 'spojení', 'spoj',
                                        ],
                    "find_platform": ["najít nástupiště", "zjistit nástupiště", ],
                    'weather': ['pocasi', ],
                },
                "number": {
                    "1": ["jednu"]
                },
                "time": {
                    "now": ["nyní", "teď", "teďka", "hned", "nejbližší", "v tuto chvíli", "co nejdřív"],
                    "18": ["osmnáct", "osmnact"]
                },
                "date_rel": {
                    "tomorrow": ["zítra", "zitra"],
                }
            }

        cldb.load(db_mod=db)
        preprocessing = cfg['SLU'][slu_type]['preprocessing_cls'](cldb)
        cls.slu = slu_type(preprocessing, cfg)
import unittest
import tensorflow as tf
import numpy as np
from tensorforce.core.memories import Queue, Latest, Replay
class TestMemory(unittest.TestCase):
    """Graph-mode (TF1) tests for the Queue, Latest and Replay memories.

    Episode k is stored with every state value equal to k (see
    ``_store_episode``), so retrieved batches can be checked against
    expected episode boundaries by value.
    """

    # One scalar float state per timestep.
    states_spec = dict(
        test_state=dict(
            shape=[1],
            type="float"
        )
    )
    # One scalar float action per timestep.
    actions_spec = dict(
        test_action=dict(
            shape=[],
            type="float"
        )
    )
    # No recurrent internal state is exercised.
    internals_spec = dict()

    def setUp(self):
        """Create fresh placeholders, a variable scope and a session."""
        self._episode_index = 0
        self.states = dict(
            test_state=tf.placeholder(dtype=tf.float32, shape=[None, 1], )
        )
        self.actions = dict(
            test_action=tf.placeholder(dtype=tf.float32, shape=[None], )
        )
        self.terminal = tf.placeholder(dtype=tf.bool, shape=[None])
        self.reward = tf.placeholder(dtype=tf.float32, shape=[None])

        # Memories create their variables through a custom getter; this one
        # just forwards, dropping the tensorforce-specific `registered` flag.
        def custom_getter(getter, name, registered=False, **kwargs):
            return getter(name=name, **kwargs)

        # The scope is entered manually here and exited in tearDown so it
        # spans the whole test method.
        self.variable_scope = tf.variable_scope("test_memory", custom_getter=custom_getter)
        self.variable_scope.__enter__()
        self.sess = tf.Session()

    def tearDown(self):
        """Exit the scope, close the session, and reset the default graph."""
        self.variable_scope.__exit__(None, None, None)
        self.sess.close()
        tf.reset_default_graph()

    def _build_store_op(self, mem):
        # Op that appends one fed batch of timesteps to `mem`.
        return mem.store(
            states=self.states,
            internals=dict(),
            actions=self.actions,
            terminal=self.terminal,
            reward=self.reward
        )

    def _make_mem(self, clazz, capacity, include_next_states=False):
        # Instantiate a memory of type `clazz` with the shared specs.
        # Note: `capacity` is measured in timesteps, not episodes.
        return clazz(
            states=self.states_spec,
            internals=self.internals_spec,
            actions=self.actions_spec,
            include_next_states=include_next_states,
            capacity=capacity
        )

    def _store_episode(self, store_op, episode_length):
        # Store one episode whose every state equals its episode index;
        # only the final timestep is terminal. Actions/rewards are random
        # because the tests never assert on them.
        self.sess.run(store_op, feed_dict={
            self.states["test_state"]: np.ones(shape=[episode_length, 1]) * self._episode_index,
            self.actions["test_action"]: np.random.uniform(size=[episode_length]),
            self.terminal: np.array([False] * (episode_length - 1) + [True]),
            self.reward: np.random.uniform(size=[episode_length])
        })
        self._episode_index += 1

    def test_queue(self):
        """Capacity aligned to whole episodes: the oldest one is evicted."""
        episode_length = 3
        capacity_episodes = 2
        mem = self._make_mem(Queue, episode_length * capacity_episodes)
        mem.initialize()
        store_op = self._build_store_op(mem)
        self.sess.run(tf.global_variables_initializer())
        for i in range(capacity_episodes + 1):
            self._store_episode(store_op=store_op, episode_length=episode_length)
            # The episode count saturates once the ring buffer wraps.
            episodes_inserted = self.sess.run(mem.episode_count)
            assert min(capacity_episodes, i + 1) == episodes_inserted
        episodes_inserted = self.sess.run(mem.episode_count)
        assert capacity_episodes == episodes_inserted

    def test_queue_not_aligned(self):
        """Capacity not a multiple of episode length: a partially
        overwritten episode still counts, giving one extra episode."""
        episode_length = 3
        num_full_episodes = 2
        mem = self._make_mem(Queue, episode_length * (num_full_episodes + 1) - 1)
        true_capacity_episodes = num_full_episodes + 1
        mem.initialize()
        store_op = self._build_store_op(mem)
        self.sess.run(tf.global_variables_initializer())
        for i in range(true_capacity_episodes + 1):
            self._store_episode(store_op=store_op, episode_length=episode_length)
            episodes_inserted = self.sess.run(mem.episode_count)
            assert min(true_capacity_episodes, i + 1) == episodes_inserted
        episodes_inserted = self.sess.run(mem.episode_count)
        assert true_capacity_episodes == episodes_inserted

    def test_latest_episodes(self):
        """retrieve_episodes(n) returns the n most recent episodes; n is
        clipped to what is actually stored."""
        episode_length = 2
        num_full_episodes = 2
        capacity = episode_length * num_full_episodes + 1
        mem = self._make_mem(Latest, capacity=capacity)
        true_capacity_episodes = num_full_episodes + 1
        n = tf.placeholder(dtype=tf.int32)
        mem.initialize()
        store_op = self._build_store_op(mem)
        retrieve_op_e = mem.retrieve_episodes(n)
        self.sess.run(tf.global_variables_initializer())
        for _ in range(num_full_episodes):
            self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert num_full_episodes == episodes_inserted
        # Exactly the stored episodes (state value k identifies episode k).
        retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # Requesting more than is stored is clipped to everything stored.
        retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes + 1})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # Requesting fewer returns only the most recent episode(s).
        retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes - 1})
        assert [False, True] == retrieved_data["terminal"].tolist()
        assert [1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # A third episode overwrites part of episode 0 (capacity is odd).
        self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert true_capacity_episodes == episodes_inserted
        retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # The oldest "episode" is now just episode 0's surviving terminal step.
        retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes + 1})
        assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()

    def test_latest_timesteps(self):
        """retrieve_timesteps(n) returns the n most recent timesteps,
        clipped to what is stored; n == 0 yields empty arrays."""
        episode_length = 2
        num_full_episodes = 2
        capacity = episode_length * num_full_episodes + 1
        mem = self._make_mem(Latest, capacity=capacity)
        n = tf.placeholder(dtype=tf.int32)
        mem.initialize()
        store_op = self._build_store_op(mem)
        retrieve_op_t = mem.retrieve_timesteps(n)
        self.sess.run(tf.global_variables_initializer())
        for _ in range(num_full_episodes):
            self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert num_full_episodes == episodes_inserted
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # Requests at or above the stored count are clipped.
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity + 1})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        # Smaller requests return the most recent suffix.
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length - 1})
        assert [True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: 1})
        assert [True] == retrieved_data["terminal"].tolist()
        assert [1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: 0})
        assert [] == retrieved_data["terminal"].tolist()
        assert [] == retrieved_data["states"]["test_state"].flatten().tolist()
        # After wrapping, only episode 0's last timestep survives.
        self._store_episode(store_op=store_op, episode_length=episode_length)
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length})
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity})
        assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity + 1})
        assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()

    # Skipped pending upstream fix; see the linked issue.
    @unittest.skip(reason="https://github.com/reinforceio/tensorforce/issues/128")
    def test_latest_sequences(self):
        episode_length = 3
        seq_length = 2
        num_full_episodes = 2
        capacity = episode_length * num_full_episodes + 1
        mem = self._make_mem(Latest, capacity=capacity)
        n = tf.placeholder(dtype=tf.int32)
        mem.initialize()
        store_op = self._build_store_op(mem)
        retrieve_op_seq = mem.retrieve_sequences(n, seq_length)
        self.sess.run(tf.global_variables_initializer())
        for _ in range(num_full_episodes):
            self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert num_full_episodes == episodes_inserted
        retrieved_data = self.sess.run(retrieve_op_seq, feed_dict={n: num_full_episodes * episode_length})
        print(retrieved_data)
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
        self._store_episode(store_op=store_op, episode_length=episode_length)

    def test_replay_episodes(self):
        """Replay samples episodes randomly, so only invariants (terminal
        pattern, per-episode constant states) are asserted, not order."""
        episode_length = 2
        num_full_episodes = 2
        capacity = episode_length * num_full_episodes + 1
        mem = self._make_mem(Replay, capacity=capacity)
        true_capacity_episodes = num_full_episodes + 1
        mem.initialize()
        store_op = self._build_store_op(mem)
        retrieve_op_full = mem.retrieve_episodes(num_full_episodes)
        retrieve_op_full_plus = mem.retrieve_episodes(num_full_episodes + 1)
        retrieve_op_full_minus = mem.retrieve_episodes(num_full_episodes - 1)
        self.sess.run(tf.global_variables_initializer())
        # Sampling from an empty memory must raise, not return garbage.
        try:
            self.sess.run(retrieve_op_full)
            assert False
        except tf.errors.InvalidArgumentError as e:
            assert "nothing stored yet" in e.message
        for _ in range(num_full_episodes):
            self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert num_full_episodes == episodes_inserted
        retrieved_data = self.sess.run(retrieve_op_full)
        assert [False, True, False, True] == retrieved_data["terminal"].tolist()
        # Within each sampled episode all states share one value.
        states = retrieved_data["states"]["test_state"].flatten().tolist()
        for i in range(0, len(states), episode_length):
            assert states[i] == states[i + 1]
        # Sampling may repeat episodes, so more than stored is fine.
        retrieved_data = self.sess.run(retrieve_op_full_plus)
        assert [False, True, False, True, False, True] == retrieved_data["terminal"].tolist()
        states = retrieved_data["states"]["test_state"].flatten().tolist()
        for i in range(0, len(states), episode_length):
            assert states[i] == states[i + 1]
        retrieved_data = self.sess.run(retrieve_op_full_minus)
        assert [False, True] == retrieved_data["terminal"].tolist()
        states = retrieved_data["states"]["test_state"].flatten().tolist()
        for i in range(0, len(states), episode_length):
            assert states[i] == states[i + 1]
        self._store_episode(store_op=store_op, episode_length=episode_length)
        episodes_inserted = self.sess.run(mem.episode_count)
        assert true_capacity_episodes == episodes_inserted
        retrieved_data = self.sess.run(retrieve_op_full)
        # We avoid explicit check as the 0-th episode is partial and can be selected
        assert np.sum(retrieved_data["terminal"]) == num_full_episodes
        retrieved_data = self.sess.run(retrieve_op_full_plus)
        assert np.sum(retrieved_data["terminal"]) == true_capacity_episodes
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
import tensorflow as tf
import numpy as np
from tensorforce.core.memories import Queue, Latest, Replay
class TestMemory(unittest.TestCase):
states_spec = dict(
test_state=dict(
shape=[1],
type="float"
)
)
actions_spec = dict(
test_action=dict(
shape=[],
type="float"
)
)
internals_spec = dict()
def setUp(self):
self._episode_index = 0
self.states = dict(
test_state=tf.placeholder(dtype=tf.float32, shape=[None, 1], )
)
self.actions = dict(
test_action=tf.placeholder(dtype=tf.float32, shape=[None], )
)
self.terminal = tf.placeholder(dtype=tf.bool, shape=[None])
self.reward = tf.placeholder(dtype=tf.float32, shape=[None])
def custom_getter(getter, name, registered=False, **kwargs):
return getter(name=name, **kwargs)
self.variable_scope = tf.variable_scope("test_memory", custom_getter=custom_getter)
self.variable_scope.__enter__()
self.sess = tf.Session()
def tearDown(self):
self.variable_scope.__exit__(None, None, None)
self.sess.close()
tf.reset_default_graph()
def _build_store_op(self, mem):
return mem.store(
states=self.states,
internals=dict(),
actions=self.actions,
terminal=self.terminal,
reward=self.reward
)
def _make_mem(self, clazz, capacity, include_next_states=False):
return clazz(
states=self.states_spec,
internals=self.internals_spec,
actions=self.actions_spec,
include_next_states=include_next_states,
capacity=capacity
)
def _store_episode(self, store_op, episode_length):
self.sess.run(store_op, feed_dict={
self.states["test_state"]: np.ones(shape=[episode_length, 1]) * self._episode_index,
self.actions["test_action"]: np.random.uniform(size=[episode_length]),
self.terminal: np.array([False] * (episode_length - 1) + [True]),
self.reward: np.random.uniform(size=[episode_length])
})
self._episode_index += 1
def test_queue(self):
episode_length = 3
capacity_episodes = 2
mem = self._make_mem(Queue, episode_length * capacity_episodes)
mem.initialize()
store_op = self._build_store_op(mem)
self.sess.run(tf.global_variables_initializer())
for i in range(capacity_episodes + 1):
self._store_episode(store_op=store_op, episode_length=episode_length)
episodes_inserted = self.sess.run(mem.episode_count)
assert min(capacity_episodes, i + 1) == episodes_inserted
episodes_inserted = self.sess.run(mem.episode_count)
assert capacity_episodes == episodes_inserted
def test_queue_not_aligned(self):
episode_length = 3
num_full_episodes = 2
mem = self._make_mem(Queue, episode_length * (num_full_episodes + 1) - 1)
true_capacity_episodes = num_full_episodes + 1
mem.initialize()
store_op = self._build_store_op(mem)
self.sess.run(tf.global_variables_initializer())
for i in range(true_capacity_episodes + 1):
self._store_episode(store_op=store_op, episode_length=episode_length)
episodes_inserted = self.sess.run(mem.episode_count)
assert min(true_capacity_episodes, i + 1) == episodes_inserted
episodes_inserted = self.sess.run(mem.episode_count)
assert true_capacity_episodes == episodes_inserted
def test_latest_episodes(self):
episode_length = 2
num_full_episodes = 2
capacity = episode_length * num_full_episodes + 1
mem = self._make_mem(Latest, capacity=capacity)
true_capacity_episodes = num_full_episodes + 1
n = tf.placeholder(dtype=tf.int32)
mem.initialize()
store_op = self._build_store_op(mem)
retrieve_op_e = mem.retrieve_episodes(n)
self.sess.run(tf.global_variables_initializer())
for _ in range(num_full_episodes):
self._store_episode(store_op=store_op, episode_length=episode_length)
episodes_inserted = self.sess.run(mem.episode_count)
assert num_full_episodes == episodes_inserted
retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes + 1})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes - 1})
assert [False, True] == retrieved_data["terminal"].tolist()
assert [1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
self._store_episode(store_op=store_op, episode_length=episode_length)
episodes_inserted = self.sess.run(mem.episode_count)
assert true_capacity_episodes == episodes_inserted
retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_e, feed_dict={n: num_full_episodes + 1})
assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
def test_latest_timesteps(self):
episode_length = 2
num_full_episodes = 2
capacity = episode_length * num_full_episodes + 1
mem = self._make_mem(Latest, capacity=capacity)
n = tf.placeholder(dtype=tf.int32)
mem.initialize()
store_op = self._build_store_op(mem)
retrieve_op_t = mem.retrieve_timesteps(n)
self.sess.run(tf.global_variables_initializer())
for _ in range(num_full_episodes):
self._store_episode(store_op=store_op, episode_length=episode_length)
episodes_inserted = self.sess.run(mem.episode_count)
assert num_full_episodes == episodes_inserted
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity + 1})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length - 1})
assert [True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: 1})
assert [True] == retrieved_data["terminal"].tolist()
assert [1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: 0})
assert [] == retrieved_data["terminal"].tolist()
assert [] == retrieved_data["states"]["test_state"].flatten().tolist()
self._store_episode(store_op=store_op, episode_length=episode_length)
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: num_full_episodes * episode_length})
assert [False, True, False, True] == retrieved_data["terminal"].tolist()
assert [1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity})
assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
retrieved_data = self.sess.run(retrieve_op_t, feed_dict={n: capacity + 1})
assert [True, False, True, False, True] == retrieved_data["terminal"].tolist()
assert [0.0, 1.0, 1.0, 2.0, 2.0] == retrieved_data["states"]["test_state"].flatten().tolist()
@unittest.skip(reason="https://github.com/reinforceio/tensorforce/issues/128")
def test_latest_sequences(self):
    """Sequence retrieval from a Latest memory (skipped pending upstream fix).

    Stores `num_full_episodes` episodes, then retrieves sequences of length
    `seq_length` and checks the terminal flags and state values returned.
    NOTE(review): indentation was lost in this copy and has been
    reconstructed; the for-loop is assumed to wrap only the store call,
    mirroring test_replay_episodes below -- confirm against upstream.
    """
    episode_length = 3
    seq_length = 2
    num_full_episodes = 2
    # One extra slot so the buffer can also hold the start of a new episode.
    capacity = episode_length * num_full_episodes + 1
    mem = self._make_mem(Latest, capacity=capacity)
    # Placeholder for the number of timesteps to retrieve.
    n = tf.placeholder(dtype=tf.int32)
    mem.initialize()
    store_op = self._build_store_op(mem)
    retrieve_op_seq = mem.retrieve_sequences(n, seq_length)
    self.sess.run(tf.global_variables_initializer())
    for _ in range(num_full_episodes):
        self._store_episode(store_op=store_op, episode_length=episode_length)
    episodes_inserted = self.sess.run(mem.episode_count)
    assert num_full_episodes == episodes_inserted
    # Ask for every stored timestep as sequences and check the contents.
    retrieved_data = self.sess.run(retrieve_op_seq, feed_dict={n: num_full_episodes * episode_length})
    print(retrieved_data)
    assert [False, True, False, True] == retrieved_data["terminal"].tolist()
    assert [0.0, 0.0, 1.0, 1.0] == retrieved_data["states"]["test_state"].flatten().tolist()
    # Store one more episode; the (skipped) test ends without further checks.
    self._store_episode(store_op=store_op, episode_length=episode_length)
def test_replay_episodes(self):
    """Episode retrieval from a Replay memory.

    Checks that retrieving from an empty memory raises, that retrieving
    exactly / more than / fewer than the stored number of episodes returns
    the expected terminal patterns, and that after overflowing the buffer
    the partial 0-th episode may be sampled (so only the terminal count is
    checked).  NOTE(review): indentation was lost in this copy and has been
    reconstructed from the statement order -- confirm against upstream.
    """
    episode_length = 2
    num_full_episodes = 2
    # One extra slot so the buffer can also hold a partial episode.
    capacity = episode_length * num_full_episodes + 1
    mem = self._make_mem(Replay, capacity=capacity)
    true_capacity_episodes = num_full_episodes + 1
    mem.initialize()
    store_op = self._build_store_op(mem)
    retrieve_op_full = mem.retrieve_episodes(num_full_episodes)
    retrieve_op_full_plus = mem.retrieve_episodes(num_full_episodes + 1)
    retrieve_op_full_minus = mem.retrieve_episodes(num_full_episodes - 1)
    self.sess.run(tf.global_variables_initializer())
    # Retrieving before anything is stored must fail.
    try:
        self.sess.run(retrieve_op_full)
        assert False
    except tf.errors.InvalidArgumentError as e:
        assert "nothing stored yet" in e.message
    for _ in range(num_full_episodes):
        self._store_episode(store_op=store_op, episode_length=episode_length)
    episodes_inserted = self.sess.run(mem.episode_count)
    assert num_full_episodes == episodes_inserted
    # Exactly the stored number of episodes.
    retrieved_data = self.sess.run(retrieve_op_full)
    assert [False, True, False, True] == retrieved_data["terminal"].tolist()
    states = retrieved_data["states"]["test_state"].flatten().tolist()
    # Both timesteps of an episode carry the same state value.
    for i in range(0, len(states), episode_length):
        assert states[i] == states[i + 1]
    # More episodes than stored: episodes are sampled with repetition.
    retrieved_data = self.sess.run(retrieve_op_full_plus)
    assert [False, True, False, True, False, True] == retrieved_data["terminal"].tolist()
    states = retrieved_data["states"]["test_state"].flatten().tolist()
    for i in range(0, len(states), episode_length):
        assert states[i] == states[i + 1]
    # Fewer episodes than stored.
    retrieved_data = self.sess.run(retrieve_op_full_minus)
    assert [False, True] == retrieved_data["terminal"].tolist()
    states = retrieved_data["states"]["test_state"].flatten().tolist()
    for i in range(0, len(states), episode_length):
        assert states[i] == states[i + 1]
    # Overflow the buffer with one more episode.
    self._store_episode(store_op=store_op, episode_length=episode_length)
    episodes_inserted = self.sess.run(mem.episode_count)
    assert true_capacity_episodes == episodes_inserted
    retrieved_data = self.sess.run(retrieve_op_full)
    # We avoid explicit check as the 0-th episode is partial and can be selected
    assert np.sum(retrieved_data["terminal"]) == num_full_episodes
    retrieved_data = self.sess.run(retrieve_op_full_plus)
    assert np.sum(retrieved_data["terminal"]) == true_capacity_episodes
# Run the test suite when executed directly.
# (Fixed: extraction residue "| 0.664758 | 0.47859" was fused onto the
# unittest.main() line, breaking the syntax.)
if __name__ == "__main__":
    unittest.main()
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
# Synthetic device records sent to the TestPushkin below; the pushkey value
# selects which behaviour dispatch_notification simulates.

# Makes dispatch_notification raise a plain Exception (tests expect HTTP 500).
DEVICE_RAISE_EXCEPTION = {
    "app_id": "com.example.spqr",
    "pushkey": "raise_exception",
    "pushkey_ts": 1234,
}

# Makes dispatch_notification raise NotificationDispatchException
# (tests expect HTTP 502).
DEVICE_REMOTE_ERROR = {
    "app_id": "com.example.spqr",
    "pushkey": "remote_error",
    "pushkey_ts": 1234,
}

# Makes dispatch_notification raise TemporaryNotificationDispatchException.
# (No test in this file exercises this device.)
DEVICE_TEMPORARY_REMOTE_ERROR = {
    "app_id": "com.example.spqr",
    "pushkey": "temporary_remote_error",
    "pushkey_ts": 1234,
}

# Reported back as a rejected pushkey by dispatch_notification.
DEVICE_REJECTED = {
    "app_id": "com.example.spqr",
    "pushkey": "reject",
    "pushkey_ts": 1234,
}

# Accepted: dispatch_notification returns an empty rejection list.
DEVICE_ACCEPTED = {
    "app_id": "com.example.spqr",
    "pushkey": "accept",
    "pushkey_ts": 1234,
}
class TestPushkin(Pushkin):
    """
    A synthetic Pushkin whose behaviour is keyed entirely on the pushkey.
    """

    async def dispatch_notification(self, n, device, context):
        # Guard-clause dispatch on the pushkey value.
        key = device.pushkey
        if key == "raise_exception":
            raise Exception("Bad things have occurred!")
        if key == "remote_error":
            raise NotificationDispatchException("Synthetic failure")
        if key == "temporary_remote_error":
            raise TemporaryNotificationDispatchException("Synthetic failure")
        if key == "reject":
            # Report this pushkey back as rejected.
            return [key]
        if key == "accept":
            # Accepted: nothing rejected.
            return []
        raise Exception(f"Unexpected fall-through. {key}")
class PushGatewayApiV1TestCase(testutils.TestCase):
    """End-to-end tests of the v1 push gateway API against TestPushkin.

    (Fixed: dataset residue "| tests/test_pushgateway_api_v1.py |" was fused
    onto the final assertEqual line, breaking the syntax.)
    """

    def config_setup(self, config):
        """
        Set up a TestPushkin for the test.
        """
        super(PushGatewayApiV1TestCase, self).config_setup(config)
        config["apps"]["com.example.spqr"] = {
            "type": "tests.test_pushgateway_api_v1.TestPushkin"
        }

    def test_good_requests_give_200(self):
        """
        Test that good requests give a 200 response code.
        """
        # 200 codes cause the result to be parsed instead of returning the code
        self.assertNot(
            isinstance(
                self._request(
                    self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REJECTED])
                ),
                int,
            )
        )

    def test_accepted_devices_are_not_rejected(self):
        """
        Test that devices which are accepted by the Pushkin
        do not lead to a rejection being returned to the homeserver.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_ACCEPTED])),
            {"rejected": []},
        )

    def test_rejected_devices_are_rejected(self):
        """
        Test that devices which are rejected by the Pushkin
        DO lead to a rejection being returned to the homeserver.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_REJECTED])),
            {"rejected": [DEVICE_REJECTED["pushkey"]]},
        )

    def test_only_rejected_devices_are_rejected(self):
        """
        Test that devices which are rejected by the Pushkin
        are the only ones to have a rejection returned to the homeserver,
        even if other devices feature in the request.
        """
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_REJECTED, DEVICE_ACCEPTED])
            ),
            {"rejected": [DEVICE_REJECTED["pushkey"]]},
        )

    def test_bad_requests_give_400(self):
        """
        Test that bad requests lead to a 400 Bad Request response.
        """
        self.assertEqual(self._request({}), 400)

    def test_exceptions_give_500(self):
        """
        Test that internal exceptions/errors lead to a 500 Internal Server Error
        response.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_RAISE_EXCEPTION])), 500
        )
        # we also check that a successful device doesn't hide the exception
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_RAISE_EXCEPTION])
            ),
            500,
        )
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_RAISE_EXCEPTION, DEVICE_ACCEPTED])
            ),
            500,
        )

    def test_remote_errors_give_502(self):
        """
        Test that errors caused by remote services such as GCM or APNS
        lead to a 502 Bad Gateway response.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_REMOTE_ERROR])), 502
        )
        # we also check that a successful device doesn't hide the exception
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REMOTE_ERROR])
            ),
            502,
        )
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_REMOTE_ERROR, DEVICE_ACCEPTED])
            ),
            502,
        )

    def test_overlong_requests_are_rejected(self):
        """
        Test that the server drops the connection when the request body
        exceeds the maximum upload size.
        """
        # as a control case, first send a regular request.

        # connect the site to a fake transport.
        transport = StringTransport()
        protocol = self.site.buildProtocol(IPv6Address("TCP", "::1", 2345))
        protocol.makeConnection(transport)

        protocol.dataReceived(
            b"POST / HTTP/1.1\r\n"
            b"Connection: close\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"\r\n"
            b"0\r\n"
            b"\r\n"
        )

        # we should get a 404
        self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ")

        # now send an oversized request
        transport = StringTransport()
        protocol = self.site.buildProtocol(IPv6Address("TCP", "::1", 2345))
        protocol.makeConnection(transport)
        protocol.dataReceived(
            b"POST / HTTP/1.1\r\n"
            b"Connection: close\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"\r\n"
        )

        # we deliberately send all the data in one big chunk, to ensure that
        # twisted isn't buffering the data in the chunked transfer decoder.
        # we start with the chunk size, in hex. (We won't actually send this much)
        protocol.dataReceived(b"10000000\r\n")
        sent = 0
        while not transport.disconnected:
            self.assertLess(sent, 0x10000000, "connection did not drop")
            protocol.dataReceived(b"\0" * 1024)
            sent += 1024

        # default max upload size is 512K, so it should drop on the next buffer after
        # that.
        self.assertEqual(sent, 513 * 1024)
from twisted.internet.address import IPv6Address
from twisted.internet.testing import StringTransport
from sygnal.exceptions import (
NotificationDispatchException,
TemporaryNotificationDispatchException,
)
from sygnal.notifications import Pushkin
from tests import testutils
# Synthetic device records sent to the TestPushkin below; the pushkey value
# selects which behaviour dispatch_notification simulates.

# Makes dispatch_notification raise a plain Exception (tests expect HTTP 500).
DEVICE_RAISE_EXCEPTION = {
    "app_id": "com.example.spqr",
    "pushkey": "raise_exception",
    "pushkey_ts": 1234,
}

# Makes dispatch_notification raise NotificationDispatchException
# (tests expect HTTP 502).
DEVICE_REMOTE_ERROR = {
    "app_id": "com.example.spqr",
    "pushkey": "remote_error",
    "pushkey_ts": 1234,
}

# Makes dispatch_notification raise TemporaryNotificationDispatchException.
# (No test in this file exercises this device.)
DEVICE_TEMPORARY_REMOTE_ERROR = {
    "app_id": "com.example.spqr",
    "pushkey": "temporary_remote_error",
    "pushkey_ts": 1234,
}

# Reported back as a rejected pushkey by dispatch_notification.
DEVICE_REJECTED = {
    "app_id": "com.example.spqr",
    "pushkey": "reject",
    "pushkey_ts": 1234,
}

# Accepted: dispatch_notification returns an empty rejection list.
DEVICE_ACCEPTED = {
    "app_id": "com.example.spqr",
    "pushkey": "accept",
    "pushkey_ts": 1234,
}
class TestPushkin(Pushkin):
    """
    A synthetic Pushkin whose behaviour is keyed entirely on the pushkey.
    """

    async def dispatch_notification(self, n, device, context):
        # Guard-clause dispatch on the pushkey value.
        key = device.pushkey
        if key == "raise_exception":
            raise Exception("Bad things have occurred!")
        if key == "remote_error":
            raise NotificationDispatchException("Synthetic failure")
        if key == "temporary_remote_error":
            raise TemporaryNotificationDispatchException("Synthetic failure")
        if key == "reject":
            # Report this pushkey back as rejected.
            return [key]
        if key == "accept":
            # Accepted: nothing rejected.
            return []
        raise Exception(f"Unexpected fall-through. {key}")
class PushGatewayApiV1TestCase(testutils.TestCase):
    """End-to-end tests of the v1 push gateway API against TestPushkin."""

    def config_setup(self, config):
        """
        Set up a TestPushkin for the test.
        """
        super(PushGatewayApiV1TestCase, self).config_setup(config)
        config["apps"]["com.example.spqr"] = {
            "type": "tests.test_pushgateway_api_v1.TestPushkin"
        }

    def test_good_requests_give_200(self):
        """
        Test that good requests give a 200 response code.
        """
        # 200 codes cause the result to be parsed instead of returning the code
        self.assertNot(
            isinstance(
                self._request(
                    self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REJECTED])
                ),
                int,
            )
        )

    def test_accepted_devices_are_not_rejected(self):
        """
        Test that devices which are accepted by the Pushkin
        do not lead to a rejection being returned to the homeserver.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_ACCEPTED])),
            {"rejected": []},
        )

    def test_rejected_devices_are_rejected(self):
        """
        Test that devices which are rejected by the Pushkin
        DO lead to a rejection being returned to the homeserver.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_REJECTED])),
            {"rejected": [DEVICE_REJECTED["pushkey"]]},
        )

    def test_only_rejected_devices_are_rejected(self):
        """
        Test that devices which are rejected by the Pushkin
        are the only ones to have a rejection returned to the homeserver,
        even if other devices feature in the request.
        """
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_REJECTED, DEVICE_ACCEPTED])
            ),
            {"rejected": [DEVICE_REJECTED["pushkey"]]},
        )

    def test_bad_requests_give_400(self):
        """
        Test that bad requests lead to a 400 Bad Request response.
        """
        self.assertEqual(self._request({}), 400)

    def test_exceptions_give_500(self):
        """
        Test that internal exceptions/errors lead to a 500 Internal Server Error
        response.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_RAISE_EXCEPTION])), 500
        )
        # we also check that a successful device doesn't hide the exception
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_RAISE_EXCEPTION])
            ),
            500,
        )
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_RAISE_EXCEPTION, DEVICE_ACCEPTED])
            ),
            500,
        )

    def test_remote_errors_give_502(self):
        """
        Test that errors caused by remote services such as GCM or APNS
        lead to a 502 Bad Gateway response.
        """
        self.assertEqual(
            self._request(self._make_dummy_notification([DEVICE_REMOTE_ERROR])), 502
        )
        # we also check that a successful device doesn't hide the exception
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_ACCEPTED, DEVICE_REMOTE_ERROR])
            ),
            502,
        )
        self.assertEqual(
            self._request(
                self._make_dummy_notification([DEVICE_REMOTE_ERROR, DEVICE_ACCEPTED])
            ),
            502,
        )

    def test_overlong_requests_are_rejected(self):
        """
        Test that the server drops the connection when the request body
        exceeds the maximum upload size.
        """
        # as a control case, first send a regular request.

        # connect the site to a fake transport.
        transport = StringTransport()
        protocol = self.site.buildProtocol(IPv6Address("TCP", "::1", 2345))
        protocol.makeConnection(transport)

        protocol.dataReceived(
            b"POST / HTTP/1.1\r\n"
            b"Connection: close\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"\r\n"
            b"0\r\n"
            b"\r\n"
        )

        # we should get a 404
        self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ")

        # now send an oversized request
        transport = StringTransport()
        protocol = self.site.buildProtocol(IPv6Address("TCP", "::1", 2345))
        protocol.makeConnection(transport)
        protocol.dataReceived(
            b"POST / HTTP/1.1\r\n"
            b"Connection: close\r\n"
            b"Transfer-Encoding: chunked\r\n"
            b"\r\n"
        )

        # we deliberately send all the data in one big chunk, to ensure that
        # twisted isn't buffering the data in the chunked transfer decoder.
        # we start with the chunk size, in hex. (We won't actually send this much)
        protocol.dataReceived(b"10000000\r\n")
        sent = 0
        while not transport.disconnected:
            self.assertLess(sent, 0x10000000, "connection did not drop")
            protocol.dataReceived(b"\0" * 1024)
            sent += 1024

        # default max upload size is 512K, so it should drop on the next buffer after
        # that.
        self.assertEqual(sent, 513 * 1024)
from telemetry import value as value_module
from telemetry.value import list_of_string_values
from telemetry.value import none_values
class StringValue(value_module.Value):
    """A telemetry Value holding a single string result for a page.

    NOTE(review): this module targets Python 2 (it uses `basestring`).
    """

    def __init__(self, page, name, units, value, important=True,
                 description=None, tir_label=None,
                 none_value_reason=None):
        """A single value (float, integer or string) result from a test.

        A test that output a hash of the content in a page might produce a
        string value:
          StringValue(page, 'page_hash', 'hash', '74E377FF')
        """
        super(StringValue, self).__init__(page, name, units, important, description,
                                          tir_label)
        assert value is None or isinstance(value, basestring)
        # A None value must be accompanied by a reason; a real value must not.
        none_values.ValidateNoneValueReason(value, none_value_reason)
        self.value = value
        self.none_value_reason = none_value_reason

    def __repr__(self):
        if self.page:
            page_name = self.page.display_name
        else:
            page_name = 'None'
        return ('StringValue(%s, %s, %s, %s, important=%s, description=%s, '
                'tir_label=%s)') % (
                    page_name,
                    self.name,
                    self.units,
                    self.value,
                    self.important,
                    self.description,
                    self.tir_label)

    def GetBuildbotDataType(self, output_context):
        # 'default' data is charted by buildbot; 'unimportant' is not.
        if self._IsImportantGivenOutputIntent(output_context):
            return 'default'
        return 'unimportant'

    def GetBuildbotValue(self):
        # Buildbot's print_perf_results method likes to get lists for all values,
        # even when they are scalar, so list-ize the return value.
        return [self.value]

    def GetRepresentativeNumber(self):
        # NOTE(review): returns the raw (string) value unchanged -- presumably
        # callers tolerate non-numeric representatives; confirm before changing.
        return self.value

    def GetRepresentativeString(self):
        return str(self.value)

    @staticmethod
    def GetJSONTypeName():
        return 'string'

    def AsDict(self):
        """Serialize to a dict, including the none-reason only when set."""
        d = super(StringValue, self).AsDict()
        d['value'] = self.value
        if self.none_value_reason is not None:
            d['none_value_reason'] = self.none_value_reason
        return d

    @staticmethod
    def FromDict(value_dict, page_dict):
        """Inverse of AsDict: rebuild a StringValue from its dict form."""
        kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
        kwargs['value'] = value_dict['value']
        if 'none_value_reason' in value_dict:
            kwargs['none_value_reason'] = value_dict['none_value_reason']
        if 'tir_label' in value_dict:
            kwargs['tir_label'] = value_dict['tir_label']
        return StringValue(**kwargs)

    @classmethod
    def MergeLikeValuesFromSamePage(cls, values):
        assert len(values) > 0
        v0 = values[0]
        return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label)

    @classmethod
    def MergeLikeValuesFromDifferentPages(cls, values):
        assert len(values) > 0
        v0 = values[0]
        # Merged cross-page values carry no single page (page=None).
        return cls._MergeLikeValues(values, None, v0.name, v0.tir_label)

    @classmethod
    def _MergeLikeValues(cls, values, page, name, tir_label):
        """Merge several StringValues into one ListOfStringValues.

        If any constituent value is None the merge fails: the result carries
        a None value with MERGE_FAILURE_REASON.
        """
        v0 = values[0]
        merged_value = [v.value for v in values]
        none_value_reason = None
        if None in merged_value:
            merged_value = None
            none_value_reason = none_values.MERGE_FAILURE_REASON
        return list_of_string_values.ListOfStringValues(
            page, name, v0.units, merged_value, important=v0.important,
            tir_label=tir_label,
            none_value_reason=none_value_reason)
from telemetry import value as value_module
from telemetry.value import list_of_string_values
from telemetry.value import none_values
class StringValue(value_module.Value):
    """A telemetry Value holding a single string result for a page.

    NOTE(review): this module targets Python 2 (it uses `basestring`).
    """

    def __init__(self, page, name, units, value, important=True,
                 description=None, tir_label=None,
                 none_value_reason=None):
        """A single value (float, integer or string) result from a test.

        A test that output a hash of the content in a page might produce a
        string value:
          StringValue(page, 'page_hash', 'hash', '74E377FF')
        """
        super(StringValue, self).__init__(page, name, units, important, description,
                                          tir_label)
        assert value is None or isinstance(value, basestring)
        # A None value must be accompanied by a reason; a real value must not.
        none_values.ValidateNoneValueReason(value, none_value_reason)
        self.value = value
        self.none_value_reason = none_value_reason

    def __repr__(self):
        if self.page:
            page_name = self.page.display_name
        else:
            page_name = 'None'
        return ('StringValue(%s, %s, %s, %s, important=%s, description=%s, '
                'tir_label=%s)') % (
                    page_name,
                    self.name,
                    self.units,
                    self.value,
                    self.important,
                    self.description,
                    self.tir_label)

    def GetBuildbotDataType(self, output_context):
        # 'default' data is charted by buildbot; 'unimportant' is not.
        if self._IsImportantGivenOutputIntent(output_context):
            return 'default'
        return 'unimportant'

    def GetBuildbotValue(self):
        # Buildbot's print_perf_results method likes to get lists for all values,
        # even when they are scalar, so list-ize the return value.
        return [self.value]

    def GetRepresentativeNumber(self):
        # NOTE(review): returns the raw (string) value unchanged -- presumably
        # callers tolerate non-numeric representatives; confirm before changing.
        return self.value

    def GetRepresentativeString(self):
        return str(self.value)

    @staticmethod
    def GetJSONTypeName():
        return 'string'

    def AsDict(self):
        """Serialize to a dict, including the none-reason only when set."""
        d = super(StringValue, self).AsDict()
        d['value'] = self.value
        if self.none_value_reason is not None:
            d['none_value_reason'] = self.none_value_reason
        return d

    @staticmethod
    def FromDict(value_dict, page_dict):
        """Inverse of AsDict: rebuild a StringValue from its dict form."""
        kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
        kwargs['value'] = value_dict['value']
        if 'none_value_reason' in value_dict:
            kwargs['none_value_reason'] = value_dict['none_value_reason']
        if 'tir_label' in value_dict:
            kwargs['tir_label'] = value_dict['tir_label']
        return StringValue(**kwargs)

    @classmethod
    def MergeLikeValuesFromSamePage(cls, values):
        assert len(values) > 0
        v0 = values[0]
        return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label)

    @classmethod
    def MergeLikeValuesFromDifferentPages(cls, values):
        assert len(values) > 0
        v0 = values[0]
        # Merged cross-page values carry no single page (page=None).
        return cls._MergeLikeValues(values, None, v0.name, v0.tir_label)

    @classmethod
    def _MergeLikeValues(cls, values, page, name, tir_label):
        """Merge several StringValues into one ListOfStringValues.

        If any constituent value is None the merge fails: the result carries
        a None value with MERGE_FAILURE_REASON.
        """
        v0 = values[0]
        merged_value = [v.value for v in values]
        none_value_reason = None
        if None in merged_value:
            merged_value = None
            none_value_reason = none_values.MERGE_FAILURE_REASON
        return list_of_string_values.ListOfStringValues(
            page, name, v0.units, merged_value, important=v0.important,
            tir_label=tir_label,
            none_value_reason=none_value_reason)
import numpy as np
from lib.namedstruct import *
class SPEFile:
    """Reader for WinSpec/WinView SPE spectroscopy files.

    Parses the fixed 4100-byte binary header described by _STRUCTINFO and the
    frame data that follows it, and can apply the X/Y polynomial calibration
    stored in the header.

    Fixes over the previous revision:
    - load() no longer leaks the file handle (it was opened and never closed);
    - the data section is decoded in one bulk numpy operation instead of one
      struct.unpack call per element (identical native-byte-order results);
    - the unused local `formatstr` was removed;
    - `print` statements were converted to the parenthesized single-argument
      form, valid in both Python 2 and 3.
    """

    # Fixed field sizes from the SPE header layout.
    HDRNAMEMAX = 120
    USERINFOMAX = 1000
    COMMENTMAX = 80
    LABELMAX = 16
    FILEVERMAX = 16
    DATEMAX = 10
    ROIMAX = 10
    TIMEMAX = 7

    # Values of the header 'datatype' field.
    DTYPE_FLOAT = 0
    DTYPE_LONG = 1
    DTYPE_SHORT = 2
    DTYPE_USHORT = 3

    # datatype -> (bytes per element, struct format char, numpy dtype)
    DSIZE = {
        DTYPE_FLOAT: (4, 'f', np.float32),
        DTYPE_LONG: (4, 'l', np.int32),
        DTYPE_SHORT: (2, 'h', np.int16),
        DTYPE_USHORT: (2, 'H', np.uint16)
    }

    # (name, type, count) triples describing the 4100-byte header; the
    # trailing comments give each field's byte offset.
    _STRUCTINFO = [
        ('ControllerVersion', S16, 1),  # 0, Hardware Version
        ('LogicOutput', S16, 1),  # 2, Definition of Output BNC
        ('AmpHiCapLowNoise', U16, 1),  # 4, Amp Switching Mode
        ('xDimDet', U16, 1),  # 6, Detector x dimension of chip.
        ('mode', S16, 1),  # 8, timing mode
        ('exp_sec', FLOAT, 1),  # 10, alternitive exposure, in sec.
        ('VChipXdim', S16, 1),  # 14, Virtual Chip X dim
        ('VChipYdim', S16, 1),  # 16, Virtual Chip Y dim
        ('yDimDet', U16, 1),  # 18, y dimension of CCD or detector.
        ('date', STRING, DATEMAX),  # 20, date
        ('VirtualChipFlag', S16, 1),  # 30, On/Off
        ('Spare1', C, 2),  # 32
        ('noscan', S16, 1),  # 34, Old number of scans - should always be -1
        ('DetTemperature', FLOAT, 1),  # 36, Detector Temperature Set
        ('DetType', S16, 1),  # 40, CCD/DiodeArray type
        ('xdim', U16, 1),  # 42, actual # of pixels on x axis
        ('stdiode', S16, 1),  # 44, trigger diode
        ('DelayTime', FLOAT, 1),  # 46, Used with Async Mode
        ('ShutterControl', U16, 1),  # 50, Normal, Disabled Open, Disabled Closed
        ('AbsorbLive', S16, 1),  # 52, On/Off
        ('AbsorbMode', U16, 1),  # 54, Reference Strip or File
        ('CanDoVirtualChipFlag', S16, 1),  # 56, T/F Cont/Chip able to do Virtual Chip
        ('ThresholdMinLive', S16, 1),  # 58, On/Off
        ('ThresholdMinVal', FLOAT, 1),  # 60, Threshold Minimum Value
        ('ThresholdMaxLive', S16, 1),  # 64, On/Off
        ('ThresholdMaxVal', FLOAT, 1),  # 66, Threshold Maximum Value
        ('SpecAutoSpectroMode', S16, 1),  # 70, T/F Spectrograph Used
        ('SpecCenterWlNm', FLOAT, 1),  # 72, Center Wavelength in Nm
        ('SpecGlueFlag', S16, 1),  # 76, T/F File is Glued
        ('SpecGlueStartWlNm', FLOAT, 1),  # 78, Starting Wavelength in Nm
        ('SpecGlueEndWlNm', FLOAT, 1),  # 82, Starting Wavelength in Nm
        ('SpecGlueMinOvrlpNm', FLOAT, 1),  # 86, Minimum Overlap in Nm
        ('SpecGlueFinalResNm', FLOAT, 1),  # 90, Final Resolution in Nm
        ('PulserType', S16, 1),  # 94, 0=None, PG200=1, PTG=2, DG535=3
        ('CustomChipFlag', S16, 1),  # 96, T/F Custom Chip Used
        ('XPrePixels', S16, 1),  # 98, Pre Pixels in X direction
        ('XPostPixels', S16, 1),  # 100, Post Pixels in X direction
        ('YPrePixels', S16, 1),  # 102, Pre Pixels in Y direction
        ('YPostPixels', S16, 1),  # 104, Post Pixels in Y direction
        ('asynen', S16, 1),  # 106, asynchron enable flag 0 = off
        ('datatype', S16, 1),  # 108, experiment datatype
        ('PulserMode', S16, 1),  # 110, Repetitive/Sequential
        ('PulserOnChipAccums', U16, 1),  # 112, Num PTG On-Chip Accums
        ('PulserRepeatExp', U32, 1),  # 114, Num Exp Repeats (Pulser SW Accum)
        ('PulseRepWidth', FLOAT, 1),  # 118, Width Value for Repetitive pulse (usec)
        ('PulseRepDelay', FLOAT, 1),  # 122, Width Value for Repetitive pulse (usec)
        ('PulseSeqStartWidth', FLOAT, 1),  # 126, Start Width for Sequential pulse (usec)
        ('PulseSeqEndWidth', FLOAT, 1),  # 130, End Width for Sequential pulse (usec)
        ('PulseSeqStartDelay', FLOAT, 1),  # 134, Start Delay for Sequential pulse (usec)
        ('PulseSeqEndDelay', FLOAT, 1),  # 138, End Delay for Sequential pulse (usec)
        ('PulseSeqIncMode', S16, 1),  # 142, Increments: 1=Fixed, 2=Exponential
        ('PImaxUsed', S16, 1),  # 144, PI-Max type controller flag
        ('PImaxMode', S16, 1),  # 146, PI-Max mode
        ('PImaxGain', S16, 1),  # 148, PI-Max Gain
        ('BackGrndApplied', S16, 1),  # 150, 1 if background subtraction done
        ('PImax2nsBrdUsed', S16, 1),  # 152, T/F PI-Max 2ns Board Used
        ('minblk', U16, 1),  # 154, min. # of strips per skips
        ('numminblk', U16, 1),  # 156, # of min-blocks before geo skps
        ('SpecMirrorLocation', S16, 2),  # 158, Spectro Mirror Location, 0=Not Present
        ('SpecSlitLocation', S16, 4),  # 162, Spectro Slit Location, 0=Not Present
        ('CustomTimingFlag', S16, 1),  # 170, T/F Custom Timing Used
        ('ExperimentTimeLocal', STRING, TIMEMAX),  # 172, Experiment Local Time as hhmmss\0
        ('ExperimentTimeUTC', STRING, TIMEMAX),  # 179, Experiment UTC Time as hhmmss\0
        ('ExposUnits', S16, 1),  # 186, User Units for Exposure
        ('ADCoffset', U16, 1),  # 188, ADC offset
        ('ADCrate', U16, 1),  # 190, ADC rate
        ('ADCtype', U16, 1),  # 192, ADC type
        ('ADCresolution', U16, 1),  # 194, ADC resolution
        ('ADCbitAdjust', U16, 1),  # 196, ADC bit adjust
        ('gain', U16, 1),  # 198, gain
        ('Comments', C, 400),  # 200, File Comments
        ('geometric', U16, 1),  # 600, geometric ops: rotate 0x01,
        ('xlabel', STRING, LABELMAX),  # 602, intensity display string
        ('cleans', U16, 1),  # 618, cleans
        ('NumSkpPerCln', U16, 1),  # 620, number of skips per clean.
        ('SpecMirrorPos', S16, 2),  # 622, Spectrograph Mirror Positions
        ('SpecSlitPos', FLOAT, 4),  # 626, Spectrograph Slit Positions
        ('AutoCleansActive', S16, 1),  # 642, T/F
        ('UseContCleansInst', S16, 1),  # 644, T/F
        ('AbsorbStripNum', S16, 1),  # 646, Absorbance Strip Number
        ('SpecSlitPosUnits', S16, 1),  # 648, Spectrograph Slit Position Units
        ('SpecGrooves', FLOAT, 1),  # 650, Spectrograph Grating Grooves
        ('srccmp', S16, 1),  # 654, number of source comp. diodes
        ('ydim', U16, 1),  # 656, y dimension of raw data.
        ('scramble', S16, 1),  # 658, 0=scrambled,1=unscrambled
        ('ContinuousCleansFlag', S16, 1),  # 660, T/F Continuous Cleans Timing Option
        ('ExternalTriggerFlag', S16, 1),  # 662, T/F External Trigger Timing Option
        ('lnoscan', U32, 1),  # 664 Number of scans (Early WinX)
        ('lavgexp', U32, 1),  # 668 Number of Accumulations
        ('ReadoutTime', FLOAT, 1),  # 672, Experiment readout time
        ('TriggeredModeFlag', S16, 1),  # 676, T/F Triggered Timing Option
        ('Spare_2', C, 10),  # 678
        ('sw_version', STRING, FILEVERMAX),  # 688, Version of SW creating this file
        ('type', S16, 1),  # 704, 1 = new120 (Type II)
        ('flatFieldApplied', S16, 1),  # 706, 1 if flat field was applied.
        ('Spare_3', C, 16),  # 708,
        ('kin_trig_mode', S16, 1),  # 724, Kinetics Trigger Mode
        ('dlabel', STRING, LABELMAX),  # 726, Data label.
        ('Spare_4', C, 436),  # 742
        ('PulseFileName', STRING, HDRNAMEMAX),  # 1178, Name of Pulser File with
        ('AbsorbFileName', STRING, HDRNAMEMAX),  # 1298, Name of Absorbance File (if File Mode)
        ('NumExpRepeats', U32, 1),  # 1418, Number of Times experiment repeated
        ('NumExpAccums', U32, 1),  # 1422, Number of Time experiment accumulated
        ('YT_Flag', S16, 1),  # 1426, Set to 1 if this file contains YT data
        ('clkspd_us', FLOAT, 1),  # 1428, Vert Clock Speed in micro-sec
        ('HWaccumFlag', S16, 1),  # 1432, set to 1 if accum done by Hardware.
        ('StoreSync', S16, 1),  # 1434, set to 1 if store sync used
        ('BlemishApplied', S16, 1),  # 1436, set to 1 if blemish removal applied
        ('CosmicApplied', S16, 1),  # 1438, set to 1 if cosmic ray removal applied
        ('CosmicType', S16, 1),  # 1440, if cosmic ray applied, this is type
        ('CosmicThreshold', FLOAT, 1),  # 1442, Threshold of cosmic ray removal.
        ('NumFrames', U32, 1),  # 1446 number of frames in file.
        ('MaxIntensity', FLOAT, 1),  # 1450, max intensity of data (future)
        ('MinIntensity', FLOAT, 1),  # 1454, min intensity of data (future)
        ('ylabel', STRING, LABELMAX),  # 1458, y axis label.
        ('ShutterType', U16, 1),  # 1474, shutter type.
        ('shutterComp', FLOAT, 1),  # 1476, shutter compensation time.
        ('readoutMode', U16, 1),  # 1480, readout mode, full,kinetics, etc
        ('WindowSize', U16, 1),  # 1482, window size for kinetics only.
        ('clkspd', U16, 1),  # 1484, clock speed for kinetics & frame transfer
        ('interface_type', U16, 1),  # 1486, computer interface
        ('NumROIsInExperiment', S16, 1),  # 1488, May be more than the 10 allowed in
        ('Spare_5', C, 16),  # 1490,
        ('controllerNum', U16, 1),  # 1506, if multiple controller system will
        ('SWmade', U16, 1),  # 1508, Which software package created this file
        ('NumROI', S16, 1),  # 1510, number of ROIs used. if 0 assume 1.
        ('roi_info', U16, 10 * 6),  # 1512, ROIMAX entries of 6 U16 fields each
        ('FlatField', STRING, HDRNAMEMAX),  # 1632, Flat field file name.
        ('background', STRING, HDRNAMEMAX),  # 1752, background sub. file name.
        ('blemish', STRING, HDRNAMEMAX),  # 1872, blemish file name.
        ('file_header_ver', FLOAT, 1),  # 1992, version of this file header
        ('YT_info', C, 1000),  # 1996, YT data block
        ('WinView_id', U32, 1),  # 2996, file-format magic
        ('xoffset', DOUBLE, 1),  # 3000, offset for absolute data scaling
        ('xfactor', DOUBLE, 1),  # 3008, factor for absolute data scaling
        ('xcurrent_unit', C, 1),  # 3016, selected scaling unit
        ('xreserved1', C, 1),  # 3017, reserved
        ('xstring', C, 40),  # 3018, special string for scaling
        ('xreserved2', C, 40),  # 3058, reserved
        ('xcalib_valid', U8, 1),  # 3098, flag if calibration is valid
        ('xinput_unit', U8, 1),  # 3099, current input units for
        ('xpolynom_unit', U8, 1),  # 3100, linear UNIT and used
        ('xpolynom_order', U8, 1),  # 3101, ORDER of calibration POLYNOM
        ('xcalib_count', U8, 1),  # 3102, valid calibration data pairs
        ('xpixel_position', DOUBLE, 10),  # 3103, pixel pos. of calibration data
        ('xcalib_value', DOUBLE, 10),  # 3183, calibration VALUE at above pos
        ('xpolynom_coeff', DOUBLE, 6),  # 3263, polynom COEFFICIENTS
        ('xlaser_position', DOUBLE, 1),  # 3311, laser wavenumber for relativ WN
        ('xreserved3', C, 1),  # 3319, reserved
        ('xnew_calib_flag', C, 1),  # 3320
        ('xcalib_label', C, 81),  # 3321, Calibration label (NULL term'd)
        ('xexpansion', C, 87),  # 3402, Calibration Expansion area
        ('yoffset', DOUBLE, 1),  # 3489, offset for absolute data scaling
        ('yfactor', DOUBLE, 1),  # 3497, factor for absolute data scaling
        ('ycurrent_unit', C, 1),  # 3505, selected scaling unit
        ('yreserved1', C, 1),  # 3506, reserved
        ('ystring', C, 40),  # 3507, special string for scaling
        ('yreserved2', C, 40),  # 3547, reserved
        ('ycalib_valid', U8, 1),  # 3587, flag if calibration is valid
        ('yinput_unit', U8, 1),  # 3588, current input units for
        ('ypolynom_unit', U8, 1),  # 3589, linear UNIT and used
        ('ypolynom_order', U8, 1),  # 3590, ORDER of calibration POLYNOM
        ('ycalib_count', U8, 1),  # 3591, valid calibration data pairs
        ('ypixel_position', DOUBLE, 10),  # 3592, pixel pos. of calibration data
        ('ycalib_value', DOUBLE, 10),  # 3672, calibration VALUE at above pos
        ('ypolynom_coeff', DOUBLE, 6),  # 3752, polynom COEFFICIENTS
        ('ylaser_position', DOUBLE, 1),  # 3800, laser wavenumber for relativ WN
        ('yreserved3', C, 1),  # 3808, reserved
        ('ynew_calib_flag', C, 1),  # 3809
        ('ycalib_label', C, 81),  # 3810, Calibration label (NULL term'd)
        ('yexpansion', C, 87),  # 3891, Calibration Expansion area
        ('Istring', STRING, 40),  # 3978, special intensity scaling string
        ('Spare_6', C, 25),  # 4018,
        ('SpecType', U8, 1),  # 4043 spectrometer type (acton, spex, etc.)
        ('SpecModel', U8, 1),  # 4044 spectrometer model (type dependent)
        ('PulseBurstUsed', U8, 1),  # 4045 pulser burst mode on/off
        ('PulseBurstCount', U32, 1),  # 4046, pulser triggers per burst
        ('PulseBurstPeriod', DOUBLE, 1),  # 4050, pulser burst period (in usec)
        ('PulseBracketUsed', U8, 1),  # 4058 pulser bracket pulsing on/off
        ('PulseBracketType', U8, 1),  # 4059 pulser bracket pulsing type
        ('PulseTimeConstFast', DOUBLE, 1),  # 4060, pulser slow exponential time constant (in usec)
        ('PulseAmplitudeFast', DOUBLE, 1),  # 4068, pulser fast exponential amplitude constant
        ('PulseTimeConstSlow', DOUBLE, 1),  # 4076, pulser slow exponential time constant (in usec)
        ('PulseAmplitudeSlow', DOUBLE, 1),  # 4084, pulser slow exponential amplitude constant
        ('AnalogGain;', S16, 1),  # 4092, analog gain (sic: stray ';' kept -- it is the info-dict key)
        ('AvGainUsed', S16, 1),  # 4094, avalanche gain was used
        ('AvGain', S16, 1),  # 4096, avalanche gain value
        ('lastvalue', S16, 1),  # 4098, Always the LAST value in the header
    ]

    def __init__(self, filename=None):
        """Create an (optionally pre-loaded) SPE reader."""
        self._info = {}
        self._filename = ''
        self._data = None
        # Header fields are little-endian.
        self._struct = NamedStruct(self._STRUCTINFO, alignment='<')
        if filename:
            self.load(filename)

    def load(self, filename):
        """Read the 4100-byte header and the pixel data from `filename`.

        On a truncated file an error message is printed and the missing
        trailing elements are left as zeros (matching the old behaviour of
        aborting the per-element loop early).
        """
        expected = 0
        with open(filename, 'rb') as f:
            header = f.read(4100)
            info = self._struct.unpack(header)
            self._info = info
            typesize, _formatchr, nptype = self.DSIZE[info['datatype']]
            entries = info['xdim'] * info['ydim'] * info['NumFrames']
            expected = entries * typesize
            raw = f.read(expected)
        if len(raw) < expected:
            print('Error reading SPE-file: unexpected EOF')
        self._data = np.zeros(entries, dtype=nptype)
        good = len(raw) // typesize
        # Bulk decode in native byte order, same as the old per-element
        # struct.unpack calls.
        self._data[:good] = np.frombuffer(raw[:good * typesize], dtype=nptype)
        self._filename = filename

    def convert_value(self, axis, value):
        """Apply the stored polynomial calibration for `axis` ('x' or 'y').

        Returns `value` unchanged when the header flags the calibration as
        invalid.  The (value + 1) offset presumably maps a 0-based pixel
        index onto 1-based calibration positions -- TODO confirm.
        """
        if not self._info['%scalib_valid' % axis]:
            return value
        val = 0.0
        order = self._info['%spolynom_order' % axis]
        for power in range(order + 1):
            coef = self._info['%spolynom_coeff' % axis][power]
            val += coef * (value + 1) ** power
        return val

    def get_info(self):
        """Return the parsed header as a dict keyed by _STRUCTINFO names."""
        return self._info

    def get_data(self):
        """Return an (N, 2) array of (calibrated x, calibrated y) pairs."""
        xvals = np.array(
            [self.convert_value('x', i) for i in range(len(self._data))])
        yvals = np.array(
            [self.convert_value('y', i) for i in self._data])
        return np.column_stack((xvals, yvals))
if __name__ == '__main__':
    import sys
    # Take the SPE file to inspect from the command line; default to test.spe.
    if len(sys.argv) == 2:
        fname = sys.argv[1]
    else:
        fname = 'test.spe'
    spe = SPEFile(fname)
    info = spe.get_info()
    # print() call form is valid under both Python 2 and Python 3.
    print('Info:')
    for line in spe._STRUCTINFO:
        key = line[0]
        val = info[key]
        print(' %s => %r' % (key, val))
    import matplotlib.pyplot as plt
    xys = spe.get_data()
    xs, ys = xys[:, 0], xys[:, 1]
    plt.plot(xs, ys)
    plt.xlim(min(xs), max(xs))
    plt.show()
import struct

import numpy as np

from lib.namedstruct import *
class SPEFile:
    """Reader for Princeton Instruments WinSpec/WinView SPE spectra files.

    Parses the fixed 4100-byte binary header described by _STRUCTINFO into
    a dict and loads the raw frame data into a flat numpy array.
    """

    # Fixed field lengths defined by the WinSpec header layout.
    HDRNAMEMAX = 120
    USERINFOMAX = 1000
    COMMENTMAX = 80
    LABELMAX = 16
    FILEVERMAX = 16
    DATEMAX = 10
    ROIMAX = 10
    TIMEMAX = 7
    # Codes stored in the header's 'datatype' field.
    DTYPE_FLOAT = 0
    DTYPE_LONG = 1
    DTYPE_SHORT = 2
    DTYPE_USHORT = 3
    # datatype code -> (bytes per element, struct format char, numpy dtype)
    DSIZE = {
        DTYPE_FLOAT: (4, 'f', np.float32),
        DTYPE_LONG: (4, 'l', np.int32),
        DTYPE_SHORT: (2, 'h', np.int16),
        DTYPE_USHORT: (2, 'H', np.uint16)
    }
    # (field name, element type, count) triples, in file order; the trailing
    # comments give the byte offset of each field in the header.
    _STRUCTINFO = [
        ('ControllerVersion', S16, 1), #0, Hardware Version
        ('LogicOutput', S16, 1), #2, Definition of Output BNC
        ('AmpHiCapLowNoise', U16, 1), #4, Amp Switching Mode
        ('xDimDet', U16, 1), #6, Detector x dimension of chip.
        ('mode', S16, 1), #8, timing mode
        ('exp_sec', FLOAT, 1), #10, alternitive exposure, in sec.
        ('VChipXdim', S16, 1), #14, Virtual Chip X dim
        ('VChipYdim', S16, 1), #16, Virtual Chip Y dim
        ('yDimDet', U16, 1), #18, y dimension of CCD or detector.
        ('date', STRING, DATEMAX), #20, date
        ('VirtualChipFlag', S16, 1), #30, On/Off
        ('Spare1', C, 2), #32
        ('noscan', S16, 1), #34, Old number of scans - should always be -1
        ('DetTemperature', FLOAT, 1), #36, Detector Temperature Set
        ('DetType', S16, 1), #40, CCD/DiodeArray type
        ('xdim', U16, 1), #42, actual # of pixels on x axis
        ('stdiode', S16, 1), #44, trigger diode
        ('DelayTime', FLOAT, 1), #46, Used with Async Mode
        ('ShutterControl', U16, 1), #50, Normal, Disabled Open, Disabled Closed
        ('AbsorbLive', S16, 1), #52, On/Off
        ('AbsorbMode', U16, 1), #54, Reference Strip or File
        ('CanDoVirtualChipFlag', S16, 1), #56, T/F Cont/Chip able to do Virtual Chip
        ('ThresholdMinLive', S16, 1), #58, On/Off
        ('ThresholdMinVal', FLOAT, 1), #60, Threshold Minimum Value
        ('ThresholdMaxLive', S16, 1), #64, On/Off
        ('ThresholdMaxVal', FLOAT, 1), #66, Threshold Maximum Value
        ('SpecAutoSpectroMode', S16, 1), #70, T/F Spectrograph Used
        ('SpecCenterWlNm', FLOAT, 1), #72, Center Wavelength in Nm
        ('SpecGlueFlag', S16, 1), #76, T/F File is Glued
        ('SpecGlueStartWlNm', FLOAT, 1), #78, Starting Wavelength in Nm
        ('SpecGlueEndWlNm', FLOAT, 1), #82, Starting Wavelength in Nm
        ('SpecGlueMinOvrlpNm', FLOAT, 1), #86, Minimum Overlap in Nm
        ('SpecGlueFinalResNm', FLOAT, 1), #90, Final Resolution in Nm
        ('PulserType', S16, 1), #94, 0=None, PG200=1, PTG=2, DG535=3
        ('CustomChipFlag', S16, 1), #96, T/F Custom Chip Used
        ('XPrePixels', S16, 1), #98, Pre Pixels in X direction
        ('XPostPixels', S16, 1), #100, Post Pixels in X direction
        ('YPrePixels', S16, 1), #102, Pre Pixels in Y direction
        ('YPostPixels', S16, 1), #104, Post Pixels in Y direction
        ('asynen', S16, 1), #106, asynchron enable flag 0 = off
        ('datatype', S16, 1), #108, experiment datatype
        ('PulserMode', S16, 1), #110, Repetitive/Sequential
        ('PulserOnChipAccums', U16, 1), #112, Num PTG On-Chip Accums
        ('PulserRepeatExp', U32, 1), #114, Num Exp Repeats (Pulser SW Accum)
        ('PulseRepWidth', FLOAT, 1), #118, Width Value for Repetitive pulse (usec)
        ('PulseRepDelay', FLOAT, 1), #122, Width Value for Repetitive pulse (usec)
        ('PulseSeqStartWidth', FLOAT, 1), #126, Start Width for Sequential pulse (usec)
        ('PulseSeqEndWidth', FLOAT, 1), #130, End Width for Sequential pulse (usec)
        ('PulseSeqStartDelay', FLOAT, 1), #134, Start Delay for Sequential pulse (usec)
        ('PulseSeqEndDelay', FLOAT, 1), #138, End Delay for Sequential pulse (usec)
        ('PulseSeqIncMode', S16, 1), #142, Increments: 1=Fixed, 2=Exponential
        ('PImaxUsed', S16, 1), #144, PI-Max type controller flag
        ('PImaxMode', S16, 1), #146, PI-Max mode
        ('PImaxGain', S16, 1), #148, PI-Max Gain
        ('BackGrndApplied', S16, 1), #150, 1 if background subtraction done
        ('PImax2nsBrdUsed', S16, 1), #152, T/F PI-Max 2ns Board Used
        ('minblk', U16, 1), #154, min. # of strips per skips
        ('numminblk', U16, 1), #156, # of min-blocks before geo skps
        ('SpecMirrorLocation', S16, 2), #158, Spectro Mirror Location, 0=Not Present
        ('SpecSlitLocation', S16, 4), #162, Spectro Slit Location, 0=Not Present
        ('CustomTimingFlag', S16, 1), #170, T/F Custom Timing Used
        ('ExperimentTimeLocal', STRING, TIMEMAX), #172, Experiment Local Time as hhmmss\0
        ('ExperimentTimeUTC', STRING, TIMEMAX), #179, Experiment UTC Time as hhmmss\0
        ('ExposUnits', S16, 1), #186, User Units for Exposure
        ('ADCoffset', U16, 1), #188, ADC offset
        ('ADCrate', U16, 1), #190, ADC rate
        ('ADCtype', U16, 1), #192, ADC type
        ('ADCresolution', U16, 1), #194, ADC resolution
        ('ADCbitAdjust', U16, 1), #196, ADC bit adjust
        ('gain', U16, 1), #198, gain
        ('Comments', C, 400), #200, File Comments
        ('geometric', U16, 1), #600, geometric ops: rotate 0x01,
        ('xlabel', STRING, LABELMAX), #602, intensity display string
        ('cleans', U16, 1), #618, cleans
        ('NumSkpPerCln', U16, 1), #620, number of skips per clean.
        ('SpecMirrorPos', S16, 2), #622, Spectrograph Mirror Positions
        ('SpecSlitPos', FLOAT, 4), #626, Spectrograph Slit Positions
        ('AutoCleansActive', S16, 1), #642, T/F
        ('UseContCleansInst', S16, 1), #644, T/F
        ('AbsorbStripNum', S16, 1), #646, Absorbance Strip Number
        ('SpecSlitPosUnits', S16, 1), #648, Spectrograph Slit Position Units
        ('SpecGrooves', FLOAT, 1), #650, Spectrograph Grating Grooves
        ('srccmp', S16, 1), #654, number of source comp. diodes
        ('ydim', U16, 1), #656, y dimension of raw data.
        ('scramble', S16, 1), #658, 0=scrambled,1=unscrambled
        ('ContinuousCleansFlag', S16, 1), #660, T/F Continuous Cleans Timing Option
        ('ExternalTriggerFlag', S16, 1), #662, T/F External Trigger Timing Option
        ('lnoscan', U32, 1), #664 Number of scans (Early WinX)
        ('lavgexp', U32, 1), #668 Number of Accumulations
        ('ReadoutTime', FLOAT, 1), #672, Experiment readout time
        ('TriggeredModeFlag', S16, 1), #676, T/F Triggered Timing Option
        ('Spare_2', C, 10), #678
        ('sw_version', STRING, FILEVERMAX), #688, Version of SW creating this file
        ('type', S16, 1), #704, 1 = new120 (Type II)
        ('flatFieldApplied', S16, 1), #706, 1 if flat field was applied.
        ('Spare_3', C, 16), #708,
        ('kin_trig_mode', S16, 1), #724, Kinetics Trigger Mode
        ('dlabel', STRING, LABELMAX), #726, Data label.
        ('Spare_4', C, 436), #742
        ('PulseFileName', STRING, HDRNAMEMAX), #1178, Name of Pulser File with
        ('AbsorbFileName', STRING, HDRNAMEMAX), #1298, Name of Absorbance File (if File Mode)
        ('NumExpRepeats', U32, 1), #1418, Number of Times experiment repeated
        ('NumExpAccums', U32, 1), #1422, Number of Time experiment accumulated
        ('YT_Flag', S16, 1), #1426, Set to 1 if this file contains YT data
        ('clkspd_us', FLOAT, 1), #1428, Vert Clock Speed in micro-sec
        ('HWaccumFlag', S16, 1), #1432, set to 1 if accum done by Hardware.
        ('StoreSync', S16, 1), #1434, set to 1 if store sync used
        ('BlemishApplied', S16, 1), #1436, set to 1 if blemish removal applied
        ('CosmicApplied', S16, 1), #1438, set to 1 if cosmic ray removal applied
        ('CosmicType', S16, 1), #1440, if cosmic ray applied, this is type
        ('CosmicThreshold', FLOAT, 1), #1442, Threshold of cosmic ray removal.
        ('NumFrames', U32, 1), #1446 number of frames in file.
        ('MaxIntensity', FLOAT, 1), #1450, max intensity of data (future)
        ('MinIntensity', FLOAT, 1), #1454, min intensity of data (future)
        ('ylabel', STRING, LABELMAX), #1458, y axis label.
        ('ShutterType', U16, 1), #1474, shutter type.
        ('shutterComp', FLOAT, 1), #1476, shutter compensation time.
        ('readoutMode', U16, 1), #1480, readout mode, full,kinetics, etc
        ('WindowSize', U16, 1), #1482, window size for kinetics only.
        ('clkspd', U16, 1), #1484, clock speed for kinetics & frame transfer
        ('interface_type', U16, 1), #1486, computer interface
        ('NumROIsInExperiment', S16, 1), #1488, May be more than the 10 allowed in
        ('Spare_5', C, 16), #1490,
        ('controllerNum', U16, 1), #1506, if multiple controller system will
        ('SWmade', U16, 1), #1508, Which software package created this file
        ('NumROI', S16, 1), #1510, number of ROIs used. if 0 assume 1.
        ('roi_info', U16, 10 * 6),
        ('FlatField', STRING, HDRNAMEMAX), #1632, Flat field file name.
        ('background', STRING, HDRNAMEMAX), #1752, background sub. file name.
        ('blemish', STRING, HDRNAMEMAX), #1872, blemish file name.
        ('file_header_ver', FLOAT, 1), #1992, version of this file header
        ('YT_info', C, 1000),
        ('WinView_id', U32, 1),
        ('xoffset', DOUBLE, 1), #3000, offset for absolute data scaling
        ('xfactor', DOUBLE, 1), #3008, factor for absolute data scaling
        ('xcurrent_unit', C, 1), #3016, selected scaling unit
        ('xreserved1', C, 1), #3017, reserved
        ('xstring', C, 40), #3018, special string for scaling
        ('xreserved2', C, 40), #3058, reserved
        ('xcalib_valid', U8, 1), #3098, flag if calibration is valid
        ('xinput_unit', U8, 1), #3099, current input units for
        ('xpolynom_unit', U8, 1), #3100, linear UNIT and used
        ('xpolynom_order', U8, 1), #3101, ORDER of calibration POLYNOM
        ('xcalib_count', U8, 1), #3102, valid calibration data pairs
        ('xpixel_position', DOUBLE, 10), #3103, pixel pos. of calibration data
        ('xcalib_value', DOUBLE, 10), #3183, calibration VALUE at above pos
        ('xpolynom_coeff', DOUBLE, 6), #3263, polynom COEFFICIENTS
        ('xlaser_position', DOUBLE, 1), #3311, laser wavenumber for relativ WN
        ('xreserved3', C, 1), #3319, reserved
        ('xnew_calib_flag', C, 1), #3320
        ('xcalib_label', C, 81), #3321, Calibration label (NULL term'd)
        ('xexpansion', C, 87), #3402, Calibration Expansion area
        ('yoffset', DOUBLE, 1), #3489, offset for absolute data scaling
        ('yfactor', DOUBLE, 1), #3497, factor for absolute data scaling
        ('ycurrent_unit', C, 1), #3505, selected scaling unit
        ('yreserved1', C, 1), #3506, reserved
        ('ystring', C, 40), #3507, special string for scaling
        ('yreserved2', C, 40), #3547, reserved
        ('ycalib_valid', U8, 1), #3587, flag if calibration is valid
        ('yinput_unit', U8, 1), #3588, current input units for
        ('ypolynom_unit', U8, 1), #3589, linear UNIT and used
        ('ypolynom_order', U8, 1), #3590, ORDER of calibration POLYNOM
        ('ycalib_count', U8, 1), #3591, valid calibration data pairs
        ('ypixel_position', DOUBLE, 10), #3592, pixel pos. of calibration data
        ('ycalib_value', DOUBLE, 10), #3672, calibration VALUE at above pos
        ('ypolynom_coeff', DOUBLE, 6), #3752, polynom COEFFICIENTS
        ('ylaser_position', DOUBLE, 1), #3800, laser wavenumber for relativ WN
        ('yreserved3', C, 1), #3808, reserved
        ('ynew_calib_flag', C, 1), #3809
        ('ycalib_label', C, 81), #3810, Calibration label (NULL term'd)
        ('yexpansion', C, 87), #3891, Calibration Expansion area
        ('Istring', STRING, 40), #3978, special intensity scaling string
        ('Spare_6', C, 25), #4018,
        ('SpecType', U8, 1), # 4043 spectrometer type (acton, spex, etc.)
        ('SpecModel', U8, 1), # 4044 spectrometer model (type dependent)
        ('PulseBurstUsed', U8, 1), # 4045 pulser burst mode on/off
        ('PulseBurstCount', U32, 1), #4046, pulser triggers per burst
        ('PulseBurstPeriod', DOUBLE, 1), #4050, pulser burst period (in usec)
        ('PulseBracketUsed', U8, 1), # 4058 pulser bracket pulsing on/off
        ('PulseBracketType', U8, 1), # 4059 pulser bracket pulsing type
        ('PulseTimeConstFast', DOUBLE, 1), #4060, pulser slow exponential time constant (in usec)
        ('PulseAmplitudeFast', DOUBLE, 1), #4068, pulser fast exponential amplitude constant
        ('PulseTimeConstSlow', DOUBLE, 1), #4076, pulser slow exponential time constant (in usec)
        ('PulseAmplitudeSlow', DOUBLE, 1), #4084, pulser slow exponential amplitude constant
        # NOTE(review): the stray ';' in the field name below is preserved
        # on purpose — existing consumers key the info dict by this string.
        ('AnalogGain;', S16, 1), #4092, analog gain
        ('AvGainUsed', S16, 1), #4094, avalanche gain was used
        ('AvGain', S16, 1), #4096, avalanche gain value
        ('lastvalue', S16, 1), #4098, Always the LAST value in the header
    ]

    def __init__(self, filename=None):
        """Prepare an empty reader; parse *filename* immediately when given."""
        self._info = {}
        self._filename = ''
        self._data = None
        # Header fields are stored little-endian on disk.
        self._struct = NamedStruct(self._STRUCTINFO, alignment='<')
        if filename:
            self.load(filename)

    def load(self, filename):
        """Read an SPE file: unpack the 4100-byte header, then the pixel data.

        Populates self._info (header fields keyed by name) and self._data
        (flat numpy array of xdim * ydim * NumFrames intensity values).
        A truncated file leaves the unread tail of self._data zeroed.
        """
        # `with` guarantees the handle is closed even on error (the original
        # never closed it).
        with open(filename, 'rb') as f:
            header = f.read(4100)
            info = self._struct.unpack(header)
            self._info = info
            # Per-pixel storage type is declared in the header's 'datatype'.
            typesize, formatchr, nptype = self.DSIZE[info['datatype']]
            entries = info['xdim'] * info['ydim'] * info['NumFrames']
            self._data = np.zeros(entries, dtype=nptype)
            for i in range(entries):
                elem = f.read(typesize)
                # A short read (not just '') also means the file is truncated.
                if len(elem) != typesize:
                    print('Error reading SPE-file: unexpected EOF')
                    break
                self._data[i] = struct.unpack(formatchr, elem)[0]

    def convert_value(self, axis, value):
        """Map *value* through the stored calibration polynomial of *axis*
        ('x' or 'y'); return it unchanged when the calibration is invalid."""
        if not self._info['%scalib_valid' % axis]:
            return value
        val = 0.0
        order = self._info['%spolynom_order' % axis]
        for power in range(order + 1):
            coef = self._info['%spolynom_coeff' % axis][power]
            # Polynomial is evaluated at (value + 1), i.e. 1-based index.
            val += coef * (value + 1) ** power
        return val

    def get_info(self):
        """Return the raw header dictionary parsed from the SPE file."""
        return self._info

    def get_data(self):
        """Return an (N, 2) array pairing calibrated x positions with the
        stored intensity values.

        NOTE(review): the y calibration is applied to the intensity values
        themselves — confirm that is intended.
        """
        xvals = np.array(
            [self.convert_value('x', i) for i in range(len(self._data))])
        yvals = np.array(
            [self.convert_value('y', i) for i in self._data])
        return np.column_stack((xvals, yvals))
if __name__ == '__main__':
    import sys
    # Take the SPE file to inspect from the command line; default to test.spe.
    if len(sys.argv) == 2:
        fname = sys.argv[1]
    else:
        fname = 'test.spe'
    spe = SPEFile(fname)
    info = spe.get_info()
    # print() call form is valid under both Python 2 and Python 3.
    print('Info:')
    for line in spe._STRUCTINFO:
        key = line[0]
        val = info[key]
        print(' %s => %r' % (key, val))
    import matplotlib.pyplot as plt
    xys = spe.get_data()
    xs, ys = xys[:, 0], xys[:, 1]
    plt.plot(xs, ys)
    plt.xlim(min(xs), max(xs))
    plt.show()
from __future__ import absolute_import
import os
import tarfile
import boto3
import numpy
import pytest
import tests.integ.local_mode_utils as local_mode_utils
from tests.integ import DATA_DIR, PYTHON_VERSION
from tests.integ.timeout import timeout
from sagemaker.local import LocalSession, LocalSagemakerRuntimeClient, LocalSagemakerClient
from sagemaker.mxnet import MXNet
from sagemaker.tensorflow import TensorFlow
# Iris dataset shipped with the integ-test data directory.
DATA_PATH = os.path.join(DATA_DIR, 'iris', 'data')
# Region to pin when running purely locally (no AWS config required).
DEFAULT_REGION = 'us-west-2'
class LocalNoS3Session(LocalSession):
    """LocalSession variant that forces local_code: True, ignoring any
    settings from the user's config file."""

    def __init__(self):
        # NOTE(review): deliberately invokes the grandparent initializer,
        # skipping LocalSession.__init__, so only the _initialize override
        # below configures the session — confirm this is intended.
        super(LocalSession, self).__init__()

    def _initialize(self, boto_session, sagemaker_client, sagemaker_runtime_client):
        self.boto_session = boto3.Session(region_name=DEFAULT_REGION)
        if self.config is None:
            # Force local_code regardless of any on-disk configuration.
            self.config = {'local': {'local_code': True,
                                     'region_name': DEFAULT_REGION}}
        self._region_name = DEFAULT_REGION
        self.sagemaker_client = LocalSagemakerClient(self)
        self.sagemaker_runtime_client = LocalSagemakerRuntimeClient(self.config)
        self.local_mode = True
@pytest.fixture(scope='module')
def mxnet_model(sagemaker_local_session, mxnet_full_version):
    """Factory fixture: yields a callable that trains an MXNet MNIST model
    and returns the resulting SageMaker model object."""
    def _create_model(output_path):
        mnist_dir = os.path.join(DATA_DIR, 'mxnet_mnist')
        estimator = MXNet(entry_point=os.path.join(mnist_dir, 'mnist.py'),
                          role='SageMakerRole',
                          train_instance_count=1, train_instance_type='local',
                          output_path=output_path,
                          framework_version=mxnet_full_version,
                          sagemaker_session=sagemaker_local_session)
        session = estimator.sagemaker_session
        channels = {
            'train': session.upload_data(
                path=os.path.join(mnist_dir, 'train'),
                key_prefix='integ-test-data/mxnet_mnist/train'),
            'test': session.upload_data(
                path=os.path.join(mnist_dir, 'test'),
                key_prefix='integ-test-data/mxnet_mnist/test'),
        }
        estimator.fit(channels)
        return estimator.create_model(1)
    return _create_model
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_mode(tf_full_version, sagemaker_local_session):
    """Train the iris TF DNN classifier in local mode with S3 input, deploy
    to a local endpoint, and check dict and list predict payloads agree."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH,
                                                         key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    # Serialize local-mode endpoints across parallel test workers.
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_distributed_local_mode(sagemaker_local_session):
    """Train the iris TF classifier across 3 local containers (distributed)
    using file:// input, deploy, and check dict/list predict payloads agree."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=3,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_data(sagemaker_local_session):
    """Train the iris TF classifier in local mode reading data straight from
    the local filesystem (file:// input), then deploy and predict."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_data_local_script():
    """Fully local TF run: local data and local code via LocalNoS3Session,
    with no S3 interaction at all."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=LocalNoS3Session())
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
def test_local_mode_serving_from_s3_model(sagemaker_local_session, mxnet_model, mxnet_full_version):
    """Deploy a model whose artifacts live in S3 to a local endpoint and run
    a single zeroed-MNIST prediction."""
    path = 's3://%s' % sagemaker_local_session.default_bucket()
    s3_model = mxnet_model(path)
    s3_model.sagemaker_session = sagemaker_local_session
    predictor = None
    with local_mode_utils.lock():
        try:
            predictor = s3_model.deploy(initial_instance_count=1, instance_type='local')
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            if predictor:
                predictor.delete_endpoint()
@pytest.mark.local_mode
def test_local_mode_serving_from_local_model(tmpdir, sagemaker_local_session, mxnet_model):
    """Deploy a model whose artifacts live on the local filesystem to a local
    endpoint and run a single prediction."""
    predictor = None
    with local_mode_utils.lock():
        try:
            path = 'file://%s' % (str(tmpdir))
            model = mxnet_model(path)
            model.sagemaker_session = sagemaker_local_session
            predictor = model.deploy(initial_instance_count=1, instance_type='local')
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            if predictor:
                predictor.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_local_mode(sagemaker_local_session, mxnet_full_version):
    """Train MXNet MNIST in local mode with S3 input, deploy to a local
    endpoint, and run one prediction."""
    script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'mnist.py')
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    mx = MXNet(entry_point=script_path, role='SageMakerRole', py_version=PYTHON_VERSION,
               train_instance_count=1, train_instance_type='local',
               sagemaker_session=sagemaker_local_session, framework_version=mxnet_full_version)
    train_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'train'),
                                                   key_prefix='integ-test-data/mxnet_mnist/train')
    test_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'test'),
                                                  key_prefix='integ-test-data/mxnet_mnist/test')
    mx.fit({'train': train_input, 'test': test_input})
    endpoint_name = mx.latest_training_job.name
    with local_mode_utils.lock():
        try:
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            mx.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_local_data_local_script(mxnet_full_version):
    """Train MXNet MNIST entirely locally (file:// data, local code), deploy,
    and run one prediction."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(data_path, 'mnist.py')
    mx = MXNet(entry_point=script_path, role='SageMakerRole',
               train_instance_count=1, train_instance_type='local',
               framework_version=mxnet_full_version,
               sagemaker_session=LocalNoS3Session())
    train_input = 'file://' + os.path.join(data_path, 'train')
    test_input = 'file://' + os.path.join(data_path, 'test')
    mx.fit({'train': train_input, 'test': test_input})
    endpoint_name = mx.latest_training_job.name
    with local_mode_utils.lock():
        try:
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            mx.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_training_failure(sagemaker_local_session, mxnet_full_version, tmpdir):
    """A failing training script must raise RuntimeError and still produce
    output.tar.gz containing the 'failure' marker file."""
    script_path = os.path.join(DATA_DIR, 'mxnet_mnist', 'failure_script.py')
    mx = MXNet(entry_point=script_path,
               role='SageMakerRole',
               framework_version=mxnet_full_version,
               py_version=PYTHON_VERSION,
               train_instance_count=1,
               train_instance_type='local',
               sagemaker_session=sagemaker_local_session,
               output_path='file://{}'.format(tmpdir))
    with pytest.raises(RuntimeError):
        mx.fit()
    # getmember raises KeyError if the failure marker is absent.
    with tarfile.open(os.path.join(str(tmpdir), 'output.tar.gz')) as tar:
        tar.getmember('failure')
@pytest.mark.local_mode
def test_local_transform_mxnet(sagemaker_local_session, tmpdir, mxnet_full_version):
    """Train MXNet MNIST, then run a local batch transform and verify the
    output file lands in tmpdir."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(data_path, 'mnist.py')
    mx = MXNet(entry_point=script_path, role='SageMakerRole', train_instance_count=1,
               train_instance_type='ml.c4.xlarge', framework_version=mxnet_full_version,
               sagemaker_session=sagemaker_local_session)
    train_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'train'),
                                                   key_prefix='integ-test-data/mxnet_mnist/train')
    test_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'test'),
                                                  key_prefix='integ-test-data/mxnet_mnist/test')
    with timeout(minutes=15):
        mx.fit({'train': train_input, 'test': test_input})
    transform_input_path = os.path.join(data_path, 'transform')
    transform_input_key_prefix = 'integ-test-data/mxnet_mnist/transform'
    transform_input = mx.sagemaker_session.upload_data(path=transform_input_path,
                                                       key_prefix=transform_input_key_prefix)
    output_path = 'file://%s' % (str(tmpdir))
    transformer = mx.transformer(1, 'local', assemble_with='Line', max_payload=1,
                                 strategy='SingleRecord', output_path=output_path)
    with local_mode_utils.lock():
        transformer.transform(transform_input, content_type='text/csv', split_type='Line')
        transformer.wait()
    assert os.path.exists(os.path.join(str(tmpdir), 'data.csv.out'))
from __future__ import absolute_import
import os
import tarfile
import boto3
import numpy
import pytest
import tests.integ.local_mode_utils as local_mode_utils
from tests.integ import DATA_DIR, PYTHON_VERSION
from tests.integ.timeout import timeout
from sagemaker.local import LocalSession, LocalSagemakerRuntimeClient, LocalSagemakerClient
from sagemaker.mxnet import MXNet
from sagemaker.tensorflow import TensorFlow
# Iris dataset shipped with the integ-test data directory.
DATA_PATH = os.path.join(DATA_DIR, 'iris', 'data')
# Region to pin when running purely locally (no AWS config required).
DEFAULT_REGION = 'us-west-2'
class LocalNoS3Session(LocalSession):
    """LocalSession variant that forces local_code: True, ignoring any
    settings from the user's config file."""

    def __init__(self):
        # NOTE(review): deliberately invokes the grandparent initializer,
        # skipping LocalSession.__init__, so only the _initialize override
        # below configures the session — confirm this is intended.
        super(LocalSession, self).__init__()

    def _initialize(self, boto_session, sagemaker_client, sagemaker_runtime_client):
        self.boto_session = boto3.Session(region_name=DEFAULT_REGION)
        if self.config is None:
            # Force local_code regardless of any on-disk configuration.
            self.config = {'local': {'local_code': True,
                                     'region_name': DEFAULT_REGION}}
        self._region_name = DEFAULT_REGION
        self.sagemaker_client = LocalSagemakerClient(self)
        self.sagemaker_runtime_client = LocalSagemakerRuntimeClient(self.config)
        self.local_mode = True
@pytest.fixture(scope='module')
def mxnet_model(sagemaker_local_session, mxnet_full_version):
    """Factory fixture: yields a callable that trains an MXNet MNIST model
    and returns the resulting SageMaker model object."""
    def _create_model(output_path):
        mnist_dir = os.path.join(DATA_DIR, 'mxnet_mnist')
        estimator = MXNet(entry_point=os.path.join(mnist_dir, 'mnist.py'),
                          role='SageMakerRole',
                          train_instance_count=1, train_instance_type='local',
                          output_path=output_path,
                          framework_version=mxnet_full_version,
                          sagemaker_session=sagemaker_local_session)
        session = estimator.sagemaker_session
        channels = {
            'train': session.upload_data(
                path=os.path.join(mnist_dir, 'train'),
                key_prefix='integ-test-data/mxnet_mnist/train'),
            'test': session.upload_data(
                path=os.path.join(mnist_dir, 'test'),
                key_prefix='integ-test-data/mxnet_mnist/test'),
        }
        estimator.fit(channels)
        return estimator.create_model(1)
    return _create_model
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_mode(tf_full_version, sagemaker_local_session):
    """Train the iris TF DNN classifier in local mode with S3 input, deploy
    to a local endpoint, and check dict and list predict payloads agree."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               framework_version=tf_full_version,
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = estimator.sagemaker_session.upload_data(path=DATA_PATH,
                                                         key_prefix='integ-test-data/tf_iris')
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    # Serialize local-mode endpoints across parallel test workers.
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_distributed_local_mode(sagemaker_local_session):
    """Train the iris TF classifier across 3 local containers (distributed)
    using file:// input, deploy, and check dict/list predict payloads agree."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=3,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_data(sagemaker_local_session):
    """Train the iris TF classifier in local mode reading data straight from
    the local filesystem (file:// input), then deploy and predict."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=sagemaker_local_session)
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
@pytest.mark.skipif(PYTHON_VERSION != 'py2', reason="TensorFlow image supports only python 2.")
def test_tf_local_data_local_script():
    """Fully local TF run: local data and local code via LocalNoS3Session,
    with no S3 interaction at all."""
    with timeout(minutes=5):
        script_path = os.path.join(DATA_DIR, 'iris', 'iris-dnn-classifier.py')
        estimator = TensorFlow(entry_point=script_path,
                               role='SageMakerRole',
                               training_steps=1,
                               evaluation_steps=1,
                               hyperparameters={'input_tensor_name': 'inputs'},
                               train_instance_count=1,
                               train_instance_type='local',
                               base_job_name='test-tf',
                               sagemaker_session=LocalNoS3Session())
        inputs = 'file://' + DATA_PATH
        estimator.fit(inputs)
        print('job succeeded: {}'.format(estimator.latest_training_job.name))
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            json_predictor = estimator.deploy(initial_instance_count=1,
                                              instance_type='local',
                                              endpoint_name=endpoint_name)
            features = [6.4, 3.2, 4.5, 1.5]
            dict_result = json_predictor.predict({'inputs': features})
            print('predict result: {}'.format(dict_result))
            list_result = json_predictor.predict(features)
            print('predict result: {}'.format(list_result))
            assert dict_result == list_result
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
def test_local_mode_serving_from_s3_model(sagemaker_local_session, mxnet_model, mxnet_full_version):
    """Deploy a model whose artifacts live in S3 to a local endpoint and run
    a single zeroed-MNIST prediction."""
    path = 's3://%s' % sagemaker_local_session.default_bucket()
    s3_model = mxnet_model(path)
    s3_model.sagemaker_session = sagemaker_local_session
    predictor = None
    with local_mode_utils.lock():
        try:
            predictor = s3_model.deploy(initial_instance_count=1, instance_type='local')
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            if predictor:
                predictor.delete_endpoint()
@pytest.mark.local_mode
def test_local_mode_serving_from_local_model(tmpdir, sagemaker_local_session, mxnet_model):
    """Serve an MXNet model whose artifacts live on the local filesystem."""
    predictor = None
    with local_mode_utils.lock():
        try:
            model = mxnet_model('file://%s' % str(tmpdir))
            model.sagemaker_session = sagemaker_local_session
            predictor = model.deploy(initial_instance_count=1, instance_type='local')
            # A single zero-filled MNIST-shaped image exercises the endpoint.
            predictor.predict(numpy.zeros(shape=(1, 1, 28, 28)))
        finally:
            # deploy() may fail before a predictor exists; clean up only if it did not.
            if predictor:
                predictor.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_local_mode(sagemaker_local_session, mxnet_full_version):
    """Train MXNet MNIST in local containers with data staged in S3, then serve it."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    estimator = MXNet(entry_point=os.path.join(data_path, 'mnist.py'),
                      role='SageMakerRole', py_version=PYTHON_VERSION,
                      train_instance_count=1, train_instance_type='local',
                      sagemaker_session=sagemaker_local_session,
                      framework_version=mxnet_full_version)
    # Training data goes through S3 even though training itself runs locally.
    train_input = estimator.sagemaker_session.upload_data(
        path=os.path.join(data_path, 'train'),
        key_prefix='integ-test-data/mxnet_mnist/train')
    test_input = estimator.sagemaker_session.upload_data(
        path=os.path.join(data_path, 'test'),
        key_prefix='integ-test-data/mxnet_mnist/test')
    estimator.fit({'train': train_input, 'test': test_input})
    endpoint_name = estimator.latest_training_job.name
    with local_mode_utils.lock():
        try:
            predictor = estimator.deploy(1, 'local', endpoint_name=endpoint_name)
            predictor.predict(numpy.zeros(shape=(1, 1, 28, 28)))
        finally:
            estimator.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_local_data_local_script(mxnet_full_version):
    """Train and serve MXNet MNIST entirely from local files (no S3 access)."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(data_path, 'mnist.py')
    # NOTE(review): unlike test_mxnet_local_mode, no py_version is passed here,
    # so the framework default applies -- confirm this is intentional.
    mx = MXNet(entry_point=script_path, role='SageMakerRole',
               train_instance_count=1, train_instance_type='local',
               framework_version=mxnet_full_version,
               sagemaker_session=LocalNoS3Session())
    # file:// URIs keep both input channels on the local filesystem.
    train_input = 'file://' + os.path.join(data_path, 'train')
    test_input = 'file://' + os.path.join(data_path, 'test')
    mx.fit({'train': train_input, 'test': test_input})
    endpoint_name = mx.latest_training_job.name
    with local_mode_utils.lock():
        try:
            predictor = mx.deploy(1, 'local', endpoint_name=endpoint_name)
            data = numpy.zeros(shape=(1, 1, 28, 28))
            predictor.predict(data)
        finally:
            # Always tear the local endpoint down, even if predict fails.
            mx.delete_endpoint()
@pytest.mark.local_mode
def test_mxnet_training_failure(sagemaker_local_session, mxnet_full_version, tmpdir):
    """A failing training script must raise RuntimeError and leave a 'failure'
    marker inside the output archive."""
    failing_script = os.path.join(DATA_DIR, 'mxnet_mnist', 'failure_script.py')
    estimator = MXNet(entry_point=failing_script,
                      role='SageMakerRole',
                      framework_version=mxnet_full_version,
                      py_version=PYTHON_VERSION,
                      train_instance_count=1,
                      train_instance_type='local',
                      sagemaker_session=sagemaker_local_session,
                      output_path='file://{}'.format(tmpdir))
    with pytest.raises(RuntimeError):
        estimator.fit()
    # getmember raises KeyError if the container did not write the marker.
    archive = os.path.join(str(tmpdir), 'output.tar.gz')
    with tarfile.open(archive) as tar:
        tar.getmember('failure')
@pytest.mark.local_mode
def test_local_transform_mxnet(sagemaker_local_session, tmpdir, mxnet_full_version):
    """Train MXNet MNIST, then run a local batch-transform job over CSV input."""
    data_path = os.path.join(DATA_DIR, 'mxnet_mnist')
    script_path = os.path.join(data_path, 'mnist.py')
    mx = MXNet(entry_point=script_path, role='SageMakerRole', train_instance_count=1,
               train_instance_type='ml.c4.xlarge', framework_version=mxnet_full_version,
               sagemaker_session=sagemaker_local_session)
    train_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'train'),
                                                   key_prefix='integ-test-data/mxnet_mnist/train')
    test_input = mx.sagemaker_session.upload_data(path=os.path.join(data_path, 'test'),
                                                  key_prefix='integ-test-data/mxnet_mnist/test')
    with timeout(minutes=15):
        mx.fit({'train': train_input, 'test': test_input})
    transform_input_path = os.path.join(data_path, 'transform')
    transform_input_key_prefix = 'integ-test-data/mxnet_mnist/transform'
    transform_input = mx.sagemaker_session.upload_data(path=transform_input_path,
                                                       key_prefix=transform_input_key_prefix)
    # Transform output lands in tmpdir via a file:// URI.
    output_path = 'file://%s' % (str(tmpdir))
    # One record per request, reassembled line-by-line into the output file.
    transformer = mx.transformer(1, 'local', assemble_with='Line', max_payload=1,
                                 strategy='SingleRecord', output_path=output_path)
    with local_mode_utils.lock():
        transformer.transform(transform_input, content_type='text/csv', split_type='Line')
        transformer.wait()
    assert os.path.exists(os.path.join(str(tmpdir), 'data.csv.out'))
from tkinter import *
from tkinter import ttk
from work import MyWork
class MyGui:
    """Tkinter front-end that drives a background MyWork thread.

    The worker never touches widgets directly; it calls setprogress/setresult,
    which post Tk virtual events back onto the GUI thread, where
    showprogress/showresult update the widgets.
    """
    def __init__(self, root):
        '''Build the widget grid and register the worker-to-GUI virtual events.

        Values that change while the program runs are kept as Tk variables:
        maxvalue: summation upper bound, editable by the user (default 100).
        progress: completion percentage (0-100), written by the backend.
        sumvalue: the computed result, written by the backend.
        '''
        self.rootwnd = root
        root.title("MyGui Example with Background Worker Thread")
        mainframe = ttk.Frame(root, padding="3 3 12 12")
        mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
        ttk.Label(mainframe, text="Summation from 0 to: ").grid(column=1, row=1, sticky=E)
        self.maxvalue = StringVar(value=100)
        maxentry = ttk.Entry(mainframe, width=7, textvariable=self.maxvalue)
        maxentry.grid(column=2, row=1, sticky=(W, E))
        ttk.Label(mainframe, text="Progress (%): ").grid(column=1, row=2, sticky=E)
        self.progress = StringVar()
        ttk.Label(mainframe, textvariable=self.progress).grid(column=2, row=2, sticky=(W, E))
        ttk.Label(mainframe, text="Result: ").grid(column=1, row=3, sticky=E)
        self.sumvalue = StringVar()
        ttk.Label(mainframe, textvariable=self.sumvalue).grid(column=2, row=3, sticky=(W, E))
        self.startbtn = ttk.Button(mainframe, text="Start", command=self.calculate)
        self.startbtn.grid(column=2, row=4, sticky=(W, E, S, N))
        # Uniform padding around every widget on the main frame.
        for child in mainframe.winfo_children():
            child.grid_configure(padx=5, pady=5)
        maxentry.focus()
        root.bind("<Return>", self.calculate)
        # https://stackoverflow.com/questions/41912004/how-to-use-tcl-tk-bind-function-on-tkinters-widgets-in-python
        # Register Tcl callbacks for the worker-generated virtual events; the
        # trailing " %d" forwards the event's user data to the Python handler.
        cmd = root.register(self.showprogress)
        root.tk.call("bind", root, "<<ProgressEvent>>", cmd + " %d")
        cmd = root.register(self.showresult)
        root.tk.call("bind", root, "<<ResultEvent>>", cmd + " %d")
        # Let the widgets stretch with the window.
        root.columnconfigure(0, weight=1)
        root.rowconfigure(0, weight=1)
        mainframe.columnconfigure(2, weight=1)
        mainframe.rowconfigure(4, weight=1)
        self.started = False
    def calculate(self, *args):
        '''Toggle the worker: stop it if running, otherwise start a new run.'''
        try:
            if self.started:
                self.work.stop()
                self.startbtn["text"] = "Start"
                self.started = False
            else:
                self.work = MyWork(int(self.maxvalue.get()), self.setprogress, self.setresult)
                self.work.start()
                self.startbtn["text"] = "Stop"
                self.sumvalue.set("")
                self.started = True
        except ValueError:
            # Non-integer text in the entry field: ignore the request.
            pass
    def setprogress(self, value):
        '''Worker-thread callback: post a progress event to the GUI thread.

        The matching <<ProgressEvent>> binding is registered in __init__.
        '''
        self.rootwnd.event_generate("<<ProgressEvent>>", data=value)
    def showprogress(self, value):
        '''GUI-thread handler: update the displayed progress value.'''
        self.progress.set(value)
    def setresult(self, value):
        # Worker-thread callback: post the final result to the GUI thread.
        self.rootwnd.event_generate("<<ResultEvent>>", data=value)
    def showresult(self, value):
        # GUI-thread handler: show the result and reset the Start button.
        self.sumvalue.set(value)
        self.startbtn["text"] = "Start"
        self.started = False
from tkinter import *
from tkinter import ttk
from work import MyWork
class MyGui:
    """Tkinter front-end that runs the summation in a background MyWork thread.

    The worker reports back through Tk virtual events so that all widget
    updates happen on the GUI thread.
    """
    def __init__(self, root):
        """Lay out the widget grid and wire up the worker-to-GUI event bridge."""
        self.rootwnd = root
        self.started = False
        root.title("MyGui Example with Background Worker Thread")
        frame = ttk.Frame(root, padding="3 3 12 12")
        frame.grid(column=0, row=0, sticky=(N, W, E, S))
        # Row 1: upper bound of the summation, editable by the user (default 100).
        self.maxvalue = StringVar(value=100)
        ttk.Label(frame, text="Summation from 0 to: ").grid(column=1, row=1, sticky=E)
        entry = ttk.Entry(frame, width=7, textvariable=self.maxvalue)
        entry.grid(column=2, row=1, sticky=(W, E))
        # Row 2: completion percentage written by the backend.
        self.progress = StringVar()
        ttk.Label(frame, text="Progress (%): ").grid(column=1, row=2, sticky=E)
        ttk.Label(frame, textvariable=self.progress).grid(column=2, row=2, sticky=(W, E))
        # Row 3: final result written by the backend.
        self.sumvalue = StringVar()
        ttk.Label(frame, text="Result: ").grid(column=1, row=3, sticky=E)
        ttk.Label(frame, textvariable=self.sumvalue).grid(column=2, row=3, sticky=(W, E))
        # Row 4: start/stop toggle button.
        self.startbtn = ttk.Button(frame, text="Start", command=self.calculate)
        self.startbtn.grid(column=2, row=4, sticky=(W, E, S, N))
        for widget in frame.winfo_children():
            widget.grid_configure(padx=5, pady=5)
        entry.focus()
        root.bind("<Return>", self.calculate)
        # Bridge worker-thread notifications into the GUI thread via Tk virtual
        # events; the trailing " %d" passes the event data to the handler.
        # https://stackoverflow.com/questions/41912004/how-to-use-tcl-tk-bind-function-on-tkinters-widgets-in-python
        progress_cb = root.register(self.showprogress)
        root.tk.call("bind", root, "<<ProgressEvent>>", progress_cb + " %d")
        result_cb = root.register(self.showresult)
        root.tk.call("bind", root, "<<ResultEvent>>", result_cb + " %d")
        # Make the layout stretch with the window.
        root.columnconfigure(0, weight=1)
        root.rowconfigure(0, weight=1)
        frame.columnconfigure(2, weight=1)
        frame.rowconfigure(4, weight=1)
    def calculate(self, *args):
        """Start the worker, or stop it if one is already running."""
        try:
            if not self.started:
                self.work = MyWork(int(self.maxvalue.get()), self.setprogress, self.setresult)
                self.work.start()
                self.startbtn["text"] = "Stop"
                self.sumvalue.set("")
                self.started = True
            else:
                self.work.stop()
                self.startbtn["text"] = "Start"
                self.started = False
        except ValueError:
            # The entry did not contain an integer; ignore the request.
            pass
    def setprogress(self, value):
        """Worker-thread callback: forward progress to the GUI thread."""
        self.rootwnd.event_generate("<<ProgressEvent>>", data=value)
    def showprogress(self, value):
        """GUI-thread handler: display the current progress."""
        self.progress.set(value)
    def setresult(self, value):
        """Worker-thread callback: forward the final result to the GUI thread."""
        self.rootwnd.event_generate("<<ResultEvent>>", data=value)
    def showresult(self, value):
        """GUI-thread handler: show the result and reset the Start button."""
        self.sumvalue.set(value)
        self.startbtn["text"] = "Start"
        self.started = False
import os
import sys
import collector_center_sdk.api.collection_config.create_collection_config_pb2
import collector_center_sdk.api.collection_config.debug_collection_config_pb2
import collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2
import collector_center_sdk.api.collection_config.delete_collection_config_pb2
import collector_center_sdk.api.collection_config.detail_collection_config_pb2
import collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2
import collector_center_sdk.api.collection_config.disable_collection_config_pb2
import collector_center_sdk.api.collection_config.list_collection_config_pb2
import collector_center_sdk.api.collection_config.list_collection_config_job_pb2
import collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2
import collector_center_sdk.api.collection_config.update_collection_config_pb2
import collector_center_sdk.utils.http_util
import google.protobuf.json_format
class CollectionConfigClient(object):
    """HTTP client for the collector_center collection_config contract.

    Every public method shares the same signature (request, org, user,
    timeout=10): it serializes the protobuf *request*, performs one HTTP call
    through collector_center_sdk.utils.http_util.do_api_request, and parses
    the JSON "data" payload back into the matching protobuf response.  The
    shared plumbing lives in _invoke(); the public methods only supply the
    HTTP verb, contract name, URI and response type.
    """

    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """
        Initialize the client.
        :param server_ip: fixed server ip; empty means route via the naming service
        :param server_port: fixed server port, used together with server_ip; 0 means route via the naming service
        :param service_name: service name to route by; empty means route by contract name. If both server_ip and service_name are set, server_ip takes precedence
        :param host: host header of the target service, e.g. cmdb.easyops-only.com
        """
        # server_ip and server_port are only meaningful as a pair.
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host

    def _invoke(self, method, contract, uri, request, rsp, org, user, timeout):
        """Perform one API call and populate *rsp* from the reply payload.

        :param method: HTTP verb ("GET", "POST", "PUT", "DELETE")
        :param contract: contract name used for routing when a fixed server_ip is set
        :param uri: request path (already formatted)
        :param request: protobuf request message
        :param rsp: empty protobuf response message to populate
        :param org: numeric org id of the customer, sent in the headers
        :param user: user name of the API caller, sent in the headers
        :param timeout: request timeout in seconds
        :return: *rsp*, populated from the "data" field of the reply
        """
        route_name = ""
        # Routing priority: explicit service_name first, then the contract
        # name when a fixed server_ip is configured, otherwise name-service
        # routing with an empty dst_name.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = contract
        rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
            method=method,
            src_name="logic.collector_center_sdk",
            dst_name=route_name,
            server_ip=self._server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                request, preserving_proto_field_name=True),
            headers={"org": org, "user": user},
            timeout=timeout,
        )
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp

    def create_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigResponse
        """Create a collection config (see class docstring for parameters)."""
        return self._invoke(
            "POST",
            "easyops.api.collector_center.collection_config.CreateCollectionConfig",
            "/api/v1/collection_config",
            request,
            collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigResponse(),
            org, user, timeout)

    def debug_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigResponse
        """Debug a collection config (see class docstring for parameters)."""
        return self._invoke(
            "POST",
            "easyops.api.collector_center.collection_config.DebugCollectionConfig",
            "/api/v1/collection_config/debug",
            request,
            collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigResponse(),
            org, user, timeout)

    def debug_collection_config_callback(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackRequest, int, str, int) -> collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackResponse
        """Receive a command callback (see class docstring for parameters)."""
        return self._invoke(
            "POST",
            "easyops.api.collector_center.collection_config.DebugCollectionConfigCallback",
            "/api/v1/debug/cmd/callback",
            request,
            collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackResponse(),
            org, user, timeout)

    def delete_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigResponse
        """Delete a collection config (see class docstring for parameters)."""
        return self._invoke(
            "DELETE",
            "easyops.api.collector_center.collection_config.DeleteCollectionConfig",
            "/api/v1/collection_config/{id}".format(id=request.id),
            request,
            collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigResponse(),
            org, user, timeout)

    def detail_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigResponse
        """Get one collection config (see class docstring for parameters)."""
        return self._invoke(
            "GET",
            "easyops.api.collector_center.collection_config.DetailCollectionConfig",
            "/api/v1/collection_config/{id}".format(id=request.id),
            request,
            collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigResponse(),
            org, user, timeout)

    def detail_collection_config_debug(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugRequest, int, str, int) -> collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugResponse
        """Get a config's debug result (see class docstring for parameters)."""
        return self._invoke(
            "GET",
            "easyops.api.collector_center.collection_config.DetailCollectionConfigDebug",
            "/api/v1/debug/{id}".format(id=request.id),
            request,
            collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugResponse(),
            org, user, timeout)

    def disable_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigResponse
        """Enable/disable a collection config (see class docstring for parameters)."""
        return self._invoke(
            "PUT",
            "easyops.api.collector_center.collection_config.DisableCollectionConfig",
            "/api/v1/collection_config/{id}/disabled".format(id=request.id),
            request,
            collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigResponse(),
            org, user, timeout)

    def list_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigResponse
        """List collection configs (see class docstring for parameters)."""
        return self._invoke(
            "GET",
            "easyops.api.collector_center.collection_config.ListCollectionConfig",
            "/api/v1/collection_config",
            request,
            collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigResponse(),
            org, user, timeout)

    def list_collection_config_jobs(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsRequest, int, str, int) -> collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsResponse
        """List the jobs of one collection config (see class docstring for parameters)."""
        return self._invoke(
            "GET",
            "easyops.api.collector_center.collection_config.ListCollectionConfigJobs",
            "/api/v1/collection_config/{confId}/jobs".format(confId=request.confId),
            request,
            collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsResponse(),
            org, user, timeout)

    def maintain_collection_config_job(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobRequest, int, str, int) -> collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobResponse
        """Maintain config-generated jobs (see class docstring for parameters)."""
        return self._invoke(
            "POST",
            "easyops.api.collector_center.collection_config.MaintainCollectionConfigJob",
            "/api/v1/collection_config/maintain_job",
            request,
            collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobResponse(),
            org, user, timeout)

    def update_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigResponse
        """Update a collection config (see class docstring for parameters)."""
        return self._invoke(
            "PUT",
            "easyops.api.collector_center.collection_config.UpdateCollectionConfig",
            "/api/v1/collection_config/{id}".format(id=request.id),
            request,
            collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigResponse(),
            org, user, timeout)
import sys
import collector_center_sdk.api.collection_config.create_collection_config_pb2
import collector_center_sdk.api.collection_config.debug_collection_config_pb2
import collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2
import collector_center_sdk.api.collection_config.delete_collection_config_pb2
import collector_center_sdk.api.collection_config.detail_collection_config_pb2
import collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2
import collector_center_sdk.api.collection_config.disable_collection_config_pb2
import collector_center_sdk.api.collection_config.list_collection_config_pb2
import collector_center_sdk.api.collection_config.list_collection_config_job_pb2
import collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2
import collector_center_sdk.api.collection_config.update_collection_config_pb2
import collector_center_sdk.utils.http_util
import google.protobuf.json_format
class CollectionConfigClient(object):
    def __init__(self, server_ip="", server_port=0, service_name="", host=""):
        """
        Initialize the client.
        :param server_ip: fixed server ip; empty means route via the naming service
        :param server_port: fixed server port, used together with server_ip; 0 means route via the naming service
        :param service_name: service name to route by; empty means route by contract name. If both server_ip and service_name are set, server_ip takes precedence
        :param host: host header of the target service, e.g. cmdb.easyops-only.com
        """
        # server_ip and server_port are only meaningful as a pair.
        if server_ip == "" and server_port != 0 or server_ip != "" and server_port == 0:
            raise Exception("server_ip和server_port必须同时指定")
        self._server_ip = server_ip
        self._server_port = server_port
        self._service_name = service_name
        self._host = host
    def create_collection_config(self, request, org, user, timeout=10):
        # type: (collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigResponse
        """
        Create a collection config.
        :param request: create_collection_config request message
        :param org: numeric org id of the customer
        :param user: user name of the API caller
        :param timeout: request timeout in seconds
        :return: collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigResponse
        """
        headers = {"org": org, "user": user}
        route_name = ""
        server_ip = self._server_ip
        # Routing priority: explicit service_name first, then the contract
        # name when a fixed server_ip is set, otherwise name-service routing.
        if self._service_name != "":
            route_name = self._service_name
        elif self._server_ip != "":
            route_name = "easyops.api.collector_center.collection_config.CreateCollectionConfig"
        uri = "/api/v1/collection_config"
        requestParam = request
        rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
            method="POST",
            src_name="logic.collector_center_sdk",
            dst_name=route_name,
            server_ip=server_ip,
            server_port=self._server_port,
            host=self._host,
            uri=uri,
            params=google.protobuf.json_format.MessageToDict(
                requestParam, preserving_proto_field_name=True),
            headers=headers,
            timeout=timeout,
        )
        # Only the "data" field of the reply carries the response message.
        rsp = collector_center_sdk.api.collection_config.create_collection_config_pb2.CreateCollectionConfigResponse()
        google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
        return rsp
def debug_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigResponse
"""
调试采集配置
:param request: debug_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DebugCollectionConfig"
uri = "/api/v1/collection_config/debug"
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.debug_collection_config_pb2.DebugCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def debug_collection_config_callback(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackRequest, int, str, int) -> collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackResponse
"""
接收command回调
:param request: debug_collection_config_callback请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DebugCollectionConfigCallback"
uri = "/api/v1/debug/cmd/callback"
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.debug_collection_config_callback_pb2.DebugCollectionConfigCallbackResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def delete_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigResponse
"""
删除采集配置
:param request: delete_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DeleteCollectionConfig"
uri = "/api/v1/collection_config/{id}".format(
id=request.id,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="DELETE",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.delete_collection_config_pb2.DeleteCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def detail_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigResponse
"""
查看采集配置
:param request: detail_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DetailCollectionConfig"
uri = "/api/v1/collection_config/{id}".format(
id=request.id,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.detail_collection_config_pb2.DetailCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def detail_collection_config_debug(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugRequest, int, str, int) -> collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugResponse
"""
查看采集配置调试结果
:param request: detail_collection_config_debug请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DetailCollectionConfigDebug"
uri = "/api/v1/debug/{id}".format(
id=request.id,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.detail_collection_config_debug_pb2.DetailCollectionConfigDebugResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def disable_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigResponse
"""
启禁用采集配置
:param request: disable_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.DisableCollectionConfig"
uri = "/api/v1/collection_config/{id}/disabled".format(
id=request.id,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="PUT",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.disable_collection_config_pb2.DisableCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def list_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigResponse
"""
查看采集配置列表
:param request: list_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.ListCollectionConfig"
uri = "/api/v1/collection_config"
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.list_collection_config_pb2.ListCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def list_collection_config_jobs(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsRequest, int, str, int) -> collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsResponse
"""
查看单个采集配置的任务
:param request: list_collection_config_jobs请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.ListCollectionConfigJobs"
uri = "/api/v1/collection_config/{confId}/jobs".format(
confId=request.confId,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="GET",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.list_collection_config_job_pb2.ListCollectionConfigJobsResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def maintain_collection_config_job(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobRequest, int, str, int) -> collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobResponse
"""
维护配置生成任务
:param request: maintain_collection_config_job请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.MaintainCollectionConfigJob"
uri = "/api/v1/collection_config/maintain_job"
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="POST",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.maintain_collection_config_job_pb2.MaintainCollectionConfigJobResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp
def update_collection_config(self, request, org, user, timeout=10):
# type: (collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigRequest, int, str, int) -> collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigResponse
"""
更新采集配置
:param request: update_collection_config请求
:param org: 客户的org编号,为数字
:param user: 调用api使用的用户名
:param timeout: 调用超时时间,单位秒
:return: collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigResponse
"""
headers = {"org": org, "user": user}
route_name = ""
server_ip = self._server_ip
if self._service_name != "":
route_name = self._service_name
elif self._server_ip != "":
route_name = "easyops.api.collector_center.collection_config.UpdateCollectionConfig"
uri = "/api/v1/collection_config/{id}".format(
id=request.id,
)
requestParam = request
rsp_obj = collector_center_sdk.utils.http_util.do_api_request(
method="PUT",
src_name="logic.collector_center_sdk",
dst_name=route_name,
server_ip=server_ip,
server_port=self._server_port,
host=self._host,
uri=uri,
params=google.protobuf.json_format.MessageToDict(
requestParam, preserving_proto_field_name=True),
headers=headers,
timeout=timeout,
)
rsp = collector_center_sdk.api.collection_config.update_collection_config_pb2.UpdateCollectionConfigResponse()
google.protobuf.json_format.ParseDict(rsp_obj["data"], rsp, ignore_unknown_fields=True)
return rsp | 0.243822 | 0.056418 |
from __future__ import print_function
import logging
from pathlib import PurePath
from ophyd import (AreaDetector, CamBase, TIFFPlugin, Component as Cpt,
HDF5Plugin, Device, StatsPlugin, ProcessPlugin,
ROIPlugin, TransformPlugin)
from ophyd.areadetector.filestore_mixins import (
FileStoreTIFF, FileStorePluginBase)
from .utils import makedirs
from .trigger_mixins import (HxnModalTrigger, FileStoreBulkReadable)
logger = logging.getLogger(__name__)
class MerlinTiffPlugin(TIFFPlugin, FileStoreBulkReadable, FileStoreTIFF,
Device):
def mode_external(self):
total_points = self.parent.mode_settings.total_points.get()
self.stage_sigs[self.num_capture] = total_points
def get_frames_per_point(self):
mode = self.parent.mode_settings.mode.get()
if mode == 'external':
return 1
else:
return self.parent.cam.num_images.get()
class MerlinDetectorCam(CamBase):
pass
class MerlinDetector(AreaDetector):
cam = Cpt(MerlinDetectorCam, 'cam1:',
read_attrs=[],
configuration_attrs=['image_mode', 'trigger_mode',
'acquire_time', 'acquire_period'],
)
class MerlinFileStoreHDF5(FileStorePluginBase, FileStoreBulkReadable):
_spec = 'TPX_HDF5'
filestore_spec = _spec
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stage_sigs.update([(self.file_template, '%s%s_%6.6d.h5'),
(self.file_write_mode, 'Stream'),
(self.compression, 'zlib'),
(self.capture, 1)
])
def stage(self):
staged = super().stage()
res_kwargs = {'frame_per_point': 1}
logger.debug("Inserting resource with filename %s", self._fn)
self._generate_resource(res_kwargs)
return staged
def make_filename(self):
fn, read_path, write_path = super().make_filename()
mode_settings = self.parent.mode_settings
if mode_settings.make_directories.get():
makedirs(read_path)
return fn, read_path, write_path
class HDF5PluginWithFileStore(HDF5Plugin, MerlinFileStoreHDF5):
def stage(self):
mode_settings = self.parent.mode_settings
total_points = mode_settings.total_points.get()
self.stage_sigs[self.num_capture] = total_points
# ensure that setting capture is the last thing that's done
self.stage_sigs.move_to_end(self.capture)
return super().stage()
class HxnMerlinDetector(HxnModalTrigger, MerlinDetector):
hdf5 = Cpt(HDF5PluginWithFileStore, 'HDF1:',
read_attrs=[],
configuration_attrs=[],
write_path_template='/data/%Y/%m/%d/',
root='/data')
proc1 = Cpt(ProcessPlugin, 'Proc1:')
stats1 = Cpt(StatsPlugin, 'Stats1:')
stats2 = Cpt(StatsPlugin, 'Stats2:')
stats3 = Cpt(StatsPlugin, 'Stats3:')
stats4 = Cpt(StatsPlugin, 'Stats4:')
stats5 = Cpt(StatsPlugin, 'Stats5:')
transform1 = Cpt(TransformPlugin, 'Trans1:')
roi1 = Cpt(ROIPlugin, 'ROI1:')
roi2 = Cpt(ROIPlugin, 'ROI2:')
roi3 = Cpt(ROIPlugin, 'ROI3:')
roi4 = Cpt(ROIPlugin, 'ROI4:')
# tiff1 = Cpt(MerlinTiffPlugin, 'TIFF1:',
# read_attrs=[],
# configuration_attrs=[],
# write_path_template='/data/%Y/%m/%d/',
# root='/data')
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
**kwargs):
if read_attrs is None:
read_attrs = ['hdf5', 'cam']
if configuration_attrs is None:
configuration_attrs = ['hdf5', 'cam']
if 'hdf5' not in read_attrs:
# ensure that hdf5 is still added, or data acquisition will fail
read_attrs = list(read_attrs) + ['hdf5']
super().__init__(prefix, configuration_attrs=configuration_attrs,
read_attrs=read_attrs, **kwargs)
def mode_internal(self):
super().mode_internal()
count_time = self.count_time.get()
if count_time is not None:
self.stage_sigs[self.cam.acquire_time] = count_time
self.stage_sigs[self.cam.acquire_period] = count_time + 0.005
def mode_external(self):
super().mode_external()
# NOTE: these values specify a debounce time for external triggering so
# they should be set to < 0.5 the expected exposure time, or at
# minimum the lowest possible dead time = 1.64ms
expected_exposure = 0.001
min_dead_time = 0.00164
self.stage_sigs[self.cam.acquire_time] = expected_exposure
self.stage_sigs[self.cam.acquire_period] = expected_exposure + min_dead_time
self.cam.stage_sigs[self.cam.trigger_mode] = 'Trigger Enable' | hxntools/detectors/merlin.py | from __future__ import print_function
import logging
from pathlib import PurePath
from ophyd import (AreaDetector, CamBase, TIFFPlugin, Component as Cpt,
HDF5Plugin, Device, StatsPlugin, ProcessPlugin,
ROIPlugin, TransformPlugin)
from ophyd.areadetector.filestore_mixins import (
FileStoreTIFF, FileStorePluginBase)
from .utils import makedirs
from .trigger_mixins import (HxnModalTrigger, FileStoreBulkReadable)
logger = logging.getLogger(__name__)
class MerlinTiffPlugin(TIFFPlugin, FileStoreBulkReadable, FileStoreTIFF,
Device):
def mode_external(self):
total_points = self.parent.mode_settings.total_points.get()
self.stage_sigs[self.num_capture] = total_points
def get_frames_per_point(self):
mode = self.parent.mode_settings.mode.get()
if mode == 'external':
return 1
else:
return self.parent.cam.num_images.get()
class MerlinDetectorCam(CamBase):
pass
class MerlinDetector(AreaDetector):
cam = Cpt(MerlinDetectorCam, 'cam1:',
read_attrs=[],
configuration_attrs=['image_mode', 'trigger_mode',
'acquire_time', 'acquire_period'],
)
class MerlinFileStoreHDF5(FileStorePluginBase, FileStoreBulkReadable):
_spec = 'TPX_HDF5'
filestore_spec = _spec
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stage_sigs.update([(self.file_template, '%s%s_%6.6d.h5'),
(self.file_write_mode, 'Stream'),
(self.compression, 'zlib'),
(self.capture, 1)
])
def stage(self):
staged = super().stage()
res_kwargs = {'frame_per_point': 1}
logger.debug("Inserting resource with filename %s", self._fn)
self._generate_resource(res_kwargs)
return staged
def make_filename(self):
fn, read_path, write_path = super().make_filename()
mode_settings = self.parent.mode_settings
if mode_settings.make_directories.get():
makedirs(read_path)
return fn, read_path, write_path
class HDF5PluginWithFileStore(HDF5Plugin, MerlinFileStoreHDF5):
def stage(self):
mode_settings = self.parent.mode_settings
total_points = mode_settings.total_points.get()
self.stage_sigs[self.num_capture] = total_points
# ensure that setting capture is the last thing that's done
self.stage_sigs.move_to_end(self.capture)
return super().stage()
class HxnMerlinDetector(HxnModalTrigger, MerlinDetector):
hdf5 = Cpt(HDF5PluginWithFileStore, 'HDF1:',
read_attrs=[],
configuration_attrs=[],
write_path_template='/data/%Y/%m/%d/',
root='/data')
proc1 = Cpt(ProcessPlugin, 'Proc1:')
stats1 = Cpt(StatsPlugin, 'Stats1:')
stats2 = Cpt(StatsPlugin, 'Stats2:')
stats3 = Cpt(StatsPlugin, 'Stats3:')
stats4 = Cpt(StatsPlugin, 'Stats4:')
stats5 = Cpt(StatsPlugin, 'Stats5:')
transform1 = Cpt(TransformPlugin, 'Trans1:')
roi1 = Cpt(ROIPlugin, 'ROI1:')
roi2 = Cpt(ROIPlugin, 'ROI2:')
roi3 = Cpt(ROIPlugin, 'ROI3:')
roi4 = Cpt(ROIPlugin, 'ROI4:')
# tiff1 = Cpt(MerlinTiffPlugin, 'TIFF1:',
# read_attrs=[],
# configuration_attrs=[],
# write_path_template='/data/%Y/%m/%d/',
# root='/data')
def __init__(self, prefix, *, read_attrs=None, configuration_attrs=None,
**kwargs):
if read_attrs is None:
read_attrs = ['hdf5', 'cam']
if configuration_attrs is None:
configuration_attrs = ['hdf5', 'cam']
if 'hdf5' not in read_attrs:
# ensure that hdf5 is still added, or data acquisition will fail
read_attrs = list(read_attrs) + ['hdf5']
super().__init__(prefix, configuration_attrs=configuration_attrs,
read_attrs=read_attrs, **kwargs)
def mode_internal(self):
super().mode_internal()
count_time = self.count_time.get()
if count_time is not None:
self.stage_sigs[self.cam.acquire_time] = count_time
self.stage_sigs[self.cam.acquire_period] = count_time + 0.005
def mode_external(self):
super().mode_external()
# NOTE: these values specify a debounce time for external triggering so
# they should be set to < 0.5 the expected exposure time, or at
# minimum the lowest possible dead time = 1.64ms
expected_exposure = 0.001
min_dead_time = 0.00164
self.stage_sigs[self.cam.acquire_time] = expected_exposure
self.stage_sigs[self.cam.acquire_period] = expected_exposure + min_dead_time
self.cam.stage_sigs[self.cam.trigger_mode] = 'Trigger Enable' | 0.583322 | 0.110807 |
# Auto-generated ctypes bindings (ctypesgen-style) for the GRASS GIS DBMI
# client/base shared libraries.  Edit the generator inputs, not this file.
__docformat__ = 'restructuredtext'
# Cache of loaded shared-library handles, keyed by library name.
_libs = {}
# Extra directories consulted by the loader when resolving libraries.
_libdirs = []
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
from .ctypes_loader import *
add_library_search_dirs([])
# Begin libraries
_libs["grass_dbmiclient.7.8"] = load_library("grass_dbmiclient.7.8")
_libs["grass_dbmibase.7.8"] = load_library("grass_dbmibase.7.8")
# 2 libraries
# End libraries
# No modules
# Platform typedefs lifted from the macOS SDK headers: on Darwin a file
# position (fpos_t) is a 64-bit signed offset.
__int64_t = c_longlong # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/i386/_types.h: 46
__darwin_off_t = __int64_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/sys/_types.h: 71
fpos_t = __darwin_off_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 81
# Binding of Darwin's `struct __sbuf` (<_stdio.h>: 92): a plain buffer
# descriptor — a byte pointer plus its size.
class struct___sbuf(Structure):
    pass

# Layout is attached after class creation (ctypesgen convention; the same
# two-step pattern is what lets self-referential structs later in this
# module compile).
struct___sbuf._fields_ = [
    ('_base', POINTER(c_ubyte)),
    ('_size', c_int),
]
struct___sbuf.__slots__ = [name for name, _ in struct___sbuf._fields_]
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 98
# Opaque forward declaration: struct __sFILEX is private to libc, so no
# _fields_ are ever attached; it is only used behind a POINTER().
class struct___sFILEX(Structure):
    pass
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
# Binding of Darwin's stdio FILE structure.  The _close/_read/_seek/_write
# members are C callback pointers modelled with CFUNCTYPE.  NOTE: __slots__
# and _fields_ are assigned post-creation (ctypesgen convention); the field
# order below defines the binary layout and must not be changed.
class struct___sFILE(Structure):
    pass
struct___sFILE.__slots__ = [
    '_p',
    '_r',
    '_w',
    '_flags',
    '_file',
    '_bf',
    '_lbfsize',
    '_cookie',
    '_close',
    '_read',
    '_seek',
    '_write',
    '_ub',
    '_extra',
    '_ur',
    '_ubuf',
    '_nbuf',
    '_lb',
    '_blksize',
    '_offset',
]
struct___sFILE._fields_ = [
    ('_p', POINTER(c_ubyte)),
    ('_r', c_int),
    ('_w', c_int),
    ('_flags', c_short),
    ('_file', c_short),
    ('_bf', struct___sbuf),
    ('_lbfsize', c_int),
    ('_cookie', POINTER(None)),
    ('_close', CFUNCTYPE(UNCHECKED(c_int), POINTER(None))),
    ('_read', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
    ('_seek', CFUNCTYPE(UNCHECKED(fpos_t), POINTER(None), fpos_t, c_int)),
    ('_write', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
    ('_ub', struct___sbuf),
    ('_extra', POINTER(struct___sFILEX)),
    ('_ur', c_int),
    ('_ubuf', c_ubyte * 3),
    ('_nbuf', c_ubyte * 1),
    ('_lb', struct___sbuf),
    ('_blksize', c_int),
    ('_offset', fpos_t),
]
FILE = struct___sFILE # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
# DBMI handle typedefs: dbAddress is an untyped (void*) pointer alias;
# dbToken is a plain int alias (used e.g. as dbCursor.token below).
dbAddress = POINTER(None) # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 144
dbToken = c_int # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 145
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
# dbString: DBMI string holder — a char* buffer plus an int 'nalloc'
# (presumably the allocated size; confirm against dbmi.h if it matters).
class struct__db_string(Structure):
    pass
struct__db_string.__slots__ = [
    'string',
    'nalloc',
]
struct__db_string._fields_ = [
    ('string', String),
    ('nalloc', c_int),
]
dbString = struct__db_string # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
# dbDbmscap (grass/dbmi.h: 153): one node of the dbmscap driver list —
# three fixed 256-byte char arrays plus a pointer to the next node.
class struct__dbmscap(Structure):
    pass

# _fields_ must be attached after the class object exists because the
# struct is self-referential ('next' points to struct__dbmscap itself).
struct__dbmscap._fields_ = [
    ('driverName', c_char * 256),
    ('startup', c_char * 256),
    ('comment', c_char * 256),
    ('next', POINTER(struct__dbmscap)),
]
struct__dbmscap.__slots__ = [name for name, _ in struct__dbmscap._fields_]

dbDbmscap = struct__dbmscap # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 159
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
# dbDirent: a directory entry as reported by the DBMI driver — name plus
# int flags for "is directory" and permission.
class struct__db_dirent(Structure):
    pass
struct__db_dirent.__slots__ = [
    'name',
    'isdir',
    'perm',
]
struct__db_dirent._fields_ = [
    ('name', dbString),
    ('isdir', c_int),
    ('perm', c_int),
]
dbDirent = struct__db_dirent # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
# dbDriver: client-side handle for a driver subprocess — its dbmscap entry
# (by value), send/recv stdio streams, and the child pid.
class struct__db_driver(Structure):
    pass
struct__db_driver.__slots__ = [
    'dbmscap',
    'send',
    'recv',
    'pid',
]
struct__db_driver._fields_ = [
    ('dbmscap', dbDbmscap),
    ('send', POINTER(FILE)),
    ('recv', POINTER(FILE)),
    ('pid', c_int),
]
dbDriver = struct__db_driver # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
# dbHandle: identifies a database as a (name, schema) pair of dbStrings.
class struct__db_handle(Structure):
    pass
struct__db_handle.__slots__ = [
    'dbName',
    'dbSchema',
]
struct__db_handle._fields_ = [
    ('dbName', dbString),
    ('dbSchema', dbString),
]
dbHandle = struct__db_handle # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
# dbDateTime (grass/dbmi.h: 191): calendar date/time value — a one-byte
# 'current' flag, integer date/time components, and 'seconds' as a double
# so fractional seconds fit.
class struct__db_date_time(Structure):
    pass

# Layout attached post-creation (ctypesgen convention used module-wide).
struct__db_date_time._fields_ = [
    ('current', c_char),
    ('year', c_int),
    ('month', c_int),
    ('day', c_int),
    ('hour', c_int),
    ('minute', c_int),
    ('seconds', c_double),
]
struct__db_date_time.__slots__ = [name for name, _ in struct__db_date_time._fields_]

dbDateTime = struct__db_date_time # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
# dbValue: a tagged value slot holding int/double/string/datetime members
# plus an isNull flag.  (All members coexist by value; which one is live
# presumably depends on the column's SQL type — see dbColumn.)
class struct__db_value(Structure):
    pass
struct__db_value.__slots__ = [
    'isNull',
    'i',
    'd',
    's',
    't',
]
struct__db_value._fields_ = [
    ('isNull', c_char),
    ('i', c_int),
    ('d', c_double),
    ('s', dbString),
    ('t', dbDateTime),
]
dbValue = struct__db_value # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
# dbColumn: full column descriptor — name/description, SQL and host type
# codes, the current value, size/precision/scale, nullability, default
# value handling, and select/update privilege flags.
class struct__db_column(Structure):
    pass
struct__db_column.__slots__ = [
    'columnName',
    'description',
    'sqlDataType',
    'hostDataType',
    'value',
    'dataLen',
    'precision',
    'scale',
    'nullAllowed',
    'hasDefaultValue',
    'useDefaultValue',
    'defaultValue',
    'select',
    'update',
]
struct__db_column._fields_ = [
    ('columnName', dbString),
    ('description', dbString),
    ('sqlDataType', c_int),
    ('hostDataType', c_int),
    ('value', dbValue),
    ('dataLen', c_int),
    ('precision', c_int),
    ('scale', c_int),
    ('nullAllowed', c_char),
    ('hasDefaultValue', c_char),
    ('useDefaultValue', c_char),
    ('defaultValue', dbValue),
    ('select', c_int),
    ('update', c_int),
]
dbColumn = struct__db_column # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
# dbTable: table descriptor — name/description, a counted array of
# dbColumn ('columns' has 'numColumns' entries), and insert/delete
# privilege flags.
class struct__db_table(Structure):
    pass
struct__db_table.__slots__ = [
    'tableName',
    'description',
    'numColumns',
    'columns',
    'priv_insert',
    'priv_delete',
]
struct__db_table._fields_ = [
    ('tableName', dbString),
    ('description', dbString),
    ('numColumns', c_int),
    ('columns', POINTER(dbColumn)),
    ('priv_insert', c_int),
    ('priv_delete', c_int),
]
dbTable = struct__db_table # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
# dbCursor: an open cursor — integer token, owning driver, result table,
# per-column flag array, and type/mode ints.
class struct__db_cursor(Structure):
    pass
struct__db_cursor.__slots__ = [
    'token',
    'driver',
    'table',
    'column_flags',
    'type',
    'mode',
]
struct__db_cursor._fields_ = [
    ('token', dbToken),
    ('driver', POINTER(dbDriver)),
    ('table', POINTER(dbTable)),
    ('column_flags', POINTER(c_short)),
    ('type', c_int),
    ('mode', c_int),
]
dbCursor = struct__db_cursor # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
# dbIndex: index descriptor — index/table names, a counted array of
# column-name dbStrings, and a uniqueness flag.
class struct__db_index(Structure):
    pass
struct__db_index.__slots__ = [
    'indexName',
    'tableName',
    'numColumns',
    'columnNames',
    'unique',
]
struct__db_index._fields_ = [
    ('indexName', dbString),
    ('tableName', dbString),
    ('numColumns', c_int),
    ('columnNames', POINTER(dbString)),
    ('unique', c_char),
]
dbIndex = struct__db_index # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
# dbDriverState: driver-side bookkeeping — current db name/schema, an
# 'open' flag, and a counted list of pointers to open cursors.
class struct__db_driver_state(Structure):
    pass
struct__db_driver_state.__slots__ = [
    'dbname',
    'dbschema',
    'open',
    'ncursors',
    'cursor_list',
]
struct__db_driver_state._fields_ = [
    ('dbname', String),
    ('dbschema', String),
    ('open', c_int),
    ('ncursors', c_int),
    ('cursor_list', POINTER(POINTER(dbCursor))),
]
dbDriverState = struct__db_driver_state # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
# dbCatValI (grass/dbmi.h: 263): a simple (category, integer value) pair.
class struct_anon_7(Structure):
    pass

# Layout attached post-creation (ctypesgen convention used module-wide).
struct_anon_7._fields_ = [('cat', c_int), ('val', c_int)]
struct_anon_7.__slots__ = [name for name, _ in struct_anon_7._fields_]

dbCatValI = struct_anon_7 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 263
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 270
# Value payload used by dbCatVal (grass/dbmi.h, line 270): one of
# int, double, dbString* or dbDateTime*.
class union_anon_8(Union):
    pass

_catval_value_layout = [
    ('i', c_int),
    ('d', c_double),
    ('s', POINTER(dbString)),
    ('t', POINTER(dbDateTime)),
]
union_anon_8.__slots__ = [_n for _n, _t in _catval_value_layout]
union_anon_8._fields_ = _catval_value_layout
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 281
# Category/value record with NULL flag and tagged payload
# (grass/dbmi.h, line 281).
class struct_anon_9(Structure):
    pass

_catval_layout = [
    ('cat', c_int),
    ('isNull', c_int),
    ('val', union_anon_8),
]
struct_anon_9.__slots__ = [_n for _n, _t in _catval_layout]
struct_anon_9._fields_ = _catval_layout
dbCatVal = struct_anon_9
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 290
# Growable array of dbCatVal records (grass/dbmi.h, line 290).
class struct_anon_10(Structure):
    pass

_catval_array_layout = [
    ('n_values', c_int),
    ('alloc', c_int),
    ('ctype', c_int),
    ('value', POINTER(dbCatVal)),
]
struct_anon_10.__slots__ = [_n for _n, _t in _catval_array_layout]
struct_anon_10._fields_ = _catval_array_layout
dbCatValArray = struct_anon_10
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# Database connection parameters (grass/dbmi.h, line 304); all
# members are C strings.
class struct__db_connection(Structure):
    pass

_db_connection_layout = [
    (_n, String)
    for _n in (
        'driverName',
        'hostName',
        'databaseName',
        'schemaName',
        'port',
        'user',
        'password',
        'keycol',
        'group',
    )
]
struct__db_connection.__slots__ = [_n for _n, _t in _db_connection_layout]
struct__db_connection._fields_ = _db_connection_layout
dbConnection = struct__db_connection
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 316
# Reclass rule set (grass/dbmi.h, line 316): per-rule category,
# WHERE clause and label arrays.
class struct_anon_11(Structure):
    pass

_rcls_rule_layout = [
    ('count', c_int),
    ('alloc', c_int),
    ('table', String),
    ('key', String),
    ('cat', POINTER(c_int)),
    ('where', POINTER(POINTER(c_char))),
    ('label', POINTER(POINTER(c_char))),
]
struct_anon_11.__slots__ = [_n for _n, _t in _rcls_rule_layout]
struct_anon_11._fields_ = _rcls_rule_layout
dbRclsRule = struct_anon_11
# --- DBMI function bindings (include/grass/defs/dbmi.h) ------------------
# Attach ctypes prototypes for the GRASS DBMI API.  Each symbol is bound
# from the FIRST loaded library that exports it, exactly like the
# per-function search loops ctypesgen normally emits; when no library
# provides a symbol, its module-level name stays undefined.
# Entry layout: (symbol, argtypes, restype[, errcheck]).
_dbmi_prototypes = [
    ('db_Cstring_to_lowercase', [String], None),
    ('db_Cstring_to_uppercase', [String], None),
    ('db_add_column', [POINTER(dbDriver), POINTER(dbString), POINTER(dbColumn)], c_int),
    ('db__add_cursor_to_driver_state', [POINTER(dbCursor)], None),
    ('db_alloc_cursor_column_flags', [POINTER(dbCursor)], c_int),
    ('db_alloc_cursor_table', [POINTER(dbCursor), c_int], c_int),
    ('db_append_table_column', [POINTER(dbTable), POINTER(dbColumn)], c_int),
    ('db_alloc_dirent_array', [c_int], POINTER(dbDirent)),
    ('db_alloc_handle_array', [c_int], POINTER(dbHandle)),
    ('db_alloc_index_array', [c_int], POINTER(dbIndex)),
    ('db_alloc_index_columns', [POINTER(dbIndex), c_int], c_int),
    ('db_alloc_string_array', [c_int], POINTER(dbString)),
    ('db_alloc_table', [c_int], POINTER(dbTable)),
    ('db_append_string', [POINTER(dbString), String], c_int),
    ('db_auto_print_errors', [c_int], None),
    ('db_auto_print_protocol_errors', [c_int], None),
    ('db_bind_update', [POINTER(dbCursor)], c_int),
    # db_calloc returns a raw byte buffer; the errcheck coerces it to void*.
    ('db_calloc', [c_int, c_int], POINTER(c_ubyte), (lambda v, *a: cast(v, c_void_p))),
    ('db_CatValArray_alloc', [POINTER(dbCatValArray), c_int], c_int),
    ('db_CatValArray_realloc', [POINTER(dbCatValArray), c_int], c_int),
    ('db_CatValArray_free', [POINTER(dbCatValArray)], None),
    ('db_CatValArray_init', [POINTER(dbCatValArray)], None),
    ('db_CatValArray_sort', [POINTER(dbCatValArray)], None),
    ('db_CatValArray_sort_by_value', [POINTER(dbCatValArray)], c_int),
    ('db_CatValArray_get_value', [POINTER(dbCatValArray), c_int, POINTER(POINTER(dbCatVal))], c_int),
    ('db_CatValArray_get_value_int', [POINTER(dbCatValArray), c_int, POINTER(c_int)], c_int),
    ('db_CatValArray_get_value_double', [POINTER(dbCatValArray), c_int, POINTER(c_double)], c_int),
    ('db_char_to_lowercase', [String], None),
    ('db_char_to_uppercase', [String], None),
    ('db_clear_error', [], None),
    ('db_clone_table', [POINTER(dbTable)], POINTER(dbTable)),
    ('db__close_all_cursors', [], None),
    ('db_close_cursor', [POINTER(dbCursor)], c_int),
    ('db_close_database', [POINTER(dbDriver)], c_int),
    ('db_close_database_shutdown_driver', [POINTER(dbDriver)], c_int),
    ('db_column_sqltype', [POINTER(dbDriver), String, String], c_int),
    ('db_column_Ctype', [POINTER(dbDriver), String, String], c_int),
    ('db_convert_Cstring_to_column_default_value', [String, POINTER(dbColumn)], c_int),
    ('db_convert_Cstring_to_column_value', [String, POINTER(dbColumn)], c_int),
    ('db_convert_Cstring_to_value', [String, c_int, POINTER(dbValue)], c_int),
    ('db_convert_Cstring_to_value_datetime', [String, c_int, POINTER(dbValue)], c_int),
    ('db_convert_column_default_value_to_string', [POINTER(dbColumn), POINTER(dbString)], c_int),
    ('db_convert_column_value_to_string', [POINTER(dbColumn), POINTER(dbString)], c_int),
    ('db_convert_value_datetime_into_string', [POINTER(dbValue), c_int, POINTER(dbString)], c_int),
    ('db_convert_value_to_string', [POINTER(dbValue), c_int, POINTER(dbString)], c_int),
    ('db_copy_column', [POINTER(dbColumn), POINTER(dbColumn)], POINTER(dbColumn)),
    ('db_copy_dbmscap_entry', [POINTER(dbDbmscap), POINTER(dbDbmscap)], None),
    ('db_copy_string', [POINTER(dbString), POINTER(dbString)], c_int),
    ('db_table_to_sql', [POINTER(dbTable), POINTER(dbString)], c_int),
    ('db_copy_table', [String, String, String, String, String, String], c_int),
    ('db_copy_table_where', [String, String, String, String, String, String, String], c_int),
    ('db_copy_table_select', [String, String, String, String, String, String, String], c_int),
    ('db_copy_table_by_ints', [String, String, String, String, String, String, String, POINTER(c_int), c_int], c_int),
    ('db_copy_value', [POINTER(dbValue), POINTER(dbValue)], None),
    ('db_create_database', [POINTER(dbDriver), POINTER(dbHandle)], c_int),
    ('db_create_index', [POINTER(dbDriver), POINTER(dbIndex)], c_int),
    ('db_create_index2', [POINTER(dbDriver), String, String], c_int),
    ('db_create_table', [POINTER(dbDriver), POINTER(dbTable)], c_int),
    ('db_d_add_column', [], c_int),
    ('db_d_bind_update', [], c_int),
    ('db_dbmscap_filename', [], c_char_p),
    ('db_d_close_cursor', [], c_int),
    ('db_d_close_database', [], c_int),
    ('db_d_create_database', [], c_int),
    ('db_d_create_index', [], c_int),
    ('db_d_create_table', [], c_int),
    ('db_d_delete', [], c_int),
    ('db_d_delete_database', [], c_int),
    ('db_d_describe_table', [], c_int),
    ('db_d_drop_column', [], c_int),
    ('db_d_drop_index', [], c_int),
    ('db_d_drop_table', [], c_int),
    ('db_debug', [String], None),
    ('db_debug_off', [], None),
    ('db_debug_on', [], None),
    ('db_delete', [POINTER(dbCursor)], c_int),
    ('db_delete_database', [POINTER(dbDriver), POINTER(dbHandle)], c_int),
    ('db_delete_table', [String, String, String], c_int),
    ('db_describe_table', [POINTER(dbDriver), POINTER(dbString), POINTER(POINTER(dbTable))], c_int),
    ('db_d_execute_immediate', [], c_int),
    ('db_d_begin_transaction', [], c_int),
    ('db_d_commit_transaction', [], c_int),
    ('db_d_fetch', [], c_int),
    ('db_d_find_database', [], c_int),
    ('db_d_get_num_rows', [], c_int),
    ('db_d_grant_on_table', [], c_int),
    ('db_d_insert', [], c_int),
    ('db_d_init_error', [String], None),
    ('db_d_report_error', [], None),
    ('db_dirent', [String, POINTER(c_int)], POINTER(dbDirent)),
    ('db_d_list_databases', [], c_int),
    ('db_d_list_indexes', [], c_int),
]
for _proto in _dbmi_prototypes:
    _sym, _args, _ret = _proto[0], _proto[1], _proto[2]
    for _lib in six.itervalues(_libs):
        if not hasattr(_lib, _sym):
            continue
        _fn = getattr(_lib, _sym)
        _fn.argtypes = _args
        _fn.restype = _ret
        if len(_proto) == 4:
            _fn.errcheck = _proto[3]
        # Bind at module level only when a library actually exports the
        # symbol, matching the original per-function loops.
        globals()[_sym] = _fn
        break

# db_d_append_error is variadic, so it is wrapped with _variadic_function
# rather than given fixed ctypes prototypes.  NOTE(review): unlike the
# loop above this scan has no early exit, so the LAST library exporting
# the symbol wins -- kept as-is to preserve the original behavior.
for _lib in six.itervalues(_libs):
    if hasattr(_lib, 'db_d_append_error'):
        db_d_append_error = _variadic_function(
            _lib.db_d_append_error, None, [String], None)
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 114
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_list_tables'):
continue
db_d_list_tables = _lib.db_d_list_tables
db_d_list_tables.argtypes = []
db_d_list_tables.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 115
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_open_database'):
continue
db_d_open_database = _lib.db_d_open_database
db_d_open_database.argtypes = []
db_d_open_database.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 116
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_open_insert_cursor'):
continue
db_d_open_insert_cursor = _lib.db_d_open_insert_cursor
db_d_open_insert_cursor.argtypes = []
db_d_open_insert_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 117
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_open_select_cursor'):
continue
db_d_open_select_cursor = _lib.db_d_open_select_cursor
db_d_open_select_cursor.argtypes = []
db_d_open_select_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 118
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_open_update_cursor'):
continue
db_d_open_update_cursor = _lib.db_d_open_update_cursor
db_d_open_update_cursor.argtypes = []
db_d_open_update_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 119
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_double_quote_string'):
continue
db_double_quote_string = _lib.db_double_quote_string
db_double_quote_string.argtypes = [POINTER(dbString)]
db_double_quote_string.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 120
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_driver'):
continue
db_driver = _lib.db_driver
db_driver.argtypes = [c_int, POINTER(POINTER(c_char))]
db_driver.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 122
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_driver_mkdir'):
continue
db_driver_mkdir = _lib.db_driver_mkdir
db_driver_mkdir.argtypes = [String, c_int, c_int]
db_driver_mkdir.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 123
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_column'):
continue
db_drop_column = _lib.db_drop_column
db_drop_column.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbString)]
db_drop_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 125
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__drop_cursor_from_driver_state'):
continue
db__drop_cursor_from_driver_state = _lib.db__drop_cursor_from_driver_state
db__drop_cursor_from_driver_state.argtypes = [POINTER(dbCursor)]
db__drop_cursor_from_driver_state.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 126
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_index'):
continue
db_drop_index = _lib.db_drop_index
db_drop_index.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_drop_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 127
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_table'):
continue
db_drop_table = _lib.db_drop_table
db_drop_table.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_drop_table.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 128
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_token'):
continue
db_drop_token = _lib.db_drop_token
db_drop_token.argtypes = [dbToken]
db_drop_token.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 129
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_update'):
continue
db_d_update = _lib.db_d_update
db_d_update.argtypes = []
db_d_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 130
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_version'):
continue
db_d_version = _lib.db_d_version
db_d_version.argtypes = []
db_d_version.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 131
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_enlarge_string'):
continue
db_enlarge_string = _lib.db_enlarge_string
db_enlarge_string.argtypes = [POINTER(dbString), c_int]
db_enlarge_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 132
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_error'):
continue
db_error = _lib.db_error
db_error.argtypes = [String]
db_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 133
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_execute_immediate'):
continue
db_execute_immediate = _lib.db_execute_immediate
db_execute_immediate.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_execute_immediate.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 134
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_begin_transaction'):
continue
db_begin_transaction = _lib.db_begin_transaction
db_begin_transaction.argtypes = [POINTER(dbDriver)]
db_begin_transaction.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 135
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_commit_transaction'):
continue
db_commit_transaction = _lib.db_commit_transaction
db_commit_transaction.argtypes = [POINTER(dbDriver)]
db_commit_transaction.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 136
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_fetch'):
continue
db_fetch = _lib.db_fetch
db_fetch.argtypes = [POINTER(dbCursor), c_int, POINTER(c_int)]
db_fetch.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 137
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_find_database'):
continue
db_find_database = _lib.db_find_database
db_find_database.argtypes = [POINTER(dbDriver), POINTER(dbHandle), POINTER(c_int)]
db_find_database.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 138
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_find_token'):
continue
db_find_token = _lib.db_find_token
db_find_token.argtypes = [dbToken]
db_find_token.restype = dbAddress
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 139
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free'):
continue
db_free = _lib.db_free
db_free.argtypes = [POINTER(None)]
db_free.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 140
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_column'):
continue
db_free_column = _lib.db_free_column
db_free_column.argtypes = [POINTER(dbColumn)]
db_free_column.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 141
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_cursor'):
continue
db_free_cursor = _lib.db_free_cursor
db_free_cursor.argtypes = [POINTER(dbCursor)]
db_free_cursor.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 142
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_cursor_column_flags'):
continue
db_free_cursor_column_flags = _lib.db_free_cursor_column_flags
db_free_cursor_column_flags.argtypes = [POINTER(dbCursor)]
db_free_cursor_column_flags.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 143
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_dbmscap'):
continue
db_free_dbmscap = _lib.db_free_dbmscap
db_free_dbmscap.argtypes = [POINTER(dbDbmscap)]
db_free_dbmscap.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 144
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_dirent_array'):
continue
db_free_dirent_array = _lib.db_free_dirent_array
db_free_dirent_array.argtypes = [POINTER(dbDirent), c_int]
db_free_dirent_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 145
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_handle'):
continue
db_free_handle = _lib.db_free_handle
db_free_handle.argtypes = [POINTER(dbHandle)]
db_free_handle.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 146
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_handle_array'):
continue
db_free_handle_array = _lib.db_free_handle_array
db_free_handle_array.argtypes = [POINTER(dbHandle), c_int]
db_free_handle_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 147
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_index'):
continue
db_free_index = _lib.db_free_index
db_free_index.argtypes = [POINTER(dbIndex)]
db_free_index.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 148
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_index_array'):
continue
db_free_index_array = _lib.db_free_index_array
db_free_index_array.argtypes = [POINTER(dbIndex), c_int]
db_free_index_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 149
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_string'):
continue
db_free_string = _lib.db_free_string
db_free_string.argtypes = [POINTER(dbString)]
db_free_string.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 150
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_string_array'):
continue
db_free_string_array = _lib.db_free_string_array
db_free_string_array.argtypes = [POINTER(dbString), c_int]
db_free_string_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 151
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_table'):
continue
db_free_table = _lib.db_free_table
db_free_table.argtypes = [POINTER(dbTable)]
db_free_table.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 152
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column'):
continue
db_get_column = _lib.db_get_column
db_get_column.argtypes = [POINTER(dbDriver), String, String, POINTER(POINTER(dbColumn))]
db_get_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 154
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_default_value'):
continue
db_get_column_default_value = _lib.db_get_column_default_value
db_get_column_default_value.argtypes = [POINTER(dbColumn)]
db_get_column_default_value.restype = POINTER(dbValue)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 155
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_description'):
continue
db_get_column_description = _lib.db_get_column_description
db_get_column_description.argtypes = [POINTER(dbColumn)]
db_get_column_description.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 156
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_host_type'):
continue
db_get_column_host_type = _lib.db_get_column_host_type
db_get_column_host_type.argtypes = [POINTER(dbColumn)]
db_get_column_host_type.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 157
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_length'):
continue
db_get_column_length = _lib.db_get_column_length
db_get_column_length.argtypes = [POINTER(dbColumn)]
db_get_column_length.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 158
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_name'):
continue
db_get_column_name = _lib.db_get_column_name
db_get_column_name.argtypes = [POINTER(dbColumn)]
db_get_column_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 159
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_precision'):
continue
db_get_column_precision = _lib.db_get_column_precision
db_get_column_precision.argtypes = [POINTER(dbColumn)]
db_get_column_precision.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 160
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_scale'):
continue
db_get_column_scale = _lib.db_get_column_scale
db_get_column_scale.argtypes = [POINTER(dbColumn)]
db_get_column_scale.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 161
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_select_priv'):
continue
db_get_column_select_priv = _lib.db_get_column_select_priv
db_get_column_select_priv.argtypes = [POINTER(dbColumn)]
db_get_column_select_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 162
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_sqltype'):
continue
db_get_column_sqltype = _lib.db_get_column_sqltype
db_get_column_sqltype.argtypes = [POINTER(dbColumn)]
db_get_column_sqltype.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 163
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_update_priv'):
continue
db_get_column_update_priv = _lib.db_get_column_update_priv
db_get_column_update_priv.argtypes = [POINTER(dbColumn)]
db_get_column_update_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 164
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_value'):
continue
db_get_column_value = _lib.db_get_column_value
db_get_column_value.argtypes = [POINTER(dbColumn)]
db_get_column_value.restype = POINTER(dbValue)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 165
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_connection'):
continue
db_get_connection = _lib.db_get_connection
db_get_connection.argtypes = [POINTER(dbConnection)]
db_get_connection.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 166
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_number_of_columns'):
continue
db_get_cursor_number_of_columns = _lib.db_get_cursor_number_of_columns
db_get_cursor_number_of_columns.argtypes = [POINTER(dbCursor)]
db_get_cursor_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 167
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_table'):
continue
db_get_cursor_table = _lib.db_get_cursor_table
db_get_cursor_table.argtypes = [POINTER(dbCursor)]
db_get_cursor_table.restype = POINTER(dbTable)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 168
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_token'):
continue
db_get_cursor_token = _lib.db_get_cursor_token
db_get_cursor_token.argtypes = [POINTER(dbCursor)]
db_get_cursor_token.restype = dbToken
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 169
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_driver_name'):
continue
db_get_default_driver_name = _lib.db_get_default_driver_name
db_get_default_driver_name.argtypes = []
db_get_default_driver_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 170
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_database_name'):
continue
db_get_default_database_name = _lib.db_get_default_database_name
db_get_default_database_name.argtypes = []
db_get_default_database_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 171
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_schema_name'):
continue
db_get_default_schema_name = _lib.db_get_default_schema_name
db_get_default_schema_name.argtypes = []
db_get_default_schema_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 172
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_group_name'):
continue
db_get_default_group_name = _lib.db_get_default_group_name
db_get_default_group_name.argtypes = []
db_get_default_group_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 173
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__get_driver_state'):
continue
db__get_driver_state = _lib.db__get_driver_state
db__get_driver_state.argtypes = []
db__get_driver_state.restype = POINTER(dbDriverState)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 174
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_code'):
continue
db_get_error_code = _lib.db_get_error_code
db_get_error_code.argtypes = []
db_get_error_code.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 175
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_msg'):
continue
db_get_error_msg = _lib.db_get_error_msg
db_get_error_msg.argtypes = []
db_get_error_msg.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 176
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_who'):
continue
db_get_error_who = _lib.db_get_error_who
db_get_error_who.argtypes = []
db_get_error_who.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 177
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_handle_dbname'):
continue
db_get_handle_dbname = _lib.db_get_handle_dbname
db_get_handle_dbname.argtypes = [POINTER(dbHandle)]
db_get_handle_dbname.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 178
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_handle_dbschema'):
continue
db_get_handle_dbschema = _lib.db_get_handle_dbschema
db_get_handle_dbschema.argtypes = [POINTER(dbHandle)]
db_get_handle_dbschema.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 179
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_column_name'):
continue
db_get_index_column_name = _lib.db_get_index_column_name
db_get_index_column_name.argtypes = [POINTER(dbIndex), c_int]
db_get_index_column_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 180
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_name'):
continue
db_get_index_name = _lib.db_get_index_name
db_get_index_name.argtypes = [POINTER(dbIndex)]
db_get_index_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 181
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_number_of_columns'):
continue
db_get_index_number_of_columns = _lib.db_get_index_number_of_columns
db_get_index_number_of_columns.argtypes = [POINTER(dbIndex)]
db_get_index_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 182
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_table_name'):
continue
db_get_index_table_name = _lib.db_get_index_table_name
db_get_index_table_name.argtypes = [POINTER(dbIndex)]
db_get_index_table_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 183
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_num_rows'):
continue
db_get_num_rows = _lib.db_get_num_rows
db_get_num_rows.argtypes = [POINTER(dbCursor)]
db_get_num_rows.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 184
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_string'):
continue
db_get_string = _lib.db_get_string
db_get_string.argtypes = [POINTER(dbString)]
if sizeof(c_int) == sizeof(c_void_p):
db_get_string.restype = ReturnString
else:
db_get_string.restype = String
db_get_string.errcheck = ReturnString
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 185
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_column'):
continue
db_get_table_column = _lib.db_get_table_column
db_get_table_column.argtypes = [POINTER(dbTable), c_int]
db_get_table_column.restype = POINTER(dbColumn)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 186
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_column_by_name'):
continue
db_get_table_column_by_name = _lib.db_get_table_column_by_name
db_get_table_column_by_name.argtypes = [POINTER(dbTable), String]
db_get_table_column_by_name.restype = POINTER(dbColumn)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 187
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_delete_priv'):
continue
db_get_table_delete_priv = _lib.db_get_table_delete_priv
db_get_table_delete_priv.argtypes = [POINTER(dbTable)]
db_get_table_delete_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 188
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_description'):
continue
db_get_table_description = _lib.db_get_table_description
db_get_table_description.argtypes = [POINTER(dbTable)]
db_get_table_description.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 189
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_insert_priv'):
continue
db_get_table_insert_priv = _lib.db_get_table_insert_priv
db_get_table_insert_priv.argtypes = [POINTER(dbTable)]
db_get_table_insert_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 190
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_name'):
continue
db_get_table_name = _lib.db_get_table_name
db_get_table_name.argtypes = [POINTER(dbTable)]
db_get_table_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 191
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_number_of_columns'):
continue
db_get_table_number_of_columns = _lib.db_get_table_number_of_columns
db_get_table_number_of_columns.argtypes = [POINTER(dbTable)]
db_get_table_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 192
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_number_of_rows'):
continue
db_get_table_number_of_rows = _lib.db_get_table_number_of_rows
db_get_table_number_of_rows.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_get_table_number_of_rows.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 193
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_select_priv'):
continue
db_get_table_select_priv = _lib.db_get_table_select_priv
db_get_table_select_priv.argtypes = [POINTER(dbTable)]
db_get_table_select_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 194
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_update_priv'):
continue
db_get_table_update_priv = _lib.db_get_table_update_priv
db_get_table_update_priv.argtypes = [POINTER(dbTable)]
db_get_table_update_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 195
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_as_double'):
continue
db_get_value_as_double = _lib.db_get_value_as_double
db_get_value_as_double.argtypes = [POINTER(dbValue), c_int]
db_get_value_as_double.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 196
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_day'):
continue
db_get_value_day = _lib.db_get_value_day
db_get_value_day.argtypes = [POINTER(dbValue)]
db_get_value_day.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 197
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_double'):
continue
db_get_value_double = _lib.db_get_value_double
db_get_value_double.argtypes = [POINTER(dbValue)]
db_get_value_double.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 198
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_hour'):
continue
db_get_value_hour = _lib.db_get_value_hour
db_get_value_hour.argtypes = [POINTER(dbValue)]
db_get_value_hour.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 199
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_int'):
continue
db_get_value_int = _lib.db_get_value_int
db_get_value_int.argtypes = [POINTER(dbValue)]
db_get_value_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 200
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_minute'):
continue
db_get_value_minute = _lib.db_get_value_minute
db_get_value_minute.argtypes = [POINTER(dbValue)]
db_get_value_minute.restype = c_int
break
# ----------------------------------------------------------------------
# ctypes bindings for GRASS DBMI value accessors and DBMS capability
# helpers.  Each entry is (symbol, argtypes, restype); the loaded
# libraries in _libs are probed in order and the first one exporting the
# symbol supplies the binding, which is published as a module-level
# global.  A symbol exported by no library is left unbound — identical
# to the per-symbol loops this table replaces.  Header line numbers
# refer to grass/defs/dbmi.h.
for _name, _argtypes, _restype in (
    ('db_get_value_month', [POINTER(dbValue)], c_int),       # dbmi.h: 201
    ('db_get_value_seconds', [POINTER(dbValue)], c_double),  # dbmi.h: 202
    ('db_get_value_string', [POINTER(dbValue)], c_char_p),   # dbmi.h: 203
    ('db_get_value_year', [POINTER(dbValue)], c_int),        # dbmi.h: 204
    ('db_grant_on_table',
     [POINTER(dbDriver), String, c_int, c_int], c_int),      # dbmi.h: 205
    ('db_has_dbms', [], c_int),                              # dbmi.h: 207
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = _restype
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for GRASS DBMI object initializers and small utility
# calls.  Each entry is (symbol, argtypes, restype); the loaded
# libraries in _libs are probed in order and the first one exporting the
# symbol supplies the binding, published as a module-level global.  A
# symbol exported by no library is left unbound — identical to the
# per-symbol loops this table replaces.  Header line numbers refer to
# grass/defs/dbmi.h.
for _name, _argtypes, _restype in (
    ('db_init_column', [POINTER(dbColumn)], None),           # dbmi.h: 208
    ('db_init_cursor', [POINTER(dbCursor)], None),           # dbmi.h: 209
    ('db__init_driver_state', [], None),                     # dbmi.h: 210
    ('db_init_handle', [POINTER(dbHandle)], None),           # dbmi.h: 211
    ('db_init_index', [POINTER(dbIndex)], None),             # dbmi.h: 212
    ('db_init_string', [POINTER(dbString)], None),           # dbmi.h: 213
    ('db_init_table', [POINTER(dbTable)], None),             # dbmi.h: 214
    ('db_insert', [POINTER(dbCursor)], c_int),               # dbmi.h: 215
    ('db_interval_range',
     [c_int, POINTER(c_int), POINTER(c_int)], None),         # dbmi.h: 216
    ('db_isdir', [String], c_int),                           # dbmi.h: 217
    ('db_legal_tablename', [String], c_int),                 # dbmi.h: 218
    ('db_list_databases',
     [POINTER(dbDriver), POINTER(dbString), c_int,
      POINTER(POINTER(dbHandle)), POINTER(c_int)], c_int),   # dbmi.h: 219
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = _restype
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for GRASS DBMI listing, allocation, and state-marking
# calls.  Each entry is (symbol, argtypes, restype, errcheck); errcheck
# is None for the entries that do not install one.  The loaded libraries
# in _libs are probed in order and the first one exporting the symbol
# supplies the binding, published as a module-level global; a symbol
# exported by no library is left unbound — identical to the per-symbol
# loops this table replaces.  Header line numbers refer to
# grass/defs/dbmi.h.
for _name, _argtypes, _restype, _errcheck in (
    ('db_list_drivers', [], c_char_p, None),                 # dbmi.h: 221
    ('db_list_indexes',
     [POINTER(dbDriver), POINTER(dbString),
      POINTER(POINTER(dbIndex)), POINTER(c_int)],
     c_int, None),                                           # dbmi.h: 222
    ('db_list_tables',
     [POINTER(dbDriver), POINTER(POINTER(dbString)),
      POINTER(c_int), c_int], c_int, None),                  # dbmi.h: 224
    # db_malloc returns raw memory: the errcheck hook casts the
    # POINTER(c_ubyte) result to a plain void pointer for callers.
    ('db_malloc', [c_int], POINTER(c_ubyte),
     lambda v, *a: cast(v, c_void_p)),                       # dbmi.h: 226
    ('db__mark_database_closed', [], None, None),            # dbmi.h: 227
    ('db__mark_database_open', [String, String], None, None),  # dbmi.h: 228
    ('db_memory_error', [], None, None),                     # dbmi.h: 229
    ('db_new_token', [dbAddress], dbToken, None),            # dbmi.h: 230
    ('db_nocase_compare', [String, String], c_int, None),    # dbmi.h: 231
    ('db_noproc_error', [c_int], None, None),                # dbmi.h: 232
    ('db_open_database',
     [POINTER(dbDriver), POINTER(dbHandle)], c_int, None),   # dbmi.h: 233
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = _restype
            if _errcheck is not None:
                _func.errcheck = _errcheck
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for GRASS DBMI cursor opening, diagnostic printing,
# dbmscap reading, and reallocation.  Each entry is (symbol, argtypes,
# restype, errcheck); errcheck is None for the entries that do not
# install one.  The loaded libraries in _libs are probed in order and
# the first one exporting the symbol supplies the binding, published as
# a module-level global; a symbol exported by no library is left
# unbound — identical to the per-symbol loops this table replaces.
# Header line numbers refer to grass/defs/dbmi.h.
for _name, _argtypes, _restype, _errcheck in (
    ('db_open_insert_cursor',
     [POINTER(dbDriver), POINTER(dbCursor)], c_int, None),   # dbmi.h: 234
    ('db_open_select_cursor',
     [POINTER(dbDriver), POINTER(dbString),
      POINTER(dbCursor), c_int], c_int, None),               # dbmi.h: 235
    ('db_open_update_cursor',
     [POINTER(dbDriver), POINTER(dbString), POINTER(dbString),
      POINTER(dbCursor), c_int], c_int, None),               # dbmi.h: 237
    ('db_print_column_definition',
     [POINTER(FILE), POINTER(dbColumn)], None, None),        # dbmi.h: 239
    ('db_print_error', [], None, None),                      # dbmi.h: 240
    ('db_print_index',
     [POINTER(FILE), POINTER(dbIndex)], None, None),         # dbmi.h: 241
    ('db_print_table_definition',
     [POINTER(FILE), POINTER(dbTable)], None, None),         # dbmi.h: 242
    ('db_procedure_not_implemented', [String], None, None),  # dbmi.h: 243
    ('db_protocol_error', [], None, None),                   # dbmi.h: 244
    ('db_read_dbmscap', [], POINTER(dbDbmscap), None),       # dbmi.h: 245
    # db_realloc returns raw memory: the errcheck hook casts the
    # POINTER(c_ubyte) result to a plain void pointer for callers.
    ('db_realloc', [POINTER(None), c_int], POINTER(c_ubyte),
     lambda v, *a: cast(v, c_void_p)),                       # dbmi.h: 246
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = _restype
            if _errcheck is not None:
                _func.errcheck = _errcheck
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for the GRASS DBMI internal receive (db__recv_*)
# protocol family.  Every function in this family returns c_int, so the
# table stores only (symbol, argtypes).  The loaded libraries in _libs
# are probed in order and the first one exporting the symbol supplies
# the binding, published as a module-level global; a symbol exported by
# no library is left unbound — identical to the per-symbol loops this
# table replaces.  Header line numbers refer to grass/defs/dbmi.h.
for _name, _argtypes in (
    ('db__recv_char', [String]),                             # dbmi.h: 247
    ('db__recv_column_default_value', [POINTER(dbColumn)]),  # dbmi.h: 248
    ('db__recv_column_definition', [POINTER(dbColumn)]),     # dbmi.h: 249
    ('db__recv_column_value', [POINTER(dbColumn)]),          # dbmi.h: 250
    ('db__recv_datetime', [POINTER(dbDateTime)]),            # dbmi.h: 251
    ('db__recv_double', [POINTER(c_double)]),                # dbmi.h: 252
    ('db__recv_double_array',
     [POINTER(POINTER(c_double)), POINTER(c_int)]),          # dbmi.h: 253
    ('db__recv_float', [POINTER(c_float)]),                  # dbmi.h: 254
    ('db__recv_float_array',
     [POINTER(POINTER(c_float)), POINTER(c_int)]),           # dbmi.h: 255
    ('db__recv_handle', [POINTER(dbHandle)]),                # dbmi.h: 256
    ('db__recv_index', [POINTER(dbIndex)]),                  # dbmi.h: 257
    ('db__recv_index_array',
     [POINTER(POINTER(dbIndex)), POINTER(c_int)]),           # dbmi.h: 258
    ('db__recv_int', [POINTER(c_int)]),                      # dbmi.h: 259
    ('db__recv_int_array',
     [POINTER(POINTER(c_int)), POINTER(c_int)]),             # dbmi.h: 260
    ('db__recv_procnum', [POINTER(c_int)]),                  # dbmi.h: 261
    ('db__recv_return_code', [POINTER(c_int)]),              # dbmi.h: 262
    ('db__recv_short', [POINTER(c_short)]),                  # dbmi.h: 263
    ('db__recv_short_array',
     [POINTER(POINTER(c_short)), POINTER(c_int)]),           # dbmi.h: 264
    ('db__recv_string', [POINTER(dbString)]),                # dbmi.h: 265
    ('db__recv_string_array',
     [POINTER(POINTER(dbString)), POINTER(c_int)]),          # dbmi.h: 266
    ('db__recv_table_data', [POINTER(dbTable)]),             # dbmi.h: 267
    ('db__recv_table_definition', [POINTER(POINTER(dbTable))]),  # dbmi.h: 268
    ('db__recv_token', [POINTER(dbToken)]),                  # dbmi.h: 269
    ('db__recv_value', [POINTER(dbValue), c_int]),           # dbmi.h: 270
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = c_int
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for the GRASS DBMI internal send (db__send_*)
# protocol family.  Every function in this family returns c_int, so the
# table stores only (symbol, argtypes).  The loaded libraries in _libs
# are probed in order and the first one exporting the symbol supplies
# the binding, published as a module-level global; a symbol exported by
# no library is left unbound — identical to the per-symbol loops this
# table replaces.  Header line numbers refer to grass/defs/dbmi.h.
for _name, _argtypes in (
    ('db__send_Cstring', [String]),                          # dbmi.h: 271
    ('db__send_char', [c_int]),                              # dbmi.h: 272
    ('db__send_column_default_value', [POINTER(dbColumn)]),  # dbmi.h: 273
    ('db__send_column_definition', [POINTER(dbColumn)]),     # dbmi.h: 274
    ('db__send_column_value', [POINTER(dbColumn)]),          # dbmi.h: 275
    ('db__send_datetime', [POINTER(dbDateTime)]),            # dbmi.h: 276
    ('db__send_double', [c_double]),                         # dbmi.h: 277
    ('db__send_double_array', [POINTER(c_double), c_int]),   # dbmi.h: 278
    ('db__send_failure', []),                                # dbmi.h: 279
    ('db__send_float', [c_float]),                           # dbmi.h: 280
    ('db__send_float_array', [POINTER(c_float), c_int]),     # dbmi.h: 281
    ('db__send_handle', [POINTER(dbHandle)]),                # dbmi.h: 282
    ('db__send_index', [POINTER(dbIndex)]),                  # dbmi.h: 283
    ('db__send_index_array', [POINTER(dbIndex), c_int]),     # dbmi.h: 284
    ('db__send_int', [c_int]),                               # dbmi.h: 285
    ('db__send_int_array', [POINTER(c_int), c_int]),         # dbmi.h: 286
    ('db__send_procedure_not_implemented', [c_int]),         # dbmi.h: 287
    ('db__send_procedure_ok', [c_int]),                      # dbmi.h: 288
    ('db__send_short', [c_int]),                             # dbmi.h: 289
    ('db__send_short_array', [POINTER(c_short), c_int]),     # dbmi.h: 290
    ('db__send_string', [POINTER(dbString)]),                # dbmi.h: 291
    ('db__send_string_array', [POINTER(dbString), c_int]),   # dbmi.h: 292
    ('db__send_success', []),                                # dbmi.h: 293
    ('db__send_table_data', [POINTER(dbTable)]),             # dbmi.h: 294
    ('db__send_table_definition', [POINTER(dbTable)]),       # dbmi.h: 295
    ('db__send_token', [POINTER(dbToken)]),                  # dbmi.h: 296
    ('db__send_value', [POINTER(dbValue), c_int]),           # dbmi.h: 297
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = c_int
            globals()[_name] = _func
            break
# ----------------------------------------------------------------------
# ctypes bindings for GRASS DBMI select helpers and dbColumn setters.
# Each entry is (symbol, argtypes, restype); the loaded libraries in
# _libs are probed in order and the first one exporting the symbol
# supplies the binding, published as a module-level global.  A symbol
# exported by no library is left unbound — identical to the per-symbol
# loops this table replaces.  Header line numbers refer to
# grass/defs/dbmi.h.
for _name, _argtypes, _restype in (
    ('db_select_CatValArray',
     [POINTER(dbDriver), String, String, String, String,
      POINTER(dbCatValArray)], c_int),                       # dbmi.h: 298
    ('db_select_int',
     [POINTER(dbDriver), String, String, String,
      POINTER(POINTER(c_int))], c_int),                      # dbmi.h: 301
    ('db_select_value',
     [POINTER(dbDriver), String, String, c_int, String,
      POINTER(dbValue)], c_int),                             # dbmi.h: 303
    ('db_set_column_description',
     [POINTER(dbColumn), String], c_int),                    # dbmi.h: 305
    ('db_set_column_has_defined_default_value',
     [POINTER(dbColumn)], None),                             # dbmi.h: 306
    ('db_set_column_has_undefined_default_value',
     [POINTER(dbColumn)], None),                             # dbmi.h: 307
    ('db_set_column_host_type',
     [POINTER(dbColumn), c_int], None),                      # dbmi.h: 308
    ('db_set_column_length',
     [POINTER(dbColumn), c_int], None),                      # dbmi.h: 309
    ('db_set_column_name',
     [POINTER(dbColumn), String], c_int),                    # dbmi.h: 310
    ('db_set_column_null_allowed',
     [POINTER(dbColumn)], None),                             # dbmi.h: 311
    ('db_set_column_precision',
     [POINTER(dbColumn), c_int], None),                      # dbmi.h: 312
    ('db_set_column_scale',
     [POINTER(dbColumn), c_int], None),                      # dbmi.h: 313
    ('db_set_column_select_priv_granted',
     [POINTER(dbColumn)], None),                             # dbmi.h: 314
    ('db_set_column_select_priv_not_granted',
     [POINTER(dbColumn)], None),                             # dbmi.h: 315
    ('db_set_column_sqltype',
     [POINTER(dbColumn), c_int], None),                      # dbmi.h: 316
):
    for _lib in six.itervalues(_libs):
        if hasattr(_lib, _name):
            _func = getattr(_lib, _name)
            _func.argtypes = _argtypes
            _func.restype = _restype
            globals()[_name] = _func
            break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 317
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_update_priv_granted'):
continue
db_set_column_update_priv_granted = _lib.db_set_column_update_priv_granted
db_set_column_update_priv_granted.argtypes = [POINTER(dbColumn)]
db_set_column_update_priv_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 318
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_update_priv_not_granted'):
continue
db_set_column_update_priv_not_granted = _lib.db_set_column_update_priv_not_granted
db_set_column_update_priv_not_granted.argtypes = [POINTER(dbColumn)]
db_set_column_update_priv_not_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 319
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_use_default_value'):
continue
db_set_column_use_default_value = _lib.db_set_column_use_default_value
db_set_column_use_default_value.argtypes = [POINTER(dbColumn)]
db_set_column_use_default_value.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 320
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_connection'):
continue
db_set_connection = _lib.db_set_connection
db_set_connection.argtypes = [POINTER(dbConnection)]
db_set_connection.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 321
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_column_flag'):
continue
db_set_cursor_column_flag = _lib.db_set_cursor_column_flag
db_set_cursor_column_flag.argtypes = [POINTER(dbCursor), c_int]
db_set_cursor_column_flag.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 322
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_column_for_update'):
continue
db_set_cursor_column_for_update = _lib.db_set_cursor_column_for_update
db_set_cursor_column_for_update.argtypes = [POINTER(dbCursor), c_int]
db_set_cursor_column_for_update.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 323
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_mode'):
continue
db_set_cursor_mode = _lib.db_set_cursor_mode
db_set_cursor_mode.argtypes = [POINTER(dbCursor), c_int]
db_set_cursor_mode.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 324
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_mode_insensitive'):
continue
db_set_cursor_mode_insensitive = _lib.db_set_cursor_mode_insensitive
db_set_cursor_mode_insensitive.argtypes = [POINTER(dbCursor)]
db_set_cursor_mode_insensitive.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 325
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_mode_scroll'):
continue
db_set_cursor_mode_scroll = _lib.db_set_cursor_mode_scroll
db_set_cursor_mode_scroll.argtypes = [POINTER(dbCursor)]
db_set_cursor_mode_scroll.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 326
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_table'):
continue
db_set_cursor_table = _lib.db_set_cursor_table
db_set_cursor_table.argtypes = [POINTER(dbCursor), POINTER(dbTable)]
db_set_cursor_table.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 327
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_token'):
continue
db_set_cursor_token = _lib.db_set_cursor_token
db_set_cursor_token.argtypes = [POINTER(dbCursor), dbToken]
db_set_cursor_token.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 328
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_type_insert'):
continue
db_set_cursor_type_insert = _lib.db_set_cursor_type_insert
db_set_cursor_type_insert.argtypes = [POINTER(dbCursor)]
db_set_cursor_type_insert.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 329
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_type_readonly'):
continue
db_set_cursor_type_readonly = _lib.db_set_cursor_type_readonly
db_set_cursor_type_readonly.argtypes = [POINTER(dbCursor)]
db_set_cursor_type_readonly.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 330
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_cursor_type_update'):
continue
db_set_cursor_type_update = _lib.db_set_cursor_type_update
db_set_cursor_type_update.argtypes = [POINTER(dbCursor)]
db_set_cursor_type_update.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 331
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_default_connection'):
continue
db_set_default_connection = _lib.db_set_default_connection
db_set_default_connection.argtypes = []
db_set_default_connection.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 332
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_error_who'):
continue
db_set_error_who = _lib.db_set_error_who
db_set_error_who.argtypes = [String]
db_set_error_who.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 333
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_handle'):
continue
db_set_handle = _lib.db_set_handle
db_set_handle.argtypes = [POINTER(dbHandle), String, String]
db_set_handle.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 334
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_error_handler_driver'):
continue
db_set_error_handler_driver = _lib.db_set_error_handler_driver
db_set_error_handler_driver.argtypes = [POINTER(dbDriver)]
db_set_error_handler_driver.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 335
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_unset_error_handler_driver'):
continue
db_unset_error_handler_driver = _lib.db_unset_error_handler_driver
db_unset_error_handler_driver.argtypes = [POINTER(dbDriver)]
db_unset_error_handler_driver.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 336
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_index_column_name'):
continue
db_set_index_column_name = _lib.db_set_index_column_name
db_set_index_column_name.argtypes = [POINTER(dbIndex), c_int, String]
db_set_index_column_name.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 338
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_index_name'):
continue
db_set_index_name = _lib.db_set_index_name
db_set_index_name.argtypes = [POINTER(dbIndex), String]
db_set_index_name.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 339
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_index_table_name'):
continue
db_set_index_table_name = _lib.db_set_index_table_name
db_set_index_table_name.argtypes = [POINTER(dbIndex), String]
db_set_index_table_name.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 340
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_index_type_non_unique'):
continue
db_set_index_type_non_unique = _lib.db_set_index_type_non_unique
db_set_index_type_non_unique.argtypes = [POINTER(dbIndex)]
db_set_index_type_non_unique.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 341
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_index_type_unique'):
continue
db_set_index_type_unique = _lib.db_set_index_type_unique
db_set_index_type_unique.argtypes = [POINTER(dbIndex)]
db_set_index_type_unique.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 342
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__set_protocol_fds'):
continue
db__set_protocol_fds = _lib.db__set_protocol_fds
db__set_protocol_fds.argtypes = [POINTER(FILE), POINTER(FILE)]
db__set_protocol_fds.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 343
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_string'):
continue
db_set_string = _lib.db_set_string
db_set_string.argtypes = [POINTER(dbString), String]
db_set_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 344
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_string_no_copy'):
continue
db_set_string_no_copy = _lib.db_set_string_no_copy
db_set_string_no_copy.argtypes = [POINTER(dbString), String]
db_set_string_no_copy.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 345
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_column'):
continue
db_set_table_column = _lib.db_set_table_column
db_set_table_column.argtypes = [POINTER(dbTable), c_int, POINTER(dbColumn)]
db_set_table_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 346
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_delete_priv_granted'):
continue
db_set_table_delete_priv_granted = _lib.db_set_table_delete_priv_granted
db_set_table_delete_priv_granted.argtypes = [POINTER(dbTable)]
db_set_table_delete_priv_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 347
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_delete_priv_not_granted'):
continue
db_set_table_delete_priv_not_granted = _lib.db_set_table_delete_priv_not_granted
db_set_table_delete_priv_not_granted.argtypes = [POINTER(dbTable)]
db_set_table_delete_priv_not_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 348
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_description'):
continue
db_set_table_description = _lib.db_set_table_description
db_set_table_description.argtypes = [POINTER(dbTable), String]
db_set_table_description.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 349
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_insert_priv_granted'):
continue
db_set_table_insert_priv_granted = _lib.db_set_table_insert_priv_granted
db_set_table_insert_priv_granted.argtypes = [POINTER(dbTable)]
db_set_table_insert_priv_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 350
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_insert_priv_not_granted'):
continue
db_set_table_insert_priv_not_granted = _lib.db_set_table_insert_priv_not_granted
db_set_table_insert_priv_not_granted.argtypes = [POINTER(dbTable)]
db_set_table_insert_priv_not_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 351
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_name'):
continue
db_set_table_name = _lib.db_set_table_name
db_set_table_name.argtypes = [POINTER(dbTable), String]
db_set_table_name.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 352
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_select_priv_granted'):
continue
db_set_table_select_priv_granted = _lib.db_set_table_select_priv_granted
db_set_table_select_priv_granted.argtypes = [POINTER(dbTable)]
db_set_table_select_priv_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 353
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_select_priv_not_granted'):
continue
db_set_table_select_priv_not_granted = _lib.db_set_table_select_priv_not_granted
db_set_table_select_priv_not_granted.argtypes = [POINTER(dbTable)]
db_set_table_select_priv_not_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 354
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_update_priv_granted'):
continue
db_set_table_update_priv_granted = _lib.db_set_table_update_priv_granted
db_set_table_update_priv_granted.argtypes = [POINTER(dbTable)]
db_set_table_update_priv_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 355
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_table_update_priv_not_granted'):
continue
db_set_table_update_priv_not_granted = _lib.db_set_table_update_priv_not_granted
db_set_table_update_priv_not_granted.argtypes = [POINTER(dbTable)]
db_set_table_update_priv_not_granted.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 356
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_datetime_current'):
continue
db_set_value_datetime_current = _lib.db_set_value_datetime_current
db_set_value_datetime_current.argtypes = [POINTER(dbValue)]
db_set_value_datetime_current.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 357
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_datetime_not_current'):
continue
db_set_value_datetime_not_current = _lib.db_set_value_datetime_not_current
db_set_value_datetime_not_current.argtypes = [POINTER(dbValue)]
db_set_value_datetime_not_current.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 358
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_day'):
continue
db_set_value_day = _lib.db_set_value_day
db_set_value_day.argtypes = [POINTER(dbValue), c_int]
db_set_value_day.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 359
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_double'):
continue
db_set_value_double = _lib.db_set_value_double
db_set_value_double.argtypes = [POINTER(dbValue), c_double]
db_set_value_double.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 360
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_hour'):
continue
db_set_value_hour = _lib.db_set_value_hour
db_set_value_hour.argtypes = [POINTER(dbValue), c_int]
db_set_value_hour.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 361
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_int'):
continue
db_set_value_int = _lib.db_set_value_int
db_set_value_int.argtypes = [POINTER(dbValue), c_int]
db_set_value_int.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 362
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_minute'):
continue
db_set_value_minute = _lib.db_set_value_minute
db_set_value_minute.argtypes = [POINTER(dbValue), c_int]
db_set_value_minute.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 363
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_month'):
continue
db_set_value_month = _lib.db_set_value_month
db_set_value_month.argtypes = [POINTER(dbValue), c_int]
db_set_value_month.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 364
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_not_null'):
continue
db_set_value_not_null = _lib.db_set_value_not_null
db_set_value_not_null.argtypes = [POINTER(dbValue)]
db_set_value_not_null.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 365
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_null'):
continue
db_set_value_null = _lib.db_set_value_null
db_set_value_null.argtypes = [POINTER(dbValue)]
db_set_value_null.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 366
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_seconds'):
continue
db_set_value_seconds = _lib.db_set_value_seconds
db_set_value_seconds.argtypes = [POINTER(dbValue), c_double]
db_set_value_seconds.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 367
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_string'):
continue
db_set_value_string = _lib.db_set_value_string
db_set_value_string.argtypes = [POINTER(dbValue), String]
db_set_value_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 368
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_value_year'):
continue
db_set_value_year = _lib.db_set_value_year
db_set_value_year.argtypes = [POINTER(dbValue), c_int]
db_set_value_year.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 369
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_shutdown_driver'):
continue
db_shutdown_driver = _lib.db_shutdown_driver
db_shutdown_driver.argtypes = [POINTER(dbDriver)]
db_shutdown_driver.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 370
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_sqltype_name'):
continue
db_sqltype_name = _lib.db_sqltype_name
db_sqltype_name.argtypes = [c_int]
db_sqltype_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 371
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_sqltype_to_Ctype'):
continue
db_sqltype_to_Ctype = _lib.db_sqltype_to_Ctype
db_sqltype_to_Ctype.argtypes = [c_int]
db_sqltype_to_Ctype.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 372
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_start_driver'):
continue
db_start_driver = _lib.db_start_driver
db_start_driver.argtypes = [String]
db_start_driver.restype = POINTER(dbDriver)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 373
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_start_driver_open_database'):
continue
db_start_driver_open_database = _lib.db_start_driver_open_database
db_start_driver_open_database.argtypes = [String, String]
db_start_driver_open_database.restype = POINTER(dbDriver)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 374
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__start_procedure_call'):
continue
db__start_procedure_call = _lib.db__start_procedure_call
db__start_procedure_call.argtypes = [c_int]
db__start_procedure_call.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 375
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_store'):
continue
db_store = _lib.db_store
db_store.argtypes = [String]
if sizeof(c_int) == sizeof(c_void_p):
db_store.restype = ReturnString
else:
db_store.restype = String
db_store.errcheck = ReturnString
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 376
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_strip'):
continue
db_strip = _lib.db_strip
db_strip.argtypes = [String]
db_strip.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 377
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_syserror'):
continue
db_syserror = _lib.db_syserror
db_syserror.argtypes = [String]
db_syserror.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 378
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_table_exists'):
continue
db_table_exists = _lib.db_table_exists
db_table_exists.argtypes = [String, String, String]
db_table_exists.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 380
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_column_has_default_value'):
continue
db_test_column_has_default_value = _lib.db_test_column_has_default_value
db_test_column_has_default_value.argtypes = [POINTER(dbColumn)]
db_test_column_has_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 381
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_column_has_defined_default_value'):
continue
db_test_column_has_defined_default_value = _lib.db_test_column_has_defined_default_value
db_test_column_has_defined_default_value.argtypes = [POINTER(dbColumn)]
db_test_column_has_defined_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 382
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_column_has_undefined_default_value'):
continue
db_test_column_has_undefined_default_value = _lib.db_test_column_has_undefined_default_value
db_test_column_has_undefined_default_value.argtypes = [POINTER(dbColumn)]
db_test_column_has_undefined_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 383
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_column_null_allowed'):
continue
db_test_column_null_allowed = _lib.db_test_column_null_allowed
db_test_column_null_allowed.argtypes = [POINTER(dbColumn)]
db_test_column_null_allowed.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 384
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_column_use_default_value'):
continue
db_test_column_use_default_value = _lib.db_test_column_use_default_value
db_test_column_use_default_value.argtypes = [POINTER(dbColumn)]
db_test_column_use_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 385
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_any_column_flag'):
continue
db_test_cursor_any_column_flag = _lib.db_test_cursor_any_column_flag
db_test_cursor_any_column_flag.argtypes = [POINTER(dbCursor)]
db_test_cursor_any_column_flag.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 386
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_any_column_for_update'):
continue
db_test_cursor_any_column_for_update = _lib.db_test_cursor_any_column_for_update
db_test_cursor_any_column_for_update.argtypes = [POINTER(dbCursor)]
db_test_cursor_any_column_for_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 387
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_column_flag'):
continue
db_test_cursor_column_flag = _lib.db_test_cursor_column_flag
db_test_cursor_column_flag.argtypes = [POINTER(dbCursor), c_int]
db_test_cursor_column_flag.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 388
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_column_for_update'):
continue
db_test_cursor_column_for_update = _lib.db_test_cursor_column_for_update
db_test_cursor_column_for_update.argtypes = [POINTER(dbCursor), c_int]
db_test_cursor_column_for_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 389
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_mode_insensitive'):
continue
db_test_cursor_mode_insensitive = _lib.db_test_cursor_mode_insensitive
db_test_cursor_mode_insensitive.argtypes = [POINTER(dbCursor)]
db_test_cursor_mode_insensitive.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 390
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_mode_scroll'):
continue
db_test_cursor_mode_scroll = _lib.db_test_cursor_mode_scroll
db_test_cursor_mode_scroll.argtypes = [POINTER(dbCursor)]
db_test_cursor_mode_scroll.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 391
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_type_fetch'):
continue
db_test_cursor_type_fetch = _lib.db_test_cursor_type_fetch
db_test_cursor_type_fetch.argtypes = [POINTER(dbCursor)]
db_test_cursor_type_fetch.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 392
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_type_insert'):
continue
db_test_cursor_type_insert = _lib.db_test_cursor_type_insert
db_test_cursor_type_insert.argtypes = [POINTER(dbCursor)]
db_test_cursor_type_insert.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 393
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_cursor_type_update'):
continue
db_test_cursor_type_update = _lib.db_test_cursor_type_update
db_test_cursor_type_update.argtypes = [POINTER(dbCursor)]
db_test_cursor_type_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 394
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__test_database_open'):
continue
db__test_database_open = _lib.db__test_database_open
db__test_database_open.argtypes = []
db__test_database_open.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 395
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_index_type_unique'):
continue
db_test_index_type_unique = _lib.db_test_index_type_unique
db_test_index_type_unique.argtypes = [POINTER(dbIndex)]
db_test_index_type_unique.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 396
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_test_value_datetime_current'):
continue
db_test_value_datetime_current = _lib.db_test_value_datetime_current
db_test_value_datetime_current.argtypes = [POINTER(dbValue)]
db_test_value_datetime_current.restype = c_int
break
# Bind libdbmi entry points to module-level names.  For each C symbol, scan
# the loaded GRASS libraries, take the first one that exports it, pin its
# ctypes argument/result types, and stop.  If no loaded library exports the
# symbol, the module-level name is simply never created (callers would get a
# NameError).  The path/line comments are emitted by ctypesgen and point at
# the C prototype each binding mirrors.
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 397
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_test_value_isnull'):
        continue
    db_test_value_isnull = _lib.db_test_value_isnull
    db_test_value_isnull.argtypes = [POINTER(dbValue)]
    db_test_value_isnull.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 398
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_column_has_default_value'):
        continue
    db_unset_column_has_default_value = _lib.db_unset_column_has_default_value
    db_unset_column_has_default_value.argtypes = [POINTER(dbColumn)]
    db_unset_column_has_default_value.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 399
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_column_null_allowed'):
        continue
    db_unset_column_null_allowed = _lib.db_unset_column_null_allowed
    db_unset_column_null_allowed.argtypes = [POINTER(dbColumn)]
    db_unset_column_null_allowed.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 400
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_column_use_default_value'):
        continue
    db_unset_column_use_default_value = _lib.db_unset_column_use_default_value
    db_unset_column_use_default_value.argtypes = [POINTER(dbColumn)]
    db_unset_column_use_default_value.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 401
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_cursor_column_flag'):
        continue
    db_unset_cursor_column_flag = _lib.db_unset_cursor_column_flag
    db_unset_cursor_column_flag.argtypes = [POINTER(dbCursor), c_int]
    db_unset_cursor_column_flag.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 402
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_cursor_column_for_update'):
        continue
    db_unset_cursor_column_for_update = _lib.db_unset_cursor_column_for_update
    db_unset_cursor_column_for_update.argtypes = [POINTER(dbCursor), c_int]
    db_unset_cursor_column_for_update.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 403
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_cursor_mode'):
        continue
    db_unset_cursor_mode = _lib.db_unset_cursor_mode
    db_unset_cursor_mode.argtypes = [POINTER(dbCursor)]
    db_unset_cursor_mode.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 404
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_cursor_mode_insensitive'):
        continue
    db_unset_cursor_mode_insensitive = _lib.db_unset_cursor_mode_insensitive
    db_unset_cursor_mode_insensitive.argtypes = [POINTER(dbCursor)]
    db_unset_cursor_mode_insensitive.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 405
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_unset_cursor_mode_scroll'):
        continue
    db_unset_cursor_mode_scroll = _lib.db_unset_cursor_mode_scroll
    db_unset_cursor_mode_scroll.argtypes = [POINTER(dbCursor)]
    db_unset_cursor_mode_scroll.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 406
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_update'):
        continue
    db_update = _lib.db_update
    db_update.argtypes = [POINTER(dbCursor)]
    db_update.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 407
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_gversion'):
        continue
    db_gversion = _lib.db_gversion
    db_gversion.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbString)]
    db_gversion.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 409
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_whoami'):
        continue
    db_whoami = _lib.db_whoami
    db_whoami.argtypes = []
    db_whoami.restype = c_char_p
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 410
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_zero'):
        continue
    db_zero = _lib.db_zero
    db_zero.argtypes = [POINTER(None), c_int]
    db_zero.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 411
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_zero_string'):
        continue
    db_zero_string = _lib.db_zero_string
    db_zero_string.argtypes = [POINTER(dbString)]
    db_zero_string.restype = None
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 412
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_sizeof_string'):
        continue
    db_sizeof_string = _lib.db_sizeof_string
    db_sizeof_string.argtypes = [POINTER(dbString)]
    db_sizeof_string.restype = c_uint
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 413
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_set_login'):
        continue
    db_set_login = _lib.db_set_login
    db_set_login.argtypes = [String, String, String, String]
    db_set_login.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 414
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_set_login2'):
        continue
    db_set_login2 = _lib.db_set_login2
    db_set_login2.argtypes = [String, String, String, String, String, String, c_int]
    db_set_login2.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 416
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_get_login'):
        continue
    db_get_login = _lib.db_get_login
    db_get_login.argtypes = [String, String, POINTER(POINTER(c_char)), POINTER(POINTER(c_char))]
    db_get_login.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 417
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_get_login2'):
        continue
    db_get_login2 = _lib.db_get_login2
    db_get_login2.argtypes = [String, String, POINTER(POINTER(c_char)), POINTER(POINTER(c_char)), POINTER(POINTER(c_char)), POINTER(POINTER(c_char))]
    db_get_login2.restype = c_int
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 419
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_get_login_dump'):
        continue
    db_get_login_dump = _lib.db_get_login_dump
    db_get_login_dump.argtypes = [POINTER(FILE)]
    db_get_login_dump.restype = c_int
    break
# Constants mirrored from grass/dbmi.h (header line numbers noted per group).
# The generated code wrapped every assignment in `try: ... except: pass`;
# a literal assignment cannot raise, and the bare `except:` only hid real
# errors (including KeyboardInterrupt), so the guards are dropped.  All
# values are unchanged.

# dbmi.h: 18-21 -- protocol version string and default driver name
DB_VERSION = '0'
DB_DEFAULT_DRIVER = 'sqlite'

# dbmi.h: 27-35 -- driver procedure opcodes: version / database level
DB_PROC_VERSION = 999
DB_PROC_CLOSE_DATABASE = 101
DB_PROC_CREATE_DATABASE = 102
DB_PROC_DELETE_DATABASE = 103
DB_PROC_FIND_DATABASE = 104
DB_PROC_LIST_DATABASES = 105
DB_PROC_OPEN_DATABASE = 106
DB_PROC_SHUTDOWN_DRIVER = 107

# dbmi.h: 37-47 -- driver procedure opcodes: cursor / row operations
DB_PROC_CLOSE_CURSOR = 201
DB_PROC_DELETE = 202
DB_PROC_FETCH = 203
DB_PROC_INSERT = 204
DB_PROC_OPEN_INSERT_CURSOR = 205
DB_PROC_OPEN_SELECT_CURSOR = 206
DB_PROC_OPEN_UPDATE_CURSOR = 207
DB_PROC_UPDATE = 208
DB_PROC_ROWS = 209
DB_PROC_BIND_UPDATE = 220
DB_PROC_BIND_INSERT = 221

# dbmi.h: 49-51 -- driver procedure opcodes: SQL execution / transactions
DB_PROC_EXECUTE_IMMEDIATE = 301
DB_PROC_BEGIN_TRANSACTION = 302
DB_PROC_COMMIT_TRANSACTION = 303

# dbmi.h: 53-59 -- driver procedure opcodes: table / column DDL
DB_PROC_CREATE_TABLE = 401
DB_PROC_DESCRIBE_TABLE = 402
DB_PROC_DROP_TABLE = 403
DB_PROC_LIST_TABLES = 404
DB_PROC_ADD_COLUMN = 405
DB_PROC_DROP_COLUMN = 406
DB_PROC_GRANT_ON_TABLE = 407

# dbmi.h: 61-63 -- driver procedure opcodes: indexes
DB_PROC_CREATE_INDEX = 701
DB_PROC_LIST_INDEXES = 702
DB_PROC_DROP_INDEX = 703

# dbmi.h: 66-68 -- permission bit flags
DB_PERM_R = 1
DB_PERM_W = 2
DB_PERM_X = 4

# dbmi.h: 71-76 -- return / status codes
DB_OK = 0
DB_FAILED = 1
DB_NOPROC = 2
DB_MEMORY_ERR = -1
DB_PROTOCOL_ERR = -2
DB_EOF = -1

# dbmi.h: 79-94 -- SQL data type codes
DB_SQL_TYPE_UNKNOWN = 0
DB_SQL_TYPE_CHARACTER = 1
DB_SQL_TYPE_SMALLINT = 2
DB_SQL_TYPE_INTEGER = 3
DB_SQL_TYPE_REAL = 4
DB_SQL_TYPE_DOUBLE_PRECISION = 6
DB_SQL_TYPE_DECIMAL = 7
DB_SQL_TYPE_NUMERIC = 8
DB_SQL_TYPE_DATE = 9
DB_SQL_TYPE_TIME = 10
DB_SQL_TYPE_TIMESTAMP = 11
DB_SQL_TYPE_INTERVAL = 12
DB_SQL_TYPE_TEXT = 13
DB_SQL_TYPE_SERIAL = 21

# dbmi.h: 97-104 -- datetime component bit flags and their combined mask
DB_YEAR = 16384
DB_MONTH = 8192
DB_DAY = 4096
DB_HOUR = 2048
DB_MINUTE = 1024
DB_SECOND = 512
DB_FRACTION = 256
DB_DATETIME_MASK = 65280

# dbmi.h: 107-110 -- host (C) data type codes
DB_C_TYPE_STRING = 1
DB_C_TYPE_INT = 2
DB_C_TYPE_DOUBLE = 3
DB_C_TYPE_DATETIME = 4

# dbmi.h: 113-117 -- cursor fetch positions
DB_CURRENT = 1
DB_NEXT = 2
DB_PREVIOUS = 3
DB_FIRST = 4
DB_LAST = 5

# dbmi.h: 120-125 -- cursor types and modes
DB_READONLY = 1
DB_INSERT = 2
DB_UPDATE = 3
DB_SEQUENTIAL = 0
DB_SCROLL = 1
DB_INSENSITIVE = 4

# dbmi.h: 128-132 -- grant status and privilege codes
DB_GRANTED = 1
DB_NOT_GRANTED = -1
DB_PRIV_SELECT = 1

# dbmi.h: 134-135 -- grant targets
DB_GROUP = 1
DB_PUBLIC = 2

# dbmi.h: 138-139 -- defined / undefined markers
DB_DEFINED = 1
DB_UNDEFINED = 2

# dbmi.h: 142 -- maximum SQL statement length
DB_SQL_MAX = 8192
# Aliases exposing each generated Structure class under its original C
# struct tag name (e.g. `_db_string` for `struct _db_string`), so code that
# refers to the raw tag resolves to the same class object.
_db_string = struct__db_string # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
_dbmscap = struct__dbmscap # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 153
_db_dirent = struct__db_dirent # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
_db_driver = struct__db_driver # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
_db_handle = struct__db_handle # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
_db_date_time = struct__db_date_time # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
_db_value = struct__db_value # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
_db_column = struct__db_column # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
_db_table = struct__db_table # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
_db_cursor = struct__db_cursor # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
_db_index = struct__db_index # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
_db_driver_state = struct__db_driver_state # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
_db_connection = struct__db_connection # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# No inserted files
__docformat__ = 'restructuredtext'
# Registry of loaded shared libraries (name -> ctypes library object) and
# extra search directories consumed by ctypes_loader.
_libs = {}
_libdirs = []
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
from .ctypes_loader import *
# NOTE(review): `six`, used by the binding loops below, is not imported here
# explicitly -- it is assumed to come in via the ctypes_preamble star import;
# confirm against ctypes_preamble.
add_library_search_dirs([])
# Begin libraries
# Load the two GRASS DBMI shared libraries; every db_* symbol below is
# resolved against whichever of these exports it.
_libs["grass_dbmiclient.7.8"] = load_library("grass_dbmiclient.7.8")
_libs["grass_dbmibase.7.8"] = load_library("grass_dbmibase.7.8")
# 2 libraries
# End libraries
# No modules
# Darwin typedef chain for the stdio file-position type: fpos_t is a 64-bit
# signed offset.
__int64_t = c_longlong # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/i386/_types.h: 46
__darwin_off_t = __int64_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/sys/_types.h: 71
fpos_t = __darwin_off_t # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 81
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 92
# ctypes mirror of Darwin's `struct __sbuf` (a stdio buffer: base pointer
# plus size).  Field order defines the binary layout and must not change.
class struct___sbuf(Structure):
    pass
# NOTE(review): assigning __slots__ after class creation has no slotting
# effect in Python; ctypesgen emits it this way, kept as generated.
struct___sbuf.__slots__ = [
    '_base',
    '_size',
]
struct___sbuf._fields_ = [
    ('_base', POINTER(c_ubyte)),
    ('_size', c_int),
]
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 98
# Opaque extension record of Darwin's FILE; only used behind POINTER(), so
# its layout is deliberately left undefined (no _fields_).
class struct___sFILEX(Structure):
    pass
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
# ctypes mirror of Darwin's `struct __sFILE`, i.e. the C stdio FILE object.
class struct___sFILE(Structure):
    pass
struct___sFILE.__slots__ = [
    '_p',
    '_r',
    '_w',
    '_flags',
    '_file',
    '_bf',
    '_lbfsize',
    '_cookie',
    '_close',
    '_read',
    '_seek',
    '_write',
    '_ub',
    '_extra',
    '_ur',
    '_ubuf',
    '_nbuf',
    '_lb',
    '_blksize',
    '_offset',
]
struct___sFILE._fields_ = [
    ('_p', POINTER(c_ubyte)),
    ('_r', c_int),
    ('_w', c_int),
    ('_flags', c_short),
    ('_file', c_short),
    ('_bf', struct___sbuf),
    ('_lbfsize', c_int),
    ('_cookie', POINTER(None)),
    ('_close', CFUNCTYPE(UNCHECKED(c_int), POINTER(None))),
    ('_read', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
    ('_seek', CFUNCTYPE(UNCHECKED(fpos_t), POINTER(None), fpos_t, c_int)),
    ('_write', CFUNCTYPE(UNCHECKED(c_int), POINTER(None), String, c_int)),
    ('_ub', struct___sbuf),
    ('_extra', POINTER(struct___sFILEX)),
    ('_ur', c_int),
    ('_ubuf', c_ubyte * 3),
    ('_nbuf', c_ubyte * 1),
    ('_lb', struct___sbuf),
    ('_blksize', c_int),
    ('_offset', fpos_t),
]
FILE = struct___sFILE # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/usr/include/_stdio.h: 157
# dbmi.h typedefs: dbAddress is an untyped (void *) pointer, dbToken an
# integer handle.
dbAddress = POINTER(None) # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 144
dbToken = c_int # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 145
# ctypes mirrors of the core DBMI value/handle structs from grass/dbmi.h.
# In each case _fields_ is assigned after class creation (required for the
# self-referential pointers below); field order defines the binary layout
# and must match the C header exactly.
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
# dbString: a char buffer plus an allocation count (`nalloc`).
class struct__db_string(Structure):
    pass
struct__db_string.__slots__ = [
    'string',
    'nalloc',
]
struct__db_string._fields_ = [
    ('string', String),
    ('nalloc', c_int),
]
dbString = struct__db_string # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 153
# dbDbmscap: one driver-capability record (name, startup command, comment),
# chained into a singly linked list through `next`.
class struct__dbmscap(Structure):
    pass
struct__dbmscap.__slots__ = [
    'driverName',
    'startup',
    'comment',
    'next',
]
struct__dbmscap._fields_ = [
    ('driverName', c_char * 256),
    ('startup', c_char * 256),
    ('comment', c_char * 256),
    ('next', POINTER(struct__dbmscap)),
]
dbDbmscap = struct__dbmscap # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 159
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
# dbDirent: a directory entry (name, is-directory flag, permission bits).
class struct__db_dirent(Structure):
    pass
struct__db_dirent.__slots__ = [
    'name',
    'isdir',
    'perm',
]
struct__db_dirent._fields_ = [
    ('name', dbString),
    ('isdir', c_int),
    ('perm', c_int),
]
dbDirent = struct__db_dirent # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
# dbDriver: a driver process handle -- embedded capability record, two
# FILE* streams (presumably the IPC pipes to/from the driver) and its pid.
class struct__db_driver(Structure):
    pass
struct__db_driver.__slots__ = [
    'dbmscap',
    'send',
    'recv',
    'pid',
]
struct__db_driver._fields_ = [
    ('dbmscap', dbDbmscap),
    ('send', POINTER(FILE)),
    ('recv', POINTER(FILE)),
    ('pid', c_int),
]
dbDriver = struct__db_driver # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
# dbHandle: database name plus schema name.
class struct__db_handle(Structure):
    pass
struct__db_handle.__slots__ = [
    'dbName',
    'dbSchema',
]
struct__db_handle._fields_ = [
    ('dbName', dbString),
    ('dbSchema', dbString),
]
dbHandle = struct__db_handle # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
# dbDateTime: broken-down date/time; `current` flags "use current time",
# seconds is a double to carry fractions.
class struct__db_date_time(Structure):
    pass
struct__db_date_time.__slots__ = [
    'current',
    'year',
    'month',
    'day',
    'hour',
    'minute',
    'seconds',
]
struct__db_date_time._fields_ = [
    ('current', c_char),
    ('year', c_int),
    ('month', c_int),
    ('day', c_int),
    ('hour', c_int),
    ('minute', c_int),
    ('seconds', c_double),
]
dbDateTime = struct__db_date_time # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
# dbValue: one column value -- `isNull` flags SQL NULL; i/d/s/t hold the
# int, double, string and datetime representations respectively.
class struct__db_value(Structure):
    pass
struct__db_value.__slots__ = [
    'isNull',
    'i',
    'd',
    's',
    't',
]
struct__db_value._fields_ = [
    ('isNull', c_char),
    ('i', c_int),
    ('d', c_double),
    ('s', dbString),
    ('t', dbDateTime),
]
dbValue = struct__db_value # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
# ctypes mirrors of the DBMI table/cursor/index structs from grass/dbmi.h.
# Field order defines the binary layout and must match the C header exactly.
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
# dbColumn: full column descriptor -- name, description, SQL/host type
# codes, current value, size/precision/scale, NULL/default flags, default
# value, and select/update privilege codes.
class struct__db_column(Structure):
    pass
struct__db_column.__slots__ = [
    'columnName',
    'description',
    'sqlDataType',
    'hostDataType',
    'value',
    'dataLen',
    'precision',
    'scale',
    'nullAllowed',
    'hasDefaultValue',
    'useDefaultValue',
    'defaultValue',
    'select',
    'update',
]
struct__db_column._fields_ = [
    ('columnName', dbString),
    ('description', dbString),
    ('sqlDataType', c_int),
    ('hostDataType', c_int),
    ('value', dbValue),
    ('dataLen', c_int),
    ('precision', c_int),
    ('scale', c_int),
    ('nullAllowed', c_char),
    ('hasDefaultValue', c_char),
    ('useDefaultValue', c_char),
    ('defaultValue', dbValue),
    ('select', c_int),
    ('update', c_int),
]
dbColumn = struct__db_column # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
# dbTable: table descriptor with an owned array of `numColumns` dbColumn
# records plus insert/delete privilege codes.
class struct__db_table(Structure):
    pass
struct__db_table.__slots__ = [
    'tableName',
    'description',
    'numColumns',
    'columns',
    'priv_insert',
    'priv_delete',
]
struct__db_table._fields_ = [
    ('tableName', dbString),
    ('description', dbString),
    ('numColumns', c_int),
    ('columns', POINTER(dbColumn)),
    ('priv_insert', c_int),
    ('priv_delete', c_int),
]
dbTable = struct__db_table # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
# dbCursor: cursor state -- driver token, owning driver and table, per-column
# flag array, and the DB_* type/mode codes defined above.
class struct__db_cursor(Structure):
    pass
struct__db_cursor.__slots__ = [
    'token',
    'driver',
    'table',
    'column_flags',
    'type',
    'mode',
]
struct__db_cursor._fields_ = [
    ('token', dbToken),
    ('driver', POINTER(dbDriver)),
    ('table', POINTER(dbTable)),
    ('column_flags', POINTER(c_short)),
    ('type', c_int),
    ('mode', c_int),
]
dbCursor = struct__db_cursor # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
# dbIndex: index descriptor -- index/table names, the indexed column names,
# and a uniqueness flag.
class struct__db_index(Structure):
    pass
struct__db_index.__slots__ = [
    'indexName',
    'tableName',
    'numColumns',
    'columnNames',
    'unique',
]
struct__db_index._fields_ = [
    ('indexName', dbString),
    ('tableName', dbString),
    ('numColumns', c_int),
    ('columnNames', POINTER(dbString)),
    ('unique', c_char),
]
dbIndex = struct__db_index # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
# dbDriverState: driver-side bookkeeping -- open database/schema names, an
# open flag, and the list of currently open cursors.
class struct__db_driver_state(Structure):
    pass
struct__db_driver_state.__slots__ = [
    'dbname',
    'dbschema',
    'open',
    'ncursors',
    'cursor_list',
]
struct__db_driver_state._fields_ = [
    ('dbname', String),
    ('dbschema', String),
    ('open', c_int),
    ('ncursors', c_int),
    ('cursor_list', POINTER(POINTER(dbCursor))),
]
dbDriverState = struct__db_driver_state # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
# ctypes mirrors of the DBMI category-value and connection structs from
# grass/dbmi.h.  The anon_* names are ctypesgen's labels for the unnamed
# struct/union types in the header.
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 263
# dbCatValI: (category, integer value) pair.
class struct_anon_7(Structure):
    pass
struct_anon_7.__slots__ = [
    'cat',
    'val',
]
struct_anon_7._fields_ = [
    ('cat', c_int),
    ('val', c_int),
]
dbCatValI = struct_anon_7 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 263
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 270
# Value union for dbCatVal: int, double, or pointers to string/datetime --
# which member is live is determined by the enclosing record's ctype.
class union_anon_8(Union):
    pass
union_anon_8.__slots__ = [
    'i',
    'd',
    's',
    't',
]
union_anon_8._fields_ = [
    ('i', c_int),
    ('d', c_double),
    ('s', POINTER(dbString)),
    ('t', POINTER(dbDateTime)),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 281
# dbCatVal: one category's value -- category id, NULL flag, tagged value.
class struct_anon_9(Structure):
    pass
struct_anon_9.__slots__ = [
    'cat',
    'isNull',
    'val',
]
struct_anon_9._fields_ = [
    ('cat', c_int),
    ('isNull', c_int),
    ('val', union_anon_8),
]
dbCatVal = struct_anon_9 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 281
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 290
# dbCatValArray: growable array of dbCatVal (`n_values` used of `alloc`
# allocated), with a single shared C-type code for all entries.
class struct_anon_10(Structure):
    pass
struct_anon_10.__slots__ = [
    'n_values',
    'alloc',
    'ctype',
    'value',
]
struct_anon_10._fields_ = [
    ('n_values', c_int),
    ('alloc', c_int),
    ('ctype', c_int),
    ('value', POINTER(dbCatVal)),
]
dbCatValArray = struct_anon_10 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 290
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# dbConnection: connection settings as plain C strings (driver, host,
# database, schema, port, credentials, key column, group).
class struct__db_connection(Structure):
    pass
struct__db_connection.__slots__ = [
    'driverName',
    'hostName',
    'databaseName',
    'schemaName',
    'port',
    'user',
    'password',
    'keycol',
    'group',
]
struct__db_connection._fields_ = [
    ('driverName', String),
    ('hostName', String),
    ('databaseName', String),
    ('schemaName', String),
    ('port', String),
    ('user', String),
    ('password', String),
    ('keycol', String),
    ('group', String),
]
dbConnection = struct__db_connection # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 316
# dbRclsRule: reclassification rule set -- parallel cat/where/label arrays
# of `count` used entries (`alloc` allocated) over table/key.
class struct_anon_11(Structure):
    pass
struct_anon_11.__slots__ = [
    'count',
    'alloc',
    'table',
    'key',
    'cat',
    'where',
    'label',
]
struct_anon_11._fields_ = [
    ('count', c_int),
    ('alloc', c_int),
    ('table', String),
    ('key', String),
    ('cat', POINTER(c_int)),
    ('where', POINTER(POINTER(c_char))),
    ('label', POINTER(POINTER(c_char))),
]
dbRclsRule = struct_anon_11 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 316
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 4-119
def _bind_dbmi_function(name, argtypes, restype, errcheck=None):
    """Publish the DBMI symbol *name* from the first library exporting it.

    Replicates the per-symbol loops ctypesgen emits: scan the loaded
    libraries in order, take the first one that has the symbol, attach its
    ctypes signature (and optional errcheck) and expose the function as a
    module-level global.  When no loaded library exports the symbol, the
    module-level name is left undefined, exactly as the original generated
    loops behaved.
    """
    for _lib in six.itervalues(_libs):
        if not hasattr(_lib, name):
            continue
        _func = getattr(_lib, name)
        _func.argtypes = argtypes
        _func.restype = restype
        if errcheck is not None:
            _func.errcheck = errcheck
        globals()[name] = _func
        return


# (symbol, argtypes, restype) for every plain (non-variadic) prototype in
# grass/defs/dbmi.h lines 4-119.  A restype of None means the C function
# returns void.
for _name, _argtypes, _restype in [
    ('db_Cstring_to_lowercase', [String], None),  # dbmi.h: 4
    ('db_Cstring_to_uppercase', [String], None),  # dbmi.h: 5
    ('db_add_column', [POINTER(dbDriver), POINTER(dbString), POINTER(dbColumn)], c_int),  # dbmi.h: 6
    ('db__add_cursor_to_driver_state', [POINTER(dbCursor)], None),  # dbmi.h: 7
    ('db_alloc_cursor_column_flags', [POINTER(dbCursor)], c_int),  # dbmi.h: 8
    ('db_alloc_cursor_table', [POINTER(dbCursor), c_int], c_int),  # dbmi.h: 9
    ('db_append_table_column', [POINTER(dbTable), POINTER(dbColumn)], c_int),  # dbmi.h: 10
    ('db_alloc_dirent_array', [c_int], POINTER(dbDirent)),  # dbmi.h: 11
    ('db_alloc_handle_array', [c_int], POINTER(dbHandle)),  # dbmi.h: 12
    ('db_alloc_index_array', [c_int], POINTER(dbIndex)),  # dbmi.h: 13
    ('db_alloc_index_columns', [POINTER(dbIndex), c_int], c_int),  # dbmi.h: 14
    ('db_alloc_string_array', [c_int], POINTER(dbString)),  # dbmi.h: 15
    ('db_alloc_table', [c_int], POINTER(dbTable)),  # dbmi.h: 16
    ('db_append_string', [POINTER(dbString), String], c_int),  # dbmi.h: 17
    ('db_auto_print_errors', [c_int], None),  # dbmi.h: 18
    ('db_auto_print_protocol_errors', [c_int], None),  # dbmi.h: 19
    ('db_bind_update', [POINTER(dbCursor)], c_int),  # dbmi.h: 20
    ('db_CatValArray_alloc', [POINTER(dbCatValArray), c_int], c_int),  # dbmi.h: 22
    ('db_CatValArray_realloc', [POINTER(dbCatValArray), c_int], c_int),  # dbmi.h: 23
    ('db_CatValArray_free', [POINTER(dbCatValArray)], None),  # dbmi.h: 24
    ('db_CatValArray_init', [POINTER(dbCatValArray)], None),  # dbmi.h: 25
    ('db_CatValArray_sort', [POINTER(dbCatValArray)], None),  # dbmi.h: 26
    ('db_CatValArray_sort_by_value', [POINTER(dbCatValArray)], c_int),  # dbmi.h: 27
    ('db_CatValArray_get_value', [POINTER(dbCatValArray), c_int, POINTER(POINTER(dbCatVal))], c_int),  # dbmi.h: 28
    ('db_CatValArray_get_value_int', [POINTER(dbCatValArray), c_int, POINTER(c_int)], c_int),  # dbmi.h: 29
    ('db_CatValArray_get_value_double', [POINTER(dbCatValArray), c_int, POINTER(c_double)], c_int),  # dbmi.h: 30
    ('db_char_to_lowercase', [String], None),  # dbmi.h: 32
    ('db_char_to_uppercase', [String], None),  # dbmi.h: 33
    ('db_clear_error', [], None),  # dbmi.h: 34
    ('db_clone_table', [POINTER(dbTable)], POINTER(dbTable)),  # dbmi.h: 35
    ('db__close_all_cursors', [], None),  # dbmi.h: 36
    ('db_close_cursor', [POINTER(dbCursor)], c_int),  # dbmi.h: 37
    ('db_close_database', [POINTER(dbDriver)], c_int),  # dbmi.h: 38
    ('db_close_database_shutdown_driver', [POINTER(dbDriver)], c_int),  # dbmi.h: 39
    ('db_column_sqltype', [POINTER(dbDriver), String, String], c_int),  # dbmi.h: 40
    ('db_column_Ctype', [POINTER(dbDriver), String, String], c_int),  # dbmi.h: 41
    ('db_convert_Cstring_to_column_default_value', [String, POINTER(dbColumn)], c_int),  # dbmi.h: 42
    ('db_convert_Cstring_to_column_value', [String, POINTER(dbColumn)], c_int),  # dbmi.h: 44
    ('db_convert_Cstring_to_value', [String, c_int, POINTER(dbValue)], c_int),  # dbmi.h: 46
    ('db_convert_Cstring_to_value_datetime', [String, c_int, POINTER(dbValue)], c_int),  # dbmi.h: 48
    ('db_convert_column_default_value_to_string', [POINTER(dbColumn), POINTER(dbString)], c_int),  # dbmi.h: 50
    ('db_convert_column_value_to_string', [POINTER(dbColumn), POINTER(dbString)], c_int),  # dbmi.h: 52
    ('db_convert_value_datetime_into_string', [POINTER(dbValue), c_int, POINTER(dbString)], c_int),  # dbmi.h: 53
    ('db_convert_value_to_string', [POINTER(dbValue), c_int, POINTER(dbString)], c_int),  # dbmi.h: 55
    ('db_copy_column', [POINTER(dbColumn), POINTER(dbColumn)], POINTER(dbColumn)),  # dbmi.h: 57
    ('db_copy_dbmscap_entry', [POINTER(dbDbmscap), POINTER(dbDbmscap)], None),  # dbmi.h: 58
    ('db_copy_string', [POINTER(dbString), POINTER(dbString)], c_int),  # dbmi.h: 59
    ('db_table_to_sql', [POINTER(dbTable), POINTER(dbString)], c_int),  # dbmi.h: 60
    ('db_copy_table', [String] * 6, c_int),  # dbmi.h: 61
    ('db_copy_table_where', [String] * 7, c_int),  # dbmi.h: 63
    ('db_copy_table_select', [String] * 7, c_int),  # dbmi.h: 66
    ('db_copy_table_by_ints', [String] * 7 + [POINTER(c_int), c_int], c_int),  # dbmi.h: 69
    ('db_copy_value', [POINTER(dbValue), POINTER(dbValue)], None),  # dbmi.h: 72
    ('db_create_database', [POINTER(dbDriver), POINTER(dbHandle)], c_int),  # dbmi.h: 73
    ('db_create_index', [POINTER(dbDriver), POINTER(dbIndex)], c_int),  # dbmi.h: 74
    ('db_create_index2', [POINTER(dbDriver), String, String], c_int),  # dbmi.h: 75
    ('db_create_table', [POINTER(dbDriver), POINTER(dbTable)], c_int),  # dbmi.h: 77
    ('db_d_add_column', [], c_int),  # dbmi.h: 78
    ('db_d_bind_update', [], c_int),  # dbmi.h: 79
    ('db_dbmscap_filename', [], c_char_p),  # dbmi.h: 80
    ('db_d_close_cursor', [], c_int),  # dbmi.h: 81
    ('db_d_close_database', [], c_int),  # dbmi.h: 82
    ('db_d_create_database', [], c_int),  # dbmi.h: 83
    ('db_d_create_index', [], c_int),  # dbmi.h: 84
    ('db_d_create_table', [], c_int),  # dbmi.h: 85
    ('db_d_delete', [], c_int),  # dbmi.h: 86
    ('db_d_delete_database', [], c_int),  # dbmi.h: 87
    ('db_d_describe_table', [], c_int),  # dbmi.h: 88
    ('db_d_drop_column', [], c_int),  # dbmi.h: 89
    ('db_d_drop_index', [], c_int),  # dbmi.h: 90
    ('db_d_drop_table', [], c_int),  # dbmi.h: 91
    ('db_debug', [String], None),  # dbmi.h: 92
    ('db_debug_off', [], None),  # dbmi.h: 93
    ('db_debug_on', [], None),  # dbmi.h: 94
    ('db_delete', [POINTER(dbCursor)], c_int),  # dbmi.h: 95
    ('db_delete_database', [POINTER(dbDriver), POINTER(dbHandle)], c_int),  # dbmi.h: 96
    ('db_delete_table', [String, String, String], c_int),  # dbmi.h: 97
    ('db_describe_table', [POINTER(dbDriver), POINTER(dbString), POINTER(POINTER(dbTable))], c_int),  # dbmi.h: 98
    ('db_d_execute_immediate', [], c_int),  # dbmi.h: 99
    ('db_d_begin_transaction', [], c_int),  # dbmi.h: 100
    ('db_d_commit_transaction', [], c_int),  # dbmi.h: 101
    ('db_d_fetch', [], c_int),  # dbmi.h: 102
    ('db_d_find_database', [], c_int),  # dbmi.h: 103
    ('db_d_get_num_rows', [], c_int),  # dbmi.h: 104
    ('db_d_grant_on_table', [], c_int),  # dbmi.h: 105
    ('db_d_insert', [], c_int),  # dbmi.h: 106
    ('db_d_init_error', [String], None),  # dbmi.h: 107
    ('db_d_report_error', [], None),  # dbmi.h: 110
    ('db_dirent', [String, POINTER(c_int)], POINTER(dbDirent)),  # dbmi.h: 111
    ('db_d_list_databases', [], c_int),  # dbmi.h: 112
    ('db_d_list_indexes', [], c_int),  # dbmi.h: 113
    ('db_d_list_tables', [], c_int),  # dbmi.h: 114
    ('db_d_open_database', [], c_int),  # dbmi.h: 115
    ('db_d_open_insert_cursor', [], c_int),  # dbmi.h: 116
    ('db_d_open_select_cursor', [], c_int),  # dbmi.h: 117
    ('db_d_open_update_cursor', [], c_int),  # dbmi.h: 118
    ('db_double_quote_string', [POINTER(dbString)], None),  # dbmi.h: 119
]:
    _bind_dbmi_function(_name, _argtypes, _restype)

# db_calloc returns unsigned char *; its errcheck casts the result to a
# plain void pointer, exactly as in the original binding (dbmi.h: 21).
_bind_dbmi_function('db_calloc', [c_int, c_int], POINTER(c_ubyte),
                    errcheck=lambda v, *a: cast(v, c_void_p))

# db_d_append_error is variadic, so it needs the _variadic_function wrapper
# rather than plain argtypes/restype (dbmi.h: 108).  NOTE(review): the
# generated loop here was missing the `break` every sibling loop has, so the
# LAST matching library won instead of the first; first-match semantics are
# restored below.
for _lib in six.itervalues(_libs):
    if hasattr(_lib, 'db_d_append_error'):
        db_d_append_error = _variadic_function(_lib.db_d_append_error, None, [String], None)
        break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 120
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_driver'):
continue
db_driver = _lib.db_driver
db_driver.argtypes = [c_int, POINTER(POINTER(c_char))]
db_driver.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 122
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_driver_mkdir'):
continue
db_driver_mkdir = _lib.db_driver_mkdir
db_driver_mkdir.argtypes = [String, c_int, c_int]
db_driver_mkdir.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 123
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_column'):
continue
db_drop_column = _lib.db_drop_column
db_drop_column.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbString)]
db_drop_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 125
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__drop_cursor_from_driver_state'):
continue
db__drop_cursor_from_driver_state = _lib.db__drop_cursor_from_driver_state
db__drop_cursor_from_driver_state.argtypes = [POINTER(dbCursor)]
db__drop_cursor_from_driver_state.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 126
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_index'):
continue
db_drop_index = _lib.db_drop_index
db_drop_index.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_drop_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 127
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_table'):
continue
db_drop_table = _lib.db_drop_table
db_drop_table.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_drop_table.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 128
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_drop_token'):
continue
db_drop_token = _lib.db_drop_token
db_drop_token.argtypes = [dbToken]
db_drop_token.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 129
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_update'):
continue
db_d_update = _lib.db_d_update
db_d_update.argtypes = []
db_d_update.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 130
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_d_version'):
continue
db_d_version = _lib.db_d_version
db_d_version.argtypes = []
db_d_version.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 131
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_enlarge_string'):
continue
db_enlarge_string = _lib.db_enlarge_string
db_enlarge_string.argtypes = [POINTER(dbString), c_int]
db_enlarge_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 132
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_error'):
continue
db_error = _lib.db_error
db_error.argtypes = [String]
db_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 133
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_execute_immediate'):
continue
db_execute_immediate = _lib.db_execute_immediate
db_execute_immediate.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_execute_immediate.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 134
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_begin_transaction'):
continue
db_begin_transaction = _lib.db_begin_transaction
db_begin_transaction.argtypes = [POINTER(dbDriver)]
db_begin_transaction.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 135
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_commit_transaction'):
continue
db_commit_transaction = _lib.db_commit_transaction
db_commit_transaction.argtypes = [POINTER(dbDriver)]
db_commit_transaction.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 136
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_fetch'):
continue
db_fetch = _lib.db_fetch
db_fetch.argtypes = [POINTER(dbCursor), c_int, POINTER(c_int)]
db_fetch.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 137
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_find_database'):
continue
db_find_database = _lib.db_find_database
db_find_database.argtypes = [POINTER(dbDriver), POINTER(dbHandle), POINTER(c_int)]
db_find_database.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 138
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_find_token'):
continue
db_find_token = _lib.db_find_token
db_find_token.argtypes = [dbToken]
db_find_token.restype = dbAddress
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 139
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free'):
continue
db_free = _lib.db_free
db_free.argtypes = [POINTER(None)]
db_free.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 140
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_column'):
continue
db_free_column = _lib.db_free_column
db_free_column.argtypes = [POINTER(dbColumn)]
db_free_column.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 141
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_cursor'):
continue
db_free_cursor = _lib.db_free_cursor
db_free_cursor.argtypes = [POINTER(dbCursor)]
db_free_cursor.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 142
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_cursor_column_flags'):
continue
db_free_cursor_column_flags = _lib.db_free_cursor_column_flags
db_free_cursor_column_flags.argtypes = [POINTER(dbCursor)]
db_free_cursor_column_flags.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 143
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_dbmscap'):
continue
db_free_dbmscap = _lib.db_free_dbmscap
db_free_dbmscap.argtypes = [POINTER(dbDbmscap)]
db_free_dbmscap.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 144
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_dirent_array'):
continue
db_free_dirent_array = _lib.db_free_dirent_array
db_free_dirent_array.argtypes = [POINTER(dbDirent), c_int]
db_free_dirent_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 145
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_handle'):
continue
db_free_handle = _lib.db_free_handle
db_free_handle.argtypes = [POINTER(dbHandle)]
db_free_handle.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 146
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_handle_array'):
continue
db_free_handle_array = _lib.db_free_handle_array
db_free_handle_array.argtypes = [POINTER(dbHandle), c_int]
db_free_handle_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 147
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_index'):
continue
db_free_index = _lib.db_free_index
db_free_index.argtypes = [POINTER(dbIndex)]
db_free_index.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 148
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_index_array'):
continue
db_free_index_array = _lib.db_free_index_array
db_free_index_array.argtypes = [POINTER(dbIndex), c_int]
db_free_index_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 149
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_string'):
continue
db_free_string = _lib.db_free_string
db_free_string.argtypes = [POINTER(dbString)]
db_free_string.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 150
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_string_array'):
continue
db_free_string_array = _lib.db_free_string_array
db_free_string_array.argtypes = [POINTER(dbString), c_int]
db_free_string_array.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 151
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_free_table'):
continue
db_free_table = _lib.db_free_table
db_free_table.argtypes = [POINTER(dbTable)]
db_free_table.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 152
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column'):
continue
db_get_column = _lib.db_get_column
db_get_column.argtypes = [POINTER(dbDriver), String, String, POINTER(POINTER(dbColumn))]
db_get_column.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 154
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_default_value'):
continue
db_get_column_default_value = _lib.db_get_column_default_value
db_get_column_default_value.argtypes = [POINTER(dbColumn)]
db_get_column_default_value.restype = POINTER(dbValue)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 155
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_description'):
continue
db_get_column_description = _lib.db_get_column_description
db_get_column_description.argtypes = [POINTER(dbColumn)]
db_get_column_description.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 156
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_host_type'):
continue
db_get_column_host_type = _lib.db_get_column_host_type
db_get_column_host_type.argtypes = [POINTER(dbColumn)]
db_get_column_host_type.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 157
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_length'):
continue
db_get_column_length = _lib.db_get_column_length
db_get_column_length.argtypes = [POINTER(dbColumn)]
db_get_column_length.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 158
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_name'):
continue
db_get_column_name = _lib.db_get_column_name
db_get_column_name.argtypes = [POINTER(dbColumn)]
db_get_column_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 159
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_precision'):
continue
db_get_column_precision = _lib.db_get_column_precision
db_get_column_precision.argtypes = [POINTER(dbColumn)]
db_get_column_precision.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 160
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_scale'):
continue
db_get_column_scale = _lib.db_get_column_scale
db_get_column_scale.argtypes = [POINTER(dbColumn)]
db_get_column_scale.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 161
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_select_priv'):
continue
db_get_column_select_priv = _lib.db_get_column_select_priv
db_get_column_select_priv.argtypes = [POINTER(dbColumn)]
db_get_column_select_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 162
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_sqltype'):
continue
db_get_column_sqltype = _lib.db_get_column_sqltype
db_get_column_sqltype.argtypes = [POINTER(dbColumn)]
db_get_column_sqltype.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 163
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_update_priv'):
continue
db_get_column_update_priv = _lib.db_get_column_update_priv
db_get_column_update_priv.argtypes = [POINTER(dbColumn)]
db_get_column_update_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 164
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_column_value'):
continue
db_get_column_value = _lib.db_get_column_value
db_get_column_value.argtypes = [POINTER(dbColumn)]
db_get_column_value.restype = POINTER(dbValue)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 165
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_connection'):
continue
db_get_connection = _lib.db_get_connection
db_get_connection.argtypes = [POINTER(dbConnection)]
db_get_connection.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 166
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_number_of_columns'):
continue
db_get_cursor_number_of_columns = _lib.db_get_cursor_number_of_columns
db_get_cursor_number_of_columns.argtypes = [POINTER(dbCursor)]
db_get_cursor_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 167
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_table'):
continue
db_get_cursor_table = _lib.db_get_cursor_table
db_get_cursor_table.argtypes = [POINTER(dbCursor)]
db_get_cursor_table.restype = POINTER(dbTable)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 168
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_cursor_token'):
continue
db_get_cursor_token = _lib.db_get_cursor_token
db_get_cursor_token.argtypes = [POINTER(dbCursor)]
db_get_cursor_token.restype = dbToken
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 169
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_driver_name'):
continue
db_get_default_driver_name = _lib.db_get_default_driver_name
db_get_default_driver_name.argtypes = []
db_get_default_driver_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 170
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_database_name'):
continue
db_get_default_database_name = _lib.db_get_default_database_name
db_get_default_database_name.argtypes = []
db_get_default_database_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 171
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_schema_name'):
continue
db_get_default_schema_name = _lib.db_get_default_schema_name
db_get_default_schema_name.argtypes = []
db_get_default_schema_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 172
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_default_group_name'):
continue
db_get_default_group_name = _lib.db_get_default_group_name
db_get_default_group_name.argtypes = []
db_get_default_group_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 173
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__get_driver_state'):
continue
db__get_driver_state = _lib.db__get_driver_state
db__get_driver_state.argtypes = []
db__get_driver_state.restype = POINTER(dbDriverState)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 174
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_code'):
continue
db_get_error_code = _lib.db_get_error_code
db_get_error_code.argtypes = []
db_get_error_code.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 175
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_msg'):
continue
db_get_error_msg = _lib.db_get_error_msg
db_get_error_msg.argtypes = []
db_get_error_msg.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 176
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_error_who'):
continue
db_get_error_who = _lib.db_get_error_who
db_get_error_who.argtypes = []
db_get_error_who.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 177
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_handle_dbname'):
continue
db_get_handle_dbname = _lib.db_get_handle_dbname
db_get_handle_dbname.argtypes = [POINTER(dbHandle)]
db_get_handle_dbname.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 178
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_handle_dbschema'):
continue
db_get_handle_dbschema = _lib.db_get_handle_dbschema
db_get_handle_dbschema.argtypes = [POINTER(dbHandle)]
db_get_handle_dbschema.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 179
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_column_name'):
continue
db_get_index_column_name = _lib.db_get_index_column_name
db_get_index_column_name.argtypes = [POINTER(dbIndex), c_int]
db_get_index_column_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 180
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_name'):
continue
db_get_index_name = _lib.db_get_index_name
db_get_index_name.argtypes = [POINTER(dbIndex)]
db_get_index_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 181
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_number_of_columns'):
continue
db_get_index_number_of_columns = _lib.db_get_index_number_of_columns
db_get_index_number_of_columns.argtypes = [POINTER(dbIndex)]
db_get_index_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 182
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_index_table_name'):
continue
db_get_index_table_name = _lib.db_get_index_table_name
db_get_index_table_name.argtypes = [POINTER(dbIndex)]
db_get_index_table_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 183
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_num_rows'):
continue
db_get_num_rows = _lib.db_get_num_rows
db_get_num_rows.argtypes = [POINTER(dbCursor)]
db_get_num_rows.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 184
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_string'):
continue
db_get_string = _lib.db_get_string
db_get_string.argtypes = [POINTER(dbString)]
if sizeof(c_int) == sizeof(c_void_p):
db_get_string.restype = ReturnString
else:
db_get_string.restype = String
db_get_string.errcheck = ReturnString
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 185
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_column'):
continue
db_get_table_column = _lib.db_get_table_column
db_get_table_column.argtypes = [POINTER(dbTable), c_int]
db_get_table_column.restype = POINTER(dbColumn)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 186
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_column_by_name'):
continue
db_get_table_column_by_name = _lib.db_get_table_column_by_name
db_get_table_column_by_name.argtypes = [POINTER(dbTable), String]
db_get_table_column_by_name.restype = POINTER(dbColumn)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 187
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_delete_priv'):
continue
db_get_table_delete_priv = _lib.db_get_table_delete_priv
db_get_table_delete_priv.argtypes = [POINTER(dbTable)]
db_get_table_delete_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 188
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_description'):
continue
db_get_table_description = _lib.db_get_table_description
db_get_table_description.argtypes = [POINTER(dbTable)]
db_get_table_description.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 189
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_insert_priv'):
continue
db_get_table_insert_priv = _lib.db_get_table_insert_priv
db_get_table_insert_priv.argtypes = [POINTER(dbTable)]
db_get_table_insert_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 190
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_name'):
continue
db_get_table_name = _lib.db_get_table_name
db_get_table_name.argtypes = [POINTER(dbTable)]
db_get_table_name.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 191
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_number_of_columns'):
continue
db_get_table_number_of_columns = _lib.db_get_table_number_of_columns
db_get_table_number_of_columns.argtypes = [POINTER(dbTable)]
db_get_table_number_of_columns.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 192
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_number_of_rows'):
continue
db_get_table_number_of_rows = _lib.db_get_table_number_of_rows
db_get_table_number_of_rows.argtypes = [POINTER(dbDriver), POINTER(dbString)]
db_get_table_number_of_rows.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 193
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_select_priv'):
continue
db_get_table_select_priv = _lib.db_get_table_select_priv
db_get_table_select_priv.argtypes = [POINTER(dbTable)]
db_get_table_select_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 194
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_table_update_priv'):
continue
db_get_table_update_priv = _lib.db_get_table_update_priv
db_get_table_update_priv.argtypes = [POINTER(dbTable)]
db_get_table_update_priv.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 195
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_as_double'):
continue
db_get_value_as_double = _lib.db_get_value_as_double
db_get_value_as_double.argtypes = [POINTER(dbValue), c_int]
db_get_value_as_double.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 196
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_day'):
continue
db_get_value_day = _lib.db_get_value_day
db_get_value_day.argtypes = [POINTER(dbValue)]
db_get_value_day.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 197
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_double'):
continue
db_get_value_double = _lib.db_get_value_double
db_get_value_double.argtypes = [POINTER(dbValue)]
db_get_value_double.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 198
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_hour'):
continue
db_get_value_hour = _lib.db_get_value_hour
db_get_value_hour.argtypes = [POINTER(dbValue)]
db_get_value_hour.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 199
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_int'):
continue
db_get_value_int = _lib.db_get_value_int
db_get_value_int.argtypes = [POINTER(dbValue)]
db_get_value_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 200
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_minute'):
continue
db_get_value_minute = _lib.db_get_value_minute
db_get_value_minute.argtypes = [POINTER(dbValue)]
db_get_value_minute.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 201
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_month'):
continue
db_get_value_month = _lib.db_get_value_month
db_get_value_month.argtypes = [POINTER(dbValue)]
db_get_value_month.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 202
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_seconds'):
continue
db_get_value_seconds = _lib.db_get_value_seconds
db_get_value_seconds.argtypes = [POINTER(dbValue)]
db_get_value_seconds.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 203
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_string'):
continue
db_get_value_string = _lib.db_get_value_string
db_get_value_string.argtypes = [POINTER(dbValue)]
db_get_value_string.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 204
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_value_year'):
continue
db_get_value_year = _lib.db_get_value_year
db_get_value_year.argtypes = [POINTER(dbValue)]
db_get_value_year.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 205
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_grant_on_table'):
continue
db_grant_on_table = _lib.db_grant_on_table
db_grant_on_table.argtypes = [POINTER(dbDriver), String, c_int, c_int]
db_grant_on_table.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 207
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_has_dbms'):
continue
db_has_dbms = _lib.db_has_dbms
db_has_dbms.argtypes = []
db_has_dbms.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 208
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_column'):
continue
db_init_column = _lib.db_init_column
db_init_column.argtypes = [POINTER(dbColumn)]
db_init_column.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 209
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_cursor'):
continue
db_init_cursor = _lib.db_init_cursor
db_init_cursor.argtypes = [POINTER(dbCursor)]
db_init_cursor.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 210
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__init_driver_state'):
continue
db__init_driver_state = _lib.db__init_driver_state
db__init_driver_state.argtypes = []
db__init_driver_state.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 211
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_handle'):
continue
db_init_handle = _lib.db_init_handle
db_init_handle.argtypes = [POINTER(dbHandle)]
db_init_handle.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 212
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_index'):
continue
db_init_index = _lib.db_init_index
db_init_index.argtypes = [POINTER(dbIndex)]
db_init_index.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 213
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_string'):
continue
db_init_string = _lib.db_init_string
db_init_string.argtypes = [POINTER(dbString)]
db_init_string.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 214
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_init_table'):
continue
db_init_table = _lib.db_init_table
db_init_table.argtypes = [POINTER(dbTable)]
db_init_table.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 215
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_insert'):
continue
db_insert = _lib.db_insert
db_insert.argtypes = [POINTER(dbCursor)]
db_insert.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 216
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_interval_range'):
continue
db_interval_range = _lib.db_interval_range
db_interval_range.argtypes = [c_int, POINTER(c_int), POINTER(c_int)]
db_interval_range.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 217
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_isdir'):
continue
db_isdir = _lib.db_isdir
db_isdir.argtypes = [String]
db_isdir.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 218
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_legal_tablename'):
continue
db_legal_tablename = _lib.db_legal_tablename
db_legal_tablename.argtypes = [String]
db_legal_tablename.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 219
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_list_databases'):
continue
db_list_databases = _lib.db_list_databases
db_list_databases.argtypes = [POINTER(dbDriver), POINTER(dbString), c_int, POINTER(POINTER(dbHandle)), POINTER(c_int)]
db_list_databases.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 221
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_list_drivers'):
continue
db_list_drivers = _lib.db_list_drivers
db_list_drivers.argtypes = []
db_list_drivers.restype = c_char_p
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 222
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_list_indexes'):
continue
db_list_indexes = _lib.db_list_indexes
db_list_indexes.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(POINTER(dbIndex)), POINTER(c_int)]
db_list_indexes.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 224
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_list_tables'):
continue
db_list_tables = _lib.db_list_tables
db_list_tables.argtypes = [POINTER(dbDriver), POINTER(POINTER(dbString)), POINTER(c_int), c_int]
db_list_tables.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 226
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_malloc'):
continue
db_malloc = _lib.db_malloc
db_malloc.argtypes = [c_int]
db_malloc.restype = POINTER(c_ubyte)
db_malloc.errcheck = lambda v,*a : cast(v, c_void_p)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 227
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__mark_database_closed'):
continue
db__mark_database_closed = _lib.db__mark_database_closed
db__mark_database_closed.argtypes = []
db__mark_database_closed.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 228
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__mark_database_open'):
continue
db__mark_database_open = _lib.db__mark_database_open
db__mark_database_open.argtypes = [String, String]
db__mark_database_open.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 229
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_memory_error'):
continue
db_memory_error = _lib.db_memory_error
db_memory_error.argtypes = []
db_memory_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 230
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_new_token'):
continue
db_new_token = _lib.db_new_token
db_new_token.argtypes = [dbAddress]
db_new_token.restype = dbToken
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 231
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_nocase_compare'):
continue
db_nocase_compare = _lib.db_nocase_compare
db_nocase_compare.argtypes = [String, String]
db_nocase_compare.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 232
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_noproc_error'):
continue
db_noproc_error = _lib.db_noproc_error
db_noproc_error.argtypes = [c_int]
db_noproc_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 233
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_open_database'):
continue
db_open_database = _lib.db_open_database
db_open_database.argtypes = [POINTER(dbDriver), POINTER(dbHandle)]
db_open_database.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 234
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_open_insert_cursor'):
continue
db_open_insert_cursor = _lib.db_open_insert_cursor
db_open_insert_cursor.argtypes = [POINTER(dbDriver), POINTER(dbCursor)]
db_open_insert_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 235
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_open_select_cursor'):
continue
db_open_select_cursor = _lib.db_open_select_cursor
db_open_select_cursor.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbCursor), c_int]
db_open_select_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 237
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_open_update_cursor'):
continue
db_open_update_cursor = _lib.db_open_update_cursor
db_open_update_cursor.argtypes = [POINTER(dbDriver), POINTER(dbString), POINTER(dbString), POINTER(dbCursor), c_int]
db_open_update_cursor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 239
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_print_column_definition'):
continue
db_print_column_definition = _lib.db_print_column_definition
db_print_column_definition.argtypes = [POINTER(FILE), POINTER(dbColumn)]
db_print_column_definition.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 240
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_print_error'):
continue
db_print_error = _lib.db_print_error
db_print_error.argtypes = []
db_print_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 241
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_print_index'):
continue
db_print_index = _lib.db_print_index
db_print_index.argtypes = [POINTER(FILE), POINTER(dbIndex)]
db_print_index.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 242
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_print_table_definition'):
continue
db_print_table_definition = _lib.db_print_table_definition
db_print_table_definition.argtypes = [POINTER(FILE), POINTER(dbTable)]
db_print_table_definition.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 243
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_procedure_not_implemented'):
continue
db_procedure_not_implemented = _lib.db_procedure_not_implemented
db_procedure_not_implemented.argtypes = [String]
db_procedure_not_implemented.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 244
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_protocol_error'):
continue
db_protocol_error = _lib.db_protocol_error
db_protocol_error.argtypes = []
db_protocol_error.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 245
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_read_dbmscap'):
continue
db_read_dbmscap = _lib.db_read_dbmscap
db_read_dbmscap.argtypes = []
db_read_dbmscap.restype = POINTER(dbDbmscap)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 246
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_realloc'):
continue
db_realloc = _lib.db_realloc
db_realloc.argtypes = [POINTER(None), c_int]
db_realloc.restype = POINTER(c_ubyte)
db_realloc.errcheck = lambda v,*a : cast(v, c_void_p)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 247
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_char'):
continue
db__recv_char = _lib.db__recv_char
db__recv_char.argtypes = [String]
db__recv_char.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 248
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_default_value'):
continue
db__recv_column_default_value = _lib.db__recv_column_default_value
db__recv_column_default_value.argtypes = [POINTER(dbColumn)]
db__recv_column_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 249
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_definition'):
continue
db__recv_column_definition = _lib.db__recv_column_definition
db__recv_column_definition.argtypes = [POINTER(dbColumn)]
db__recv_column_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 250
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_column_value'):
continue
db__recv_column_value = _lib.db__recv_column_value
db__recv_column_value.argtypes = [POINTER(dbColumn)]
db__recv_column_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 251
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_datetime'):
continue
db__recv_datetime = _lib.db__recv_datetime
db__recv_datetime.argtypes = [POINTER(dbDateTime)]
db__recv_datetime.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 252
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_double'):
continue
db__recv_double = _lib.db__recv_double
db__recv_double.argtypes = [POINTER(c_double)]
db__recv_double.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 253
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_double_array'):
continue
db__recv_double_array = _lib.db__recv_double_array
db__recv_double_array.argtypes = [POINTER(POINTER(c_double)), POINTER(c_int)]
db__recv_double_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 254
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_float'):
continue
db__recv_float = _lib.db__recv_float
db__recv_float.argtypes = [POINTER(c_float)]
db__recv_float.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 255
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_float_array'):
continue
db__recv_float_array = _lib.db__recv_float_array
db__recv_float_array.argtypes = [POINTER(POINTER(c_float)), POINTER(c_int)]
db__recv_float_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 256
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_handle'):
continue
db__recv_handle = _lib.db__recv_handle
db__recv_handle.argtypes = [POINTER(dbHandle)]
db__recv_handle.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 257
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_index'):
continue
db__recv_index = _lib.db__recv_index
db__recv_index.argtypes = [POINTER(dbIndex)]
db__recv_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 258
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_index_array'):
continue
db__recv_index_array = _lib.db__recv_index_array
db__recv_index_array.argtypes = [POINTER(POINTER(dbIndex)), POINTER(c_int)]
db__recv_index_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 259
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_int'):
continue
db__recv_int = _lib.db__recv_int
db__recv_int.argtypes = [POINTER(c_int)]
db__recv_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 260
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_int_array'):
continue
db__recv_int_array = _lib.db__recv_int_array
db__recv_int_array.argtypes = [POINTER(POINTER(c_int)), POINTER(c_int)]
db__recv_int_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 261
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_procnum'):
continue
db__recv_procnum = _lib.db__recv_procnum
db__recv_procnum.argtypes = [POINTER(c_int)]
db__recv_procnum.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 262
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_return_code'):
continue
db__recv_return_code = _lib.db__recv_return_code
db__recv_return_code.argtypes = [POINTER(c_int)]
db__recv_return_code.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 263
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_short'):
continue
db__recv_short = _lib.db__recv_short
db__recv_short.argtypes = [POINTER(c_short)]
db__recv_short.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 264
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_short_array'):
continue
db__recv_short_array = _lib.db__recv_short_array
db__recv_short_array.argtypes = [POINTER(POINTER(c_short)), POINTER(c_int)]
db__recv_short_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 265
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_string'):
continue
db__recv_string = _lib.db__recv_string
db__recv_string.argtypes = [POINTER(dbString)]
db__recv_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 266
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_string_array'):
continue
db__recv_string_array = _lib.db__recv_string_array
db__recv_string_array.argtypes = [POINTER(POINTER(dbString)), POINTER(c_int)]
db__recv_string_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 267
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_table_data'):
continue
db__recv_table_data = _lib.db__recv_table_data
db__recv_table_data.argtypes = [POINTER(dbTable)]
db__recv_table_data.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 268
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_table_definition'):
continue
db__recv_table_definition = _lib.db__recv_table_definition
db__recv_table_definition.argtypes = [POINTER(POINTER(dbTable))]
db__recv_table_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 269
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_token'):
continue
db__recv_token = _lib.db__recv_token
db__recv_token.argtypes = [POINTER(dbToken)]
db__recv_token.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 270
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__recv_value'):
continue
db__recv_value = _lib.db__recv_value
db__recv_value.argtypes = [POINTER(dbValue), c_int]
db__recv_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 271
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_Cstring'):
continue
db__send_Cstring = _lib.db__send_Cstring
db__send_Cstring.argtypes = [String]
db__send_Cstring.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 272
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_char'):
continue
db__send_char = _lib.db__send_char
db__send_char.argtypes = [c_int]
db__send_char.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 273
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_default_value'):
continue
db__send_column_default_value = _lib.db__send_column_default_value
db__send_column_default_value.argtypes = [POINTER(dbColumn)]
db__send_column_default_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 274
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_definition'):
continue
db__send_column_definition = _lib.db__send_column_definition
db__send_column_definition.argtypes = [POINTER(dbColumn)]
db__send_column_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 275
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_column_value'):
continue
db__send_column_value = _lib.db__send_column_value
db__send_column_value.argtypes = [POINTER(dbColumn)]
db__send_column_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 276
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_datetime'):
continue
db__send_datetime = _lib.db__send_datetime
db__send_datetime.argtypes = [POINTER(dbDateTime)]
db__send_datetime.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 277
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_double'):
continue
db__send_double = _lib.db__send_double
db__send_double.argtypes = [c_double]
db__send_double.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 278
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_double_array'):
continue
db__send_double_array = _lib.db__send_double_array
db__send_double_array.argtypes = [POINTER(c_double), c_int]
db__send_double_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 279
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_failure'):
continue
db__send_failure = _lib.db__send_failure
db__send_failure.argtypes = []
db__send_failure.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 280
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_float'):
continue
db__send_float = _lib.db__send_float
db__send_float.argtypes = [c_float]
db__send_float.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 281
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_float_array'):
continue
db__send_float_array = _lib.db__send_float_array
db__send_float_array.argtypes = [POINTER(c_float), c_int]
db__send_float_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 282
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_handle'):
continue
db__send_handle = _lib.db__send_handle
db__send_handle.argtypes = [POINTER(dbHandle)]
db__send_handle.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 283
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_index'):
continue
db__send_index = _lib.db__send_index
db__send_index.argtypes = [POINTER(dbIndex)]
db__send_index.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 284
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_index_array'):
continue
db__send_index_array = _lib.db__send_index_array
db__send_index_array.argtypes = [POINTER(dbIndex), c_int]
db__send_index_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 285
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_int'):
continue
db__send_int = _lib.db__send_int
db__send_int.argtypes = [c_int]
db__send_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 286
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_int_array'):
continue
db__send_int_array = _lib.db__send_int_array
db__send_int_array.argtypes = [POINTER(c_int), c_int]
db__send_int_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 287
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_procedure_not_implemented'):
continue
db__send_procedure_not_implemented = _lib.db__send_procedure_not_implemented
db__send_procedure_not_implemented.argtypes = [c_int]
db__send_procedure_not_implemented.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 288
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_procedure_ok'):
continue
db__send_procedure_ok = _lib.db__send_procedure_ok
db__send_procedure_ok.argtypes = [c_int]
db__send_procedure_ok.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 289
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_short'):
continue
db__send_short = _lib.db__send_short
db__send_short.argtypes = [c_int]
db__send_short.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 290
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_short_array'):
continue
db__send_short_array = _lib.db__send_short_array
db__send_short_array.argtypes = [POINTER(c_short), c_int]
db__send_short_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 291
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_string'):
continue
db__send_string = _lib.db__send_string
db__send_string.argtypes = [POINTER(dbString)]
db__send_string.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 292
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_string_array'):
continue
db__send_string_array = _lib.db__send_string_array
db__send_string_array.argtypes = [POINTER(dbString), c_int]
db__send_string_array.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 293
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_success'):
continue
db__send_success = _lib.db__send_success
db__send_success.argtypes = []
db__send_success.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 294
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_table_data'):
continue
db__send_table_data = _lib.db__send_table_data
db__send_table_data.argtypes = [POINTER(dbTable)]
db__send_table_data.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 295
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_table_definition'):
continue
db__send_table_definition = _lib.db__send_table_definition
db__send_table_definition.argtypes = [POINTER(dbTable)]
db__send_table_definition.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 296
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_token'):
continue
db__send_token = _lib.db__send_token
db__send_token.argtypes = [POINTER(dbToken)]
db__send_token.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 297
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db__send_value'):
continue
db__send_value = _lib.db__send_value
db__send_value.argtypes = [POINTER(dbValue), c_int]
db__send_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 298
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_CatValArray'):
continue
db_select_CatValArray = _lib.db_select_CatValArray
db_select_CatValArray.argtypes = [POINTER(dbDriver), String, String, String, String, POINTER(dbCatValArray)]
db_select_CatValArray.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 301
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_int'):
continue
db_select_int = _lib.db_select_int
db_select_int.argtypes = [POINTER(dbDriver), String, String, String, POINTER(POINTER(c_int))]
db_select_int.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 303
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_select_value'):
continue
db_select_value = _lib.db_select_value
db_select_value.argtypes = [POINTER(dbDriver), String, String, c_int, String, POINTER(dbValue)]
db_select_value.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 305
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_description'):
continue
db_set_column_description = _lib.db_set_column_description
db_set_column_description.argtypes = [POINTER(dbColumn), String]
db_set_column_description.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 306
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_has_defined_default_value'):
continue
db_set_column_has_defined_default_value = _lib.db_set_column_has_defined_default_value
db_set_column_has_defined_default_value.argtypes = [POINTER(dbColumn)]
db_set_column_has_defined_default_value.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 307
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_has_undefined_default_value'):
continue
db_set_column_has_undefined_default_value = _lib.db_set_column_has_undefined_default_value
db_set_column_has_undefined_default_value.argtypes = [POINTER(dbColumn)]
db_set_column_has_undefined_default_value.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 308
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_host_type'):
continue
db_set_column_host_type = _lib.db_set_column_host_type
db_set_column_host_type.argtypes = [POINTER(dbColumn), c_int]
db_set_column_host_type.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 309
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_length'):
continue
db_set_column_length = _lib.db_set_column_length
db_set_column_length.argtypes = [POINTER(dbColumn), c_int]
db_set_column_length.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 310
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_name'):
continue
db_set_column_name = _lib.db_set_column_name
db_set_column_name.argtypes = [POINTER(dbColumn), String]
db_set_column_name.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 311
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_column_null_allowed'):
continue
db_set_column_null_allowed = _lib.db_set_column_null_allowed
db_set_column_null_allowed.argtypes = [POINTER(dbColumn)]
db_set_column_null_allowed.restype = None
break
# ctypes bindings for the dbmi set/test/unset/driver API
# (declarations from include/grass/defs/dbmi.h, lines 312-414).
#
# Each entry is (symbol, argtypes, restype).  For every symbol we scan
# the loaded GRASS libraries in order, take the FIRST one exporting it,
# attach the ctypes signature to that function object, and publish it as
# a module-level global -- exactly what the unrolled per-symbol loops of
# the generated code do.  A symbol missing from every library is simply
# not defined at module level, as before.
_DBMI_SIGNATURES = [
    ('db_set_column_precision', [POINTER(dbColumn), c_int], None),
    ('db_set_column_scale', [POINTER(dbColumn), c_int], None),
    ('db_set_column_select_priv_granted', [POINTER(dbColumn)], None),
    ('db_set_column_select_priv_not_granted', [POINTER(dbColumn)], None),
    ('db_set_column_sqltype', [POINTER(dbColumn), c_int], None),
    ('db_set_column_update_priv_granted', [POINTER(dbColumn)], None),
    ('db_set_column_update_priv_not_granted', [POINTER(dbColumn)], None),
    ('db_set_column_use_default_value', [POINTER(dbColumn)], None),
    ('db_set_connection', [POINTER(dbConnection)], c_int),
    ('db_set_cursor_column_flag', [POINTER(dbCursor), c_int], None),
    ('db_set_cursor_column_for_update', [POINTER(dbCursor), c_int], None),
    ('db_set_cursor_mode', [POINTER(dbCursor), c_int], None),
    ('db_set_cursor_mode_insensitive', [POINTER(dbCursor)], None),
    ('db_set_cursor_mode_scroll', [POINTER(dbCursor)], None),
    ('db_set_cursor_table', [POINTER(dbCursor), POINTER(dbTable)], None),
    ('db_set_cursor_token', [POINTER(dbCursor), dbToken], None),
    ('db_set_cursor_type_insert', [POINTER(dbCursor)], None),
    ('db_set_cursor_type_readonly', [POINTER(dbCursor)], None),
    ('db_set_cursor_type_update', [POINTER(dbCursor)], None),
    ('db_set_default_connection', [], c_int),
    ('db_set_error_who', [String], None),
    ('db_set_handle', [POINTER(dbHandle), String, String], c_int),
    ('db_set_error_handler_driver', [POINTER(dbDriver)], None),
    ('db_unset_error_handler_driver', [POINTER(dbDriver)], None),
    ('db_set_index_column_name', [POINTER(dbIndex), c_int, String], c_int),
    ('db_set_index_name', [POINTER(dbIndex), String], c_int),
    ('db_set_index_table_name', [POINTER(dbIndex), String], c_int),
    ('db_set_index_type_non_unique', [POINTER(dbIndex)], c_int),
    ('db_set_index_type_unique', [POINTER(dbIndex)], c_int),
    ('db__set_protocol_fds', [POINTER(FILE), POINTER(FILE)], None),
    ('db_set_string', [POINTER(dbString), String], c_int),
    ('db_set_string_no_copy', [POINTER(dbString), String], c_int),
    ('db_set_table_column', [POINTER(dbTable), c_int, POINTER(dbColumn)], c_int),
    ('db_set_table_delete_priv_granted', [POINTER(dbTable)], None),
    ('db_set_table_delete_priv_not_granted', [POINTER(dbTable)], None),
    ('db_set_table_description', [POINTER(dbTable), String], c_int),
    ('db_set_table_insert_priv_granted', [POINTER(dbTable)], None),
    ('db_set_table_insert_priv_not_granted', [POINTER(dbTable)], None),
    ('db_set_table_name', [POINTER(dbTable), String], c_int),
    ('db_set_table_select_priv_granted', [POINTER(dbTable)], None),
    ('db_set_table_select_priv_not_granted', [POINTER(dbTable)], None),
    ('db_set_table_update_priv_granted', [POINTER(dbTable)], None),
    ('db_set_table_update_priv_not_granted', [POINTER(dbTable)], None),
    ('db_set_value_datetime_current', [POINTER(dbValue)], None),
    ('db_set_value_datetime_not_current', [POINTER(dbValue)], None),
    ('db_set_value_day', [POINTER(dbValue), c_int], None),
    ('db_set_value_double', [POINTER(dbValue), c_double], None),
    ('db_set_value_hour', [POINTER(dbValue), c_int], None),
    ('db_set_value_int', [POINTER(dbValue), c_int], None),
    ('db_set_value_minute', [POINTER(dbValue), c_int], None),
    ('db_set_value_month', [POINTER(dbValue), c_int], None),
    ('db_set_value_not_null', [POINTER(dbValue)], None),
    ('db_set_value_null', [POINTER(dbValue)], None),
    ('db_set_value_seconds', [POINTER(dbValue), c_double], None),
    ('db_set_value_string', [POINTER(dbValue), String], c_int),
    ('db_set_value_year', [POINTER(dbValue), c_int], None),
    ('db_shutdown_driver', [POINTER(dbDriver)], c_int),
    ('db_sqltype_name', [c_int], c_char_p),
    ('db_sqltype_to_Ctype', [c_int], c_int),
    ('db_start_driver', [String], POINTER(dbDriver)),
    ('db_start_driver_open_database', [String, String], POINTER(dbDriver)),
    ('db__start_procedure_call', [c_int], c_int),
    ('db_strip', [String], None),
    ('db_syserror', [String], None),
    ('db_table_exists', [String, String, String], c_int),
    ('db_test_column_has_default_value', [POINTER(dbColumn)], c_int),
    ('db_test_column_has_defined_default_value', [POINTER(dbColumn)], c_int),
    ('db_test_column_has_undefined_default_value', [POINTER(dbColumn)], c_int),
    ('db_test_column_null_allowed', [POINTER(dbColumn)], c_int),
    ('db_test_column_use_default_value', [POINTER(dbColumn)], c_int),
    ('db_test_cursor_any_column_flag', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_any_column_for_update', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_column_flag', [POINTER(dbCursor), c_int], c_int),
    ('db_test_cursor_column_for_update', [POINTER(dbCursor), c_int], c_int),
    ('db_test_cursor_mode_insensitive', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_mode_scroll', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_type_fetch', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_type_insert', [POINTER(dbCursor)], c_int),
    ('db_test_cursor_type_update', [POINTER(dbCursor)], c_int),
    ('db__test_database_open', [], c_int),
    ('db_test_index_type_unique', [POINTER(dbIndex)], c_int),
    ('db_test_value_datetime_current', [POINTER(dbValue)], c_int),
    ('db_test_value_isnull', [POINTER(dbValue)], c_int),
    ('db_unset_column_has_default_value', [POINTER(dbColumn)], None),
    ('db_unset_column_null_allowed', [POINTER(dbColumn)], None),
    ('db_unset_column_use_default_value', [POINTER(dbColumn)], None),
    ('db_unset_cursor_column_flag', [POINTER(dbCursor), c_int], None),
    ('db_unset_cursor_column_for_update', [POINTER(dbCursor), c_int], None),
    ('db_unset_cursor_mode', [POINTER(dbCursor)], None),
    ('db_unset_cursor_mode_insensitive', [POINTER(dbCursor)], None),
    ('db_unset_cursor_mode_scroll', [POINTER(dbCursor)], None),
    ('db_update', [POINTER(dbCursor)], c_int),
    ('db_gversion', [POINTER(dbDriver), POINTER(dbString), POINTER(dbString)], c_int),
    ('db_whoami', [], c_char_p),
    ('db_zero', [POINTER(None), c_int], None),
    ('db_zero_string', [POINTER(dbString)], None),
    ('db_sizeof_string', [POINTER(dbString)], c_uint),
    ('db_set_login', [String, String, String, String], c_int),
]

for _name, _argtypes, _restype in _DBMI_SIGNATURES:
    for _lib in six.itervalues(_libs):
        if not hasattr(_lib, _name):
            continue
        _func = getattr(_lib, _name)
        _func.argtypes = _argtypes
        _func.restype = _restype
        globals()[_name] = _func
        break

# db_store returns a newly allocated C string.  On platforms where an
# int cannot hold a pointer, the raw pointer restype would be truncated,
# so the conversion is done via an errcheck hook instead; this mirrors
# the standard ctypesgen String/ReturnString idiom.
for _lib in six.itervalues(_libs):
    if not hasattr(_lib, 'db_store'):
        continue
    db_store = _lib.db_store
    db_store.argtypes = [String]
    if sizeof(c_int) == sizeof(c_void_p):
        db_store.restype = ReturnString
    else:
        db_store.restype = String
        db_store.errcheck = ReturnString
    break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 414
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_set_login2'):
continue
db_set_login2 = _lib.db_set_login2
db_set_login2.argtypes = [String, String, String, String, String, String, c_int]
db_set_login2.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 416
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_login'):
continue
db_get_login = _lib.db_get_login
db_get_login.argtypes = [String, String, POINTER(POINTER(c_char)), POINTER(POINTER(c_char))]
db_get_login.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 417
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_login2'):
continue
db_get_login2 = _lib.db_get_login2
db_get_login2.argtypes = [String, String, POINTER(POINTER(c_char)), POINTER(POINTER(c_char)), POINTER(POINTER(c_char)), POINTER(POINTER(c_char))]
db_get_login2.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/dbmi.h: 419
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'db_get_login_dump'):
continue
db_get_login_dump = _lib.db_get_login_dump
db_get_login_dump.argtypes = [POINTER(FILE)]
db_get_login_dump.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 18
try:
DB_VERSION = '0'
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 21
try:
DB_DEFAULT_DRIVER = 'sqlite'
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 27
try:
DB_PROC_VERSION = 999
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 29
try:
DB_PROC_CLOSE_DATABASE = 101
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 30
try:
DB_PROC_CREATE_DATABASE = 102
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 31
try:
DB_PROC_DELETE_DATABASE = 103
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 32
try:
DB_PROC_FIND_DATABASE = 104
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 33
try:
DB_PROC_LIST_DATABASES = 105
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 34
try:
DB_PROC_OPEN_DATABASE = 106
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 35
try:
DB_PROC_SHUTDOWN_DRIVER = 107
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 37
try:
DB_PROC_CLOSE_CURSOR = 201
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 38
try:
DB_PROC_DELETE = 202
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 39
try:
DB_PROC_FETCH = 203
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 40
try:
DB_PROC_INSERT = 204
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 41
try:
DB_PROC_OPEN_INSERT_CURSOR = 205
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 42
try:
DB_PROC_OPEN_SELECT_CURSOR = 206
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 43
try:
DB_PROC_OPEN_UPDATE_CURSOR = 207
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 44
try:
DB_PROC_UPDATE = 208
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 45
try:
DB_PROC_ROWS = 209
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 46
try:
DB_PROC_BIND_UPDATE = 220
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 47
try:
DB_PROC_BIND_INSERT = 221
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 49
try:
DB_PROC_EXECUTE_IMMEDIATE = 301
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 50
try:
DB_PROC_BEGIN_TRANSACTION = 302
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 51
try:
DB_PROC_COMMIT_TRANSACTION = 303
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 53
try:
DB_PROC_CREATE_TABLE = 401
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 54
try:
DB_PROC_DESCRIBE_TABLE = 402
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 55
try:
DB_PROC_DROP_TABLE = 403
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 56
try:
DB_PROC_LIST_TABLES = 404
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 57
try:
DB_PROC_ADD_COLUMN = 405
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 58
try:
DB_PROC_DROP_COLUMN = 406
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 59
try:
DB_PROC_GRANT_ON_TABLE = 407
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 61
try:
DB_PROC_CREATE_INDEX = 701
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 62
try:
DB_PROC_LIST_INDEXES = 702
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 63
try:
DB_PROC_DROP_INDEX = 703
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 66
try:
DB_PERM_R = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 67
try:
DB_PERM_W = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 68
try:
DB_PERM_X = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 71
try:
DB_OK = 0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 72
try:
DB_FAILED = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 73
try:
DB_NOPROC = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 74
try:
DB_MEMORY_ERR = (-1)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 75
try:
DB_PROTOCOL_ERR = (-2)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 76
try:
DB_EOF = (-1)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 79
try:
DB_SQL_TYPE_UNKNOWN = 0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 81
try:
DB_SQL_TYPE_CHARACTER = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 82
try:
DB_SQL_TYPE_SMALLINT = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 83
try:
DB_SQL_TYPE_INTEGER = 3
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 84
try:
DB_SQL_TYPE_REAL = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 85
try:
DB_SQL_TYPE_DOUBLE_PRECISION = 6
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 86
try:
DB_SQL_TYPE_DECIMAL = 7
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 87
try:
DB_SQL_TYPE_NUMERIC = 8
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 88
try:
DB_SQL_TYPE_DATE = 9
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 89
try:
DB_SQL_TYPE_TIME = 10
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 90
try:
DB_SQL_TYPE_TIMESTAMP = 11
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 91
try:
DB_SQL_TYPE_INTERVAL = 12
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 92
try:
DB_SQL_TYPE_TEXT = 13
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 94
try:
DB_SQL_TYPE_SERIAL = 21
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 97
try:
DB_YEAR = 16384
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 98
try:
DB_MONTH = 8192
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 99
try:
DB_DAY = 4096
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 100
try:
DB_HOUR = 2048
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 101
try:
DB_MINUTE = 1024
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 102
try:
DB_SECOND = 512
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 103
try:
DB_FRACTION = 256
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 104
try:
DB_DATETIME_MASK = 65280
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 107
try:
DB_C_TYPE_STRING = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 108
try:
DB_C_TYPE_INT = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 109
try:
DB_C_TYPE_DOUBLE = 3
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 110
try:
DB_C_TYPE_DATETIME = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 113
try:
DB_CURRENT = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 114
try:
DB_NEXT = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 115
try:
DB_PREVIOUS = 3
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 116
try:
DB_FIRST = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 117
try:
DB_LAST = 5
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 120
try:
DB_READONLY = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 121
try:
DB_INSERT = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 122
try:
DB_UPDATE = 3
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 123
try:
DB_SEQUENTIAL = 0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 124
try:
DB_SCROLL = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 125
try:
DB_INSENSITIVE = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 128
try:
DB_GRANTED = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 129
try:
DB_NOT_GRANTED = (-1)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 132
try:
DB_PRIV_SELECT = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 134
try:
DB_GROUP = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 135
try:
DB_PUBLIC = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 138
try:
DB_DEFINED = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 139
try:
DB_UNDEFINED = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 142
try:
DB_SQL_MAX = 8192
except:
pass
_db_string = struct__db_string # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 151
_dbmscap = struct__dbmscap # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 153
_db_dirent = struct__db_dirent # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 166
_db_driver = struct__db_driver # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 173
_db_handle = struct__db_handle # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 180
_db_date_time = struct__db_date_time # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 191
_db_value = struct__db_value # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 200
_db_column = struct__db_column # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 218
_db_table = struct__db_table # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 228
_db_cursor = struct__db_cursor # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 238
_db_index = struct__db_index # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 247
_db_driver_state = struct__db_driver_state # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 256
_db_connection = struct__db_connection # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/dbmi.h: 304
# No inserted files | 0.174656 | 0.053403 |
import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_bit_rate_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
input_data_type_def = {
'names': ('algorithm_names', '0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
output_csv_file_path = 'D:\\acl-dev\\tools\\graph_generation\\bit_rates.csv'
output_csv_data = []
output_csv_headers = ['Bit Rate']
output_csv_data.append(['0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'])
for entry in input_sjson_data['inputs']:
print('Parsing {} ...'.format(entry['header']))
csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
filter = entry.get('filter', None)
if filter != None:
best_variable_data_mask = csv_data['algorithm_names'] == bytes(entry['filter'], encoding = 'utf-8')
csv_data = csv_data[best_variable_data_mask]
# Strip algorithm name
output_csv_data.append(csv_data[0].tolist()[1:])
output_csv_headers.append(entry['header'])
output_csv_data = numpy.column_stack(output_csv_data)
with open(output_csv_file_path, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data, delimiter=',', fmt=('%s')) | tools/graph_generation/gen_bit_rate_stats.py | import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_bit_rate_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
input_data_type_def = {
'names': ('algorithm_names', '0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
output_csv_file_path = 'D:\\acl-dev\\tools\\graph_generation\\bit_rates.csv'
output_csv_data = []
output_csv_headers = ['Bit Rate']
output_csv_data.append(['0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'])
for entry in input_sjson_data['inputs']:
print('Parsing {} ...'.format(entry['header']))
csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
filter = entry.get('filter', None)
if filter != None:
best_variable_data_mask = csv_data['algorithm_names'] == bytes(entry['filter'], encoding = 'utf-8')
csv_data = csv_data[best_variable_data_mask]
# Strip algorithm name
output_csv_data.append(csv_data[0].tolist()[1:])
output_csv_headers.append(entry['header'])
output_csv_data = numpy.column_stack(output_csv_data)
with open(output_csv_file_path, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data, delimiter=',', fmt=('%s')) | 0.134975 | 0.115661 |
import signal
import time
import datetime
import sys
from mqtt_client_service import MqttClientService
from sense_service import SenseService
import logging
try:
from enviro_service import EnviroService
except:
logging.warning("EnviroService not ready...")
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("""weather.py - Print readings from the BME280 weather sensor.
Press Ctrl+C to exit!
""")
is_shutdown = False
def stop(sig, frame):
logging.info(f"SIGTERM at {datetime.datetime.now()}")
global is_shutdown
is_shutdown = True
def ignore(sig, frsma):
logging.info(f"SIGHUP at {datetime.datetime.now()}")
signal.signal(signal.SIGTERM, stop)
signal.signal(signal.SIGHUP, ignore)
logging.info(f"START at {datetime.datetime.now()}")
client = MqttClientService()
# Try connect Mqtt
device_ready=False
client=None
while not device_ready:
try:
client = MqttClientService()
client.connect()
device_ready=True
except:
logging.warning("Device not yet ready")
time.sleep(1)
def warm_up_service(service):
logging.info(f"Performing warmup for service {service}")
for i in range(10):
service.read_environment_data()
time.sleep(0.2)
# Try crate Sense Service for SenseHat
sense_service = None
try:
sense_service = SenseService()
except:
logging.warning("SenseHat not available ...")
# Try crate Sense Service for SenseHat
enviro_service = None
try:
enviro_service = EnviroService()
except:
logging.warning("EnviroHat not available ...")
try:
second = 0
period = 60 * 5
splash_period = 60
while not is_shutdown:
if second % period == 0:
# Sense Service
if sense_service is not None:
warm_up_service(sense_service)
environment_data = sense_service.read_environment_data()
client.send_environment_data(environment_data)
env_value_t = round(environment_data['temperature'], 1)
sense_service.show_red_message(f"{env_value_t}")
if second % splash_period == 0:
sense_service.draw_sense_splash()
if enviro_service is not None:
warm_up_service(enviro_service)
environment_data = enviro_service.read_environment_data()
client.send_environment_data(environment_data)
time.sleep(1)
second += 1
except KeyboardInterrupt as e:
logging.info("Terminating sense service")
if sense_service is not None:
sense_service.clear()
finally:
if sense_service is not None:
sense_service.clear()
client.disconnect() | environment/sense-enviro-service.py | import signal
import time
import datetime
import sys
from mqtt_client_service import MqttClientService
from sense_service import SenseService
import logging
try:
from enviro_service import EnviroService
except:
logging.warning("EnviroService not ready...")
logging.basicConfig(
format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
logging.info("""weather.py - Print readings from the BME280 weather sensor.
Press Ctrl+C to exit!
""")
is_shutdown = False
def stop(sig, frame):
logging.info(f"SIGTERM at {datetime.datetime.now()}")
global is_shutdown
is_shutdown = True
def ignore(sig, frsma):
logging.info(f"SIGHUP at {datetime.datetime.now()}")
signal.signal(signal.SIGTERM, stop)
signal.signal(signal.SIGHUP, ignore)
logging.info(f"START at {datetime.datetime.now()}")
client = MqttClientService()
# Try connect Mqtt
device_ready=False
client=None
while not device_ready:
try:
client = MqttClientService()
client.connect()
device_ready=True
except:
logging.warning("Device not yet ready")
time.sleep(1)
def warm_up_service(service):
logging.info(f"Performing warmup for service {service}")
for i in range(10):
service.read_environment_data()
time.sleep(0.2)
# Try crate Sense Service for SenseHat
sense_service = None
try:
sense_service = SenseService()
except:
logging.warning("SenseHat not available ...")
# Try crate Sense Service for SenseHat
enviro_service = None
try:
enviro_service = EnviroService()
except:
logging.warning("EnviroHat not available ...")
try:
second = 0
period = 60 * 5
splash_period = 60
while not is_shutdown:
if second % period == 0:
# Sense Service
if sense_service is not None:
warm_up_service(sense_service)
environment_data = sense_service.read_environment_data()
client.send_environment_data(environment_data)
env_value_t = round(environment_data['temperature'], 1)
sense_service.show_red_message(f"{env_value_t}")
if second % splash_period == 0:
sense_service.draw_sense_splash()
if enviro_service is not None:
warm_up_service(enviro_service)
environment_data = enviro_service.read_environment_data()
client.send_environment_data(environment_data)
time.sleep(1)
second += 1
except KeyboardInterrupt as e:
logging.info("Terminating sense service")
if sense_service is not None:
sense_service.clear()
finally:
if sense_service is not None:
sense_service.clear()
client.disconnect() | 0.25488 | 0.076236 |
import datetime as dt
import logging
from dataclasses import dataclass
import requests
from .const import *
from .Token import Token
from .Vehicle import Vehicle
_LOGGER = logging.getLogger(__name__)
@dataclass
class ClimateRequestOptions:
set_temp: float = None
duration: int = None
defrost: bool = None
climate: bool = None
heating: int = None
front_left_seat: int = None
front_right_seat: int = None
rear_left_seat: int = None
rear_right_seat: int = None
class ApiImpl:
data_timezone = dt.timezone.utc
temperature_range = None
def __init__(self) -> None:
"""Initialize."""
self.last_action_tracked = False
self.supports_soc_range = True
def login(self, username: str, password: str) -> Token:
"""Login into cloud endpoints and return Token"""
pass
def get_vehicles(self, token: Token) -> list[Vehicle]:
"""Return all Vehicle instances for a given Token"""
pass
def refresh_vehicles(self, token: Token, vehicles: list[Vehicle]) -> None:
"""Refresh the vehicle data provided in get_vehicles. Required for Kia USA as key is session specific"""
pass
def get_last_updated_at(self, value) -> dt.datetime:
"""Convert last updated value of vehicle into into datetime"""
pass
def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None:
"""Get cached vehicle data and update Vehicle instance with it"""
pass
def get_fresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
pass
def check_action_status(self, token: Token, vehicle: Vehicle, action_id: str):
"""Check if a previous placed call was successful"""
pass
def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
"""Triggers the system to contact the car and get fresh data"""
pass
def get_geocoded_location(self, lat, lon) -> dict:
email_parameter = ""
if self.use_email_with_geocode_api == True:
email_parameter = "&email=" + self.username
url = (
"https://nominatim.openstreetmap.org/reverse?lat="
+ str(lat)
+ "&lon="
+ str(lon)
+ "&format=json&addressdetails=1&zoom=18"
+ email_parameter
)
response = requests.get(url)
response = response.json()
return response
def lock_action(self, token: Token, vehicle: Vehicle, action: str) -> str:
"""Lock or unlocks a vehicle. Returns the tracking ID"""
pass
def start_climate(
self,
token: Token,
vehicle: Vehicle,
options: ClimateRequestOptions
) -> str:
"""Starts climate or remote start. Returns the tracking ID"""
pass
def stop_climate(self, token: Token, vehicle: Vehicle) -> str:
"""Stops climate or remote start. Returns the tracking ID"""
pass
def start_charge(self, token: Token, vehicle: Vehicle) -> str:
"""Starts charge. Returns the tracking ID"""
pass
def stop_charge(self, token: Token, vehicle: Vehicle) -> str:
"""Stops charge. Returns the tracking ID"""
pass
def set_charge_limits(
self, token: Token, vehicle: Vehicle, ac_limit: int, dc_limit: int
) -> str:
"""Sets charge limits. Returns the tracking ID"""
pass | hyundai_kia_connect_api/ApiImpl.py | import datetime as dt
import logging
from dataclasses import dataclass
import requests
from .const import *
from .Token import Token
from .Vehicle import Vehicle
_LOGGER = logging.getLogger(__name__)
@dataclass
class ClimateRequestOptions:
set_temp: float = None
duration: int = None
defrost: bool = None
climate: bool = None
heating: int = None
front_left_seat: int = None
front_right_seat: int = None
rear_left_seat: int = None
rear_right_seat: int = None
class ApiImpl:
data_timezone = dt.timezone.utc
temperature_range = None
def __init__(self) -> None:
"""Initialize."""
self.last_action_tracked = False
self.supports_soc_range = True
def login(self, username: str, password: str) -> Token:
"""Login into cloud endpoints and return Token"""
pass
def get_vehicles(self, token: Token) -> list[Vehicle]:
"""Return all Vehicle instances for a given Token"""
pass
def refresh_vehicles(self, token: Token, vehicles: list[Vehicle]) -> None:
"""Refresh the vehicle data provided in get_vehicles. Required for Kia USA as key is session specific"""
pass
def get_last_updated_at(self, value) -> dt.datetime:
"""Convert last updated value of vehicle into into datetime"""
pass
def update_vehicle_with_cached_state(self, token: Token, vehicle: Vehicle) -> None:
"""Get cached vehicle data and update Vehicle instance with it"""
pass
def get_fresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
pass
def check_action_status(self, token: Token, vehicle: Vehicle, action_id: str):
"""Check if a previous placed call was successful"""
pass
def force_refresh_vehicle_state(self, token: Token, vehicle: Vehicle) -> None:
"""Triggers the system to contact the car and get fresh data"""
pass
def get_geocoded_location(self, lat, lon) -> dict:
email_parameter = ""
if self.use_email_with_geocode_api == True:
email_parameter = "&email=" + self.username
url = (
"https://nominatim.openstreetmap.org/reverse?lat="
+ str(lat)
+ "&lon="
+ str(lon)
+ "&format=json&addressdetails=1&zoom=18"
+ email_parameter
)
response = requests.get(url)
response = response.json()
return response
def lock_action(self, token: Token, vehicle: Vehicle, action: str) -> str:
"""Lock or unlocks a vehicle. Returns the tracking ID"""
pass
def start_climate(
self,
token: Token,
vehicle: Vehicle,
options: ClimateRequestOptions
) -> str:
"""Starts climate or remote start. Returns the tracking ID"""
pass
def stop_climate(self, token: Token, vehicle: Vehicle) -> str:
"""Stops climate or remote start. Returns the tracking ID"""
pass
def start_charge(self, token: Token, vehicle: Vehicle) -> str:
"""Starts charge. Returns the tracking ID"""
pass
def stop_charge(self, token: Token, vehicle: Vehicle) -> str:
"""Stops charge. Returns the tracking ID"""
pass
def set_charge_limits(
self, token: Token, vehicle: Vehicle, ac_limit: int, dc_limit: int
) -> str:
"""Sets charge limits. Returns the tracking ID"""
pass | 0.612078 | 0.330876 |
import os
import argparse
import subprocess
from pathlib import Path
from rrap import concatenator
from rrap import indexer
from rrap import read_recruiter
from rrap import visualizer
def main():
c = Controller()
c.run()
class Controller:
def __init__(self):
self.p = argparse.ArgumentParser(prog="RRAP",
description="Run read recruitment on a set of cleaned fna files")
# argument groups
self.inputs = None
self.outputs = None
self.optional = None
self.subcommands = None
self.arg_groups = []
# args
self.args = None
# pipes
self.concatenator = None
self.indexer = None
self.read_recruiter = None
self.visualizer = None
# intermediate products
self.cat_file_path = None
self.index_dir_path = None
self.rpkm_heater_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rpkm_heater.py")
self.stats_dir_path = None
def run(self):
self.add_arguments()
print("---------making output dir if needed-------------")
self.set_output_dir()
self.cat_file_name = os.path.join(self.args.o, 'allgenomes_cat_{0}.fna'.format(self.args.n))
if not self.args.index_pass:
if self.args.crg:
self.cat_file_path = self.args.crg
else:
print("---------concatenating reference genomes-------------")
self.concatenator = concatenator.Concatenator(self.args)
self.cat_file_path = self.concatenator.concatenate()
print("---------indexing reference genomes-------------")
self.indexer = indexer.Indexer(self.args, os.path.join(self.index_dir_path, "val"), self.cat_file_path)
self.indexer.index()
else:
print("---------skipped indexing reference genomes-------------")
if not self.args.rr_pass:
print("---------read recruitment and data transform-------------")
self.read_recruiter = read_recruiter.ReadRecruiter(self.args, os.path.join(self.index_dir_path, "val"),
self.cat_file_path, self.stats_dir_path,
self.bam_dir_path)
self.read_recruiter.read_recruit()
else:
print("---------skipped read recruitment and data transform-------------")
if not self.args.vis_pass:
print("---------visualization-------------")
self.visualizer = visualizer.Visualizer(self.args, self.rpkm_heater_path, self.stats_dir_path)
self.visualizer.calculate_rpkm()
if self.args.extra_vis:
self.visualizer.plot_heatmaps()
else:
print("---------skipped visualization-------------")
def add_arguments(self):
# TODO specify argument groups
self.inputs = self.p.add_argument_group("## input arguments")
self.outputs = self.p.add_argument_group("## output arguments")
self.options = self.p.add_argument_group("## options")
self.arg_groups.extend([self.inputs, self.outputs, self.optional])
# Add the arguments
self.inputs.add_argument('-i', help='text file of all dir paths that contain cleaned metaG fna files',
required=True)
self.inputs.add_argument('-crg', help=' path for concatenated reference genome fa file',
required=False)
self.inputs.add_argument('-rg', help='input directory for reference genomes',
required=True)
self.outputs.add_argument('-o', help='output directory path',
required=True)
self.inputs.add_argument('-n', help='name of the project', required=True, metavar='project name')
self.inputs.add_argument('-sort_gen', help='txt file of sorted genomes (if --extra-vis flag is used)', required=False)
self.inputs.add_argument('-sort_samples', help='txt file of sorted samples (if --extra-vis flag is used)', required=False)
self.inputs.add_argument("--threads", help='number of available threads', required=False)
self.inputs.add_argument("-suffix", default="_pass_1.fastq",
help="everything in metaG file name that is after the acc for the forward (R1) read files \n"
"e.g. (-QUALITY_PASSED_R1.fastq for <sample_acc>-QUALITY_PASSED_R1.fastq) \n"
"Otherwise, RRAP assumes that the forward pass file name is formatted as <acc>_pass_1.fastq"
"NOTE: suffixes that contain a dash must specify '--' as an escape character e.g. '-suffix \"-- -QUALITY_PASSED_R1.fastq\"'")
# specify optional args
self.options.add_argument("--merge-contigs", default=False, dest='contig_merge',
action='store_true', help="Concatenate contigs under individual organisms")
self.options.add_argument("--skip-indexing", default=False, dest='index_pass',
action='store_true',
help='Specify if the indexing step has already been completed and can be skipped. \
If this flag is used, please check that bowtie2 index files exist e.g. \
<output_dir_path>/index_dir/<project_name>.x.bt2')
self.options.add_argument("--skip-rr", default=False, dest='rr_pass',
action='store_true',
help='Specify if the read recruitment step has already been completed and can be' \
'skipped. If this flag is used, please check that read recruitment files exist' \
'e.g. <output_dir_path>/')
self.options.add_argument("--skip-vis", default=False, dest='vis_pass',
action='store_true',
help='Specify if the visualization step can be skipped')
self.options.add_argument("--extra-vis", default=False, dest='extra_vis',
action='store_true',
help='create csv with normalized RPKM values (log10) and plot heatmap using normalized values')
self.args = self.p.parse_args()
def set_output_dir(self):
if self.args.o:
# create output dir and create inner stats and index dir
self.stats_dir_path = os.path.join(self.args.o, "stats", self.args.n)
self.index_dir_path = os.path.join(self.args.o, "index", self.args.n)
self.bam_dir_path = os.path.join(self.args.o, "bam", self.args.n)
# make output (if it does not exist)
if not os.path.isdir(self.args.o):
subprocess.run("mkdir " + self.args.o, shell=True)
# make stats, index, and bam dir
if not os.path.isdir(os.path.join(self.args.o, "stats")):
subprocess.run("mkdir " + os.path.join(self.args.o, "stats"), shell=True)
if not os.path.isdir(os.path.join(self.args.o, "index")):
subprocess.run("mkdir " + os.path.join(self.args.o, "index"), shell=True)
if not os.path.isdir(os.path.join(self.args.o, "bam")):
subprocess.run("mkdir " + os.path.join(self.args.o, "bam"), shell=True)
# make project name specific dirs
if not os.path.isdir(self.stats_dir_path):
subprocess.run("mkdir " + self.stats_dir_path, shell=True)
if not os.path.isdir(self.index_dir_path):
subprocess.run("mkdir " + self.index_dir_path, shell=True)
if not os.path.isdir(self.bam_dir_path):
subprocess.run("mkdir " + self.bam_dir_path, shell=True)
if __name__ == "__main__":
main() | src/rrap/controller.py | import os
import argparse
import subprocess
from pathlib import Path
from rrap import concatenator
from rrap import indexer
from rrap import read_recruiter
from rrap import visualizer
def main():
c = Controller()
c.run()
class Controller:
def __init__(self):
self.p = argparse.ArgumentParser(prog="RRAP",
description="Run read recruitment on a set of cleaned fna files")
# argument groups
self.inputs = None
self.outputs = None
self.optional = None
self.subcommands = None
self.arg_groups = []
# args
self.args = None
# pipes
self.concatenator = None
self.indexer = None
self.read_recruiter = None
self.visualizer = None
# intermediate products
self.cat_file_path = None
self.index_dir_path = None
self.rpkm_heater_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rpkm_heater.py")
self.stats_dir_path = None
def run(self):
self.add_arguments()
print("---------making output dir if needed-------------")
self.set_output_dir()
self.cat_file_name = os.path.join(self.args.o, 'allgenomes_cat_{0}.fna'.format(self.args.n))
if not self.args.index_pass:
if self.args.crg:
self.cat_file_path = self.args.crg
else:
print("---------concatenating reference genomes-------------")
self.concatenator = concatenator.Concatenator(self.args)
self.cat_file_path = self.concatenator.concatenate()
print("---------indexing reference genomes-------------")
self.indexer = indexer.Indexer(self.args, os.path.join(self.index_dir_path, "val"), self.cat_file_path)
self.indexer.index()
else:
print("---------skipped indexing reference genomes-------------")
if not self.args.rr_pass:
print("---------read recruitment and data transform-------------")
self.read_recruiter = read_recruiter.ReadRecruiter(self.args, os.path.join(self.index_dir_path, "val"),
self.cat_file_path, self.stats_dir_path,
self.bam_dir_path)
self.read_recruiter.read_recruit()
else:
print("---------skipped read recruitment and data transform-------------")
if not self.args.vis_pass:
print("---------visualization-------------")
self.visualizer = visualizer.Visualizer(self.args, self.rpkm_heater_path, self.stats_dir_path)
self.visualizer.calculate_rpkm()
if self.args.extra_vis:
self.visualizer.plot_heatmaps()
else:
print("---------skipped visualization-------------")
def add_arguments(self):
# TODO specify argument groups
self.inputs = self.p.add_argument_group("## input arguments")
self.outputs = self.p.add_argument_group("## output arguments")
self.options = self.p.add_argument_group("## options")
self.arg_groups.extend([self.inputs, self.outputs, self.optional])
# Add the arguments
self.inputs.add_argument('-i', help='text file of all dir paths that contain cleaned metaG fna files',
required=True)
self.inputs.add_argument('-crg', help=' path for concatenated reference genome fa file',
required=False)
self.inputs.add_argument('-rg', help='input directory for reference genomes',
required=True)
self.outputs.add_argument('-o', help='output directory path',
required=True)
self.inputs.add_argument('-n', help='name of the project', required=True, metavar='project name')
self.inputs.add_argument('-sort_gen', help='txt file of sorted genomes (if --extra-vis flag is used)', required=False)
self.inputs.add_argument('-sort_samples', help='txt file of sorted samples (if --extra-vis flag is used)', required=False)
self.inputs.add_argument("--threads", help='number of available threads', required=False)
self.inputs.add_argument("-suffix", default="_pass_1.fastq",
help="everything in metaG file name that is after the acc for the forward (R1) read files \n"
"e.g. (-QUALITY_PASSED_R1.fastq for <sample_acc>-QUALITY_PASSED_R1.fastq) \n"
"Otherwise, RRAP assumes that the forward pass file name is formatted as <acc>_pass_1.fastq"
"NOTE: suffixes that contain a dash must specify '--' as an escape character e.g. '-suffix \"-- -QUALITY_PASSED_R1.fastq\"'")
# specify optional args
self.options.add_argument("--merge-contigs", default=False, dest='contig_merge',
action='store_true', help="Concatenate contigs under individual organisms")
self.options.add_argument("--skip-indexing", default=False, dest='index_pass',
action='store_true',
help='Specify if the indexing step has already been completed and can be skipped. \
If this flag is used, please check that bowtie2 index files exist e.g. \
<output_dir_path>/index_dir/<project_name>.x.bt2')
self.options.add_argument("--skip-rr", default=False, dest='rr_pass',
action='store_true',
help='Specify if the read recruitment step has already been completed and can be' \
'skipped. If this flag is used, please check that read recruitment files exist' \
'e.g. <output_dir_path>/')
self.options.add_argument("--skip-vis", default=False, dest='vis_pass',
action='store_true',
help='Specify if the visualization step can be skipped')
self.options.add_argument("--extra-vis", default=False, dest='extra_vis',
action='store_true',
help='create csv with normalized RPKM values (log10) and plot heatmap using normalized values')
self.args = self.p.parse_args()
def set_output_dir(self):
if self.args.o:
# create output dir and create inner stats and index dir
self.stats_dir_path = os.path.join(self.args.o, "stats", self.args.n)
self.index_dir_path = os.path.join(self.args.o, "index", self.args.n)
self.bam_dir_path = os.path.join(self.args.o, "bam", self.args.n)
# make output (if it does not exist)
if not os.path.isdir(self.args.o):
subprocess.run("mkdir " + self.args.o, shell=True)
# make stats, index, and bam dir
if not os.path.isdir(os.path.join(self.args.o, "stats")):
subprocess.run("mkdir " + os.path.join(self.args.o, "stats"), shell=True)
if not os.path.isdir(os.path.join(self.args.o, "index")):
subprocess.run("mkdir " + os.path.join(self.args.o, "index"), shell=True)
if not os.path.isdir(os.path.join(self.args.o, "bam")):
subprocess.run("mkdir " + os.path.join(self.args.o, "bam"), shell=True)
# make project name specific dirs
if not os.path.isdir(self.stats_dir_path):
subprocess.run("mkdir " + self.stats_dir_path, shell=True)
if not os.path.isdir(self.index_dir_path):
subprocess.run("mkdir " + self.index_dir_path, shell=True)
if not os.path.isdir(self.bam_dir_path):
subprocess.run("mkdir " + self.bam_dir_path, shell=True)
if __name__ == "__main__":
main() | 0.257952 | 0.099865 |
from RecebeDados import RecebeDados;
from Sistema import Sistema;
from Problema import Problema;
from SaidaDados import SaidaDados;
from SaidaNewave import SaidaNewave;
from contextlib import suppress;
from coopr.pyomo import *;
from pyomo.environ import *;
from pyomo.opt import *;
import os, jsonpickle;
class Control:
def __init__(self, plan_dados, path, time):
# carrega a planilha e recebe o caminho dela
self.recebe_dados = RecebeDados(plan_dados);
self.caminho = path;
self.planilha = plan_dados;
self.start = time;
# carrega as configuracoes iniciais da planilha
print("Carregando Dados");
self.carregaInicio();
# inicializa o sistema
self.sin = Sistema(self.recebe_dados, self.tipoCombHidroEol);
print("Carregando Problema");
self.imprimeSeriesHidro();
# cria o problema passando os parametros do carregaInicio
self.problema = Problema(self.recebe_dados, self.sin, self.isRestPotHabilitada, self.isRestExpJanHabilitada, self.isPerpetHabilitada, self.fatorCarga, self.anoValidadeTemp, self.fatorValidadeTemp, self.isIntercambLimitado, self.subsFic);
# habilita o cplex
optsolver = SolverFactory("cplex", executable= "/opt/ibm/ILOG/CPLEX_Enterprise_Server129/CPLEX_Studio/cplex/bin/x86-64_linux/cplex");
print ("Modelo Criado");
self.problema.modelo.preprocess();
print ("Pre-process executado");
# configuracoes do solver
optsolver.options['mipgap'] = 0.005;
optsolver.options['mip_strategy_startalgorithm'] = 4;
optsolver.options['lpmethod'] = 4;
# congiguracao para manter reprodutibilidade entre casos
optsolver.options['parallel'] = 1;
# configuracao tentar evitar erros de memoria
optsolver.options['mip_strategy_file'] = 3;
optsolver.options['emphasis_memory'] = 'y';
optsolver.options['workmem'] = 12048;
print("Executando o CPLEX");
results = optsolver.solve(self.problema.modelo, load_solutions=True);#symbolic_solver_labels=True, tee=True);
print("Impressão de Resultados");
# escreve resultados em um txt
with open(self.caminho + "resultado.txt", "w") as saidaResul:
results.write(ostream=saidaResul);
# inicializa o objeto de saida de dados
self.saida_dados = SaidaDados(self.sin, self.problema, self.caminho, self.planilha, self.pastaCod, self.nomeSubs);
# inicializa o objeto de saida para o newave
self.saida_newave = SaidaNewave(self.recebe_dados, self.sin, self.problema, self.caminho, self.numSubs, self.subsNFic, self.subsFic, self.nomeSubs);
# relaxa o problema
self.problema.relaxar();
# chama o metodo para limpar os duais da planilha
self.saida_dados.limparDuais();
# faz a preparacao e impressao dos duais escolhidos atraves da aba inicial na planilha
# a letra passada como parametro eh um indicativo de qual dual deve ser impresso
if self.isImpresso[0]:
print("Resolvendo problema relaxado para Dual de Energia");
self.problema.prepararDualEnergia();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("E");
if self.isImpresso[1]:
print("Resolvendo problema relaxado para Dual de Potencia");
self.problema.prepararDualPotencia();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("P");
if self.isImpresso[2]:
print("Resolvendo problema relaxado para Dual Duplo");
self.problema.prepararDualDuplo();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("D");
self.saida_dados.imprimeLog(tempo_inicial = self.start);
return;
def carregaInicio(self):
# pega o numero de subsistemas na planilha geral
self.recebe_dados.defineAba("GERAL");
self.nsis = int(self.recebe_dados.pegaEscalar("G10"));
# verifica os parametros de impressao na aba inicial da planilha
# tem que vir antes da criacao do problema pois na aba estao contidos os fatores de carga
self.recebe_dados.defineAba("Inicial");
# limpa o diretorio Temp para evitar problemas de memoria se o flag estiver ativo
if (self.recebe_dados.pegaEscalar("O10")==1):
print("Limpando arquivos temporarios")
pastaTemp = "C:\\Users\\" + str(os.getlogin()) + "\\AppData\\Local\\Temp";
for filename in os.listdir(pastaTemp):
file_path = os.path.join(pastaTemp, filename);
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path);
elif os.path.isdir(file_path):
shutil.rmtree(file_path, ignore_errors=True);
except:
print("Um ou mais arquivos temporarios nao puderam ser excluidos");
# declara as variaveis que receberao a informacao das checkboxes e dos fatores na planilha
self.isImpresso = [False for i in range(0,3)];
self.isRestPotHabilitada = False;
self.isExpJsonHabilitada = False;
self.isRestExpJanHabilitada = False;
self.isPerpetHabilitada = False;
self.isIntercambLimitado = False;
self.tipoCombHidroEol = "";
self.fatorCarga = [];
self.subsNFic = [];
self.subsFic = [];
self.nomeSubs = [];
self.numSubs = [];
self.anoValidadeTemp = 0;
self.fatorValidadeTemp = [];
#le a pasta na qual esta o codigo fonte
self.pastaCod = str(self.recebe_dados.pegaEscalar("G7"));
# faz a verificacao dos valores das celulas de fato. No excel, se for verdadeiro (checked), ele retorna o numero 1
for i in range(0,3):
if (self.recebe_dados.pegaEscalar("O4",lin_offset=i)==1):
# se o valor da celula for true, a posicao da lista passa a ser true
self.isImpresso[i] = True;
# verifica se a restricao de potencia esta habilitada
if (self.recebe_dados.pegaEscalar("O7")==1):
# se o valor da celula for true, o parametro deve indicar que a restricao de potencia esta habilitada
self.isRestPotHabilitada = True;
# verifica se a opcao de exportar o objeto em json esta habilitada
if (self.recebe_dados.pegaEscalar("O8")==1):
# se o valor da celula for true, o parametro deve indicar que o json deve ser criado
self.isExpJsonHabilitada = True;
# verifica se a opcao de expansao apenas em janeiro esta habilitada
if (self.recebe_dados.pegaEscalar("O3")==1):
# se o valor da celula for true, o parametro deve indicar que a restricao de expansao somente em janeiro esta habilitada
self.isRestExpJanHabilitada = True;
# verifica se a opcao de perpetuidade esta habilitada
if (self.recebe_dados.pegaEscalar("O2")==1):
# se o valor da celula for true, o parametro deve indicar que o calculo da FO com perpetuidade esta habilitado
self.isPerpetHabilitada = True;
# importa da aba Inicial os valores dos fatores de carga dos subsistemas
self.fatorCarga = self.recebe_dados.pegaVetor("A12", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# importa da aba Inicial os numeros subsistemas
self.numSubs = self.recebe_dados.pegaVetor("A18", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# importa da aba Inicial os nomes dos subsistemas
self.nomeSubs = self.recebe_dados.pegaVetor("A19", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# cria vetor de subs nao ficticios
# se o numero do subs é maior que 99 é um subs ficticio
for subs in range(self.nsis):
if self.numSubs[subs] < 100:
self.subsNFic.append(self.numSubs[subs]);
else:
self.subsFic.append(subs);
# verifica se o usuario deseja usar todas as combinacoes de series hidrolicas e eolicas ou combinacoes intercaladas
if (self.recebe_dados.pegaEscalar("O9")==1):
self.tipoCombHidroEol = "completa";
elif (self.recebe_dados.pegaEscalar("O10")==1):
self.tipoCombHidroEol = "intercalada";
else:
print("Forma de incorporacao das series eolicas nao escolhida.");
# verifica se o usuario deseja usar o limite de intercambio
if (self.recebe_dados.pegaEscalar("O22")==1):
self.isIntercambLimitado = True;
# importa da aba inicial o ano de entrada da restricao de validade temporal
self.anoValidadeTemp = self.recebe_dados.pegaEscalar("D22");
# verifica se o usuario deseja usar a porcentagem de independencia da transmissao nos subsistemas ou nao
if (self.recebe_dados.pegaEscalar("O23")==0):
self.fatorValidadeTemp = [0 for isis in range(0, self.nsis)];
else:
self.fatorValidadeTemp = self.recebe_dados.pegaVetor("A25", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
return;
def exportaObjeto(self):
# exporta o objeto do sistema do python pro Json
objSistema = jsonpickle.encode(self.sin);
saidaResul = open(self.caminho + "objetoSistema.json", "w");
saidaResul.write(str(objSistema));
# fecha o primeiro arquivo
saidaResul.close();
# exporta o objeto do modelo do python pro Json
objModelo = jsonpickle.encode(self.problema);
saidaResul = open(self.caminho + "objetoModelo.json", "w");
saidaResul.write(str(objModelo));
# fecha o segundo arquivo
saidaResul.close();
return;
def importaObjeto(self, arqObjeto):
# arqObjeto se refere ao arquivo que contem o objeto a ser importado do json pro python
arquivo = open(self.caminho + arqObjeto);
json_str = arquivo.read();
restored_obj = jsonpickle.decode(json_str);
list_objects = [restored_obj];
print ("list_objects: ", list_objects);
return;
def imprimeSeriesHidro(self):
sin = self.sin;
# abre os arquivos
saidaEner = open(self.caminho + "serieHidro.txt", "w");
saidaPot = open(self.caminho + "pdispHidro.txt", "w");
# percorre os cenarios
for icen in range(sin.numHidros):
# percorre primeiramente os projetos
for isis in range(0,14):
# imprime o nome da usina
saidaEner.write(str(icen) + "," + str(isis));
saidaPot.write(str(icen) + "," + str(isis));
# percorre os periodos
for iper in range(sin.numMeses):
saidaEner.write("," + str(sin.subsistemas[isis].hidroExTotal[icen][iper]));
saidaPot.write("," + str(sin.subsistemas[isis].potDispExTotal[icen][iper]));
# proxima linha
saidaEner.write("\n");
saidaPot.write("\n");
# fecha o arquivo
saidaEner.close();
saidaPot.close();
return; | Control.py | from RecebeDados import RecebeDados;
from Sistema import Sistema;
from Problema import Problema;
from SaidaDados import SaidaDados;
from SaidaNewave import SaidaNewave;
from contextlib import suppress;
from coopr.pyomo import *;
from pyomo.environ import *;
from pyomo.opt import *;
import os, jsonpickle;
class Control:
def __init__(self, plan_dados, path, time):
# carrega a planilha e recebe o caminho dela
self.recebe_dados = RecebeDados(plan_dados);
self.caminho = path;
self.planilha = plan_dados;
self.start = time;
# carrega as configuracoes iniciais da planilha
print("Carregando Dados");
self.carregaInicio();
# inicializa o sistema
self.sin = Sistema(self.recebe_dados, self.tipoCombHidroEol);
print("Carregando Problema");
self.imprimeSeriesHidro();
# cria o problema passando os parametros do carregaInicio
self.problema = Problema(self.recebe_dados, self.sin, self.isRestPotHabilitada, self.isRestExpJanHabilitada, self.isPerpetHabilitada, self.fatorCarga, self.anoValidadeTemp, self.fatorValidadeTemp, self.isIntercambLimitado, self.subsFic);
# habilita o cplex
optsolver = SolverFactory("cplex", executable= "/opt/ibm/ILOG/CPLEX_Enterprise_Server129/CPLEX_Studio/cplex/bin/x86-64_linux/cplex");
print ("Modelo Criado");
self.problema.modelo.preprocess();
print ("Pre-process executado");
# configuracoes do solver
optsolver.options['mipgap'] = 0.005;
optsolver.options['mip_strategy_startalgorithm'] = 4;
optsolver.options['lpmethod'] = 4;
# congiguracao para manter reprodutibilidade entre casos
optsolver.options['parallel'] = 1;
# configuracao tentar evitar erros de memoria
optsolver.options['mip_strategy_file'] = 3;
optsolver.options['emphasis_memory'] = 'y';
optsolver.options['workmem'] = 12048;
print("Executando o CPLEX");
results = optsolver.solve(self.problema.modelo, load_solutions=True);#symbolic_solver_labels=True, tee=True);
print("Impressão de Resultados");
# escreve resultados em um txt
with open(self.caminho + "resultado.txt", "w") as saidaResul:
results.write(ostream=saidaResul);
# inicializa o objeto de saida de dados
self.saida_dados = SaidaDados(self.sin, self.problema, self.caminho, self.planilha, self.pastaCod, self.nomeSubs);
# inicializa o objeto de saida para o newave
self.saida_newave = SaidaNewave(self.recebe_dados, self.sin, self.problema, self.caminho, self.numSubs, self.subsNFic, self.subsFic, self.nomeSubs);
# relaxa o problema
self.problema.relaxar();
# chama o metodo para limpar os duais da planilha
self.saida_dados.limparDuais();
# faz a preparacao e impressao dos duais escolhidos atraves da aba inicial na planilha
# a letra passada como parametro eh um indicativo de qual dual deve ser impresso
if self.isImpresso[0]:
print("Resolvendo problema relaxado para Dual de Energia");
self.problema.prepararDualEnergia();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("E");
if self.isImpresso[1]:
print("Resolvendo problema relaxado para Dual de Potencia");
self.problema.prepararDualPotencia();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("P");
if self.isImpresso[2]:
print("Resolvendo problema relaxado para Dual Duplo");
self.problema.prepararDualDuplo();
results = optsolver.solve(self.problema.modelo, load_solutions=True, warmstart=True);#, symbolic_solver_labels=True, tee=True);
self.saida_dados.imprimeDuais("D");
self.saida_dados.imprimeLog(tempo_inicial = self.start);
return;
def carregaInicio(self):
# pega o numero de subsistemas na planilha geral
self.recebe_dados.defineAba("GERAL");
self.nsis = int(self.recebe_dados.pegaEscalar("G10"));
# verifica os parametros de impressao na aba inicial da planilha
# tem que vir antes da criacao do problema pois na aba estao contidos os fatores de carga
self.recebe_dados.defineAba("Inicial");
# limpa o diretorio Temp para evitar problemas de memoria se o flag estiver ativo
if (self.recebe_dados.pegaEscalar("O10")==1):
print("Limpando arquivos temporarios")
pastaTemp = "C:\\Users\\" + str(os.getlogin()) + "\\AppData\\Local\\Temp";
for filename in os.listdir(pastaTemp):
file_path = os.path.join(pastaTemp, filename);
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path);
elif os.path.isdir(file_path):
shutil.rmtree(file_path, ignore_errors=True);
except:
print("Um ou mais arquivos temporarios nao puderam ser excluidos");
# declara as variaveis que receberao a informacao das checkboxes e dos fatores na planilha
self.isImpresso = [False for i in range(0,3)];
self.isRestPotHabilitada = False;
self.isExpJsonHabilitada = False;
self.isRestExpJanHabilitada = False;
self.isPerpetHabilitada = False;
self.isIntercambLimitado = False;
self.tipoCombHidroEol = "";
self.fatorCarga = [];
self.subsNFic = [];
self.subsFic = [];
self.nomeSubs = [];
self.numSubs = [];
self.anoValidadeTemp = 0;
self.fatorValidadeTemp = [];
#le a pasta na qual esta o codigo fonte
self.pastaCod = str(self.recebe_dados.pegaEscalar("G7"));
# faz a verificacao dos valores das celulas de fato. No excel, se for verdadeiro (checked), ele retorna o numero 1
for i in range(0,3):
if (self.recebe_dados.pegaEscalar("O4",lin_offset=i)==1):
# se o valor da celula for true, a posicao da lista passa a ser true
self.isImpresso[i] = True;
# verifica se a restricao de potencia esta habilitada
if (self.recebe_dados.pegaEscalar("O7")==1):
# se o valor da celula for true, o parametro deve indicar que a restricao de potencia esta habilitada
self.isRestPotHabilitada = True;
# verifica se a opcao de exportar o objeto em json esta habilitada
if (self.recebe_dados.pegaEscalar("O8")==1):
# se o valor da celula for true, o parametro deve indicar que o json deve ser criado
self.isExpJsonHabilitada = True;
# verifica se a opcao de expansao apenas em janeiro esta habilitada
if (self.recebe_dados.pegaEscalar("O3")==1):
# se o valor da celula for true, o parametro deve indicar que a restricao de expansao somente em janeiro esta habilitada
self.isRestExpJanHabilitada = True;
# verifica se a opcao de perpetuidade esta habilitada
if (self.recebe_dados.pegaEscalar("O2")==1):
# se o valor da celula for true, o parametro deve indicar que o calculo da FO com perpetuidade esta habilitado
self.isPerpetHabilitada = True;
# importa da aba Inicial os valores dos fatores de carga dos subsistemas
self.fatorCarga = self.recebe_dados.pegaVetor("A12", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# importa da aba Inicial os numeros subsistemas
self.numSubs = self.recebe_dados.pegaVetor("A18", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# importa da aba Inicial os nomes dos subsistemas
self.nomeSubs = self.recebe_dados.pegaVetor("A19", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
# cria vetor de subs nao ficticios
# se o numero do subs é maior que 99 é um subs ficticio
for subs in range(self.nsis):
if self.numSubs[subs] < 100:
self.subsNFic.append(self.numSubs[subs]);
else:
self.subsFic.append(subs);
# verifica se o usuario deseja usar todas as combinacoes de series hidrolicas e eolicas ou combinacoes intercaladas
if (self.recebe_dados.pegaEscalar("O9")==1):
self.tipoCombHidroEol = "completa";
elif (self.recebe_dados.pegaEscalar("O10")==1):
self.tipoCombHidroEol = "intercalada";
else:
print("Forma de incorporacao das series eolicas nao escolhida.");
# verifica se o usuario deseja usar o limite de intercambio
if (self.recebe_dados.pegaEscalar("O22")==1):
self.isIntercambLimitado = True;
# importa da aba inicial o ano de entrada da restricao de validade temporal
self.anoValidadeTemp = self.recebe_dados.pegaEscalar("D22");
# verifica se o usuario deseja usar a porcentagem de independencia da transmissao nos subsistemas ou nao
if (self.recebe_dados.pegaEscalar("O23")==0):
self.fatorValidadeTemp = [0 for isis in range(0, self.nsis)];
else:
self.fatorValidadeTemp = self.recebe_dados.pegaVetor("A25", direcao="horizontal", tamanho=self.nsis, lin_offset=0, col_offset=0);
return;
def exportaObjeto(self):
# exporta o objeto do sistema do python pro Json
objSistema = jsonpickle.encode(self.sin);
saidaResul = open(self.caminho + "objetoSistema.json", "w");
saidaResul.write(str(objSistema));
# fecha o primeiro arquivo
saidaResul.close();
# exporta o objeto do modelo do python pro Json
objModelo = jsonpickle.encode(self.problema);
saidaResul = open(self.caminho + "objetoModelo.json", "w");
saidaResul.write(str(objModelo));
# fecha o segundo arquivo
saidaResul.close();
return;
def importaObjeto(self, arqObjeto):
# arqObjeto se refere ao arquivo que contem o objeto a ser importado do json pro python
arquivo = open(self.caminho + arqObjeto);
json_str = arquivo.read();
restored_obj = jsonpickle.decode(json_str);
list_objects = [restored_obj];
print ("list_objects: ", list_objects);
return;
def imprimeSeriesHidro(self):
sin = self.sin;
# abre os arquivos
saidaEner = open(self.caminho + "serieHidro.txt", "w");
saidaPot = open(self.caminho + "pdispHidro.txt", "w");
# percorre os cenarios
for icen in range(sin.numHidros):
# percorre primeiramente os projetos
for isis in range(0,14):
# imprime o nome da usina
saidaEner.write(str(icen) + "," + str(isis));
saidaPot.write(str(icen) + "," + str(isis));
# percorre os periodos
for iper in range(sin.numMeses):
saidaEner.write("," + str(sin.subsistemas[isis].hidroExTotal[icen][iper]));
saidaPot.write("," + str(sin.subsistemas[isis].potDispExTotal[icen][iper]));
# proxima linha
saidaEner.write("\n");
saidaPot.write("\n");
# fecha o arquivo
saidaEner.close();
saidaPot.close();
return; | 0.180287 | 0.198375 |
from Malt.GL import GL
from Malt.GL.Texture import Texture
from Malt.GL.RenderTarget import RenderTarget
from Malt.PipelineNode import PipelineNode
from Malt.PipelineParameters import Parameter, Type
from Malt.Scene import TextureShaderResource
class ScreenPass(PipelineNode):
    """Pipeline node that renders a full-screen shader pass.

    Output textures and the render target are cached on the instance and
    rebuilt only when the pipeline resolution or the node's custom I/O
    configuration changes between executions.
    """

    def __init__(self, pipeline):
        PipelineNode.__init__(self, pipeline)
        # Resolution the cached targets were built for (None until first run).
        self.resolution = None
        # Custom output name -> Texture backing that output.
        self.texture_targets = {}
        self.render_target = None
        # Custom I/O spec the cached targets correspond to.
        self.custom_io = []

    @staticmethod
    def get_pass_type():
        # Identifier the pipeline uses to look up this pass type.
        return 'Screen.SCREEN_SHADER'

    @classmethod
    def reflect_inputs(cls):
        # Fixed (non-custom) inputs this node exposes.
        inputs = {}
        inputs['Layer Only'] = Parameter(True, Type.BOOL)
        inputs['Scene'] = Parameter('Scene', Type.OTHER)
        inputs['Normal Depth'] = Parameter('', Type.TEXTURE)
        inputs['ID'] = Parameter('', Type.TEXTURE)
        return inputs

    def execute(self, parameters):
        """Run the screen shader and publish its custom 'out' textures."""
        inputs = parameters['IN']
        outputs = parameters['OUT']
        material = parameters['PASS_MATERIAL']
        custom_io = parameters['CUSTOM_IO']

        deferred_mode = inputs['Layer Only']
        scene = inputs['Scene']
        t_normal_depth = inputs['Normal Depth']
        t_id = inputs['ID']

        # Collect shader resources from the scene (if any) and expose the
        # optional normal-depth / ID textures under fixed resource names.
        shader_resources = {}
        if scene:
            shader_resources = scene.shader_resources.copy()
        if t_normal_depth:
            shader_resources['IN_NORMAL_DEPTH'] = TextureShaderResource('IN_NORMAL_DEPTH', t_normal_depth)
        if t_id:
            shader_resources['IN_ID'] = TextureShaderResource('IN_ID', t_id)

        # (Re)build the cached textures and render target when the
        # resolution or the custom I/O layout changed since the last run.
        if self.pipeline.resolution != self.resolution or self.custom_io != custom_io:
            self.texture_targets = {}
            for io in custom_io:
                if io['io'] == 'out':
                    if io['type'] == 'Texture':#TODO
                        self.texture_targets[io['name']] = Texture(self.pipeline.resolution, GL.GL_RGBA16F)
            self.render_target = RenderTarget([*self.texture_targets.values()])
            self.resolution = self.pipeline.resolution
            self.custom_io = custom_io

        # Clear every attachment to (0,0,0,0) before drawing.
        self.render_target.clear([(0,0,0,0)]*len(self.texture_targets))

        if material and material.shader and 'SHADER' in material.shader:
            shader = material.shader['SHADER']
            # Bind the custom 'in' textures under their transpiled GLSL names.
            for io in custom_io:
                if io['io'] == 'in':
                    if io['type'] == 'Texture':#TODO
                        from Malt.SourceTranspiler import GLSLTranspiler
                        glsl_name = GLSLTranspiler.custom_io_reference('IN', 'SCREEN_SHADER', io['name'])
                        shader.textures[glsl_name] = inputs[io['name']]
            self.pipeline.common_buffer.bind(shader.uniform_blocks['COMMON_UNIFORMS'])
            for resource in shader_resources.values():
                resource.shader_callback(shader)
            shader.uniforms['RENDER_LAYER_MODE'].set_value(True)
            shader.uniforms['DEFERRED_MODE'].set_value(deferred_mode)
            self.pipeline.draw_screen_pass(shader, self.render_target)

        # Publish the rendered textures as this node's custom outputs.
        for io in custom_io:
            if io['io'] == 'out':
                if io['type'] == 'Texture':#TODO
                    outputs[io['name']] = self.texture_targets[io['name']]
NODE = ScreenPass | Malt/Pipelines/NPR_Pipeline/Nodes/RenderLayer/ScreenPass.py | from Malt.GL import GL
from Malt.GL.Texture import Texture
from Malt.GL.RenderTarget import RenderTarget
from Malt.PipelineNode import PipelineNode
from Malt.PipelineParameters import Parameter, Type
from Malt.Scene import TextureShaderResource
class ScreenPass(PipelineNode):
def __init__(self, pipeline):
PipelineNode.__init__(self, pipeline)
self.resolution = None
self.texture_targets = {}
self.render_target = None
self.custom_io = []
@staticmethod
def get_pass_type():
return 'Screen.SCREEN_SHADER'
@classmethod
def reflect_inputs(cls):
inputs = {}
inputs['Layer Only'] = Parameter(True, Type.BOOL)
inputs['Scene'] = Parameter('Scene', Type.OTHER)
inputs['Normal Depth'] = Parameter('', Type.TEXTURE)
inputs['ID'] = Parameter('', Type.TEXTURE)
return inputs
def execute(self, parameters):
inputs = parameters['IN']
outputs = parameters['OUT']
material = parameters['PASS_MATERIAL']
custom_io = parameters['CUSTOM_IO']
deferred_mode = inputs['Layer Only']
scene = inputs['Scene']
t_normal_depth = inputs['Normal Depth']
t_id = inputs['ID']
shader_resources = {}
if scene:
shader_resources = scene.shader_resources.copy()
if t_normal_depth:
shader_resources['IN_NORMAL_DEPTH'] = TextureShaderResource('IN_NORMAL_DEPTH', t_normal_depth)
if t_id:
shader_resources['IN_ID'] = TextureShaderResource('IN_ID', t_id)
if self.pipeline.resolution != self.resolution or self.custom_io != custom_io:
self.texture_targets = {}
for io in custom_io:
if io['io'] == 'out':
if io['type'] == 'Texture':#TODO
self.texture_targets[io['name']] = Texture(self.pipeline.resolution, GL.GL_RGBA16F)
self.render_target = RenderTarget([*self.texture_targets.values()])
self.resolution = self.pipeline.resolution
self.custom_io = custom_io
self.render_target.clear([(0,0,0,0)]*len(self.texture_targets))
if material and material.shader and 'SHADER' in material.shader:
shader = material.shader['SHADER']
for io in custom_io:
if io['io'] == 'in':
if io['type'] == 'Texture':#TODO
from Malt.SourceTranspiler import GLSLTranspiler
glsl_name = GLSLTranspiler.custom_io_reference('IN', 'SCREEN_SHADER', io['name'])
shader.textures[glsl_name] = inputs[io['name']]
self.pipeline.common_buffer.bind(shader.uniform_blocks['COMMON_UNIFORMS'])
for resource in shader_resources.values():
resource.shader_callback(shader)
shader.uniforms['RENDER_LAYER_MODE'].set_value(True)
shader.uniforms['DEFERRED_MODE'].set_value(deferred_mode)
self.pipeline.draw_screen_pass(shader, self.render_target)
for io in custom_io:
if io['io'] == 'out':
if io['type'] == 'Texture':#TODO
outputs[io['name']] = self.texture_targets[io['name']]
NODE = ScreenPass | 0.404272 | 0.1996 |
from typing import List, Optional, TYPE_CHECKING, TypeVar, Tuple, Deque
from immutables import Map
from ..vm_utils import render_value_as_source, stringify_value
from ..builtin_utils import BuiltinModule, Fail, make_simple, vec_to_stack, stack_to_vec
from ..types import Atom, CallByValue, Code, CodeFlags, Instruction, Put, State, Value, Stack, Scope, Vec
from queue import Queue
import heapq
import gurklang.vm
import threading
# Registry that collects the builtins this file exposes under the
# "threading" namespace.
module = BuiltinModule("threading")
# Short aliases used by builtin type signatures in this module.
T, V, S = Tuple, Value, Stack
Z = TypeVar("Z", bound=Stack)
if TYPE_CHECKING:
    # Queue is only subscriptable for the type checker.
    ThreadQ = Queue[Tuple[int, Stack]]  # type: ignore
else:
    ThreadQ = Queue
def _run_function(
    n: int,
    queue: ThreadQ,
    stack: Stack,
    instructions: List[Instruction],
    name: str,
    source_code: Optional[str]
):
    """Thread target: execute `instructions` on a fresh interpreter.

    A new State seeded with `stack` is built from the global and builtin
    scopes, the code is run, and the resulting stack is reported back on
    `queue` tagged with the slot index `n` so the parent thread can
    reassemble results in submission order.
    """
    code = Code(instructions, None, name=name, source_code=source_code)
    initial = State.make(gurklang.vm.global_scope, gurklang.vm.builtin_scope)
    final = gurklang.vm.call(initial.with_stack(stack), code)
    queue.put((n, final.stack))
@module.register()
def run_concurrently(state: State, fail: Fail):
    """
    Launch a new interpreter and run a function in it
    (functions initial-stacks -- resulting-stacks)
    """
    # Pop the vector of initial stacks and the vector of functions.
    (stack_vec, (fnvec, rest)) = state.infinite_stack()
    stacks = [vec_to_stack(sv, fail) for sv in stack_vec.values]  # type: ignore
    result_queue: ThreadQ = Queue()
    threads: List[threading.Thread] = []
    # One worker thread per (stack, function) pair; each posts its final
    # stack to result_queue tagged with its index.
    for i, (stack, fn) in enumerate(zip(stacks, fnvec.values)):  # type: ignore
        thread = threading.Thread(
            name=f"My Thread {i}",
            target=_run_function,
            args=(i, result_queue, stack, list(fn.instructions), fn.name, fn.source_code)  # type: ignore
        )
        thread.start()
        threads.append(thread)
    # Collect one result per thread (in completion order) and slot each
    # by its index so output order matches input order.
    # NOTE(review): if a worker raises before posting, this get() blocks
    # forever — confirm _run_function cannot fail, or add a timeout.
    results: List[Stack] = [None] * len(threads)  # type: ignore
    for _ in threads:
        i, stack = result_queue.get()
        results[i] = stack
    for thread in threads:
        thread.join()
    return (
        state
        .with_stack(rest)
        .push(Vec([stack_to_vec(stack) for stack in results]))
    )
from immutables import Map
from ..vm_utils import render_value_as_source, stringify_value
from ..builtin_utils import BuiltinModule, Fail, make_simple, vec_to_stack, stack_to_vec
from ..types import Atom, CallByValue, Code, CodeFlags, Instruction, Put, State, Value, Stack, Scope, Vec
from queue import Queue
import heapq
import gurklang.vm
import threading
module = BuiltinModule("threading")
T, V, S = Tuple, Value, Stack
Z = TypeVar("Z", bound=Stack)
if TYPE_CHECKING:
ThreadQ = Queue[Tuple[int, Stack]] # type: ignore
else:
ThreadQ = Queue
def _run_function(
n: int,
queue: ThreadQ,
stack: Stack,
instructions: List[Instruction],
name: str,
source_code: Optional[str]
):
begin_state = State.make(gurklang.vm.global_scope, gurklang.vm.builtin_scope).with_stack(stack)
end_state = gurklang.vm.call(
begin_state,
Code(instructions, None, name=name, source_code=source_code)
)
queue.put((n, end_state.stack))
@module.register()
def run_concurrently(state: State, fail: Fail):
"""
Launch a new interpreter and run a function in it
(functions intial-stacks -- resulting-stacks)
"""
(stack_vec, (fnvec, rest)) = state.infinite_stack()
stacks = [vec_to_stack(sv, fail) for sv in stack_vec.values] # type: ignore
result_queue: ThreadQ = Queue()
threads: List[threading.Thread] = []
for i, (stack, fn) in enumerate(zip(stacks, fnvec.values)): # type: ignore
thread = threading.Thread(
name=f"My Thread {i}",
target=_run_function,
args=(i, result_queue, stack, list(fn.instructions), fn.name, fn.source_code) # type: ignore
)
thread.start()
threads.append(thread)
results: List[Stack] = [None] * len(threads) # type: ignore
for _ in threads:
i, stack = result_queue.get()
results[i] = stack
for thread in threads:
thread.join()
return (
state
.with_stack(rest)
.push(Vec([stack_to_vec(stack) for stack in results]))
) | 0.72331 | 0.265755 |