hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7217bf8d6fabaf470f63ef2822e2cba3024153c | 7,292 | py | Python | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 2 | 2022-02-14T09:51:39.000Z | 2022-02-14T13:27:49.000Z | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/audio/fuss.py | shubhamkumaR630/datasets | fe9ee91849cefed0953141ea3588f73b7def78fd | [
"Apache-2.0"
] | 1 | 2020-12-13T22:11:33.000Z | 2020-12-13T22:11:33.000Z | # coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FUSS dataset."""
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""\
@inproceedings{wisdom2020fuss,
title = {What's All the {FUSS} About Free Universal Sound Separation Data?},
author = {Scott Wisdom and Hakan Erdogan and Daniel P. W. Ellis and Romain Serizel and Nicolas Turpault and Eduardo Fonseca and Justin Salamon and Prem Seetharaman and John R. Hershey},
year = {2020},
url = {https://arxiv.org/abs/2011.00803},
}
@inproceedings{fonseca2020fsd50k,
author = {Eduardo Fonseca and Xavier Favory and Jordi Pons and Frederic Font Corbera and Xavier Serra},
title = {{FSD}50k: an open dataset of human-labeled sound events},
year = {2020},
url = {https://arxiv.org/abs/2010.00475},
}
"""
_DESCRIPTION = """\
The Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary
sound mixtures and source-level references, for use in experiments on arbitrary
sound separation.
This is the official sound separation data for the DCASE2020 Challenge Task 4:
Sound Event Detection and Separation in Domestic Environments.
Overview: FUSS audio data is sourced from a pre-release of Freesound dataset
known as (FSD50k), a sound event dataset composed of Freesound content annotated
with labels from the AudioSet Ontology. Using the FSD50K labels, these source
files have been screened such that they likely only contain a single type of
sound. Labels are not provided for these source files, and are not considered
part of the challenge. For the purpose of the DCASE Task4 Sound Separation and
Event Detection challenge, systems should not use FSD50K labels, even though
they may become available upon FSD50K release.
To create mixtures, 10 second clips of sources are convolved with simulated room
impulse responses and added together. Each 10 second mixture contains between
1 and 4 sources. Source files longer than 10 seconds are considered "background"
sources. Every mixture contains one background source, which is active for the
entire duration. We provide: a software recipe to create the dataset, the room
impulse responses, and the original source audio.
"""
_URL = "https://github.com/google-research/sound-separation/blob/master/datasets/fuss/FUSS_license_doc/README.md"
_DL_METADATA = {
"reverberant":
("https://zenodo.org/record/3743844/files/FUSS_ssdata_reverb.tar.gz",
"ssdata_reverb"),
"unprocessed":
("https://zenodo.org/record/3743844/files/FUSS_ssdata.tar.gz", "ssdata"
),
}
class Fuss(tfds.core.GeneratorBasedBuilder):
  """FUSS: Free Universal Sound Separation dataset.

  Each example is a 10 s, 16 kHz mixture of 1-4 sources, with per-source
  reference audio, segment annotations, and the raw JAMS metadata file.
  """

  BUILDER_CONFIGS = [
      tfds.core.BuilderConfig(
          name="reverberant",
          description="Default reverberated audio.",
          version=tfds.core.Version("1.2.0")),
      tfds.core.BuilderConfig(
          name="unprocessed",
          description="Unprocessed audio without additional reverberation.",
          version=tfds.core.Version("1.2.0")),
  ]

  def _info(self):
    """Returns `tfds.core.DatasetInfo` describing features and supervised keys."""
    source_labels = ["background0", "foreground0", "foreground1", "foreground2"]
    return tfds.core.DatasetInfo(
        builder=self,
        description=_DESCRIPTION,
        features=tfds.features.FeaturesDict({
            # 10 seconds of 16 kHz mono audio -> 160000 int16 samples.
            "mixture_audio":
                tfds.features.Audio(
                    file_format="wav",
                    shape=(160000,),
                    sample_rate=16000,
                    dtype=tf.int16),
            "sources":
                tfds.features.Sequence({
                    "audio":
                        tfds.features.Audio(
                            file_format="wav",
                            shape=(160000,),
                            sample_rate=16000,
                            dtype=tf.int16),
                    "label":
                        tfds.features.ClassLabel(names=source_labels),
                }),
            # Start/end times (in seconds) of labeled activity in the mixture.
            "segments":
                tfds.features.Sequence({
                    "start_time_seconds": tf.float32,
                    "end_time_seconds": tf.float32,
                    "label": tf.string
                }),
            # Raw contents of the per-example JAMS annotation file.
            "jams":
                tf.string,
            "id":
                tf.string,
        }),
        supervised_keys=("mixture_audio", "sources"),
        homepage=_URL,
        citation=_CITATION,
    )

  def _split_generators(self, dl_manager):
    """Downloads/extracts the archive for this config and defines the splits."""
    url, extracted_dirname = _DL_METADATA[self.builder_config.name]
    base_dir = dl_manager.download_and_extract(url)
    splits = []
    # Note: the on-disk directory for the TEST split is named "eval".
    for split_name, split_dir in [(tfds.Split.TRAIN, "train"),
                                  (tfds.Split.VALIDATION, "validation"),
                                  (tfds.Split.TEST, "eval")]:
      splits.append(
          tfds.core.SplitGenerator(
              name=split_name,
              gen_kwargs={
                  "base_dir": os.path.join(base_dir, extracted_dirname),
                  "split": split_dir,
              }))
    return splits

  def _parse_segments(self, path):
    """Parses a segments file into a list of segment feature dicts.

    Each line is expected to hold "<start> <end> <label>"; lines that do
    not split into exactly three fields are skipped.  Returns an empty
    list when the file does not exist.
    """
    segments = []
    if not tf.io.gfile.exists(path):
      # Some segments files are missing in the "unprocessed" set.
      logging.info("Missing segments file: %s", path)
      return segments
    with tf.io.gfile.GFile(path) as f:
      for l in f:
        try:
          start, end, label = l.split()
        except ValueError:
          # Malformed line (wrong number of fields): skip it.
          continue
        segments.append({
            "start_time_seconds": float(start),
            "end_time_seconds": float(end),
            "label": label
        })
    return segments

  def _generate_examples(self, base_dir, split):
    """Generates examples for the given split."""
    path = os.path.join(base_dir, "%s_example_list.txt" % split)
    split_dir = os.path.join(base_dir, split)
    with tf.io.gfile.GFile(path) as example_list:
      for line in example_list:
        # First path on the line is the mixture; the rest are its sources.
        paths = line.split()
        key = _basename_without_ext(paths[0])
        sources = []
        for p in paths[1:]:
          sources.append({
              "audio": os.path.join(base_dir, p),
              # Source label is the filename prefix, e.g. "foreground0".
              "label": _basename_without_ext(p).split("_")[0],
          })
        segments = self._parse_segments(os.path.join(split_dir, "%s.txt" % key))
        jams = tf.io.gfile.GFile(os.path.join(split_dir,
                                              "%s.jams" % key)).read()
        example = {
            "mixture_audio": os.path.join(base_dir, paths[0]),
            "sources": sources,
            "segments": segments,
            "jams": jams,
            "id": key,
        }
        yield key, example
def _basename_without_ext(p):
basename, _ = os.path.splitext(os.path.basename(p))
return basename
| 37.782383 | 187 | 0.62932 |
import os
from absl import logging
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_CITATION = r"""\
@inproceedings{wisdom2020fuss,
title = {What's All the {FUSS} About Free Universal Sound Separation Data?},
author = {Scott Wisdom and Hakan Erdogan and Daniel P. W. Ellis and Romain Serizel and Nicolas Turpault and Eduardo Fonseca and Justin Salamon and Prem Seetharaman and John R. Hershey},
year = {2020},
url = {https://arxiv.org/abs/2011.00803},
}
@inproceedings{fonseca2020fsd50k,
author = {Eduardo Fonseca and Xavier Favory and Jordi Pons and Frederic Font Corbera and Xavier Serra},
title = {{FSD}50k: an open dataset of human-labeled sound events},
year = {2020},
url = {https://arxiv.org/abs/2010.00475},
}
"""
_DESCRIPTION = """\
The Free Universal Sound Separation (FUSS) Dataset is a database of arbitrary
sound mixtures and source-level references, for use in experiments on arbitrary
sound separation.
This is the official sound separation data for the DCASE2020 Challenge Task 4:
Sound Event Detection and Separation in Domestic Environments.
Overview: FUSS audio data is sourced from a pre-release of Freesound dataset
known as (FSD50k), a sound event dataset composed of Freesound content annotated
with labels from the AudioSet Ontology. Using the FSD50K labels, these source
files have been screened such that they likely only contain a single type of
sound. Labels are not provided for these source files, and are not considered
part of the challenge. For the purpose of the DCASE Task4 Sound Separation and
Event Detection challenge, systems should not use FSD50K labels, even though
they may become available upon FSD50K release.
To create mixtures, 10 second clips of sources are convolved with simulated room
impulse responses and added together. Each 10 second mixture contains between
1 and 4 sources. Source files longer than 10 seconds are considered "background"
sources. Every mixture contains one background source, which is active for the
entire duration. We provide: a software recipe to create the dataset, the room
impulse responses, and the original source audio.
"""
_URL = "https://github.com/google-research/sound-separation/blob/master/datasets/fuss/FUSS_license_doc/README.md"
_DL_METADATA = {
"reverberant":
("https://zenodo.org/record/3743844/files/FUSS_ssdata_reverb.tar.gz",
"ssdata_reverb"),
"unprocessed":
("https://zenodo.org/record/3743844/files/FUSS_ssdata.tar.gz", "ssdata"
),
}
class Fuss(tfds.core.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
tfds.core.BuilderConfig(
name="reverberant",
description="Default reverberated audio.",
version=tfds.core.Version("1.2.0")),
tfds.core.BuilderConfig(
name="unprocessed",
description="Unprocessed audio without additional reverberation.",
version=tfds.core.Version("1.2.0")),
]
def _info(self):
source_labels = ["background0", "foreground0", "foreground1", "foreground2"]
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
"mixture_audio":
tfds.features.Audio(
file_format="wav",
shape=(160000,),
sample_rate=16000,
dtype=tf.int16),
"sources":
tfds.features.Sequence({
"audio":
tfds.features.Audio(
file_format="wav",
shape=(160000,),
sample_rate=16000,
dtype=tf.int16),
"label":
tfds.features.ClassLabel(names=source_labels),
}),
"segments":
tfds.features.Sequence({
"start_time_seconds": tf.float32,
"end_time_seconds": tf.float32,
"label": tf.string
}),
"jams":
tf.string,
"id":
tf.string,
}),
supervised_keys=("mixture_audio", "sources"),
homepage=_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
url, extracted_dirname = _DL_METADATA[self.builder_config.name]
base_dir = dl_manager.download_and_extract(url)
splits = []
for split_name, split_dir in [(tfds.Split.TRAIN, "train"),
(tfds.Split.VALIDATION, "validation"),
(tfds.Split.TEST, "eval")]:
splits.append(
tfds.core.SplitGenerator(
name=split_name,
gen_kwargs={
"base_dir": os.path.join(base_dir, extracted_dirname),
"split": split_dir,
}))
return splits
def _parse_segments(self, path):
segments = []
if not tf.io.gfile.exists(path):
# Some segments files are missing in the "unprocessed" set.
logging.info("Missing segments file: %s", path)
return segments
with tf.io.gfile.GFile(path) as f:
for l in f:
try:
start, end, label = l.split()
except ValueError:
continue
segments.append({
"start_time_seconds": float(start),
"end_time_seconds": float(end),
"label": label
})
return segments
def _generate_examples(self, base_dir, split):
path = os.path.join(base_dir, "%s_example_list.txt" % split)
split_dir = os.path.join(base_dir, split)
with tf.io.gfile.GFile(path) as example_list:
for line in example_list:
paths = line.split()
key = _basename_without_ext(paths[0])
sources = []
for p in paths[1:]:
sources.append({
"audio": os.path.join(base_dir, p),
"label": _basename_without_ext(p).split("_")[0],
})
segments = self._parse_segments(os.path.join(split_dir, "%s.txt" % key))
jams = tf.io.gfile.GFile(os.path.join(split_dir,
"%s.jams" % key)).read()
example = {
"mixture_audio": os.path.join(base_dir, paths[0]),
"sources": sources,
"segments": segments,
"jams": jams,
"id": key,
}
yield key, example
def _basename_without_ext(p):
basename, _ = os.path.splitext(os.path.basename(p))
return basename
| true | true |
f7217c7974021f0ec405e5dff2a600a77498317d | 538 | py | Python | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
] | null | null | null | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
] | null | null | null | src/svm/get_vocab_dict.py | dimart10/machine-learning | 0f33bef65a9335c0f7fed680f1112419bae8fabc | [
"MIT"
def getVocabDict(reverse=False):
    """
    Read the supplied vocab list text file into a dictionary.

    Each line of the file is "<index> <word>".  The dictionary key is the
    stemmed word and the value is its integer index; if ``reverse`` is
    True, keys and values are switched.
    """
    mapping = {}
    with open("../data/emails/vocab.txt") as vocab_file:
        for entry in vocab_file:
            index, word = entry.split()
            if reverse:
                mapping[int(index)] = word
            else:
                mapping[word] = int(index)
    return mapping
| 31.647059 | 76 | 0.581784 | def getVocabDict(reverse=False):
vocab_dict = {}
with open("../data/emails/vocab.txt") as f:
for line in f:
(val, key) = line.split()
if not reverse:
vocab_dict[key] = int(val)
else:
vocab_dict[int(val)] = key
return vocab_dict
| true | true |
f7217cb6c5888d602826730dbf6b55ce8ad59ff8 | 1,125 | py | Python | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 999 | 2018-07-07T01:36:21.000Z | 2022-03-31T18:25:18.000Z | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 1,681 | 2018-07-19T23:45:31.000Z | 2022-03-31T22:21:07.000Z | clients/python/marquez_client/models.py | aridwiprayogo/marquez | b15e44fb7c2a0efcbe8ee8ce412144ac5ee68e0e | [
"Apache-2.0"
] | 182 | 2018-08-02T11:35:45.000Z | 2022-03-31T07:02:14.000Z | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
class DatasetId:
    """Identifies a dataset by its namespace and name.

    A plain value object: two DatasetIds with equal namespace and name
    compare equal and hash equally, so instances can be used as dict keys
    or set members.
    """

    def __init__(self, namespace: str, name: str):
        self.namespace = namespace
        self.name = name

    def __repr__(self) -> str:
        return f"DatasetId(namespace={self.namespace!r}, name={self.name!r})"

    def __eq__(self, other) -> bool:
        if not isinstance(other, DatasetId):
            return NotImplemented
        return (self.namespace, self.name) == (other.namespace, other.name)

    def __hash__(self) -> int:
        return hash((self.namespace, self.name))
class JobId:
    """Identifies a job by its namespace and name.

    A plain value object: two JobIds with equal namespace and name compare
    equal and hash equally, so instances can be used as dict keys or set
    members.
    """

    def __init__(self, namespace: str, name: str):
        self.namespace = namespace
        self.name = name

    def __repr__(self) -> str:
        return f"JobId(namespace={self.namespace!r}, name={self.name!r})"

    def __eq__(self, other) -> bool:
        if not isinstance(other, JobId):
            return NotImplemented
        return (self.namespace, self.name) == (other.namespace, other.name)

    def __hash__(self) -> int:
        return hash((self.namespace, self.name))
class DatasetType(Enum):
    """Kinds of datasets tracked: database tables and streams."""
    DB_TABLE = "DB_TABLE"
    STREAM = "STREAM"
class JobType(Enum):
    """Execution models a job can have."""
    BATCH = "BATCH"
    STREAM = "STREAM"
    SERVICE = "SERVICE"
class RunState(Enum):
    """Lifecycle states of a job run."""
    NEW = 'NEW'
    RUNNING = 'RUNNING'
    COMPLETED = 'COMPLETED'
    FAILED = 'FAILED'
    ABORTED = 'ABORTED'
| 25 | 74 | 0.688889 |
from enum import Enum
class DatasetId:
def __init__(self, namespace: str, name: str):
self.namespace = namespace
self.name = name
class JobId:
def __init__(self, namespace: str, name: str):
self.namespace = namespace
self.name = name
class DatasetType(Enum):
DB_TABLE = "DB_TABLE"
STREAM = "STREAM"
class JobType(Enum):
BATCH = "BATCH"
STREAM = "STREAM"
SERVICE = "SERVICE"
class RunState(Enum):
NEW = 'NEW'
RUNNING = 'RUNNING'
COMPLETED = 'COMPLETED'
FAILED = 'FAILED'
ABORTED = 'ABORTED'
| true | true |
f7217ef251ef43a682f902818aa9a8aa8f1b0d93 | 2,145 | py | Python | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 33 | 2016-01-11T20:16:17.000Z | 2021-11-23T12:50:29.000Z | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 1,249 | 2015-11-30T16:43:21.000Z | 2022-03-24T13:04:55.000Z | app/s3_client/s3_csv_client.py | alphagov/notify-admin-frontend | 70f2a6a97aefe2432d7a3b54dc1555c030dd3693 | [
"MIT"
] | 36 | 2015-12-02T09:49:26.000Z | 2021-04-10T18:05:41.000Z | import uuid
import botocore
from flask import current_app
from notifications_utils.s3 import s3upload as utils_s3upload
from app.s3_client.s3_logo_client import get_s3_object
# S3 object-key template: service-<service_id>-notify/<upload_id>.csv
FILE_LOCATION_STRUCTURE = 'service-{}-notify/{}.csv'


def get_csv_location(service_id, upload_id, bucket=None):
    """Return the (bucket name, object key) pair for an uploaded CSV.

    Falls back to the app-configured CSV upload bucket when ``bucket``
    is not supplied.
    """
    bucket_name = bucket or current_app.config['CSV_UPLOAD_BUCKET_NAME']
    file_location = FILE_LOCATION_STRUCTURE.format(service_id, upload_id)
    return bucket_name, file_location
def get_csv_upload(service_id, upload_id, bucket=None):
    """Return the S3 object for a previously uploaded CSV file."""
    bucket_name, file_location = get_csv_location(service_id, upload_id, bucket)
    return get_s3_object(bucket_name, file_location)
def s3upload(service_id, filedata, region, bucket=None):
    """Upload CSV data to S3 under a freshly generated upload id.

    Returns the new upload id so callers can locate the object later.
    """
    new_upload_id = str(uuid.uuid4())
    target_bucket, target_key = get_csv_location(service_id, new_upload_id, bucket)
    utils_s3upload(
        filedata=filedata['data'],
        region=region,
        bucket_name=target_bucket,
        file_location=target_key,
    )
    return new_upload_id
def s3download(service_id, upload_id, bucket=None):
    """Fetch a previously uploaded CSV from S3 and return it as text.

    Logs and re-raises any S3 client error (e.g. missing object).
    """
    try:
        s3_object = get_csv_upload(service_id, upload_id, bucket)
        contents = s3_object.get()['Body'].read().decode('utf-8')
    except botocore.exceptions.ClientError as e:
        current_app.logger.error("Unable to download s3 file {}".format(
            FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
        raise e
    return contents
def set_metadata_on_csv_upload(service_id, upload_id, bucket=None, **kwargs):
    """Replace the S3 user metadata on an uploaded CSV with ``kwargs``.

    Implemented as an in-place copy (S3 metadata is immutable otherwise);
    all metadata values are stringified.
    """
    copy_source = '{}/{}'.format(
        *get_csv_location(service_id, upload_id, bucket=bucket))
    new_metadata = {key: str(value) for key, value in kwargs.items()}
    s3_object = get_csv_upload(service_id, upload_id, bucket=bucket)
    s3_object.copy_from(
        CopySource=copy_source,
        ServerSideEncryption='AES256',
        Metadata=new_metadata,
        MetadataDirective='REPLACE',
    )
def get_csv_metadata(service_id, upload_id, bucket=None):
    """Return the S3 user metadata dict stored on an uploaded CSV.

    Logs and re-raises any S3 client error (e.g. missing object).
    """
    try:
        s3_object = get_csv_upload(service_id, upload_id, bucket)
        return s3_object.get()['Metadata']
    except botocore.exceptions.ClientError as e:
        current_app.logger.error("Unable to download s3 file {}".format(
            FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
        raise e
| 31.544118 | 91 | 0.699301 | import uuid
import botocore
from flask import current_app
from notifications_utils.s3 import s3upload as utils_s3upload
from app.s3_client.s3_logo_client import get_s3_object
FILE_LOCATION_STRUCTURE = 'service-{}-notify/{}.csv'
def get_csv_location(service_id, upload_id, bucket=None):
return (
bucket or current_app.config['CSV_UPLOAD_BUCKET_NAME'],
FILE_LOCATION_STRUCTURE.format(service_id, upload_id),
)
def get_csv_upload(service_id, upload_id, bucket=None):
return get_s3_object(*get_csv_location(service_id, upload_id, bucket))
def s3upload(service_id, filedata, region, bucket=None):
upload_id = str(uuid.uuid4())
bucket_name, file_location = get_csv_location(service_id, upload_id, bucket)
utils_s3upload(
filedata=filedata['data'],
region=region,
bucket_name=bucket_name,
file_location=file_location,
)
return upload_id
def s3download(service_id, upload_id, bucket=None):
contents = ''
try:
key = get_csv_upload(service_id, upload_id, bucket)
contents = key.get()['Body'].read().decode('utf-8')
except botocore.exceptions.ClientError as e:
current_app.logger.error("Unable to download s3 file {}".format(
FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
raise e
return contents
def set_metadata_on_csv_upload(service_id, upload_id, bucket=None, **kwargs):
get_csv_upload(
service_id, upload_id, bucket=bucket
).copy_from(
CopySource='{}/{}'.format(*get_csv_location(service_id, upload_id, bucket=bucket)),
ServerSideEncryption='AES256',
Metadata={
key: str(value) for key, value in kwargs.items()
},
MetadataDirective='REPLACE',
)
def get_csv_metadata(service_id, upload_id, bucket=None):
try:
key = get_csv_upload(service_id, upload_id, bucket)
return key.get()['Metadata']
except botocore.exceptions.ClientError as e:
current_app.logger.error("Unable to download s3 file {}".format(
FILE_LOCATION_STRUCTURE.format(service_id, upload_id)))
raise e
| true | true |
f7217f0a995fcc98786c4617f284dd074799a176 | 3,622 | py | Python | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | null | null | null | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | 1 | 2021-06-08T19:43:09.000Z | 2021-06-08T19:43:09.000Z | dfs_search.py | orionoiro/path_searcher | 198888a4570b40812a53e8485387e8cd59fe20ee | [
"MIT"
] | null | null | null | from graph import Digraph, Node, WeightedEdge
def load_map(map_filename):
    """
    Parses the map file and constructs a directed graph.

    Assumes:
        Each entry in the map file consists of four positive integers,
        separated by blank spaces:
            32 76 54 23
        This entry becomes an edge from 32 to 76 with total distance 54,
        of which 23 is outdoors.

    Returns:
        a Digraph representing the map
    """
    g = Digraph()
    with open(map_filename, 'r') as file:
        for line in file.read().split('\n'):
            fields = line.split(' ')
            # Skip blank lines (the original code removed only the first
            # one and crashed on any others).
            if fields == ['']:
                continue
            start = Node(fields[0])
            dest = Node(fields[1])
            for node in (start, dest):
                try:
                    g.add_node(node)
                except ValueError:
                    # Node already present in the graph.
                    pass
            edge = WeightedEdge(start, dest, int(fields[2]), int(fields[3]))
            try:
                g.add_edge(edge)
            except ValueError:
                # Duplicate edge; keep the first occurrence.
                pass
    return g
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
                  best_path):
    """
    Finds the shortest path between buildings via recursive backtracking DFS.

    Args:
        digraph: Digraph representing the map.
        start, end: building identifiers to route between.
        path: three-element list [node names on the current path,
            total distance so far, outdoor distance so far]; mutated in
            place while recursing and restored on backtrack.
        max_dist_outdoors: maximum distance allowed outdoors on the path.
        best_dist: total distance of the best complete path found so far
            (0 means none found yet).
        best_path: node names of the best complete path found so far.

    Returns:
        A tuple with the shortest-path from start to end, represented by
        a list of building numbers and the distance of that path.
        If there exists no path that satisfies max_total_dist and
        max_dist_outdoors constraints, then return None.

    Raises:
        ValueError: if start or end is not a node in the digraph.
    """
    start = Node(start)
    end = Node(end)
    # Record the current node on the in-progress path.
    path[0].append(start.get_name())
    if start not in digraph.nodes or end not in digraph.nodes:
        raise ValueError
    elif start == end:
        # Reached the destination: snapshot the path (copy, since path[0]
        # keeps being mutated by the caller) and its total distance.
        return tuple([path[0].copy(), path[1]])
    else:
        for edge in digraph.edges[start]:
            # Avoid cycles: only visit nodes not already on the current path.
            if edge.get_destination().get_name() not in path[0]:
                # Prune branches already longer (in hops) than the best known path.
                if len(best_path) == 0 or len(path[0]) < len(best_path):
                    if path[2] + edge.get_outdoor_distance() <= max_dist_outdoors:
                        # Tentatively take the edge, recurse, then backtrack
                        # by undoing all three mutations of `path`.
                        path[1] += edge.get_total_distance()
                        path[2] += edge.get_outdoor_distance()
                        next_path = get_best_path(digraph, edge.get_destination(), end, path,
                                                  max_dist_outdoors, best_dist, best_path)
                        path[0].remove(edge.get_destination().get_name())
                        path[1] -= edge.get_total_distance()
                        path[2] -= edge.get_outdoor_distance()
                    else:
                        # Taking this edge would exceed the outdoor budget.
                        continue
                    if next_path is not None:
                        # Keep the shorter of the known-best and the new path.
                        if best_dist == 0 or next_path[1] < best_dist:
                            best_path = next_path[0]
                            best_dist = next_path[1]
        # best_dist == 0 means no complete path to `end` was ever found.
        if best_dist == 0:
            return None
        return tuple([best_path, best_dist])
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
    """
    Finds the shortest path from start to end using a directed depth-first
    search.

    Args:
        digraph: Digraph representing the map.
        start, end: building identifiers to route between.
        max_total_dist: maximum total distance allowed on the path.
        max_dist_outdoors: maximum distance allowed outdoors on the path.

    Returns:
        The shortest-path from start to end, represented by
        a list of building numbers (in strings).

    Raises:
        ValueError: if no path satisfies both the max_total_dist and
            max_dist_outdoors constraints.
    """
    search_result = get_best_path(digraph, start, end, [[], 0, 0],
                                  max_dist_outdoors, 0, [])
    # get_best_path returns None when no path satisfies the outdoor
    # constraint.  Check explicitly instead of catching the TypeError from
    # subscripting None, which also masked genuine TypeErrors.
    if search_result is None or search_result[-1] > max_total_dist:
        raise ValueError("no path found within the distance constraints")
    return search_result[0]
| 32.927273 | 93 | 0.570403 | from graph import Digraph, Node, WeightedEdge
def load_map(map_filename):
g = Digraph()
with open(map_filename, 'r') as file:
read_data = file.read().split('\n')
for elem in read_data:
read_data[read_data.index(elem)] = elem.split(' ')
read_data.remove([''])
for elem in read_data:
start = Node(elem[0])
dest = Node(elem[1])
try:
g.add_node(start)
except ValueError:
pass
try:
g.add_node(dest)
except ValueError:
pass
edge1 = WeightedEdge(start, dest, int(elem[2]), int(elem[3]))
try:
g.add_edge(edge1)
except ValueError:
pass
return g
def get_best_path(digraph, start, end, path, max_dist_outdoors, best_dist,
best_path):
start = Node(start)
end = Node(end)
path[0].append(start.get_name())
if start not in digraph.nodes or end not in digraph.nodes:
raise ValueError
elif start == end:
return tuple([path[0].copy(), path[1]])
else:
for edge in digraph.edges[start]:
if edge.get_destination().get_name() not in path[0]:
if len(best_path) == 0 or len(path[0]) < len(best_path):
if path[2] + edge.get_outdoor_distance() <= max_dist_outdoors:
path[1] += edge.get_total_distance()
path[2] += edge.get_outdoor_distance()
next_path = get_best_path(digraph, edge.get_destination(), end, path,
max_dist_outdoors, best_dist, best_path)
path[0].remove(edge.get_destination().get_name())
path[1] -= edge.get_total_distance()
path[2] -= edge.get_outdoor_distance()
else:
continue
if next_path is not None:
if best_dist == 0 or next_path[1] < best_dist:
best_path = next_path[0]
best_dist = next_path[1]
if best_dist == 0:
return None
return tuple([best_path, best_dist])
def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):
search_result = get_best_path(digraph, start, end, [[], 0, 0], max_dist_outdoors, 0, [])
try:
if search_result[-1] <= max_total_dist:
return search_result[0]
else:
raise ValueError
except TypeError:
raise ValueError
| true | true |
f72180e784ecfee3622da10e4ca8c64c9fb89d32 | 3,450 | py | Python | tests/functional/test_cli.py | garnaat/aws-lambda-builders | 0ce436cacb7e5e756c65cb4fa4d78877ada307e5 | [
"Apache-2.0"
] | 2 | 2020-11-12T22:58:17.000Z | 2021-03-22T16:13:34.000Z | tests/functional/test_cli.py | awood45/aws-lambda-builders | 3744cea731403fc5d5aad36c4f60d9512231fd78 | [
"Apache-2.0"
] | null | null | null | tests/functional/test_cli.py | awood45/aws-lambda-builders | 3744cea731403fc5d5aad36c4f60d9512231fd78 | [
"Apache-2.0"
] | null | null | null |
import json
import os
import shutil
import tempfile
import subprocess
import copy
from unittest import TestCase
from parameterized import parameterized
class TestCliWithHelloWorkflow(TestCase):
    """Functional tests running the CLI end-to-end against the Hello workflow.

    Uses the deprecated-free ``assertEqual`` (the ``assertEquals`` alias was
    removed in Python 3.12).
    """

    HELLO_WORKFLOW_MODULE = "hello_workflow.write_hello"
    TEST_WORKFLOWS_FOLDER = os.path.join(os.path.dirname(__file__), "testdata", "workflows")

    def setUp(self):
        """Creates scratch directories and configures the Hello workflow run."""
        self.source_dir = tempfile.mkdtemp()
        self.artifacts_dir = tempfile.mkdtemp()

        # Capabilities supported by the Hello workflow
        self.language = "test"
        self.dependency_manager = "test"
        self.application_framework = "test"

        # The builder should write a file called hello.txt with contents "Hello World"
        self.expected_filename = os.path.join(self.artifacts_dir, 'hello.txt')
        self.expected_contents = "Hello World"

        self.command_name = "lambda-builders-dev" if os.environ.get("LAMBDA_BUILDERS_DEV") else "lambda-builders"

        # Make sure the test workflow is in PYTHONPATH to be automatically loaded
        self.python_path_list = os.environ.get("PYTHONPATH", '').split(os.pathsep) + [self.TEST_WORKFLOWS_FOLDER]
        self.python_path = os.pathsep.join(filter(bool, self.python_path_list))

    def tearDown(self):
        """Removes the scratch directories created by setUp."""
        shutil.rmtree(self.source_dir)
        shutil.rmtree(self.artifacts_dir)

    @parameterized.expand([
        ("request_through_stdin"),
        ("request_through_argument")
    ])
    def test_run_hello_workflow(self, flavor):
        """Runs the CLI (request via stdin or argv) and validates the response."""
        request_json = json.dumps({
            "jsonschema": "2.0",
            "id": 1234,
            "method": "LambdaBuilder.build",
            "params": {
                "capability": {
                    "language": self.language,
                    "dependency_manager": self.dependency_manager,
                    "application_framework": self.application_framework
                },
                "supported_workflows": [self.HELLO_WORKFLOW_MODULE],
                "source_dir": self.source_dir,
                "artifacts_dir": self.artifacts_dir,
                "scratch_dir": "/ignored",
                "manifest_path": "/ignored",
                "runtime": "ignored",
                "optimizations": {},
                "options": {},
            }
        })

        env = copy.deepcopy(os.environ)
        env["PYTHONPATH"] = self.python_path

        stdout_data = None
        if flavor == "request_through_stdin":
            p = subprocess.Popen([self.command_name], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate(input=request_json.encode('utf-8'))[0]
        elif flavor == "request_through_argument":
            p = subprocess.Popen([self.command_name, request_json], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            stdout_data = p.communicate()[0]
        else:
            raise ValueError("Invalid test flavor")

        # Validate the response object. It should be successful response
        response = json.loads(stdout_data)
        self.assertNotIn('error', response)
        self.assertIn('result', response)
        self.assertEqual(response['result']['artifacts_dir'], self.artifacts_dir)
        self.assertTrue(os.path.exists(self.expected_filename))

        contents = ''
        with open(self.expected_filename, 'r') as fp:
            contents = fp.read()
        self.assertEqual(contents, self.expected_contents)
| 35.9375 | 123 | 0.630145 |
import json
import os
import shutil
import tempfile
import subprocess
import copy
from unittest import TestCase
from parameterized import parameterized
class TestCliWithHelloWorkflow(TestCase):
HELLO_WORKFLOW_MODULE = "hello_workflow.write_hello"
TEST_WORKFLOWS_FOLDER = os.path.join(os.path.dirname(__file__), "testdata", "workflows")
def setUp(self):
self.source_dir = tempfile.mkdtemp()
self.artifacts_dir = tempfile.mkdtemp()
self.language = "test"
self.dependency_manager = "test"
self.application_framework = "test"
self.expected_filename = os.path.join(self.artifacts_dir, 'hello.txt')
self.expected_contents = "Hello World"
self.command_name = "lambda-builders-dev" if os.environ.get("LAMBDA_BUILDERS_DEV") else "lambda-builders"
self.python_path_list = os.environ.get("PYTHONPATH", '').split(os.pathsep) + [self.TEST_WORKFLOWS_FOLDER]
self.python_path = os.pathsep.join(filter(bool, self.python_path_list))
def tearDown(self):
shutil.rmtree(self.source_dir)
shutil.rmtree(self.artifacts_dir)
@parameterized.expand([
("request_through_stdin"),
("request_through_argument")
])
def test_run_hello_workflow(self, flavor):
request_json = json.dumps({
"jsonschema": "2.0",
"id": 1234,
"method": "LambdaBuilder.build",
"params": {
"capability": {
"language": self.language,
"dependency_manager": self.dependency_manager,
"application_framework": self.application_framework
},
"supported_workflows": [self.HELLO_WORKFLOW_MODULE],
"source_dir": self.source_dir,
"artifacts_dir": self.artifacts_dir,
"scratch_dir": "/ignored",
"manifest_path": "/ignored",
"runtime": "ignored",
"optimizations": {},
"options": {},
}
})
env = copy.deepcopy(os.environ)
env["PYTHONPATH"] = self.python_path
stdout_data = None
if flavor == "request_through_stdin":
p = subprocess.Popen([self.command_name], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout_data = p.communicate(input=request_json.encode('utf-8'))[0]
elif flavor == "request_through_argument":
p = subprocess.Popen([self.command_name, request_json], env=env, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
stdout_data = p.communicate()[0]
else:
raise ValueError("Invalid test flavor")
response = json.loads(stdout_data)
self.assertNotIn('error', response)
self.assertIn('result', response)
self.assertEquals(response['result']['artifacts_dir'], self.artifacts_dir)
self.assertTrue(os.path.exists(self.expected_filename))
contents = ''
with open(self.expected_filename, 'r') as fp:
contents = fp.read()
self.assertEquals(contents, self.expected_contents)
| true | true |
f721837c57c136970d438343cccd809cda08ff22 | 19,515 | py | Python | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 52 | 2017-03-28T02:44:25.000Z | 2021-08-13T08:32:56.000Z | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 51 | 2017-04-05T08:27:29.000Z | 2020-05-08T14:40:31.000Z | pype/vendor/capture_gui/accordion.py | kalisp/pype | 28bbffaf2d12ccee48313cd9985e8dfa05e81a5c | [
"MIT"
] | 12 | 2016-09-19T11:55:03.000Z | 2021-10-15T09:21:31.000Z | from .vendor.Qt import QtCore, QtWidgets, QtGui
class AccordionItem(QtWidgets.QGroupBox):
trigger = QtCore.Signal(bool)
def __init__(self, accordion, title, widget):
QtWidgets.QGroupBox.__init__(self, parent=accordion)
# create the layout
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(6, 12, 6, 6)
layout.setSpacing(0)
layout.addWidget(widget)
self._accordianWidget = accordion
self._rolloutStyle = 2
self._dragDropMode = 0
self.setAcceptDrops(True)
self.setLayout(layout)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showMenu)
# create custom properties
self._widget = widget
self._collapsed = False
self._collapsible = True
self._clicked = False
self._customData = {}
# set common properties
self.setTitle(title)
def accordionWidget(self):
"""
\remarks grabs the parent item for the accordian widget
\return <blurdev.gui.widgets.accordianwidget.AccordianWidget>
"""
return self._accordianWidget
def customData(self, key, default=None):
"""
\remarks return a custom pointer to information stored with this item
\param key <str>
\param default <variant> default value to return if the key was not found
\return <variant> data
"""
return self._customData.get(str(key), default)
def dragEnterEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dragDropRect(self):
return QtCore.QRect(25, 7, 10, 6)
def dragDropMode(self):
return self._dragDropMode
def dragMoveEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dropEvent(self, event):
widget = event.source()
layout = self.parent().layout()
layout.insertWidget(layout.indexOf(self), widget)
self._accordianWidget.emitItemsReordered()
def expandCollapseRect(self):
return QtCore.QRect(0, 0, self.width(), 20)
def enterEvent(self, event):
self.accordionWidget().leaveEvent(event)
event.accept()
def leaveEvent(self, event):
self.accordionWidget().enterEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
if self._clicked and self.expandCollapseRect().contains(event.pos()):
self.toggleCollapsed()
event.accept()
else:
event.ignore()
self._clicked = False
def mouseMoveEvent(self, event):
event.ignore()
def mousePressEvent(self, event):
# handle an internal move
# start a drag event
if event.button() == QtCore.Qt.LeftButton and self.dragDropRect().contains(
event.pos()):
# create the pixmap
pixmap = QtGui.QPixmap.grabWidget(self, self.rect())
# create the mimedata
mimeData = QtCore.QMimeData()
mimeData.setText('ItemTitle::%s' % (self.title()))
# create the drag
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setPixmap(pixmap)
drag.setHotSpot(event.pos())
if not drag.exec_():
self._accordianWidget.emitItemDragFailed(self)
event.accept()
# determine if the expand/collapse should occur
elif event.button() == QtCore.Qt.LeftButton and self.expandCollapseRect().contains(
event.pos()):
self._clicked = True
event.accept()
else:
event.ignore()
def isCollapsed(self):
return self._collapsed
def isCollapsible(self):
return self._collapsible
def __drawTriangle(self, painter, x, y):
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 160),
QtCore.Qt.SolidPattern)
if not self.isCollapsed():
tl, tr, tp = QtCore.QPoint(x + 9, y + 8), QtCore.QPoint(x + 19,
y + 8), QtCore.QPoint(
x + 14, y + 13.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
else:
tl, tr, tp = QtCore.QPoint(x + 11, y + 6), QtCore.QPoint(x + 16,
y + 11), QtCore.QPoint(
x + 11, y + 16.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
currentBrush = painter.brush()
painter.setBrush(brush)
painter.drawPolygon(triangle)
painter.setBrush(currentBrush)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
painter.setRenderHint(painter.Antialiasing)
font = painter.font()
font.setBold(True)
painter.setFont(font)
x = self.rect().x()
y = self.rect().y()
w = self.rect().width() - 1
h = self.rect().height() - 1
r = 8
# draw a rounded style
if self._rolloutStyle == 2:
# draw the text
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
# draw the triangle
self.__drawTriangle(painter, x, y)
# draw the borders
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRoundedRect(x + 1, y + 1, w - 1, h - 1, r, r)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRoundedRect(x, y, w - 1, h - 1, r, r)
# draw a square style
if self._rolloutStyle == 3:
# draw the text
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
self.__drawTriangle(painter, x, y)
# draw the borders
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(x + 1, y + 1, w - 1, h - 1)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(x, y, w - 1, h - 1)
# draw a Maya style
if self._rolloutStyle == 4:
# draw the text
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
painter.setRenderHint(QtGui.QPainter.Antialiasing, False)
self.__drawTriangle(painter, x, y)
# draw the borders - top
headerHeight = 20
headerRect = QtCore.QRect(x + 1, y + 1, w - 1, headerHeight)
headerRectShadow = QtCore.QRect(x - 1, y - 1, w + 1,
headerHeight + 2)
# Highlight
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(headerRect)
painter.fillRect(headerRect, QtGui.QColor(255, 255, 255, 18))
# Shadow
pen.setColor(self.palette().color(QtGui.QPalette.Dark))
painter.setPen(pen)
painter.drawRect(headerRectShadow)
if not self.isCollapsed():
# draw the lover border
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Dark))
pen.setWidthF(0.8)
painter.setPen(pen)
offSet = headerHeight + 3
bodyRect = QtCore.QRect(x, y + offSet, w, h - offSet)
bodyRectShadow = QtCore.QRect(x + 1, y + offSet, w + 1,
h - offSet + 1)
painter.drawRect(bodyRect)
pen.setColor(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(bodyRectShadow)
# draw a boxed style
elif self._rolloutStyle == 1:
if self.isCollapsed():
arect = QtCore.QRect(x + 1, y + 9, w - 1, 4)
brect = QtCore.QRect(x, y + 8, w - 1, 4)
text = '+'
else:
arect = QtCore.QRect(x + 1, y + 9, w - 1, h - 9)
brect = QtCore.QRect(x, y + 8, w - 1, h - 9)
text = '-'
# draw the borders
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(arect)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(brect)
painter.setRenderHint(painter.Antialiasing, False)
painter.setBrush(
self.palette().color(QtGui.QPalette.Window).darker(120))
painter.drawRect(x + 10, y + 1, w - 20, 16)
painter.drawText(x + 16, y + 1,
w - 32, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
text)
painter.drawText(x + 10, y + 1,
w - 20, 16,
QtCore.Qt.AlignCenter,
self.title())
if self.dragDropMode():
rect = self.dragDropRect()
# draw the lines
l = rect.left()
r = rect.right()
cy = rect.center().y()
for y in (cy - 3, cy, cy + 3):
painter.drawLine(l, y, r, y)
painter.end()
def setCollapsed(self, state=True):
if self.isCollapsible():
accord = self.accordionWidget()
accord.setUpdatesEnabled(False)
self._collapsed = state
if state:
self.setMinimumHeight(22)
self.setMaximumHeight(22)
self.widget().setVisible(False)
else:
self.setMinimumHeight(0)
self.setMaximumHeight(1000000)
self.widget().setVisible(True)
self._accordianWidget.emitItemCollapsed(self)
accord.setUpdatesEnabled(True)
def setCollapsible(self, state=True):
self._collapsible = state
def setCustomData(self, key, value):
"""
\remarks set a custom pointer to information stored on this item
\param key <str>
\param value <variant>
"""
self._customData[str(key)] = value
def setDragDropMode(self, mode):
self._dragDropMode = mode
def setRolloutStyle(self, style):
self._rolloutStyle = style
def showMenu(self):
if QtCore.QRect(0, 0, self.width(), 20).contains(
self.mapFromGlobal(QtGui.QCursor.pos())):
self._accordianWidget.emitItemMenuRequested(self)
def rolloutStyle(self):
return self._rolloutStyle
def toggleCollapsed(self):
# enable signaling here
collapse_state = not self.isCollapsed()
self.setCollapsed(collapse_state)
return collapse_state
def widget(self):
return self._widget
class AccordionWidget(QtWidgets.QScrollArea):
"""Accordion style widget.
A collapsible accordion widget like Maya's attribute editor.
This is a modified version bsed on Blur's Accordion Widget to
include a Maya style.
"""
itemCollapsed = QtCore.Signal(AccordionItem)
itemMenuRequested = QtCore.Signal(AccordionItem)
itemDragFailed = QtCore.Signal(AccordionItem)
itemsReordered = QtCore.Signal()
Boxed = 1
Rounded = 2
Square = 3
Maya = 4
NoDragDrop = 0
InternalMove = 1
def __init__(self, parent):
QtWidgets.QScrollArea.__init__(self, parent)
self.setFrameShape(QtWidgets.QScrollArea.NoFrame)
self.setAutoFillBackground(False)
self.setWidgetResizable(True)
self.setMouseTracking(True)
self.verticalScrollBar().setMaximumWidth(10)
widget = QtWidgets.QWidget(self)
# define custom properties
self._rolloutStyle = AccordionWidget.Rounded
self._dragDropMode = AccordionWidget.NoDragDrop
self._scrolling = False
self._scrollInitY = 0
self._scrollInitVal = 0
self._itemClass = AccordionItem
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(2, 2, 2, 6)
layout.setSpacing(2)
layout.addStretch(1)
widget.setLayout(layout)
self.setWidget(widget)
def setSpacing(self, spaceInt):
self.widget().layout().setSpacing(spaceInt)
def addItem(self, title, widget, collapsed=False):
self.setUpdatesEnabled(False)
item = self._itemClass(self, title, widget)
item.setRolloutStyle(self.rolloutStyle())
item.setDragDropMode(self.dragDropMode())
layout = self.widget().layout()
layout.insertWidget(layout.count() - 1, item)
layout.setStretchFactor(item, 0)
if collapsed:
item.setCollapsed(collapsed)
self.setUpdatesEnabled(True)
return item
def clear(self):
self.setUpdatesEnabled(False)
layout = self.widget().layout()
while layout.count() > 1:
item = layout.itemAt(0)
# remove the item from the layout
w = item.widget()
layout.removeItem(item)
# close the widget and delete it
w.close()
w.deleteLater()
self.setUpdatesEnabled(True)
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.MouseButtonPress:
self.mousePressEvent(event)
return True
elif event.type() == QtCore.QEvent.MouseMove:
self.mouseMoveEvent(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.mouseReleaseEvent(event)
return True
return False
def canScroll(self):
return self.verticalScrollBar().maximum() > 0
def count(self):
return self.widget().layout().count() - 1
def dragDropMode(self):
return self._dragDropMode
def indexOf(self, widget):
"""
\remarks Searches for widget(not including child layouts).
Returns the index of widget, or -1 if widget is not found
\return <int>
"""
layout = self.widget().layout()
for index in range(layout.count()):
if layout.itemAt(index).widget().widget() == widget:
return index
return -1
def isBoxedMode(self):
return self._rolloutStyle == AccordionWidget.Maya
def itemClass(self):
return self._itemClass
def itemAt(self, index):
layout = self.widget().layout()
if 0 <= index and index < layout.count() - 1:
return layout.itemAt(index).widget()
return None
def emitItemCollapsed(self, item):
if not self.signalsBlocked():
self.itemCollapsed.emit(item)
def emitItemDragFailed(self, item):
if not self.signalsBlocked():
self.itemDragFailed.emit(item)
def emitItemMenuRequested(self, item):
if not self.signalsBlocked():
self.itemMenuRequested.emit(item)
def emitItemsReordered(self):
if not self.signalsBlocked():
self.itemsReordered.emit()
def enterEvent(self, event):
if self.canScroll():
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.OpenHandCursor)
def leaveEvent(self, event):
if self.canScroll():
QtWidgets.QApplication.restoreOverrideCursor()
def mouseMoveEvent(self, event):
if self._scrolling:
sbar = self.verticalScrollBar()
smax = sbar.maximum()
# calculate the distance moved for the moust point
dy = event.globalY() - self._scrollInitY
# calculate the percentage that is of the scroll bar
dval = smax * (dy / float(sbar.height()))
# calculate the new value
sbar.setValue(self._scrollInitVal - dval)
event.accept()
def mousePressEvent(self, event):
# handle a scroll event
if event.button() == QtCore.Qt.LeftButton and self.canScroll():
self._scrolling = True
self._scrollInitY = event.globalY()
self._scrollInitVal = self.verticalScrollBar().value()
QtWidgets.QApplication.setOverrideCursor(
QtCore.Qt.ClosedHandCursor)
event.accept()
def mouseReleaseEvent(self, event):
if self._scrolling:
QtWidgets.QApplication.restoreOverrideCursor()
self._scrolling = False
self._scrollInitY = 0
self._scrollInitVal = 0
event.accept()
def moveItemDown(self, index):
layout = self.widget().layout()
if (layout.count() - 1) > (index + 1):
widget = layout.takeAt(index).widget()
layout.insertWidget(index + 1, widget)
def moveItemUp(self, index):
if index > 0:
layout = self.widget().layout()
widget = layout.takeAt(index).widget()
layout.insertWidget(index - 1, widget)
def setBoxedMode(self, state):
if state:
self._rolloutStyle = AccordionWidget.Boxed
else:
self._rolloutStyle = AccordionWidget.Rounded
def setDragDropMode(self, dragDropMode):
self._dragDropMode = dragDropMode
for item in self.findChildren(AccordionItem):
item.setDragDropMode(self._dragDropMode)
def setItemClass(self, itemClass):
self._itemClass = itemClass
def setRolloutStyle(self, rolloutStyle):
self._rolloutStyle = rolloutStyle
for item in self.findChildren(AccordionItem):
item.setRolloutStyle(self._rolloutStyle)
def rolloutStyle(self):
return self._rolloutStyle
def takeAt(self, index):
self.setUpdatesEnabled(False)
layout = self.widget().layout()
widget = None
if 0 <= index and index < layout.count() - 1:
item = layout.itemAt(index)
widget = item.widget()
layout.removeItem(item)
widget.close()
self.setUpdatesEnabled(True)
return widget
def widgetAt(self, index):
item = self.itemAt(index)
if item:
return item.widget()
return None
pyBoxedMode = QtCore.Property('bool', isBoxedMode, setBoxedMode)
| 31.224 | 92 | 0.564386 | from .vendor.Qt import QtCore, QtWidgets, QtGui
class AccordionItem(QtWidgets.QGroupBox):
trigger = QtCore.Signal(bool)
def __init__(self, accordion, title, widget):
QtWidgets.QGroupBox.__init__(self, parent=accordion)
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(6, 12, 6, 6)
layout.setSpacing(0)
layout.addWidget(widget)
self._accordianWidget = accordion
self._rolloutStyle = 2
self._dragDropMode = 0
self.setAcceptDrops(True)
self.setLayout(layout)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showMenu)
self._widget = widget
self._collapsed = False
self._collapsible = True
self._clicked = False
self._customData = {}
self.setTitle(title)
def accordionWidget(self):
return self._accordianWidget
def customData(self, key, default=None):
return self._customData.get(str(key), default)
def dragEnterEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dragDropRect(self):
return QtCore.QRect(25, 7, 10, 6)
def dragDropMode(self):
return self._dragDropMode
def dragMoveEvent(self, event):
if not self._dragDropMode:
return
source = event.source()
if source != self and source.parent() == self.parent() and isinstance(
source, AccordionItem):
event.acceptProposedAction()
def dropEvent(self, event):
widget = event.source()
layout = self.parent().layout()
layout.insertWidget(layout.indexOf(self), widget)
self._accordianWidget.emitItemsReordered()
def expandCollapseRect(self):
return QtCore.QRect(0, 0, self.width(), 20)
def enterEvent(self, event):
self.accordionWidget().leaveEvent(event)
event.accept()
def leaveEvent(self, event):
self.accordionWidget().enterEvent(event)
event.accept()
def mouseReleaseEvent(self, event):
if self._clicked and self.expandCollapseRect().contains(event.pos()):
self.toggleCollapsed()
event.accept()
else:
event.ignore()
self._clicked = False
def mouseMoveEvent(self, event):
event.ignore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and self.dragDropRect().contains(
event.pos()):
pixmap = QtGui.QPixmap.grabWidget(self, self.rect())
mimeData = QtCore.QMimeData()
mimeData.setText('ItemTitle::%s' % (self.title()))
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.setPixmap(pixmap)
drag.setHotSpot(event.pos())
if not drag.exec_():
self._accordianWidget.emitItemDragFailed(self)
event.accept()
elif event.button() == QtCore.Qt.LeftButton and self.expandCollapseRect().contains(
event.pos()):
self._clicked = True
event.accept()
else:
event.ignore()
def isCollapsed(self):
return self._collapsed
def isCollapsible(self):
return self._collapsible
def __drawTriangle(self, painter, x, y):
brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 160),
QtCore.Qt.SolidPattern)
if not self.isCollapsed():
tl, tr, tp = QtCore.QPoint(x + 9, y + 8), QtCore.QPoint(x + 19,
y + 8), QtCore.QPoint(
x + 14, y + 13.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
else:
tl, tr, tp = QtCore.QPoint(x + 11, y + 6), QtCore.QPoint(x + 16,
y + 11), QtCore.QPoint(
x + 11, y + 16.0)
points = [tl, tr, tp]
triangle = QtGui.QPolygon(points)
currentBrush = painter.brush()
painter.setBrush(brush)
painter.drawPolygon(triangle)
painter.setBrush(currentBrush)
def paintEvent(self, event):
painter = QtGui.QPainter()
painter.begin(self)
painter.setRenderHint(painter.Antialiasing)
font = painter.font()
font.setBold(True)
painter.setFont(font)
x = self.rect().x()
y = self.rect().y()
w = self.rect().width() - 1
h = self.rect().height() - 1
r = 8
if self._rolloutStyle == 2:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
self.__drawTriangle(painter, x, y)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRoundedRect(x + 1, y + 1, w - 1, h - 1, r, r)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRoundedRect(x, y, w - 1, h - 1, r, r)
if self._rolloutStyle == 3:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
self.__drawTriangle(painter, x, y)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(x + 1, y + 1, w - 1, h - 1)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(x, y, w - 1, h - 1)
if self._rolloutStyle == 4:
painter.drawText(x + 33, y + 3, w, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop,
self.title())
painter.setRenderHint(QtGui.QPainter.Antialiasing, False)
self.__drawTriangle(painter, x, y)
headerHeight = 20
headerRect = QtCore.QRect(x + 1, y + 1, w - 1, headerHeight)
headerRectShadow = QtCore.QRect(x - 1, y - 1, w + 1,
headerHeight + 2)
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(headerRect)
painter.fillRect(headerRect, QtGui.QColor(255, 255, 255, 18))
pen.setColor(self.palette().color(QtGui.QPalette.Dark))
painter.setPen(pen)
painter.drawRect(headerRectShadow)
if not self.isCollapsed():
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Dark))
pen.setWidthF(0.8)
painter.setPen(pen)
offSet = headerHeight + 3
bodyRect = QtCore.QRect(x, y + offSet, w, h - offSet)
bodyRectShadow = QtCore.QRect(x + 1, y + offSet, w + 1,
h - offSet + 1)
painter.drawRect(bodyRect)
pen.setColor(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.4)
painter.setPen(pen)
painter.drawRect(bodyRectShadow)
elif self._rolloutStyle == 1:
if self.isCollapsed():
arect = QtCore.QRect(x + 1, y + 9, w - 1, 4)
brect = QtCore.QRect(x, y + 8, w - 1, 4)
text = '+'
else:
arect = QtCore.QRect(x + 1, y + 9, w - 1, h - 9)
brect = QtCore.QRect(x, y + 8, w - 1, h - 9)
text = '-'
pen = QtGui.QPen(self.palette().color(QtGui.QPalette.Light))
pen.setWidthF(0.6)
painter.setPen(pen)
painter.drawRect(arect)
pen.setColor(self.palette().color(QtGui.QPalette.Shadow))
painter.setPen(pen)
painter.drawRect(brect)
painter.setRenderHint(painter.Antialiasing, False)
painter.setBrush(
self.palette().color(QtGui.QPalette.Window).darker(120))
painter.drawRect(x + 10, y + 1, w - 20, 16)
painter.drawText(x + 16, y + 1,
w - 32, 16,
QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,
text)
painter.drawText(x + 10, y + 1,
w - 20, 16,
QtCore.Qt.AlignCenter,
self.title())
if self.dragDropMode():
rect = self.dragDropRect()
l = rect.left()
r = rect.right()
cy = rect.center().y()
for y in (cy - 3, cy, cy + 3):
painter.drawLine(l, y, r, y)
painter.end()
def setCollapsed(self, state=True):
if self.isCollapsible():
accord = self.accordionWidget()
accord.setUpdatesEnabled(False)
self._collapsed = state
if state:
self.setMinimumHeight(22)
self.setMaximumHeight(22)
self.widget().setVisible(False)
else:
self.setMinimumHeight(0)
self.setMaximumHeight(1000000)
self.widget().setVisible(True)
self._accordianWidget.emitItemCollapsed(self)
accord.setUpdatesEnabled(True)
def setCollapsible(self, state=True):
self._collapsible = state
def setCustomData(self, key, value):
self._customData[str(key)] = value
def setDragDropMode(self, mode):
self._dragDropMode = mode
def setRolloutStyle(self, style):
self._rolloutStyle = style
def showMenu(self):
if QtCore.QRect(0, 0, self.width(), 20).contains(
self.mapFromGlobal(QtGui.QCursor.pos())):
self._accordianWidget.emitItemMenuRequested(self)
def rolloutStyle(self):
return self._rolloutStyle
def toggleCollapsed(self):
collapse_state = not self.isCollapsed()
self.setCollapsed(collapse_state)
return collapse_state
def widget(self):
return self._widget
class AccordionWidget(QtWidgets.QScrollArea):
itemCollapsed = QtCore.Signal(AccordionItem)
itemMenuRequested = QtCore.Signal(AccordionItem)
itemDragFailed = QtCore.Signal(AccordionItem)
itemsReordered = QtCore.Signal()
Boxed = 1
Rounded = 2
Square = 3
Maya = 4
NoDragDrop = 0
InternalMove = 1
def __init__(self, parent):
QtWidgets.QScrollArea.__init__(self, parent)
self.setFrameShape(QtWidgets.QScrollArea.NoFrame)
self.setAutoFillBackground(False)
self.setWidgetResizable(True)
self.setMouseTracking(True)
self.verticalScrollBar().setMaximumWidth(10)
widget = QtWidgets.QWidget(self)
self._rolloutStyle = AccordionWidget.Rounded
self._dragDropMode = AccordionWidget.NoDragDrop
self._scrolling = False
self._scrollInitY = 0
self._scrollInitVal = 0
self._itemClass = AccordionItem
layout = QtWidgets.QVBoxLayout()
layout.setContentsMargins(2, 2, 2, 6)
layout.setSpacing(2)
layout.addStretch(1)
widget.setLayout(layout)
self.setWidget(widget)
def setSpacing(self, spaceInt):
self.widget().layout().setSpacing(spaceInt)
def addItem(self, title, widget, collapsed=False):
self.setUpdatesEnabled(False)
item = self._itemClass(self, title, widget)
item.setRolloutStyle(self.rolloutStyle())
item.setDragDropMode(self.dragDropMode())
layout = self.widget().layout()
layout.insertWidget(layout.count() - 1, item)
layout.setStretchFactor(item, 0)
if collapsed:
item.setCollapsed(collapsed)
self.setUpdatesEnabled(True)
return item
def clear(self):
self.setUpdatesEnabled(False)
layout = self.widget().layout()
while layout.count() > 1:
item = layout.itemAt(0)
w = item.widget()
layout.removeItem(item)
w.close()
w.deleteLater()
self.setUpdatesEnabled(True)
def eventFilter(self, object, event):
if event.type() == QtCore.QEvent.MouseButtonPress:
self.mousePressEvent(event)
return True
elif event.type() == QtCore.QEvent.MouseMove:
self.mouseMoveEvent(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
self.mouseReleaseEvent(event)
return True
return False
def canScroll(self):
return self.verticalScrollBar().maximum() > 0
def count(self):
return self.widget().layout().count() - 1
def dragDropMode(self):
return self._dragDropMode
def indexOf(self, widget):
layout = self.widget().layout()
for index in range(layout.count()):
if layout.itemAt(index).widget().widget() == widget:
return index
return -1
def isBoxedMode(self):
return self._rolloutStyle == AccordionWidget.Maya
def itemClass(self):
return self._itemClass
def itemAt(self, index):
layout = self.widget().layout()
if 0 <= index and index < layout.count() - 1:
return layout.itemAt(index).widget()
return None
def emitItemCollapsed(self, item):
if not self.signalsBlocked():
self.itemCollapsed.emit(item)
def emitItemDragFailed(self, item):
if not self.signalsBlocked():
self.itemDragFailed.emit(item)
def emitItemMenuRequested(self, item):
if not self.signalsBlocked():
self.itemMenuRequested.emit(item)
def emitItemsReordered(self):
if not self.signalsBlocked():
self.itemsReordered.emit()
def enterEvent(self, event):
if self.canScroll():
QtWidgets.QApplication.setOverrideCursor(QtCore.Qt.OpenHandCursor)
def leaveEvent(self, event):
if self.canScroll():
QtWidgets.QApplication.restoreOverrideCursor()
def mouseMoveEvent(self, event):
if self._scrolling:
sbar = self.verticalScrollBar()
smax = sbar.maximum()
dy = event.globalY() - self._scrollInitY
dval = smax * (dy / float(sbar.height()))
sbar.setValue(self._scrollInitVal - dval)
event.accept()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton and self.canScroll():
self._scrolling = True
self._scrollInitY = event.globalY()
self._scrollInitVal = self.verticalScrollBar().value()
QtWidgets.QApplication.setOverrideCursor(
QtCore.Qt.ClosedHandCursor)
event.accept()
def mouseReleaseEvent(self, event):
if self._scrolling:
QtWidgets.QApplication.restoreOverrideCursor()
self._scrolling = False
self._scrollInitY = 0
self._scrollInitVal = 0
event.accept()
def moveItemDown(self, index):
layout = self.widget().layout()
if (layout.count() - 1) > (index + 1):
widget = layout.takeAt(index).widget()
layout.insertWidget(index + 1, widget)
def moveItemUp(self, index):
if index > 0:
layout = self.widget().layout()
widget = layout.takeAt(index).widget()
layout.insertWidget(index - 1, widget)
def setBoxedMode(self, state):
if state:
self._rolloutStyle = AccordionWidget.Boxed
else:
self._rolloutStyle = AccordionWidget.Rounded
def setDragDropMode(self, dragDropMode):
self._dragDropMode = dragDropMode
for item in self.findChildren(AccordionItem):
item.setDragDropMode(self._dragDropMode)
def setItemClass(self, itemClass):
self._itemClass = itemClass
def setRolloutStyle(self, rolloutStyle):
self._rolloutStyle = rolloutStyle
for item in self.findChildren(AccordionItem):
item.setRolloutStyle(self._rolloutStyle)
def rolloutStyle(self):
return self._rolloutStyle
def takeAt(self, index):
self.setUpdatesEnabled(False)
layout = self.widget().layout()
widget = None
if 0 <= index and index < layout.count() - 1:
item = layout.itemAt(index)
widget = item.widget()
layout.removeItem(item)
widget.close()
self.setUpdatesEnabled(True)
return widget
def widgetAt(self, index):
item = self.itemAt(index)
if item:
return item.widget()
return None
pyBoxedMode = QtCore.Property('bool', isBoxedMode, setBoxedMode)
| true | true |
f721842d767265f7f548ee0d34b73c892bd60f1b | 183 | py | Python | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 9 | 2019-12-06T13:12:33.000Z | 2021-10-05T12:47:15.000Z | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 2 | 2019-11-28T17:20:27.000Z | 2019-12-09T18:44:35.000Z | pystrometry/example_subpkg/setup_package.py | Johannes-Sahlmann/pystrometry | 79dc67369be2ce46ddb0ebc73e5fe3570d20c025 | [
"BSD-3-Clause"
] | 3 | 2019-11-28T17:04:22.000Z | 2021-10-19T13:12:34.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import
def get_package_data():
return {'pystrometry.example_subpkg': ['data/*']}
| 26.142857 | 63 | 0.754098 |
from __future__ import absolute_import
def get_package_data():
return {'pystrometry.example_subpkg': ['data/*']}
| true | true |
f721854d6db9efb92a7df07e88cf428c0d746223 | 3,699 | py | Python | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | muti/glmu.py | invertedv/utilities | 42c331893b1beee73b2d21df6cb2bad73b872bb7 | [
"MIT"
] | null | null | null | from muti import genu
import clickhouse_driver
import pandas as pd
from modeling.glm import glm
import numpy as np
import math
def build_model_formula(features_dict: dict, target: str):
"""
Builds the model formula for glm from modeling based on the features_dict specification.
Does not included embedded features
:param features_dict: features dictionary
:param target: dependent variable
:return: model formula
:rtype str
"""
ms = target + '~'
extra = ''
for feature in features_dict:
if features_dict[feature][0] == 'cts':
ms += extra + feature
elif features_dict[feature][0] == 'spl':
ms += extra + 'h(' + feature + ',' + features_dict[feature][1] + ',0)'
elif features_dict[feature][0] == 'cat':
ms += extra + 'c(' + feature + ',' + features_dict[feature][2] + ')'
extra = ' + '
return ms
def incr_build(model: str, target_var: str, start_list: list, add_list: list, get_data_fn, sample_size: int,
client: clickhouse_driver.Client, global_valid_df_in: pd.DataFrame, family='normal'):
"""
This function builds a sequence of GLM models. The get_data_fn takes a list of values as contained in
start_list and add_list and returns data subset to those values. The initial model is built on the
values of start_list and then evaluated on the data subset to the first value of add_list.
At the next step, the data in the first element of add_list is added to the start_list data, the model
is updated and the evaluation is conducted on the second element of add_list.
This function is the GLM counterpart to incr_build
:param model: model specification for glm
:param target_var: response variable we're modeling
:param start_list: list of (general) time periods for model build for the first model build
:param add_list: list of out-of-time periods to evaluate
:param get_data_fn: function to get a pandas DataFrame of data to work on
:param sample_size: size of pandas DataFrames to get
:param client: db connector
:param family: family of the model ('normal' or 'binomial')
:param global_valid_df_in: pandas DataFrame covering all the values of add_list for validation
:return: lists of out-of-sample values:
add_list
rmse root mean squared error
corr correlation
"""
build_list = start_list
global_valid_df = global_valid_df_in.copy()
global_valid_df['model_glm_inc'] = np.full((global_valid_df.shape[0]), 0.0)
rmse_valid = []
corr_valid = []
segs = []
for j, valid in enumerate(add_list):
segs += [valid]
model_df = get_data_fn(build_list, sample_size, client)
valid_df = get_data_fn([valid], sample_size, client)
print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(valid, model_df.shape[0],
valid_df.shape[0]))
# print('Build list: {0}'.format(build_list))
glm_model = glm(model, model_df, family=family)
build_list += [valid]
gyh = glm_model.predict(global_valid_df)
i = global_valid_df['vintage'] == valid
global_valid_df.loc[i, 'model_glm_inc'] = gyh[i]
yh = glm_model.predict(valid_df)
res = valid_df[target_var] - np.array(yh).flatten()
rmse_valid += [math.sqrt(np.square(res).mean())]
valid_df['yh'] = yh
cor = genu.r_square(valid_df['yh'], valid_df[target_var])
corr_valid += [cor]
return segs, rmse_valid, corr_valid, global_valid_df
| 41.561798 | 112 | 0.651798 | from muti import genu
import clickhouse_driver
import pandas as pd
from modeling.glm import glm
import numpy as np
import math
def build_model_formula(features_dict: dict, target: str):
    """Assemble a model formula string from a feature specification.

    Each entry of *features_dict* maps a feature name to a sequence whose first
    element selects the term type:

    - 'cts': the feature enters linearly (term is the bare name)
    - 'spl': a spline term ``h(<name>,<knots>,0)`` using element 1 as the knots
    - 'cat': a categorical term ``c(<name>,<base>)`` using element 2 as the base

    :param features_dict: mapping of feature name to its specification
    :param target: name of the response variable (left-hand side)
    :return: formula string of the form ``target~term1 + term2 + ...``
    """
    formula = target + '~'
    separator = ''
    for name, spec in features_dict.items():
        kind = spec[0]
        if kind == 'cts':
            term = name
        elif kind == 'spl':
            term = 'h({},{},0)'.format(name, spec[1])
        elif kind == 'cat':
            term = 'c({},{})'.format(name, spec[2])
        else:
            # Unknown types contribute no term, but (as in the original) the
            # separator still flips to ' + ' after the first entry.
            term = None
        if term is not None:
            formula += separator + term
        separator = ' + '
    return formula
def incr_build(model: str, target_var: str, start_list: list, add_list: list, get_data_fn, sample_size: int,
               client: clickhouse_driver.Client, global_valid_df_in: pd.DataFrame, family='normal'):
    """Incrementally build and validate a GLM over successive time periods.

    The model is first fit on the periods in *start_list* and evaluated on the
    first element of *add_list*; each element of *add_list* is then folded into
    the build data, the model is refit, and it is evaluated on the next
    out-of-time period.

    :param model: model specification (formula) for the GLM
    :param target_var: name of the response variable being modeled
    :param start_list: time periods used for the initial model build
    :param add_list: out-of-time periods to evaluate, then absorb, one by one
    :param get_data_fn: function(periods, sample_size, client) returning a pandas DataFrame
    :param sample_size: number of rows for each DataFrame pull
    :param client: database connection handed through to get_data_fn
    :param global_valid_df_in: DataFrame covering all add_list periods; a copy
        is annotated with predictions in a new 'model_glm_inc' column
    :param family: GLM family ('normal' or 'binomial')
    :return: tuple (segs, rmse_valid, corr_valid, global_valid_df) where segs is
        the list of evaluated periods, rmse_valid the out-of-sample root mean
        squared errors, corr_valid the r-square values, and global_valid_df the
        annotated copy of global_valid_df_in
    """
    # Copy so the in-place extension below does not mutate the caller's list
    # (the original aliased start_list directly, so repeated calls leaked
    # previously-added periods back into the caller's start_list).
    build_list = list(start_list)
    global_valid_df = global_valid_df_in.copy()
    global_valid_df['model_glm_inc'] = np.full((global_valid_df.shape[0]), 0.0)
    rmse_valid = []
    corr_valid = []
    segs = []
    for j, valid in enumerate(add_list):
        segs += [valid]
        model_df = get_data_fn(build_list, sample_size, client)
        valid_df = get_data_fn([valid], sample_size, client)
        print('Data sizes for out-of-sample value {0}: build {1}, validate {2}'.format(valid, model_df.shape[0],
                                                                                      valid_df.shape[0]))
        glm_model = glm(model, model_df, family=family)
        # Absorb the just-evaluated period into the next build.
        build_list += [valid]
        # Record this period's predictions in the global validation frame.
        gyh = glm_model.predict(global_valid_df)
        i = global_valid_df['vintage'] == valid
        global_valid_df.loc[i, 'model_glm_inc'] = gyh[i]
        # Out-of-sample metrics on the period's own sample.
        yh = glm_model.predict(valid_df)
        res = valid_df[target_var] - np.array(yh).flatten()
        rmse_valid += [math.sqrt(np.square(res).mean())]
        valid_df['yh'] = yh
        cor = genu.r_square(valid_df['yh'], valid_df[target_var])
        corr_valid += [cor]
    return segs, rmse_valid, corr_valid, global_valid_df
| true | true |
f7218599cb5a20deb178638895ef1d333f863936 | 4,015 | py | Python | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | scripts/fastRequests.py | Hitoshirenu/muchspace | e3db813b148941d6caf6e3b13e82c0fc48f454bf | [
"MIT"
] | null | null | null | # import threading
from pathlib import Path
from multiprocessing.dummy import Pool as ThreadPool
from more_itertools import unique_everseen
import requests, json, datetime
from scripts.byteSize import human_byte_size
# Initialization
# Module-wide state shared (unsynchronized) by the url_info() worker threads.
Total_Size = 0        # running sum of all probed file sizes, in bytes
Processed_URLs = 0    # number of URLs successfully probed so far
Progress = 0          # completion percentage, accumulated in Rate-sized steps
Total_URLs = 0        # total number of deduplicated URLs to process
Rate = 0              # percentage contribution of one URL (set to 100/Total_URLs in main)
Report = False        # when True, workers append per-URL records to ReportJson
ReportJson = []       # accumulated records for the JSON report written by main()
""" Main fuction to gather info about URL """
def url_info(URL):
    """Probe one URL's size via its HTTP headers and update the shared totals.

    A HEAD request is issued first so 4xx/5xx responses fail fast; the size is
    then read from the Content-length header of a streamed GET (some servers
    omit it on HEAD).  On success the module-level counters are advanced and,
    when Report is set, a record is appended to ReportJson.  Request errors are
    printed and swallowed so one bad URL does not kill its worker thread.

    :param URL: URL to probe; blank entries ('' or ' ') are skipped
    """
    linkStatus = {}
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    if URL not in [' ', '']:  # Ignoring any whitespaces within the list
        try:
            File_Size = 0  # Initialize
            fileLink = requests.head(URL, stream=True)  # Get the link header info
            fileLink.raise_for_status()  # To catch 404 and 500 earlier
            # Why GET instead of HEAD, Source: https://stackoverflow.com/questions/14270698/get-file-size-using-python-requests-while-only-getting-the-header
            # The streamed response is explicitly closed so the pooled
            # connection is not leaked (the original never closed it).
            with requests.get(URL, stream=True) as response:
                HEAD = response.headers
            # Some servers omit Content-length entirely; treat that as size 0
            # instead of killing the worker with an uncaught KeyError.
            File_Size = int(HEAD.get('Content-length', 0))
            # NOTE(review): these shared counters are updated by many threads
            # without a lock, so the totals can drift under heavy contention.
            Progress += Rate
            Processed_URLs = Processed_URLs + 1
            Total_Size += File_Size
            print('URLs Done:{0}/{1} File Size:{2} Total Size:{3} Progress:{4:.2f}%'.format(
                Processed_URLs, Total_URLs, human_byte_size(File_Size), human_byte_size(Total_Size), Progress))
        except requests.exceptions.HTTPError as errh:
            print("Http Error:", errh)
        except requests.exceptions.ConnectionError as errc:
            print("Error Connecting:", errc)
        except requests.exceptions.Timeout as errt:
            print("Timeout Error:", errt)
        except requests.exceptions.RequestException as err:
            print("Oops: Something Else", err)
        else:
            # Only record a report entry for successful requests.  Previously
            # this block also ran after failures, where fileLink could be
            # undefined (NameError) when the HEAD request itself failed.
            if Report is True:
                linkStatus['link'] = URL
                linkStatus['size'] = human_byte_size(File_Size)
                linkStatus['status'] = fileLink.status_code
                linkStatus['last-checked'] = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
                ReportJson.append(linkStatus)
def thread_series_creator(List_Of_URLs):
    """Process every URL concurrently via url_info() on a pool of 100 threads."""
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # ThreadPool (multiprocessing.dummy) uses threads, not processes, which is
    # what we want for this I/O-bound workload.
    worker_pool = ThreadPool(100)
    # map() blocks until every URL has been handed to url_info and processed;
    # url_info reports through the module globals, so the result is discarded.
    worker_pool.map(url_info, List_Of_URLs)
    # Stop accepting work, then wait for the workers to drain.
    worker_pool.close()
    worker_pool.join()
def main(file_path, report=False):
    """Size every unique URL listed in *file_path* and print a summary.

    Reads the file, de-duplicates the URLs (order preserved), fans them out to
    the thread pool via thread_series_creator(), optionally dumps a per-URL
    JSON report, and finally prints the totals gathered by the workers.

    :param file_path: path to a text file containing one URL per line
    :param report: when True, write per-URL results to muchspace.Report.<date>.json
    """
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # If exist check if it is a file
    file_of_links = Path(file_path)
    if file_of_links.is_file():
        try:
            # Preprocessing: load URLs into a list for faster access,
            # removing duplicates without changing order.
            with open(file_of_links, 'r') as f:
                List_of_URLs = list(unique_everseen(f.read().splitlines()))
            Total_URLs = len(List_of_URLs)  # Total number of links
            if Total_URLs == 0:
                # Guard: an empty file previously crashed with
                # ZeroDivisionError on the Rate computation below.
                print("Error! No URLs found in file!")
                print("Exiting...")
                return
            Rate = 100 / Total_URLs  # Each link's percentage contribution
        except IOError:
            print("IO Error : Unable to read from file")
            print("Exiting...")
            return
    else:
        print("Error! Invalid file path!")
        print("Exiting...")
        return
    Report = report
    thread_series_creator(List_of_URLs)
    if Report is True:  # Creating report
        Date = datetime.date.today().strftime('%d.%b.%Y')
        with open("muchspace.Report." + Date + ".json", "w") as write_file:
            json.dump(ReportJson, write_file, indent=4)
    # Final Console Report
    print("******Final Diagnostic Report******")
    print("Total URLs: {0} Processed URLs: {1} Rate of completion: {2:.2f}%".format(Total_URLs, Processed_URLs, Progress))
    print("Total size of {}/{} links is: {}".format(Processed_URLs, Total_URLs, human_byte_size(Total_Size)))
| 43.641304 | 187 | 0.646077 |
from pathlib import Path
from multiprocessing.dummy import Pool as ThreadPool
from more_itertools import unique_everseen
import requests, json, datetime
from scripts.byteSize import human_byte_size
# Shared module-level state, updated (without a lock) by url_info() workers.
Total_Size = 0        # running sum of probed file sizes, in bytes
Processed_URLs = 0    # URLs successfully probed so far
Progress = 0          # accumulated completion percentage
Total_URLs = 0        # total number of URLs to process
Rate = 0              # per-URL percentage step (set to 100/Total_URLs in main)
Report = False        # when True, workers append records to ReportJson
ReportJson = []       # per-URL records for the JSON report written by main()
def url_info(URL):
    """Fetch header information for one URL and fold its size into the totals.

    A HEAD request fails fast on 4xx/5xx; the size comes from the
    Content-length header of a streamed GET.  Request errors are printed and
    swallowed.  When the Report flag is set, a record is appended to
    ReportJson.

    :param URL: URL to probe; blank entries ('' or ' ') are skipped
    """
    linkStatus = {}  # per-URL record for the optional JSON report
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    if URL not in [' ','']:  # skip blank placeholder entries
        try:
            File_Size = 0
            fileLink = requests.head(URL, stream=True)  # header-only probe
            fileLink.raise_for_status()  # surface 404/500 before the GET
            # GET (streamed) because some servers omit Content-length on HEAD.
            # NOTE(review): the streamed response is never closed (leaks the
            # connection) and a missing Content-length raises KeyError here.
            HEAD = requests.get(URL, stream=True).headers
            File_Size = int(HEAD['Content-length'])
            # NOTE(review): shared counters mutated from many threads with no
            # lock; totals may drift under contention.
            Progress += Rate
            Processed_URLs = Processed_URLs + 1
            Total_Size += File_Size
            print('URLs Done:{0}/{1} File Size:{2} Total Size:{3} Progress:{4:.2f}%'.format(Processed_URLs, Total_URLs, human_byte_size(File_Size), human_byte_size(Total_Size), Progress))
        except requests.exceptions.HTTPError as errh:
            print ("Http Error:",errh)
        except requests.exceptions.ConnectionError as errc:
            print ("Error Connecting:",errc)
        except requests.exceptions.Timeout as errt:
            print ("Timeout Error:",errt)
        except requests.exceptions.RequestException as err:
            print ("Oops: Something Else",err)
        # NOTE(review): this block also runs after a failed request, where
        # fileLink may be undefined (NameError) if the HEAD itself failed.
        if Report is True:
            linkStatus['link'] = URL
            linkStatus['size'] = human_byte_size(File_Size)
            linkStatus['status'] = fileLink.status_code
            linkStatus['last-checked'] = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
            ReportJson.append(linkStatus)
def thread_series_creator(List_Of_URLs):
    """Process every URL concurrently with a pool of 100 worker threads."""
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    # ThreadPool from multiprocessing.dummy uses threads, not processes.
    pool = ThreadPool(100)
    results = pool.map(url_info, List_Of_URLs)  # blocks until all URLs are done
    pool.close()  # no more work will be submitted
    pool.join()   # wait for the workers to drain
def main(file_path, report=False):
    """Read a file of URLs, size each one concurrently, and print a summary.

    :param file_path: path to a text file with one URL per line
    :param report: when True, dump per-URL results to muchspace.Report.<date>.json
    """
    global Total_Size, Processed_URLs, Progress, Total_URLs, Rate, Report
    file_of_links = Path(file_path)
    if file_of_links.is_file():
        try:
            with open(file_of_links,'r') as f:  # read all URLs up front
                List_of_URLs = list(unique_everseen(f.read().splitlines()))  # dedupe, keep order
            Total_URLs = len(List_of_URLs)
            # NOTE(review): an empty file makes this divide by zero.
            Rate = 100/Total_URLs  # progress contribution of one URL
        except IOError:
            print("IO Error : Unable to read from file")
            print("Exiting...")
            return
    else:
        print("Error! Invalid file path!")
        print("Exiting...")
        return
    Report = report
    thread_series_creator(List_of_URLs)
    if Report is True:  # dump the per-URL report collected by the workers
        Date = datetime.date.today().strftime('%d.%b.%Y')
        with open("muchspace.Report."+Date+".json", "w") as write_file:
            json.dump(ReportJson, write_file, indent=4)
    # Final console summary built from the shared counters.
    print("******Final Diagnostic Report******")
    print("Total URLs: {0} Processed URLs: {1} Rate of completion: {2:.2f}%".format(Total_URLs, Processed_URLs, Progress))
    print("Total size of {}/{} links is: {}".format(Processed_URLs, Total_URLs, human_byte_size(Total_Size)))
| true | true |
f72186852716593e8409116793bd82e2b2526084 | 2,714 | py | Python | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | src/pipelines/epidemiology/nl_authority.py | nelhage/data | 50a1ab91b786c9f89a8ff6ff10ea57ea5335490d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class NetherlandsDataSource(DataSource):
    """Data source for the Dutch (RIVM) municipal COVID-19 table."""

    def parse_dataframes(
        self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
    ) -> DataFrame:
        """Convert the raw municipal table into daily records at municipal,
        level-2 and national level.

        :param dataframes: raw input tables; only the first one is used
        :param aux: auxiliary tables; aux["metadata"] maps subregion2_code to key
        :return: concatenation of national, level-2 and municipal records
        """
        # Map the source column names onto the canonical schema.
        column_map = {
            "Date_of_report": "date",
            "Municipality_code": "subregion2_code",
            "Municipality_name": "subregion2_name",
            "Province": "subregion1_name",
            "Total_reported": "confirmed",
            "Hospital_admission": "hospitalized",
            "Deceased": "deceased",
        }
        records = dataframes[0].rename(columns=column_map)
        # Keep only rows with a fully identified municipality.
        for required in ("subregion1_name", "subregion2_code", "subregion2_name"):
            records = records[~records[required].isna()]
        # Normalize the report timestamp to an ISO date string.
        records["date"] = records["date"].apply(
            lambda ts: datetime.fromisoformat(ts).date().isoformat()
        )
        # Strip the two-character prefix so the code matches the metadata table.
        records["subregion2_code"] = records["subregion2_code"].apply(lambda code: code[2:])
        records = records.drop(columns=["subregion1_name", "subregion2_name"])
        records = records.merge(aux["metadata"], on="subregion2_code")
        # Only the key-date pair plus the statistics are needed downstream.
        records = records[["date", "key", "confirmed", "deceased", "hospitalized"]]
        # Convert cumulative counts into daily deltas.
        records = grouped_diff(records, ["key", "date"])
        # Aggregate municipalities up to level-2 regions (first five key chars).
        level2 = records.copy()
        level2["key"] = level2.key.apply(lambda key: key[:5])
        level2 = level2.groupby(["key", "date"]).sum().reset_index()
        # Aggregate level-2 regions up to the national total.
        country = level2.copy().drop(columns=["key"])
        country = country.groupby("date").sum().reset_index()
        country["key"] = "NL"
        return concat([country, level2, records])
| 37.178082 | 91 | 0.637804 |
from datetime import datetime
from typing import Any, Dict, List
from pandas import DataFrame, concat, merge
from lib.pipeline import DataSource
from lib.time import datetime_isoformat
from lib.utils import grouped_diff
class NetherlandsDataSource(DataSource):
def parse_dataframes(
self, dataframes: List[DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = dataframes[0].rename(
columns={
"Date_of_report": "date",
"Municipality_code": "subregion2_code",
"Municipality_name": "subregion2_name",
"Province": "subregion1_name",
"Total_reported": "confirmed",
"Hospital_admission": "hospitalized",
"Deceased": "deceased",
}
)
data = data[~data.subregion1_name.isna()]
data = data[~data.subregion2_code.isna()]
data = data[~data.subregion2_name.isna()]
data.date = data.date.apply(lambda x: datetime.fromisoformat(x).date().isoformat())
data["subregion2_code"] = data["subregion2_code"].apply(lambda x: x[2:])
data = data.drop(columns=["subregion1_name", "subregion2_name"])
data = data.merge(aux["metadata"], on="subregion2_code")
data = data[["date", "key", "confirmed", "deceased", "hospitalized"]]
data = grouped_diff(data, ["key", "date"])
l2 = data.copy()
l2["key"] = l2.key.apply(lambda x: x[:5])
l2 = l2.groupby(["key", "date"]).sum().reset_index()
l1 = l2.copy().drop(columns=["key"])
l1 = l1.groupby("date").sum().reset_index()
l1["key"] = "NL"
return concat([l1, l2, data])
| true | true |
f72187bfd6178c0257c0f81666097723e96f4c4d | 21,206 | py | Python | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | tests/controller_test.py | elmopl/homekit_python | bb2b07e66fce3c3034b012ef679695a3da77f787 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2018 Joachim Lusiardi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import tempfile
import threading
import time
from homekit import Controller
from homekit import AccessoryServer
from homekit.exceptions import AccessoryNotFoundError, AlreadyPairedError, UnavailableError, FormatError, \
ConfigLoadingError, ConfigSavingError, MalformedPinError
from homekit.model import Accessory
from homekit.model.services import LightBulbService
from homekit.model import mixin as model_mixin
from homekit.tools import BLE_TRANSPORT_SUPPORTED, IP_TRANSPORT_SUPPORTED
if BLE_TRANSPORT_SUPPORTED:
from homekit.controller.ble_impl import BlePairing
if IP_TRANSPORT_SUPPORTED:
from homekit.controller.ip_implementation import IpPairing
class T(threading.Thread):
    """Background thread that publishes an accessory server and serves requests."""

    def __init__(self, accessoryServer):
        # Keep the server around so run() can drive it on the new thread.
        super().__init__()
        self.a_s = accessoryServer

    def run(self):
        # Announce the device first, then block in the request-serving loop.
        self.a_s.publish_device()
        self.a_s.serve_forever()
# Shared flags the accessory callbacks flip so the tests can observe them.
value = 0     # last value written to the lightbulb's 'On' characteristic
identify = 0  # set to 1 when the accessory's identify routine fires
def identify_callback():
    """Record that the accessory's identify routine ran (observed via the global)."""
    global identify
    identify = 1
def set_value(new_value):
    """Record the value written to the lightbulb so tests can assert on it."""
    global value
    value = new_value
class TestControllerIpUnpaired(unittest.TestCase):
    """Integration tests against a freshly started, not-yet-paired IP accessory."""

    @classmethod
    def setUpClass(cls):
        # prepare config file for unpaired accessory server
        cls.config_file = tempfile.NamedTemporaryFile()
        cls.config_file.write("""{
          "accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
          "accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
          "accessory_pairing_id": "12:34:56:00:01:0B",
          "accessory_pin": "010-22-020",
          "c#": 0,
          "category": "Lightbulb",
          "host_ip": "127.0.0.1",
          "host_port": 54321,
          "name": "unittestLight",
          "peers": {
          },
          "unsuccessful_tries": 0
        }""".encode())
        cls.config_file.flush()
        # Make sure get_id() numbers are stable between tests
        model_mixin.id_counter = 0
        # Spin up a demo accessory (a single lightbulb) on its own thread.
        cls.httpd = AccessoryServer(cls.config_file.name, None)
        cls.httpd.set_identify_callback(identify_callback)
        accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
        accessory.set_identify_callback(identify_callback)
        lightBulbService = LightBulbService()
        lightBulbService.set_on_set_callback(set_value)
        accessory.services.append(lightBulbService)
        cls.httpd.add_accessory(accessory)
        t = T(cls.httpd)
        t.start()
        # Give the server (and its mDNS announcement) time to come up.
        time.sleep(10)
        cls.controller_file = tempfile.NamedTemporaryFile()

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)
        # Per-instance controller data file used by the pairing test.
        self.controller_file = tempfile.NamedTemporaryFile()

    @classmethod
    def tearDownClass(cls):
        cls.httpd.unpublish_device()
        cls.httpd.shutdown()
        cls.config_file.close()

    def setUp(self):
        self.controller = Controller()

    def test_01_1_discover(self):
        """Try to discover the test accessory"""
        result = self.controller.discover()
        found = False
        for device in result:
            if '12:34:56:00:01:0B' == device['id']:
                found = True
        self.assertTrue(found)

    def test_01_2_unpaired_identify(self):
        """Try to trigger the identification of the test accessory"""
        global identify
        self.controller.identify('12:34:56:00:01:0B')
        self.assertEqual(1, identify)
        identify = 0

    def test_01_3_unpaired_identify_not_found(self):
        """Try to identify a non existing accessory. This should result in AccessoryNotFoundError"""
        self.assertRaises(AccessoryNotFoundError, self.controller.identify, '12:34:56:00:01:0C')

    def test_02_pair(self):
        """Try to pair the test accessory"""
        self.controller.perform_pairing('alias', '12:34:56:00:01:0B', '010-22-020')
        pairings = self.controller.get_pairings()
        self.controller.save_data(self.controller_file.name)
        self.assertIn('alias', pairings)

    def test_02_pair_accessory_not_found(self):
        """Pairing with an unknown accessory id must raise AccessoryNotFoundError."""
        self.assertRaises(AccessoryNotFoundError, self.controller.perform_pairing, 'alias1', '12:34:56:00:01:1B',
                          '010-22-020')

    def test_02_pair_wrong_pin(self):
        """Pairing with a wrong pin must raise UnavailableError."""
        self.assertRaises(UnavailableError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '010-22-021')

    def test_02_pair_malformed_pin(self):
        """A pin without the XXX-XX-XXX dashes must raise MalformedPinError."""
        self.assertRaises(MalformedPinError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '01022021')
class TestControllerIpPaired(unittest.TestCase):
    """Integration tests against an accessory server that is already paired."""

    @classmethod
    def setUpClass(cls):
        # Accessory config with two pre-registered controllers (one admin, one regular).
        cls.config_file = tempfile.NamedTemporaryFile()
        cls.config_file.write("""{
          "accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
          "accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
          "accessory_pairing_id": "12:34:56:00:01:0A",
          "accessory_pin": "031-45-154",
          "c#": 1,
          "category": "Lightbulb",
          "host_ip": "127.0.0.1",
          "host_port": 51842,
          "name": "unittestLight",
          "peers": {
            "decc6fa3-de3e-41c9-adba-ef7409821bfc": {
              "admin": true,
              "key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
            },
            "ABCDEFfa3-de3e-41c9-adba-ef7409821bfc": {
              "admin": false,
              "key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
            }
          },
          "unsuccessful_tries": 0
        }""".encode())
        cls.config_file.flush()
        # Make sure get_id() numbers are stable between tests
        model_mixin.id_counter = 0
        # Spin up the demo lightbulb accessory on its own thread.
        cls.httpd = AccessoryServer(cls.config_file.name, None)
        cls.httpd.set_identify_callback(identify_callback)
        accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
        accessory.set_identify_callback(identify_callback)
        lightBulbService = LightBulbService()
        lightBulbService.set_on_set_callback(set_value)
        accessory.services.append(lightBulbService)
        cls.httpd.add_accessory(accessory)
        t = T(cls.httpd)
        t.start()
        time.sleep(5)
        # Controller-side pairing data matching the admin peer above.
        cls.controller_file = tempfile.NamedTemporaryFile()
        cls.controller_file.write("""{
          "alias": {
            "Connection": "IP",
            "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
            "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
            "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
            "AccessoryPairingID": "12:34:56:00:01:0A",
            "AccessoryPort": 51842,
            "AccessoryIP": "127.0.0.1",
            "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
          }
        }""".encode())
        cls.controller_file.flush()

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)

    @classmethod
    def tearDownClass(cls):
        cls.httpd.unpublish_device()
        cls.httpd.shutdown()
        cls.config_file.close()

    def setUp(self):
        self.controller = Controller()

    def tearDown(self):
        self.controller.shutdown()

    def test_01_1_discover(self):
        """The paired accessory must still be discoverable."""
        result = self.controller.discover(5)
        found = None
        for device in result:
            if '12:34:56:00:01:0A' == device['id']:
                found = device
        self.assertIsNotNone(found)

    def test_02_pair_alias_exists(self):
        """Pairing under an alias that is already in use must raise AlreadyPairedError."""
        self.controller.load_data(self.controller_file.name)
        self.assertRaises(AlreadyPairedError, self.controller.perform_pairing, 'alias', '12:34:56:00:01:0B',
                          '010-22-020')

    def test_02_paired_identify_wrong_method(self):
        """Try to identify an already paired accessory via the controller's method for unpaired accessories."""
        self.assertRaises(AlreadyPairedError, self.controller.identify, '12:34:56:00:01:0A')

    def test_03_get_accessories(self):
        """The accessory/characteristic listing must describe exactly one accessory."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.list_accessories_and_characteristics()
        for characteristic in result[0]['services'][0]['characteristics']:
            if characteristic['format'] == 'bool':
                # bool characteristics must not carry string/data length metadata
                self.assertNotIn('maxDataLen', characteristic)
                self.assertNotIn('maxLen', characteristic)
        self.assertEqual(1, len(result))
        result = result[0]
        self.assertIn('aid', result)
        self.assertIn('services', result)

    def test_04_1_get_characteristic(self):
        """Reading a single characteristic returns only its value."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)])
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertEqual(['value'], list(result[(1, 4)].keys()))

    def test_04_2_get_characteristics(self):
        """Reading two characteristics at once returns both values."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4), (1, 10)])
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn((1, 10), result)
        self.assertIn('value', result[(1, 10)])
        self.assertEqual(False, result[(1, 10)]['value'])

    def test_04_3_get_characteristic_with_events(self):
        """This tests the include_events flag on get_characteristics"""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_events=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('ev', result[(1, 4)])

    def test_04_4_get_characteristic_with_type(self):
        """This tests the include_type flag on get_characteristics"""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_type=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('type', result[(1, 4)])
        self.assertEqual('20', result[(1, 4)]['type'])

    def test_04_5_get_characteristic_with_perms(self):
        """This tests the include_perms flag on get_characteristics"""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_perms=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('perms', result[(1, 4)])
        self.assertEqual(['pr'], result[(1, 4)]['perms'])
        result = pairing.get_characteristics([(1, 3)], include_perms=True)
        self.assertEqual(['pw'], result[(1, 3)]['perms'])

    # NOTE(review): numbering duplicates test_04_4_..._with_type above;
    # the names differ, so unittest still runs both.
    def test_04_4_get_characteristic_with_meta(self):
        """This tests the include_meta flag on get_characteristics"""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_meta=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('format', result[(1, 4)])
        self.assertEqual('string', result[(1, 4)]['format'])
        self.assertIn('maxLen', result[(1, 4)])
        self.assertEqual(64, result[(1, 4)]['maxLen'])

    def test_05_1_put_characteristic(self):
        """Switching the lightbulb on/off must reach the accessory's set callback."""
        global value
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.put_characteristics([(1, 10, 'On')])
        self.assertEqual(result, {})
        self.assertEqual(1, value)
        result = pairing.put_characteristics([(1, 10, 'Off')])
        self.assertEqual(result, {})
        self.assertEqual(0, value)

    def test_05_2_put_characteristic_do_conversion(self):
        """With do_conversion=True the string values are converted to booleans."""
        global value
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.put_characteristics([(1, 10, 'On')], do_conversion=True)
        self.assertEqual(result, {})
        self.assertEqual(1, value)
        result = pairing.put_characteristics([(1, 10, 'Off')], do_conversion=True)
        self.assertEqual(result, {})
        self.assertEqual(0, value)

    def test_05_2_put_characteristic_do_conversion_wrong_value(self):
        """Tests that values that are not convertible to boolean cause a HomeKitTypeException"""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        self.assertRaises(FormatError, pairing.put_characteristics, [(1, 10, 'Hallo Welt')], do_conversion=True)

    def test_06_list_pairings(self):
        """Gets the listing of registered controllers of the device. Count must be 2 (one admin, one regular)."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        results = pairing.list_pairings()
        self.assertEqual(2, len(results))
        result = results[0]
        self.assertIn('pairingId', result)
        self.assertEqual('ABCDEFfa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
        self.assertIn('controllerType', result)
        self.assertEqual(result['controllerType'], 'regular')
        self.assertIn('publicKey', result)
        self.assertIn('permissions', result)
        self.assertEqual(result['permissions'], 0)
        self.assertIn('pairingId', result)
        result = results[1]
        self.assertEqual('decc6fa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
        self.assertEqual(result['controllerType'], 'admin')
        self.assertEqual(result['permissions'], 1)

    def test_07_paired_identify(self):
        """Tests the paired variant of the identify method."""
        global identify
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.identify()
        self.assertTrue(result)
        self.assertEqual(1, identify)
        identify = 0

    def test_99_remove_pairing(self):
        """Tests that a removed pairing is not present in the list of pairings anymore."""
        self.controller.load_data(self.controller_file.name)
        self.controller.remove_pairing('alias')
        pairings = self.controller.get_pairings()
        self.assertNotIn('alias', pairings)
class TestController(unittest.TestCase):
    """Unit tests for loading and saving controller pairing-data files."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)

    def setUp(self):
        self.controller = Controller()

    @unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
    def test_load_pairings_both_type(self):
        """A data file with one IP and one BLE pairing yields the matching pairing classes."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
          "alias_ip": {
            "Connection": "IP",
            "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
            "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
            "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
            "AccessoryPairingID": "12:34:56:00:01:0A",
            "AccessoryPort": 51842,
            "AccessoryIP": "127.0.0.1",
            "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
          },
          "alias_ble": {
            "Connection": "BLE",
            "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
            "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
            "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
            "AccessoryPairingID": "12:34:56:00:01:0A",
            "AccessoryMAC": "FD:3C:D4:13:02:59",
            "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
          }
        }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
        self.assertEqual(self.controller.get_pairings()['alias_ip'].pairing_data['Connection'], 'IP')
        self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
        controller_file.close()

    @unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
    def test_load_pairings_missing_type(self):
        """A pairing without a Connection entry is treated as an IP pairing."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
          "alias_ip": {
            "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
            "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
            "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
            "AccessoryPairingID": "12:34:56:00:01:0A",
            "AccessoryPort": 51842,
            "AccessoryIP": "127.0.0.1",
            "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
          },
          "alias_ble": {
            "Connection": "BLE",
            "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
            "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
            "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
            "AccessoryPairingID": "12:34:56:00:01:0A",
            "AccessoryMAC": "FD:3C:D4:13:02:59",
            "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
          }
        }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
        self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
        controller_file.close()

    def test_load_pairings_unknown_type(self):
        """Pairings with an unknown Connection type are silently skipped."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
          "alias_unknown": {
            "Connection": "UNKNOWN"
          }
        }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertEqual(0, len(self.controller.get_pairings()))
        controller_file.close()

    def test_load_pairings_invalid_json(self):
        """Malformed JSON (trailing comma) must raise ConfigLoadingError."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
          "alias_unknown": {
            "Connection": "UNKNOWN",
          }
        }""".encode())
        controller_file.flush()
        self.assertRaises(ConfigLoadingError, self.controller.load_data, controller_file.name)
        controller_file.close()

    def test_load_pairings_missing_file(self):
        """Loading a non-existing file must raise ConfigLoadingError."""
        self.assertRaises(ConfigLoadingError, self.controller.load_data, 'test')

    def test_load_pairings_permissions(self):
        """Loading an unreadable file must raise ConfigLoadingError."""
        self.assertRaises(ConfigLoadingError, self.controller.load_data, '/etc/shadow')

    def test_save_pairings_permissions(self):
        """Saving to an unwritable location must raise ConfigSavingError."""
        self.assertRaises(ConfigSavingError, self.controller.save_data, '/root/shadow')

    def test_save_pairings_missing_file(self):
        """Saving into a non-existing directory must raise ConfigSavingError."""
        self.assertRaises(ConfigSavingError, self.controller.save_data, '/tmp/shadow/foo')
| 42.927126 | 113 | 0.650712 |
import unittest
import tempfile
import threading
import time
from homekit import Controller
from homekit import AccessoryServer
from homekit.exceptions import AccessoryNotFoundError, AlreadyPairedError, UnavailableError, FormatError, \
ConfigLoadingError, ConfigSavingError, MalformedPinError
from homekit.model import Accessory
from homekit.model.services import LightBulbService
from homekit.model import mixin as model_mixin
from homekit.tools import BLE_TRANSPORT_SUPPORTED, IP_TRANSPORT_SUPPORTED
if BLE_TRANSPORT_SUPPORTED:
from homekit.controller.ble_impl import BlePairing
if IP_TRANSPORT_SUPPORTED:
from homekit.controller.ip_implementation import IpPairing
class T(threading.Thread):
    """Helper thread that publishes the accessory server and runs its request loop."""
    def __init__(self, accessoryServer):
        threading.Thread.__init__(self)
        # the AccessoryServer instance driven by run()
        self.a_s = accessoryServer
    def run(self):
        # Announce the device, then block serving requests.
        self.a_s.publish_device()
        self.a_s.serve_forever()
# Shared flags the accessory callbacks flip so the tests can observe them.
value = 0     # last value written to the lightbulb's 'On' characteristic
identify = 0  # set to 1 when the accessory's identify routine fires
def identify_callback():
    """Record that the accessory's identify routine ran (observed via the global)."""
    global identify
    identify = 1
def set_value(new_value):
    """Record the value written to the lightbulb so tests can assert on it."""
    global value
    value = new_value
class TestControllerIpUnpaired(unittest.TestCase):
    """Integration tests against a freshly started, not-yet-paired IP accessory."""
    @classmethod
    def setUpClass(cls):
        # Config file describing an unpaired accessory server (no peers).
        cls.config_file = tempfile.NamedTemporaryFile()
        cls.config_file.write("""{
          "accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
          "accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
          "accessory_pairing_id": "12:34:56:00:01:0B",
          "accessory_pin": "010-22-020",
          "c#": 0,
          "category": "Lightbulb",
          "host_ip": "127.0.0.1",
          "host_port": 54321,
          "name": "unittestLight",
          "peers": {
          },
          "unsuccessful_tries": 0
        }""".encode())
        cls.config_file.flush()
        # Keep get_id() numbering stable between tests.
        model_mixin.id_counter = 0
        # Spin up a demo lightbulb accessory on its own thread.
        cls.httpd = AccessoryServer(cls.config_file.name, None)
        cls.httpd.set_identify_callback(identify_callback)
        accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
        accessory.set_identify_callback(identify_callback)
        lightBulbService = LightBulbService()
        lightBulbService.set_on_set_callback(set_value)
        accessory.services.append(lightBulbService)
        cls.httpd.add_accessory(accessory)
        t = T(cls.httpd)
        t.start()
        # Give the server (and its mDNS announcement) time to come up.
        time.sleep(10)
        cls.controller_file = tempfile.NamedTemporaryFile()
    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)
        # Per-instance controller data file used by the pairing test.
        self.controller_file = tempfile.NamedTemporaryFile()
    @classmethod
    def tearDownClass(cls):
        cls.httpd.unpublish_device()
        cls.httpd.shutdown()
        cls.config_file.close()
    def setUp(self):
        self.controller = Controller()
    def test_01_1_discover(self):
        """The test accessory must show up in a discovery run."""
        result = self.controller.discover()
        found = False
        for device in result:
            if '12:34:56:00:01:0B' == device['id']:
                found = True
        self.assertTrue(found)
    def test_01_2_unpaired_identify(self):
        """Triggering identify on the unpaired accessory fires its callback."""
        global identify
        self.controller.identify('12:34:56:00:01:0B')
        self.assertEqual(1, identify)
        identify = 0
    def test_01_3_unpaired_identify_not_found(self):
        """Identifying an unknown accessory raises AccessoryNotFoundError."""
        self.assertRaises(AccessoryNotFoundError, self.controller.identify, '12:34:56:00:01:0C')
    def test_02_pair(self):
        """Pairing with the correct pin registers the alias."""
        self.controller.perform_pairing('alias', '12:34:56:00:01:0B', '010-22-020')
        pairings = self.controller.get_pairings()
        self.controller.save_data(self.controller_file.name)
        self.assertIn('alias', pairings)
    def test_02_pair_accessory_not_found(self):
        """Pairing with an unknown accessory id raises AccessoryNotFoundError."""
        self.assertRaises(AccessoryNotFoundError, self.controller.perform_pairing, 'alias1', '12:34:56:00:01:1B',
                          '010-22-020')
    def test_02_pair_wrong_pin(self):
        """Pairing with a wrong pin raises UnavailableError."""
        self.assertRaises(UnavailableError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '010-22-021')
    def test_02_pair_malformed_pin(self):
        """A pin without the XXX-XX-XXX dashes raises MalformedPinError."""
        self.assertRaises(MalformedPinError, self.controller.perform_pairing, 'alias2', '12:34:56:00:01:0B',
                          '01022021')
class TestControllerIpPaired(unittest.TestCase):
    """Integration tests against a live IP accessory that is already paired.

    The accessory config seeds a "peers" dict (one admin, one regular
    controller) and the controller file seeds the matching 'alias'
    pairing, so the tests can exercise the paired operations directly.
    Test method names are numbered because they must run in order.
    """

    @classmethod
    def setUpClass(cls):
        # Accessory state pre-seeded with two paired controllers.
        cls.config_file = tempfile.NamedTemporaryFile()
        cls.config_file.write("""{
  "accessory_ltpk": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
  "accessory_ltsk": "3d99f3e959a1f93af4056966f858074b2a1fdec1c5fd84a51ea96f9fa004156a",
  "accessory_pairing_id": "12:34:56:00:01:0A",
  "accessory_pin": "031-45-154",
  "c#": 1,
  "category": "Lightbulb",
  "host_ip": "127.0.0.1",
  "host_port": 51842,
  "name": "unittestLight",
  "peers": {
    "decc6fa3-de3e-41c9-adba-ef7409821bfc": {
      "admin": true,
      "key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
    },
    "ABCDEFfa3-de3e-41c9-adba-ef7409821bfc": {
      "admin": false,
      "key": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8"
    }
  },
  "unsuccessful_tries": 0
}""".encode())
        cls.config_file.flush()

        # Reset the global accessory id counter so ids are deterministic.
        model_mixin.id_counter = 0

        cls.httpd = AccessoryServer(cls.config_file.name, None)
        cls.httpd.set_identify_callback(identify_callback)
        accessory = Accessory('Testlicht', 'lusiardi.de', 'Demoserver', '0001', '0.1')
        accessory.set_identify_callback(identify_callback)
        lightBulbService = LightBulbService()
        lightBulbService.set_on_set_callback(set_value)
        accessory.services.append(lightBulbService)
        cls.httpd.add_accessory(accessory)

        t = T(cls.httpd)
        t.start()
        # Give the server thread time to publish via mDNS and start serving.
        time.sleep(5)

        # Controller-side pairing data matching the admin peer above.
        cls.controller_file = tempfile.NamedTemporaryFile()
        cls.controller_file.write("""{
  "alias": {
    "Connection": "IP",
    "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
    "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
    "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
    "AccessoryPairingID": "12:34:56:00:01:0A",
    "AccessoryPort": 51842,
    "AccessoryIP": "127.0.0.1",
    "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
  }
}""".encode())
        cls.controller_file.flush()

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)

    @classmethod
    def tearDownClass(cls):
        cls.httpd.unpublish_device()
        cls.httpd.shutdown()
        cls.config_file.close()
        # Fix: the controller storage file was previously never closed,
        # leaking the file descriptor and the temporary file.
        cls.controller_file.close()

    def setUp(self):
        self.controller = Controller()

    def tearDown(self):
        self.controller.shutdown()

    def test_01_1_discover(self):
        """The paired accessory must still be discoverable."""
        result = self.controller.discover(5)
        found = None
        for device in result:
            if '12:34:56:00:01:0A' == device['id']:
                found = device
        self.assertIsNotNone(found)

    def test_02_pair_alias_exists(self):
        """Pairing under an alias that is already stored must fail."""
        self.controller.load_data(self.controller_file.name)
        self.assertRaises(AlreadyPairedError, self.controller.perform_pairing, 'alias', '12:34:56:00:01:0B',
                          '010-22-020')

    def test_02_paired_identify_wrong_method(self):
        """Unpaired-style identify on a paired accessory must fail."""
        self.assertRaises(AlreadyPairedError, self.controller.identify, '12:34:56:00:01:0A')

    def test_03_get_accessories(self):
        """The accessory listing contains one accessory with services."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.list_accessories_and_characteristics()
        # Boolean characteristics must not carry string-length metadata.
        for characteristic in result[0]['services'][0]['characteristics']:
            if characteristic['format'] == 'bool':
                self.assertNotIn('maxDataLen', characteristic)
                self.assertNotIn('maxLen', characteristic)
        self.assertEqual(1, len(result))
        result = result[0]
        self.assertIn('aid', result)
        self.assertIn('services', result)

    def test_04_1_get_characteristic(self):
        """Reading (1, 4) yields only the manufacturer value by default."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)])
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertEqual(['value'], list(result[(1, 4)].keys()))

    def test_04_2_get_characteristics(self):
        """Multiple characteristics can be read in one call."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4), (1, 10)])
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn((1, 10), result)
        self.assertIn('value', result[(1, 10)])
        self.assertEqual(False, result[(1, 10)]['value'])

    def test_04_3_get_characteristic_with_events(self):
        """include_events adds the 'ev' key to the result."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_events=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('ev', result[(1, 4)])

    def test_04_4_get_characteristic_with_type(self):
        """include_type adds the characteristic's short type id."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_type=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('type', result[(1, 4)])
        self.assertEqual('20', result[(1, 4)]['type'])

    def test_04_5_get_characteristic_with_perms(self):
        """include_perms reports 'pr' for readable, 'pw' for writable."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_perms=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('perms', result[(1, 4)])
        self.assertEqual(['pr'], result[(1, 4)]['perms'])
        result = pairing.get_characteristics([(1, 3)], include_perms=True)
        self.assertEqual(['pw'], result[(1, 3)]['perms'])

    # Renumbered from test_04_4 -- the number clashed with
    # test_04_4_get_characteristic_with_type above and broke the
    # name-based execution ordering used throughout this class.
    def test_04_6_get_characteristic_with_meta(self):
        """include_meta adds format metadata such as 'maxLen'."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.get_characteristics([(1, 4)], include_meta=True)
        self.assertIn((1, 4), result)
        self.assertIn('value', result[(1, 4)])
        self.assertEqual('lusiardi.de', result[(1, 4)]['value'])
        self.assertIn('format', result[(1, 4)])
        self.assertEqual('string', result[(1, 4)]['format'])
        self.assertIn('maxLen', result[(1, 4)])
        self.assertEqual(64, result[(1, 4)]['maxLen'])

    def test_05_1_put_characteristic(self):
        """Writing the 'On' characteristic drives the set_value callback."""
        global value
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.put_characteristics([(1, 10, 'On')])
        self.assertEqual(result, {})
        self.assertEqual(1, value)
        result = pairing.put_characteristics([(1, 10, 'Off')])
        self.assertEqual(result, {})
        self.assertEqual(0, value)

    def test_05_2_put_characteristic_do_conversion(self):
        """With do_conversion the string is converted before writing."""
        global value
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.put_characteristics([(1, 10, 'On')], do_conversion=True)
        self.assertEqual(result, {})
        self.assertEqual(1, value)
        result = pairing.put_characteristics([(1, 10, 'Off')], do_conversion=True)
        self.assertEqual(result, {})
        self.assertEqual(0, value)

    # Renumbered from test_05_2 -- the number was duplicated with the
    # previous test method.
    def test_05_3_put_characteristic_do_conversion_wrong_value(self):
        """Unconvertible values must raise FormatError."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        self.assertRaises(FormatError, pairing.put_characteristics, [(1, 10, 'Hallo Welt')], do_conversion=True)

    def test_06_list_pairings(self):
        """Both configured peers are listed with role and permissions."""
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        results = pairing.list_pairings()
        self.assertEqual(2, len(results))
        result = results[0]
        self.assertIn('pairingId', result)
        self.assertEqual('ABCDEFfa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
        self.assertIn('controllerType', result)
        self.assertEqual(result['controllerType'], 'regular')
        self.assertIn('publicKey', result)
        self.assertIn('permissions', result)
        self.assertEqual(result['permissions'], 0)
        self.assertIn('pairingId', result)
        result = results[1]
        self.assertEqual('decc6fa3-de3e-41c9-adba-ef7409821bfc', result['pairingId'])
        self.assertEqual(result['controllerType'], 'admin')
        self.assertEqual(result['permissions'], 1)

    def test_07_paired_identify(self):
        """Paired identify goes through the pairing and fires the callback."""
        global identify
        self.controller.load_data(self.controller_file.name)
        pairing = self.controller.get_pairings()['alias']
        result = pairing.identify()
        self.assertTrue(result)
        self.assertEqual(1, identify)
        identify = 0

    def test_99_remove_pairing(self):
        """Removing the pairing drops it from the controller's store."""
        self.controller.load_data(self.controller_file.name)
        self.controller.remove_pairing('alias')
        pairings = self.controller.get_pairings()
        self.assertNotIn('alias', pairings)
class TestController(unittest.TestCase):
    """Unit tests for loading and saving controller pairing data files."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName)

    def setUp(self):
        self.controller = Controller()

    @unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
    def test_load_pairings_both_type(self):
        """A file with one IP and one BLE pairing yields both pairing types."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
      "alias_ip": {
        "Connection": "IP",
        "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
        "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
        "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
        "AccessoryPairingID": "12:34:56:00:01:0A",
        "AccessoryPort": 51842,
        "AccessoryIP": "127.0.0.1",
        "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
      },
      "alias_ble": {
        "Connection": "BLE",
        "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
        "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
        "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
        "AccessoryPairingID": "12:34:56:00:01:0A",
        "AccessoryMAC": "FD:3C:D4:13:02:59",
        "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
      }
    }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
        self.assertEqual(self.controller.get_pairings()['alias_ip'].pairing_data['Connection'], 'IP')
        self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
        controller_file.close()

    @unittest.skipIf(not BLE_TRANSPORT_SUPPORTED, 'BLE no supported')
    def test_load_pairings_missing_type(self):
        """An entry without "Connection" defaults to an IP pairing."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
      "alias_ip": {
        "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
        "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
        "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
        "AccessoryPairingID": "12:34:56:00:01:0A",
        "AccessoryPort": 51842,
        "AccessoryIP": "127.0.0.1",
        "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
      },
      "alias_ble": {
        "Connection": "BLE",
        "iOSDeviceLTPK": "d708df2fbf4a8779669f0ccd43f4962d6d49e4274f88b1292f822edc3bcf8ed8",
        "iOSPairingId": "decc6fa3-de3e-41c9-adba-ef7409821bfc",
        "AccessoryLTPK": "7986cf939de8986f428744e36ed72d86189bea46b4dcdc8d9d79a3e4fceb92b9",
        "AccessoryPairingID": "12:34:56:00:01:0A",
        "AccessoryMAC": "FD:3C:D4:13:02:59",
        "iOSDeviceLTSK": "fa45f082ef87efc6c8c8d043d74084a3ea923a2253e323a7eb9917b4090c2fcc"
      }
    }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertIsInstance(self.controller.get_pairings()['alias_ip'], IpPairing)
        self.assertIsInstance(self.controller.get_pairings()['alias_ble'], BlePairing)
        controller_file.close()

    def test_load_pairings_unknown_type(self):
        """Entries with an unknown connection type are silently skipped."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
      "alias_unknown": {
        "Connection": "UNKNOWN"
      }
    }""".encode())
        controller_file.flush()
        self.controller.load_data(controller_file.name)
        self.assertEqual(0, len(self.controller.get_pairings()))
        controller_file.close()

    def test_load_pairings_invalid_json(self):
        """Malformed JSON (trailing comma) raises ConfigLoadingError."""
        controller_file = tempfile.NamedTemporaryFile()
        controller_file.write("""{
      "alias_unknown": {
        "Connection": "UNKNOWN",
      }
    }""".encode())
        controller_file.flush()
        self.assertRaises(ConfigLoadingError, self.controller.load_data, controller_file.name)
        controller_file.close()

    def test_load_pairings_missing_file(self):
        """Loading a nonexistent path raises ConfigLoadingError."""
        self.assertRaises(ConfigLoadingError, self.controller.load_data, 'test')

    def test_load_pairings_permissions(self):
        """Loading an unreadable file raises ConfigLoadingError.

        NOTE(review): assumes the test does not run as root, otherwise
        /etc/shadow would be readable and the test would fail.
        """
        self.assertRaises(ConfigLoadingError, self.controller.load_data, '/etc/shadow')

    def test_save_pairings_permissions(self):
        """Saving to an unwritable location raises ConfigSavingError."""
        self.assertRaises(ConfigSavingError, self.controller.save_data, '/root/shadow')

    def test_save_pairings_missing_file(self):
        """Saving under a nonexistent directory raises ConfigSavingError."""
        self.assertRaises(ConfigSavingError, self.controller.save_data, '/tmp/shadow/foo')
| true | true |
f721880fe59e59ce9a574f51f4ac11921a0ea939 | 4,792 | py | Python | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 31 | 2015-01-02T01:44:18.000Z | 2021-06-10T16:29:54.000Z | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 1 | 2015-04-08T07:54:50.000Z | 2015-04-09T14:29:38.000Z | zendesk/endpoints.py | optixx/zendesk | 7a4439f1c5b46913acad6b3153266d52f011c11e | [
"MIT"
] | 23 | 2015-01-12T23:42:34.000Z | 2021-09-08T11:20:12.000Z | """
API MAPPING
"""
def _entry(path, method='GET', status=200, valid_params=None):
    """Build one endpoint descriptor; 'valid_params' is only set when given."""
    spec = {'path': path, 'method': method, 'status': status}
    if valid_params is not None:
        spec['valid_params'] = valid_params
    return spec


# Maps API call names to the Zendesk REST endpoint that implements them:
# URL template, HTTP method, accepted query parameters and the status code
# that signals success.
mapping_table = {
    # Rest API: Organizations
    'list_organizations': _entry('/organizations.json'),
    'show_organization': _entry('/organizations/{{organization_id}}.json'),
    'create_organization': _entry('/organizations.json', method='POST', status=201),
    'update_organization': _entry('/organizations/{{organization_id}}.json', method='PUT'),
    'delete_organization': _entry('/organizations/{{organization_id}}.json', method='DELETE'),

    # Rest API: Groups
    'list_groups': _entry('/groups.json'),
    'show_group': _entry('/groups/{{group_id}}.json'),
    'create_group': _entry('/groups.json', method='POST', status=201),
    'update_group': _entry('/groups/{{group_id}}.json', method='PUT'),
    'delete_group': _entry('/groups/{{group_id}}.json', method='DELETE'),

    # Rest API: Tickets
    'list_tickets': _entry('/rules/{{view_id}}.json', valid_params=('page',)),
    'show_ticket': _entry('/tickets/{{ticket_id}}.json'),
    'create_ticket': _entry('/tickets.json', method='POST', status=201),
    'update_ticket': _entry('/tickets/{{ticket_id}}.json', method='PUT'),
    'comment_ticket': _entry('/tickets/{{ticket_id}}.json', method='PUT'),
    'delete_ticket': _entry('/tickets/{{ticket_id}}.json', method='DELETE'),

    # Rest API: Attachment
    'create_attachment': _entry('/uploads.json', method='POST', status=201,
                                valid_params=('filename', 'token')),

    # Rest API: Users
    'list_users': _entry('/users.json', valid_params=('page',)),
    'search_users': _entry('/users.json', valid_params=('query', 'role', 'page')),
    'show_user': _entry('/users/{{user_id}}.json'),
    'create_user': _entry('/users.json', method='POST', status=201),
    'update_user': _entry('/users/{{user_id}}.json', method='PUT'),
    'delete_user': _entry('/users/{{user_id}}.json', method='DELETE'),
    'list_user_identities': _entry('/users/{{user_id}}/user_identities.json'),
    'add_user_email': _entry('/users/{{user_id}}/user_identities.json',
                             method='POST', status=201),
    'add_twitter_handle': _entry('/users/{{user_id}}/user_identities.json',
                                 method='POST', status=201),
    'make_identity_primary': _entry(
        '/users/{{user_id}}/user_identities/{{identity_id}}/make_primary',
        method='POST'),
    'delete_identity': _entry(
        '/users/{{user_id}}/user_identities/{{identity_id}}', method='DELETE'),

    # Rest API: Tags
    'list_tags': _entry('/tags.json'),
    'list_assets': _entry('/tags/{{tag_id}}.json',
                          valid_params=('asset_type', 'page')),

    # Rest API: Ticket Fields
    'list_ticket_fields': _entry('/ticket_fields.json'),

    # Rest API: Macros
    'list_macros': _entry('/macros.json'),
    'evaluate_macro': _entry('/macros/{{macro_id}}/apply.json',
                             method='POST', status=201,
                             valid_params=('ticket_id',)),

    # Rest API: Search
    'search': _entry('/search.json', valid_params=('query', 'page')),
}
| 24.701031 | 82 | 0.459098 |
def _op(path, method='GET', status=200, params=None):
    """Describe one REST call: URL template, verb, success status, params."""
    described = {'path': path, 'method': method, 'status': status}
    if params is not None:
        described['valid_params'] = params
    return described


# Name -> endpoint description for every supported Zendesk REST call.
mapping_table = {
    # Organizations
    'list_organizations': _op('/organizations.json'),
    'show_organization': _op('/organizations/{{organization_id}}.json'),
    'create_organization': _op('/organizations.json', 'POST', 201),
    'update_organization': _op('/organizations/{{organization_id}}.json', 'PUT'),
    'delete_organization': _op('/organizations/{{organization_id}}.json', 'DELETE'),

    # Groups
    'list_groups': _op('/groups.json'),
    'show_group': _op('/groups/{{group_id}}.json'),
    'create_group': _op('/groups.json', 'POST', 201),
    'update_group': _op('/groups/{{group_id}}.json', 'PUT'),
    'delete_group': _op('/groups/{{group_id}}.json', 'DELETE'),

    # Tickets
    'list_tickets': _op('/rules/{{view_id}}.json', params=('page',)),
    'show_ticket': _op('/tickets/{{ticket_id}}.json'),
    'create_ticket': _op('/tickets.json', 'POST', 201),
    'update_ticket': _op('/tickets/{{ticket_id}}.json', 'PUT'),
    'comment_ticket': _op('/tickets/{{ticket_id}}.json', 'PUT'),
    'delete_ticket': _op('/tickets/{{ticket_id}}.json', 'DELETE'),

    # Attachments
    'create_attachment': _op('/uploads.json', 'POST', 201,
                             params=('filename', 'token')),

    # Users
    'list_users': _op('/users.json', params=('page',)),
    'search_users': _op('/users.json', params=('query', 'role', 'page')),
    'show_user': _op('/users/{{user_id}}.json'),
    'create_user': _op('/users.json', 'POST', 201),
    'update_user': _op('/users/{{user_id}}.json', 'PUT'),
    'delete_user': _op('/users/{{user_id}}.json', 'DELETE'),
    'list_user_identities': _op('/users/{{user_id}}/user_identities.json'),
    'add_user_email': _op('/users/{{user_id}}/user_identities.json', 'POST', 201),
    'add_twitter_handle': _op('/users/{{user_id}}/user_identities.json', 'POST', 201),
    'make_identity_primary': _op(
        '/users/{{user_id}}/user_identities/{{identity_id}}/make_primary', 'POST'),
    'delete_identity': _op(
        '/users/{{user_id}}/user_identities/{{identity_id}}', 'DELETE'),

    # Tags
    'list_tags': _op('/tags.json'),
    'list_assets': _op('/tags/{{tag_id}}.json', params=('asset_type', 'page')),

    # Ticket fields
    'list_ticket_fields': _op('/ticket_fields.json'),

    # Macros
    'list_macros': _op('/macros.json'),
    'evaluate_macro': _op('/macros/{{macro_id}}/apply.json', 'POST', 201,
                          params=('ticket_id',)),

    # Search
    'search': _op('/search.json', params=('query', 'page')),
}
| true | true |
f721881eea115b79515a4c824cdd061fe585c80c | 6,885 | py | Python | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | 1 | 2021-01-04T11:40:17.000Z | 2021-01-04T11:40:17.000Z | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | logging/tests/unit/handlers/test__helpers.py | rodrigodias27/google-cloud-python | 7d1161f70744c0dbbe67a3f472ea95667eaafe50 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
import mock
import six
try:
    from webapp2 import RequestHandler
except SyntaxError:
    # webapp2 has not been ported to python3, so it will give a syntax
    # error if we try. We'll just skip the webapp2 tests in that case.
    # Falling back to ``object`` lets the _GetTraceId class below still be
    # defined; the webapp2 test case itself is skipped on Python 3.
    RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
    """Tests for ``_helpers.get_trace_id_from_flask``."""

    @staticmethod
    def _call_fut():
        """Invoke the function under test (imported lazily)."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_flask()

    @staticmethod
    def create_app():
        """Build a one-route Flask app used to fabricate request contexts."""
        import flask
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'test flask trace' # pragma: NO COVER
        return app

    def test_no_context_header(self):
        """Without the trace header the helper returns None."""
        app = self.create_app()
        with app.test_request_context(
            path='/',
            headers={}):
            trace_id = self._call_fut()
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """Everything before '/' in X_CLOUD_TRACE_CONTEXT is the trace id."""
        flask_trace_header = 'X_CLOUD_TRACE_CONTEXT'
        expected_trace_id = 'testtraceidflask'
        flask_trace_id = expected_trace_id + '/testspanid'
        app = self.create_app()
        context = app.test_request_context(
            path='/',
            headers={flask_trace_header: flask_trace_id})
        with context:
            trace_id = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
class _GetTraceId(RequestHandler):
    """webapp2 handler that echoes the detected trace id back as JSON."""

    def get(self):
        from google.cloud.logging.handlers import _helpers
        trace_id = _helpers.get_trace_id_from_webapp2()
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
    """Tests for ``_helpers.get_trace_id_from_webapp2``."""

    @staticmethod
    def create_app():
        """Return a webapp2 WSGI app whose only route echoes the trace id."""
        import webapp2
        app = webapp2.WSGIApplication([
            ('/', _GetTraceId),
        ])
        return app

    def test_no_context_header(self):
        """Without the trace header the helper must return None."""
        import webob
        req = webob.BaseRequest.blank('/')
        response = req.get_response(self.create_app())
        trace_id = json.loads(response.body)
        # Fix: use assertIsNone instead of assertEqual(None, ...) -- it is
        # the idiomatic identity check and cannot be fooled by objects that
        # merely compare equal to None.
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """The portion before '/' in the trace header is the trace id."""
        import webob
        webapp2_trace_header = 'X-Cloud-Trace-Context'
        expected_trace_id = 'testtraceidwebapp2'
        webapp2_trace_id = expected_trace_id + '/testspanid'
        req = webob.BaseRequest.blank(
            '/',
            headers={webapp2_trace_header: webapp2_trace_id})
        response = req.get_response(self.create_app())
        trace_id = json.loads(response.body)
        self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id_from_django(unittest.TestCase):
    """Tests for ``_helpers.get_trace_id_from_django``."""

    @staticmethod
    def _call_fut():
        """Invoke the function under test (imported lazily)."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_django()

    def setUp(self):
        """Configure a minimal Django settings/test environment."""
        from django.conf import settings
        from django.test.utils import setup_test_environment
        if not settings.configured:
            settings.configure()
        setup_test_environment()

    def tearDown(self):
        """Undo the test environment and clear the request thread-local."""
        from django.test.utils import teardown_test_environment
        from google.cloud.logging.handlers.middleware import request
        teardown_test_environment()
        # The middleware stores the current request in a thread-local;
        # clear it so state cannot leak between tests.
        request._thread_locals.__dict__.clear()

    def test_no_context_header(self):
        """Without the trace header the helper returns None."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        django_request = RequestFactory().get('/')
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """Everything before '/' in the header META key is the trace id."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        # Django exposes HTTP headers via CGI-style META keys.
        django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
        expected_trace_id = 'testtraceiddjango'
        django_trace_id = expected_trace_id + '/testspanid'
        django_request = RequestFactory().get(
            '/',
            **{django_trace_header: django_trace_id})
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
    """Tests for the framework-dispatching ``_helpers.get_trace_id``."""

    @staticmethod
    def _call_fut():
        """Invoke the function under test (imported lazily)."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id()

    def _helper(self, django_return, flask_return):
        """Call the helper with both framework probes stubbed out.

        Returns ``(django_mock, flask_mock, trace_id)``.
        """
        patched_django = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
            return_value=django_return)
        patched_flask = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
            return_value=flask_return)
        with patched_django as mock_django, patched_flask as mock_flask:
            found = self._call_fut()
        return mock_django, mock_flask, found

    def test_from_django(self):
        mock_django, mock_flask, found = self._helper(
            'test-django-trace-id', None)
        self.assertEqual(found, mock_django.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_not_called()

    def test_from_flask(self):
        mock_django, mock_flask, found = self._helper(
            None, 'test-flask-trace-id')
        self.assertEqual(found, mock_flask.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_called_once_with()

    def test_from_django_and_flask(self):
        mock_django, mock_flask, found = self._helper(
            'test-django-trace-id', 'test-flask-trace-id')
        # Django wins.
        self.assertEqual(found, mock_django.return_value)
        mock_django.assert_called_once_with()
        mock_flask.assert_not_called()

    def test_missing(self):
        mock_django, mock_flask, found = self._helper(None, None)
        self.assertIsNone(found)
        mock_django.assert_called_once_with()
        mock_flask.assert_called_once_with()
| 30.197368 | 78 | 0.679448 |
import json
import unittest
import mock
import six
try:
    from webapp2 import RequestHandler
except SyntaxError:
    # webapp2 is Python 2 only; importing it on Python 3 raises a
    # SyntaxError.  Fall back to ``object`` so the _GetTraceId class below
    # still defines cleanly -- the webapp2 tests themselves are skipped on
    # Python 3.
    RequestHandler = object
class Test_get_trace_id_from_flask(unittest.TestCase):
    """Exercise ``_helpers.get_trace_id_from_flask`` in a request context."""

    @staticmethod
    def _call_fut():
        """Call the function under test; imported lazily on purpose."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_flask()

    @staticmethod
    def create_app():
        """Create a trivial Flask app to provide request contexts."""
        import flask
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return 'test flask trace' # pragma: NO COVER
        return app

    def test_no_context_header(self):
        """No X_CLOUD_TRACE_CONTEXT header -> helper yields None."""
        app = self.create_app()
        with app.test_request_context(
                path='/',
                headers={}):
            trace_id = self._call_fut()
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """Header value 'traceid/spanid' -> helper yields 'traceid'."""
        flask_trace_header = 'X_CLOUD_TRACE_CONTEXT'
        expected_trace_id = 'testtraceidflask'
        flask_trace_id = expected_trace_id + '/testspanid'
        app = self.create_app()
        context = app.test_request_context(
            path='/',
            headers={flask_trace_header: flask_trace_id})
        with context:
            trace_id = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
class _GetTraceId(RequestHandler):
    """Helper webapp2 handler: serialize the detected trace id as JSON."""

    def get(self):
        from google.cloud.logging.handlers import _helpers
        trace_id = _helpers.get_trace_id_from_webapp2()
        self.response.content_type = 'application/json'
        self.response.out.write(json.dumps(trace_id))
@unittest.skipIf(six.PY3, 'webapp2 is Python 2 only')
class Test_get_trace_id_from_webapp2(unittest.TestCase):
    """Exercise ``_helpers.get_trace_id_from_webapp2`` via a WSGI request."""

    @staticmethod
    def create_app():
        """Return a webapp2 app routing '/' to the trace-id echo handler."""
        import webapp2
        app = webapp2.WSGIApplication([
            ('/', _GetTraceId),
        ])
        return app

    def test_no_context_header(self):
        """No trace header -> helper yields None."""
        import webob
        req = webob.BaseRequest.blank('/')
        response = req.get_response(self.create_app())
        trace_id = json.loads(response.body)
        # Fix: assertIsNone is the precise identity check; assertEqual
        # (None, ...) would also pass for values merely comparing equal
        # to None.
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """Header value 'traceid/spanid' -> helper yields 'traceid'."""
        import webob
        webapp2_trace_header = 'X-Cloud-Trace-Context'
        expected_trace_id = 'testtraceidwebapp2'
        webapp2_trace_id = expected_trace_id + '/testspanid'
        req = webob.BaseRequest.blank(
            '/',
            headers={webapp2_trace_header: webapp2_trace_id})
        response = req.get_response(self.create_app())
        trace_id = json.loads(response.body)
        self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id_from_django(unittest.TestCase):
    """Exercise ``_helpers.get_trace_id_from_django`` via the middleware."""

    @staticmethod
    def _call_fut():
        """Call the function under test; imported lazily on purpose."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id_from_django()

    def setUp(self):
        """Bring up a minimal, unconfigured-safe Django test environment."""
        from django.conf import settings
        from django.test.utils import setup_test_environment
        if not settings.configured:
            settings.configure()
        setup_test_environment()

    def tearDown(self):
        """Tear the environment down and reset the middleware thread-local."""
        from django.test.utils import teardown_test_environment
        from google.cloud.logging.handlers.middleware import request
        teardown_test_environment()
        # Prevent request state from leaking into the next test.
        request._thread_locals.__dict__.clear()

    def test_no_context_header(self):
        """Request without the trace META key -> helper yields None."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        django_request = RequestFactory().get('/')
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertIsNone(trace_id)

    def test_valid_context_header(self):
        """META value 'traceid/spanid' -> helper yields 'traceid'."""
        from django.test import RequestFactory
        from google.cloud.logging.handlers.middleware import request
        # HTTP headers appear in Django requests as CGI-style META keys.
        django_trace_header = 'HTTP_X_CLOUD_TRACE_CONTEXT'
        expected_trace_id = 'testtraceiddjango'
        django_trace_id = expected_trace_id + '/testspanid'
        django_request = RequestFactory().get(
            '/',
            **{django_trace_header: django_trace_id})
        middleware = request.RequestMiddleware()
        middleware.process_request(django_request)
        trace_id = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
class Test_get_trace_id(unittest.TestCase):
    """Tests for ``_helpers.get_trace_id``, which probes each framework."""

    @staticmethod
    def _call_fut():
        """Call the function under test; imported lazily on purpose."""
        from google.cloud.logging.handlers import _helpers
        return _helpers.get_trace_id()

    def _helper(self, django_return, flask_return):
        """Run get_trace_id with both framework probes patched to fixed
        return values; returns (django_mock, flask_mock, trace_id)."""
        django_patch = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_django',
            return_value=django_return)
        flask_patch = mock.patch(
            'google.cloud.logging.handlers._helpers.get_trace_id_from_flask',
            return_value=flask_return)
        with django_patch as django_mock:
            with flask_patch as flask_mock:
                trace_id = self._call_fut()
        return django_mock, flask_mock, trace_id

    def test_from_django(self):
        """Only Django reports an id -> that id is returned."""
        django_mock, flask_mock, trace_id = self._helper(
            'test-django-trace-id', None)
        self.assertEqual(trace_id, django_mock.return_value)
        django_mock.assert_called_once_with()
        flask_mock.assert_not_called()

    def test_from_flask(self):
        """Only Flask reports an id -> that id is returned."""
        django_mock, flask_mock, trace_id = self._helper(
            None, 'test-flask-trace-id')
        self.assertEqual(trace_id, flask_mock.return_value)
        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()

    def test_from_django_and_flask(self):
        """Both frameworks report an id -> Django takes precedence."""
        django_mock, flask_mock, trace_id = self._helper(
            'test-django-trace-id', 'test-flask-trace-id')
        # Django wins.
        self.assertEqual(trace_id, django_mock.return_value)
        django_mock.assert_called_once_with()
        flask_mock.assert_not_called()

    def test_missing(self):
        """Neither framework reports an id -> None is returned."""
        django_mock, flask_mock, trace_id = self._helper(None, None)
        self.assertIsNone(trace_id)
        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()
| true | true |
f7218951799b74c37930bbca42f5a8dabc271ee3 | 8,665 | py | Python | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | pattoo/ingest/files.py | palisadoes/pattoo | 57bd3e82e49d51e3426b13ad53ed8326a735ce29 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Pattoo classes that manage various data."""
# Standard imports
import os
import time
# Import project libraries
from pattoo_shared import log, files, converter
from pattoo.configuration import ConfigIngester as Config
from pattoo.constants import PATTOO_API_AGENT_NAME, PATTOO_INGESTER_NAME
from .records import Records
class Cache():
    """Process ingest cache data.

    Reads a batch of JSON cache files written by the API agent, converts
    them to PattooDBrecord objects grouped by agent, ingests them into
    the database, and purges the processed files.
    """

    def __init__(self, batch_size=500, age=0):
        """Initialize the class.

        Args:
            batch_size: Maximum number of files to read in this batch
            age: Minimum age (seconds) of files to be read per batch

        Returns:
            None

        """
        # Get cache directory
        config = Config()
        directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)

        # Millisecond timestamp used to correlate log entries for the batch
        self._batch_id = int(time.time() * 1000)

        # Read data from cache. Stop if there is no data found.
        self._data = files.read_json_files(
            directory, die=False, age=age, count=batch_size)

        # Save the number of files read
        self.files = len(self._data)

    def records(self):
        """Create PattooDBrecord objects from cache directory.

        Args:
            None

        Returns:
            result: List of lists of PattooDBrecord objects grouped by
                agent_id

        """
        # Initialize list of files that have been processed
        _cache = {}
        result = []

        # Read data from files
        for filepath, json_data in sorted(self._data):
            # Get data from JSON file. Convert to rows of key-pairs
            if bool(json_data) is True and isinstance(json_data, dict) is True:
                pdbrs = converter.cache_to_keypairs(json_data)
                if bool(pdbrs) is False:
                    log_message = ('''\
File {} has invalid data. It will not be processed'''.format(filepath))
                    log.log2info(20026, log_message)
                    continue

                # Group data by agent_id
                pattoo_agent_id = pdbrs[0].pattoo_agent_id
                if pattoo_agent_id in _cache:
                    _cache[pattoo_agent_id].extend(pdbrs)
                else:
                    _cache[pattoo_agent_id] = pdbrs

        # Aggregate data
        if bool(_cache) is True:
            for _, item in sorted(_cache.items()):
                result.append(item)

        # Return
        return result

    def purge(self):
        """Purge cache files.

        Args:
            None

        Returns:
            None

        """
        # Initialize key variables
        filepaths = [filepath for filepath, _ in self._data]

        # Delete cache files after processing
        for filepath in filepaths:
            if os.path.exists(filepath):
                try:
                    os.remove(filepath)
                except OSError:
                    # BUG FIX: was a bare `except:`, which also swallowed
                    # SystemExit/KeyboardInterrupt. Only OS-level removal
                    # failures are expected here.
                    log_message = ('''\
Error deleting cache file {}.'''.format(filepath))
                    log.log2warning(20110, log_message)

    def ingest(self):
        """Ingest cache data into the database.

        Args:
            None

        Returns:
            records: Number of records processed

        """
        # Process
        _data = self.records()
        if bool(_data) is True:
            # Log
            log_message = ('''\
Processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
            log.log2debug(20004, log_message)

            # Add records to the database, then purge the source files
            _records = Records(_data)
            _records.ingest()
            self.purge()

            # Log
            log_message = ('''\
Finished processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
            log.log2debug(20117, log_message)

        # Determine the number of key pairs read
        records = 0
        for item in _data:
            records += len(item)
        return records
def process_cache(batch_size=500, max_duration=3600, fileage=10, script=False):
    """Ingest data.

    Args:
        batch_size: Number of files to process at a time
        max_duration: Maximum duration (seconds) this run may take
        fileage: Minimum age of files to be processed in seconds
        script: True when invoked as a standalone script (uses a lockfile)

    Returns:
        success: True if successful

    Method:
        1) Read the files in the cache directory older than a threshold
        2) Process the data in the files
        3) Repeat, if new files are found that are older than the threshold,
           or we have been running too long.

        Batches of files are read to reduce the risk of overloading available
        memory, and ensure we can exit if we are running too long.
    """
    # Initialize key variables
    records = 0
    start = time.time()
    looptime = 0
    files_read = 0
    success = True

    # Get cache directory
    config = Config()
    directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)

    # Log what we are doing
    log_message = 'Processing ingest cache.'
    log.log2info(20085, log_message)

    # Get the number of files in the directory (snapshot taken once; it
    # bounds how many files this run will ever attempt to read)
    files_found = len(
        [_ for _ in os.listdir(directory) if _.endswith('.json')])

    # Create lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock()
        if bool(success) is False:
            return bool(success)

    # Process the files in batches to reduce the database connection count
    # This can cause errors
    while True:
        # Agents constantly update files. We don't want an infinite loop
        # situation where we always have files available that are newer than
        # the desired fileage.
        loopstart = time.time()
        # Raise the age threshold by the longest loop seen so far, so files
        # written while we were busy are deferred to a later run.
        fileage = fileage + looptime

        # Automatically stop if we are going on too long.(1 of 3)
        duration = loopstart - start
        if duration > max_duration:
            log_message = ('''\
Stopping ingester after exceeding the maximum runtime duration of {}s. \
This can be adjusted on the CLI.'''.format(max_duration))
            log.log2info(20022, log_message)
            break

        # Automatically stop if we are going on too long.(2 of 3)
        if files_read >= files_found:
            # No need to log. This is an expected outcome.
            break

        # Read data from cache. Stop if there is no data found.
        cache = Cache(batch_size=batch_size, age=fileage)
        count = cache.ingest()

        # Automatically stop if we are going on too long.(3 of 3)
        if bool(cache.files) is False:
            # No need to log. This is an expected outcome.
            break

        # Get the records processed, looptime and files read
        records += count
        files_read += cache.files
        looptime = max(time.time() - loopstart, looptime)

    # Print result
    duration = time.time() - start
    if bool(records) is True and bool(duration) is True:
        log_message = ('''\
Agent cache ingest completed. {0} records processed in {1:.2f} seconds, \
{2:.2f} records / second. {3} files read. \
'''.format(records, duration, records / duration, files_read))
        log.log2info(20084, log_message)
    else:
        log_message = 'No files found to ingest'
        log.log2info(20021, log_message)

    # Delete lockfile only if running as a script.
    # The daemon has its own locking mechanism
    if bool(script) is True:
        success = _lock(delete=True)

    # Log what we are doing
    log_message = 'Finished processing ingest cache.'
    log.log2info(20020, log_message)

    return bool(success)
def _lock(delete=False):
    """Create or remove the ingester lock file.

    Args:
        delete: Delete the lock file if True, otherwise create it

    Returns:
        success: True if the requested operation succeeded

    """
    # Initialize key variables
    config = Config()
    lockfile = files.lock_file(PATTOO_INGESTER_NAME, config)
    success = False

    # Lock
    if bool(delete) is False:
        if os.path.exists(lockfile) is True:
            # Another instance appears to be running: refuse to start.
            log_message = ('''\
Lockfile {} exists. Will not start ingester script. Is another Ingester \
instance running? If not, delete the lockfile and rerun this script.\
'''.format(lockfile))
            log.log2warning(20023, log_message)
        else:
            # Touch an empty lock file.
            open(lockfile, 'a').close()
            success = True
    else:
        if os.path.exists(lockfile) is True:
            try:
                os.remove(lockfile)
                success = True
            except OSError:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt. Only OS-level removal
                # failures are expected here.
                log_message = ('Error deleting lockfile {}.'.format(lockfile))
                log.log2warning(20107, log_message)
        else:
            log_message = ('Lockfile {} not found.'.format(lockfile))
            log.log2warning(20108, log_message)
    return success
| 30.191638 | 79 | 0.599308 |
import os
import time
from pattoo_shared import log, files, converter
from pattoo.configuration import ConfigIngester as Config
from pattoo.constants import PATTOO_API_AGENT_NAME, PATTOO_INGESTER_NAME
from .records import Records
class Cache():
def __init__(self, batch_size=500, age=0):
config = Config()
directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
self._batch_id = int(time.time() * 1000)
self._data = files.read_json_files(
directory, die=False, age=age, count=batch_size)
self.files = len(self._data)
def records(self):
_cache = {}
result = []
for filepath, json_data in sorted(self._data):
if bool(json_data) is True and isinstance(json_data, dict) is True:
pdbrs = converter.cache_to_keypairs(json_data)
if bool(pdbrs) is False:
log_message = ('''\
File {} has invalid data. It will not be processed'''.format(filepath))
log.log2info(20026, log_message)
continue
pattoo_agent_id = pdbrs[0].pattoo_agent_id
if pattoo_agent_id in _cache:
_cache[pattoo_agent_id].extend(pdbrs)
else:
_cache[pattoo_agent_id] = pdbrs
if bool(_cache) is True:
for _, item in sorted(_cache.items()):
result.append(item)
return result
def purge(self):
filepaths = [filepath for filepath, _ in self._data]
for filepath in filepaths:
if os.path.exists(filepath):
try:
os.remove(filepath)
except:
log_message = ('''\
Error deleting cache file {}.'''.format(filepath))
log.log2warning(20110, log_message)
def ingest(self):
_data = self.records()
if bool(_data) is True:
log_message = ('''\
Processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
log.log2debug(20004, log_message)
_records = Records(_data)
_records.ingest()
self.purge()
log_message = ('''\
Finished processing ingest cache files. Batch ID: {}'''.format(self._batch_id))
log.log2debug(20117, log_message)
records = 0
for item in _data:
records += len(item)
return records
def process_cache(batch_size=500, max_duration=3600, fileage=10, script=False):
records = 0
start = time.time()
looptime = 0
files_read = 0
success = True
config = Config()
directory = config.agent_cache_directory(PATTOO_API_AGENT_NAME)
log_message = 'Processing ingest cache.'
log.log2info(20085, log_message)
files_found = len(
[_ for _ in os.listdir(directory) if _.endswith('.json')])
if bool(script) is True:
success = _lock()
if bool(success) is False:
return bool(success)
while True:
# situation where we always have files available that are newer than
# the desired fileage.
loopstart = time.time()
fileage = fileage + looptime
# Automatically stop if we are going on too long.(1 of 2)
duration = loopstart - start
if duration > max_duration:
log_message = ('''\
Stopping ingester after exceeding the maximum runtime duration of {}s. \
This can be adjusted on the CLI.'''.format(max_duration))
log.log2info(20022, log_message)
break
# Automatically stop if we are going on too long.(2 of 2)
if files_read >= files_found:
# No need to log. This is an expected outcome.
break
# Read data from cache. Stop if there is no data found.
cache = Cache(batch_size=batch_size, age=fileage)
count = cache.ingest()
# Automatically stop if we are going on too long.(2 of 2)
if bool(cache.files) is False:
# No need to log. This is an expected outcome.
break
# Get the records processed, looptime and files read
records += count
files_read += cache.files
looptime = max(time.time() - loopstart, looptime)
# Print result
duration = time.time() - start
if bool(records) is True and bool(duration) is True:
log_message = ('''\
Agent cache ingest completed. {0} records processed in {1:.2f} seconds, \
{2:.2f} records / second. {3} files read. \
'''.format(records, duration, records / duration, files_read))
log.log2info(20084, log_message)
else:
log_message = 'No files found to ingest'
log.log2info(20021, log_message)
# Delete lockfile only if running as a script.
# The daemon has its own locking mechanism
if bool(script) is True:
success = _lock(delete=True)
# Log what we are doing
log_message = 'Finished processing ingest cache.'
log.log2info(20020, log_message)
return bool(success)
def _lock(delete=False):
# Initialize key variables
config = Config()
lockfile = files.lock_file(PATTOO_INGESTER_NAME, config)
success = False
# Lock
if bool(delete) is False:
if os.path.exists(lockfile) is True:
log_message = ('''\
Lockfile {} exists. Will not start ingester script. Is another Ingester \
instance running? If not, delete the lockfile and rerun this script.\
'''.format(lockfile))
log.log2warning(20023, log_message)
else:
open(lockfile, 'a').close()
success = True
else:
if os.path.exists(lockfile) is True:
try:
os.remove(lockfile)
success = True
except:
log_message = ('Error deleting lockfile {}.'.format(lockfile))
log.log2warning(20107, log_message)
else:
log_message = ('Lockfile {} not found.'.format(lockfile))
log.log2warning(20108, log_message)
return success
| true | true |
f7218963b535569939ecb7f8ec24da1fd34de53b | 8,127 | py | Python | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 3 | 2019-08-28T17:10:29.000Z | 2020-11-22T14:06:45.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 1 | 2019-11-03T12:02:43.000Z | 2019-11-20T02:04:36.000Z | Pytorch/class_wrapper.py | BensonRen/idlm_Ben | 0d83780232d6341575daf88792959542aef82132 | [
"MIT"
] | 2 | 2019-08-29T02:32:56.000Z | 2019-12-22T17:44:26.000Z | """
The class wrapper for the networks
"""
# Built-in
import os
import time
# Torch
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
# Libs
import numpy as np
# Own module
class Network(object):
    """Wrapper owning a model, its loss/optimizer, data loaders, and
    checkpoint + tensorboard bookkeeping for training and evaluation."""

    def __init__(self, model_fn, flags, train_loader, test_loader,
                 ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
                 inference_mode=False, saved_model=None):
        """Set up the wrapper.

        :param model_fn: callable building the nn.Module from flags
        :param flags: object holding the specs (lr, optim, reg_scale, ...)
        :param train_loader: DataLoader yielding (geometry, spectra) pairs
        :param test_loader: DataLoader used for evaluation
        :param ckpt_dir: root directory for checkpoints
        :param inference_mode: if True, reuse the saved model's folder
        :param saved_model: folder name of the saved model (inference mode)
        """
        self.model_fn = model_fn                     # The model maker function
        self.flags = flags                           # The Flags containing the specs
        if inference_mode:                           # If inference mode, use saved model
            self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
            self.saved_model = saved_model
        else:                                        # training mode, create a new ckpt folder
            self.ckpt_dir = os.path.join(
                ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
        self.model = self.create_model()             # The model itself
        self.loss = self.make_loss()                 # The loss function
        self.optm = self.make_optimizer()            # The optimizer
        self.train_loader = train_loader             # The train data loader
        self.test_loader = test_loader               # The test data loader
        self.log = SummaryWriter(self.ckpt_dir)      # Tensorboard summary writer
        self.best_validation_loss = float('inf')     # Best-so-far validation loss

    def create_model(self):
        """Create the network module from the provided model fn and flags.

        :return: the created nn module
        """
        model = self.model_fn(self.flags)
        print(model)
        return model

    def make_loss(self, logit=None, labels=None):
        """Build the loss tensor; consistent at training and inference time.

        :param logit: network output (None returns None, e.g. at __init__)
        :param labels: ground-truth targets
        :return: the total loss tensor, or None when logit is None
        """
        if logit is None:
            return None
        MSE_loss = nn.functional.mse_loss(logit, labels)  # The MSE loss
        BDY_loss = 0  # Boundary loss: implementation pending
        return MSE_loss + BDY_loss

    def make_optimizer(self):
        """Make the optimizer named in the flags.

        :raises Exception: for unsupported optimizer names
        :return: a torch optimizer over the model parameters
        """
        if self.flags.optim == 'Adam':
            op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr,
                                  weight_decay=self.flags.reg_scale)
        elif self.flags.optim == 'RMSprop':
            op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr,
                                     weight_decay=self.flags.reg_scale)
        elif self.flags.optim == 'SGD':
            op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr,
                                 weight_decay=self.flags.reg_scale)
        else:
            raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, "
                            "please change in param or contact Ben")
        return op

    def save(self):
        """Save the model to the checkpoint folder as best_model.pt."""
        torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt'))

    def load(self):
        """Load the model from the checkpoint folder (best_model.pt)."""
        # BUG FIX: nn.Module has no .load() method; torch.load() returns the
        # deserialized model, so rebind it instead of calling a method.
        self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model.pt'))

    def train(self):
        """Run the training loop using the information in the flags."""
        cuda = True if torch.cuda.is_available() else False
        if cuda:
            self.model.cuda()
        for epoch in range(self.flags.train_step):
            # Set to Training Mode
            train_loss = 0.0
            self.model.train()
            for j, (geometry, spectra) in enumerate(self.train_loader):
                if cuda:
                    geometry = geometry.cuda()      # Put data onto GPU
                    spectra = spectra.cuda()        # Put data onto GPU
                self.optm.zero_grad()               # Zero the gradient first
                logit = self.model(geometry)        # Get the output
                loss = self.make_loss(logit, spectra)   # Get the loss tensor
                loss.backward()                     # Calculate backward gradients
                self.optm.step()                    # Move the optimizer one step
                # BUG FIX: accumulate a plain float. Summing the loss tensor
                # kept the autograd graph alive, and .data.numpy() later
                # failed on CUDA tensors.
                train_loss += loss.item()

            # BUG FIX: the original `if epoch % eval_step:` was inverted --
            # it evaluated on every epoch EXCEPT multiples of eval_step.
            if epoch % self.flags.eval_step == 0:
                # Record the training loss to the tensorboard
                train_avg_loss = train_loss / (j + 1)
                self.log.add_scalar('Loss/train', train_avg_loss, epoch)

                # Set to Evaluation Mode
                self.model.eval()
                print("Doing Evaluation on the model now")
                test_loss = 0.0
                with torch.no_grad():   # no gradients needed for evaluation
                    for j, (geometry, spectra) in enumerate(self.test_loader):
                        if cuda:
                            geometry = geometry.cuda()
                            spectra = spectra.cuda()
                        logit = self.model(geometry)
                        loss = self.make_loss(logit, spectra)
                        test_loss += loss.item()

                # Record the testing loss to the tensorboard
                test_avg_loss = test_loss / (j + 1)
                self.log.add_scalar('Loss/test', test_avg_loss, epoch)

                print("This is Epoch %d, training loss %.5f, validation loss %.5f"
                      % (epoch, train_avg_loss, test_avg_loss))

                # Model improving, save the model down
                if test_avg_loss < self.best_validation_loss:
                    self.best_validation_loss = test_avg_loss
                    self.save()
                    print("Saving the model down...")

                    if self.best_validation_loss < self.flags.stop_threshold:
                        print("Training finished EARLIER at epoch %d, "
                              "reaching loss of %.5f" %
                              (epoch, self.best_validation_loss))
                        return None

    def evaluate(self, save_dir='data/'):
        """Evaluate the best saved model and dump truth/prediction CSVs."""
        self.load()
        self.model.eval()   # Evaluation mode

        # Get the file names
        Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(self.saved_model))
        Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(self.saved_model))
        Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(self.saved_model))

        # Open those files to append
        with open(Xtruth_file, 'a') as fxt, open(Ytruth_file, 'a') as fyt, \
                open(Ypred_file, 'a') as fyp:
            # Loop through the eval data and evaluate.  no_grad lets
            # .numpy() work on the outputs without detaching manually.
            with torch.no_grad():
                for ind, (geometry, spectra) in enumerate(self.test_loader):
                    logits = self.model(geometry)
                    np.savetxt(fxt, geometry.numpy(), fmt='%.3f')
                    np.savetxt(fyt, spectra.numpy(), fmt='%.3f')
                    np.savetxt(fyp, logits.numpy(), fmt='%.3f')
import os
import time
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchsummary import summary
import numpy as np
class Network(object):
def __init__(self, model_fn, flags, train_loader, test_loader,
ckpt_dir=os.path.join(os.path.abspath(''), 'models'),
inference_mode=False, saved_model=None):
self.model_fn = model_fn
self.flags = flags
if inference_mode:
self.ckpt_dir = os.path.join(ckpt_dir, saved_model)
self.saved_model = saved_model
else:
self.ckpt_dir = os.path.join(ckpt_dir, time.strftime('%Y%m%d_%H%M%S', time.localtime()))
self.model = self.create_model()
self.loss = self.make_loss()
self.optm = self.make_optimizer()
self.train_loader = train_loader
self.test_loader = test_loader
self.log = SummaryWriter(self.ckpt_dir)
self.best_validation_loss = float('inf')
def create_model(self):
model = self.model_fn(self.flags)
print(model)
return model
def make_loss(self, logit=None, labels=None):
if logit is None:
return None
MSE_loss = nn.functional.mse_loss(logit, labels)
BDY_loss = 0
return MSE_loss + BDY_loss
def make_optimizer(self):
if self.flags.optim == 'Adam':
op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'RMSprop':
op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
elif self.flags.optim == 'SGD':
op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)
else:
raise Exception("Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben")
return op
def save(self):
torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model.pt'))
def load(self):
self.model.load(torch.load(os.path.join(self.ckpt_dir, 'best_model.pt')))
def train(self):
cuda = True if torch.cuda.is_available() else False
if cuda:
self.model.cuda()
for epoch in range(self.flags.train_step):
train_loss = 0
self.model.train()
for j, (geometry, spectra) in enumerate(self.train_loader):
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
self.optm.zero_grad()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra)
loss.backward()
self.optm.step()
train_loss += loss
if epoch % self.flags.eval_step:
train_avg_loss = train_loss.data.numpy() / (j+1)
self.log.add_scalar('Loss/train', train_avg_loss, epoch)
self.model.eval()
print("Doing Evaluation on the model now")
test_loss = 0
for j, (geometry, spectra) in enumerate(self.test_loader):
if cuda:
geometry = geometry.cuda()
spectra = spectra.cuda()
logit = self.model(geometry)
loss = self.make_loss(logit, spectra)
test_loss += loss
test_avg_loss = test_loss.data.numpy() / (j+1)
self.log.add_scalar('Loss/test', test_avg_loss, epoch)
print("This is Epoch %d, training loss %.5f, validation loss %.5f" \
% (epoch, train_avg_loss, test_avg_loss ))
if test_avg_loss < self.best_validation_loss:
self.best_validation_loss = test_avg_loss
self.save()
print("Saving the model down...")
if self.best_validation_loss < self.flags.stop_threshold:
print("Training finished EARLIER at epoch %d, reaching loss of %.5f" %\
(epoch, self.best_validation_loss))
return None
def evaluate(self, save_dir='data/'):
self.load()
self.model.eval()
Ypred_file = os.path.join(save_dir, 'test_Ypred_{}.csv'.format(self.saved_model))
Xtruth_file = os.path.join(save_dir, 'test_Xtruth_{}.csv'.format(self.saved_model))
Ytruth_file = os.path.join(save_dir, 'test_Ytruth_{}.csv'.format(self.saved_model))
') as fxt,open(Ytruth_file, 'a') as fyt, open(Ypred_file,'a') as fyp:
for ind, (geometry, spectra) in enumerate(self.test_loader):
logits = self.model(geometry)
np.savetxt(fxt, geometry.numpy(), fmt='%.3f')
np.savetxt(fyt, spectra.numpy(), fmt='%.3f')
np.savetxt(fyp, logits.numpy(), fmt='%.3f')
| true | true |
f7218967c57c18721cbadcda05e9f80ac9a6d65e | 709 | py | Python | forms.py | T3chn3/HFP | ac555de68db689c63e25119ac2ca03612f4c3340 | [
"MIT"
] | null | null | null | forms.py | T3chn3/HFP | ac555de68db689c63e25119ac2ca03612f4c3340 | [
"MIT"
] | null | null | null | forms.py | T3chn3/HFP | ac555de68db689c63e25119ac2ca03612f4c3340 | [
"MIT"
] | null | null | null | #Forms
#On the web server
import cgi #used to invoke the request
<form action="cgi-bin/process-time.py" method="POST"> Enter a timing value: #action to take and method to invoke for the response, text
<input type="Text" name="TimeValue" size=40> #TimeValue will hold the users input
<br />
<input type="Submit" value="Send"> </form> #code for the button
form = cgi.FieldStorage() #get the data from the form above
timing_value = form["TimeValue"].value #access the value associated with "TimeValue" from the form.
#this code is used to extend the yate.py template
#altered the cgi script to receive the users input via POST
#skipped the rest of this section! Just pulling out the sqlite implementation
| 37.315789 | 135 | 0.754584 |
import cgi
<form action="cgi-bin/process-time.py" method="POST"> Enter a timing value:
<input type="Text" name="TimeValue" size=40>
<br />
<input type="Submit" value="Send"> </form>
form = cgi.FieldStorage()
timing_value = form["TimeValue"].value
| false | true |
f72189c34849c418bee945e1e54df7340ce233c9 | 435 | py | Python | virtual/lib/python3.8/site-packages/wtforms/fields/__init__.py | Esther-Anyona/mylearner | d49d1c4c8dbeb93cc384f2037c48236be5dc89e1 | [
"MIT"
] | 3 | 2022-01-04T18:26:21.000Z | 2022-02-02T00:10:50.000Z | venv/lib/python3.10/site-packages/wtforms/fields/__init__.py | superiorkid/rbac | 40f45849687075bc46a52985af22eab6cf83cbda | [
"MIT"
] | 1 | 2021-12-30T10:36:57.000Z | 2021-12-30T10:36:57.000Z | venv/lib/python3.10/site-packages/wtforms/fields/__init__.py | superiorkid/rbac | 40f45849687075bc46a52985af22eab6cf83cbda | [
"MIT"
] | 2 | 2022-02-12T15:33:59.000Z | 2022-02-14T15:36:31.000Z | from wtforms.fields.choices import *
from wtforms.fields.choices import SelectFieldBase
from wtforms.fields.core import Field
from wtforms.fields.core import Flags
from wtforms.fields.core import Label
from wtforms.fields.datetime import *
from wtforms.fields.form import *
from wtforms.fields.list import *
from wtforms.fields.numeric import *
from wtforms.fields.simple import *
from wtforms.utils import unset_value as _unset_value
| 36.25 | 53 | 0.832184 | from wtforms.fields.choices import *
from wtforms.fields.choices import SelectFieldBase
from wtforms.fields.core import Field
from wtforms.fields.core import Flags
from wtforms.fields.core import Label
from wtforms.fields.datetime import *
from wtforms.fields.form import *
from wtforms.fields.list import *
from wtforms.fields.numeric import *
from wtforms.fields.simple import *
from wtforms.utils import unset_value as _unset_value
| true | true |
f7218c5841c78da8df7b09b9049a325f9cfeaba6 | 8,968 | py | Python | custom_admin/views.py | samuira/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2019-11-09T17:18:10.000Z | 2019-11-09T17:18:10.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 19 | 2019-12-05T00:13:31.000Z | 2022-03-11T23:58:13.000Z | custom_admin/views.py | abhisek11/TutionMastor | 5b6d89efc90a9ebb54766530554d7dc9d5ee8298 | [
"MIT"
] | 1 | 2020-02-29T07:35:25.000Z | 2020-02-29T07:35:25.000Z | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
    """Landing page of the custom admin; restricted to superusers."""

    template_name = 'custom_admin/dashboard.html'
    login_url = reverse_lazy('login')

    def get(self, request):
        """Render the dashboard template."""
        return render(request, self.template_name)

    def test_func(self):
        """Allow only superusers (UserPassesTestMixin hook)."""
        user = self.request.user
        return user.is_superuser

    def handle_no_permission(self):
        """Flash an error and bounce unauthorized users to the login page."""
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class Login(View):
    """Admin login view: renders the form on GET and authenticates on POST.

    BUG FIX: the original kept ``context`` as a class-level ``dict()``
    shared by every request and thread (mitigated with ``.clear()`` calls,
    which still races under concurrency). The context is now built fresh
    per request.
    """

    template_name = 'custom_admin/account/login.html'
    form_class = LoginForm

    def get(self, request, *args, **kwargs):
        """Render the empty login form."""
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Validate credentials; redirect to the dashboard on success."""
        context = {}
        form = self.form_class(request.POST)
        context['form'] = form
        if form.is_valid():
            user = authenticate(request=request, email=request.POST['email'],
                                password=request.POST['password'])
            if user:
                login(request, user)
                return redirect('dashboard')
            # Valid form but bad credentials.
            messages.error(request, 'Incorrect Email or Password')
        else:
            # Surface form validation errors to the template.
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Register(View):
    """Admin registration view: renders the form and creates a user on POST.

    BUG FIX: the original kept ``context`` as a class-level ``dict()``
    shared by every request and thread; it is now built per request.
    """

    template_name = 'custom_admin/account/register.html'
    form_class = RegisterForm

    def get(self, request, *args, **kwargs):
        """Render the empty registration form."""
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        """Create the user; on success redirect to the login page."""
        context = {}
        form = self.form_class(request.POST, request=request)
        context['form'] = form
        if form.is_valid():
            try:
                user = User.objects.create_user(
                    email=request.POST['email'],
                    password=request.POST['password'])
            except ValidationError as e:
                # Flash every model-level validation message.
                [messages.error(request, error[0])
                 for error in e.message_dict.values()]
            else:
                return redirect('login')
        else:
            # Surface form validation errors to the template.
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
    """Log the current superuser out and send them back to the login page."""

    login_url = reverse_lazy('login')

    def get(self, request):
        """Terminate the session and redirect to login."""
        logout(request)
        return HttpResponseRedirect(reverse('login'))

    def test_func(self):
        """Allow only superusers (UserPassesTestMixin hook)."""
        user = self.request.user
        return user.is_superuser

    def handle_no_permission(self):
        """Flash an error and bounce unauthorized users to the login page."""
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated list of all blog posts; superusers only."""
    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')
    # NOTE(review): class-level querysets are lazy, so new posts should
    # appear without a restart -- confirm against Django's ListView docs.
    queryset = BlogPost.objects.all()
    paginate_by = 10
    context_object_name = 'blog_post'

    def test_func(self):
        # Restrict access to superusers (UserPassesTestMixin hook).
        return self.request.user.is_superuser

    def handle_no_permission(self):
        # Flash an error and bounce unauthorized users to the login page.
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
    """Create a blog post; superusers only.

    BUG FIX: the original kept ``context`` as a class-level ``dict()``
    shared by every request and thread; it is now built per request.
    Debug ``print`` calls (which leaked submitted form data to stdout)
    were removed.
    """

    template_name = 'custom_admin/blog/create.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostCreateForm

    def test_func(self):
        """Allow only superusers (UserPassesTestMixin hook)."""
        return self.request.user.is_superuser

    def handle_no_permission(self):
        """Flash an error and bounce unauthorized users to the login page."""
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request):
        """Render the create form with the rich-text editor enabled."""
        context = {'ckeditor': True}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        """Validate the form and create the BlogPost record."""
        context = {}
        form = self.form_class(request.POST, request.FILES)
        context['form'] = form
        if form.is_valid():
            BlogPost.objects.create(
                created_by=request.user,
                title_image=form.cleaned_data.get('title_image', ''),
                title=form.cleaned_data.get('title'),
                description=form.cleaned_data.get('bp_description'),
                slug=slugify(form.cleaned_data.get('title'))
            )
            messages.success(self.request, 'Blog has been created successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        # Surface form validation errors to the template.
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Edit an existing blog post; superusers only.

    BUG FIX: the original stored the fetched BlogPost in a class-level
    ``context`` dict during GET and read it back during POST. That state
    is shared by all users and threads, so interleaved requests edited
    the wrong post. Each request now fetches the post from the ``pk``
    URL kwarg.
    """

    template_name = 'custom_admin/blog/edit.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostEditForm

    def test_func(self):
        """Allow only superusers (UserPassesTestMixin hook)."""
        return self.request.user.is_superuser

    def handle_no_permission(self):
        """Flash an error and bounce unauthorized users to the login page."""
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        """Render the edit form pre-populated with the selected post."""
        context = {
            'ckeditor': True,
            'blog': BlogPost.objects.get(pk=kwargs['pk']),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        """Validate the form and persist the changes to the post."""
        blog = BlogPost.objects.get(pk=kwargs['pk'])
        form = self.form_class(request.POST, request.FILES, pk=blog.id)
        context = {'blog': blog, 'form': form}
        if form.is_valid():
            # Keep the old image when no new one was uploaded.
            blog.title_image = (
                form.cleaned_data.get('title_image', '') or blog.title_image)
            blog.title = form.cleaned_data.get('title')
            blog.is_verified = form.cleaned_data.get('is_verified')
            # Stamp the publish time only on the first verification.
            blog.published_on = (
                datetime.now()
                if form.cleaned_data.get('is_verified') and not blog.published_on
                else blog.published_on)
            blog.description = form.cleaned_data.get('bp_description')
            blog.slug = slugify(form.cleaned_data.get('title'))
            blog.save()
            messages.success(self.request, 'Blog has been updated successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        # Surface form validation errors to the template.
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
    """Delete the BlogPost identified by the URL ``pk``, then return to the list.

    NOTE(review): deletion is performed on GET, which is unsafe against CSRF
    and link prefetching -- consider switching to POST.
    """
    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')

    def test_func(self):
        # Only superusers may delete posts.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        doomed = BlogPost.objects.get(pk=kwargs['pk'])
        doomed.delete()
        messages.success(self.request, 'Blog has been deleted successfully.')
        return HttpResponseRedirect(reverse('blog-list'))
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated listing of every user account; superusers only."""
    template_name = 'custom_admin/user/list.html'
    login_url = reverse_lazy('login')
    queryset = User.objects.all()
    paginate_by = 10
    context_object_name = 'user_list'

    def test_func(self):
        # Restrict the listing to superusers.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        req = self.request
        messages.error(req, 'Permission denied!!!')
        return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Superuser-only view to edit a user's profile and permission flags.

    Fix: the original kept a class-level ``context`` dict shared across all
    requests; context is now built per-request.  Leftover debug ``print``
    calls were removed.
    """
    template_name = 'custom_admin/user/edit.html'
    login_url = reverse_lazy('login')
    form_class = UserEditForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        context = {'user': User.objects.get(pk=kwargs['pk'])}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        user = User.objects.get(pk=kwargs['pk'])
        form = self.form_class(request.POST, request.FILES, pk=user.id)
        context = {'user': user, 'form': form}
        if form.is_valid():
            # Keep the existing avatar when no new file was uploaded.
            user.avatar = form.cleaned_data.get('avatar') or user.avatar
            user.first_name = form.cleaned_data.get('first_name', '')
            user.last_name = form.cleaned_data.get('last_name', '')
            user.phone = form.cleaned_data.get('phone', '')
            user.is_superuser = form.cleaned_data.get('is_superuser', False)
            user.is_staff = form.cleaned_data.get('is_staff', False)
            user.is_active = form.cleaned_data.get('is_active', False)
            user.save()
            messages.success(self.request, 'User has been updated successfully.')
            return HttpResponseRedirect(reverse('user-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
| 33.092251 | 126 | 0.748104 | from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.core.exceptions import ValidationError
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse, HttpResponseRedirect
from django.urls import reverse_lazy, reverse
from django.utils.text import slugify
from django.views import View
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView
from blog.models import BlogPost
from custom_admin.models import User
from custom_admin.utils import Util
from .forms import LoginForm, RegisterForm, BlogPostCreateForm, BlogPostEditForm, UserEditForm
from django.shortcuts import redirect
from datetime import datetime
class Dashboard(LoginRequiredMixin, UserPassesTestMixin, View):
    """Landing page of the custom admin; accessible to superusers only."""
    template_name = 'custom_admin/dashboard.html'
    login_url = reverse_lazy('login')

    def test_func(self):
        # Only superusers may view the dashboard.
        current_user = self.request.user
        return current_user.is_superuser

    def handle_no_permission(self):
        req = self.request
        messages.error(req, 'Permission denied!!!')
        return redirect('login')

    def get(self, request):
        return render(request, self.template_name)
class Login(View):
    """Email/password login form for the custom admin.

    Fix: the original kept a class-level ``context`` dict shared by every
    request (mutated via ``self.context``); context is now per-request.
    """
    template_name = 'custom_admin/account/login.html'
    form_class = LoginForm

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        context = {'form': form}
        if form.is_valid():
            user = authenticate(request=request, email=request.POST['email'],
                                password=request.POST['password'])
            if user:
                login(request, user)
                return redirect('dashboard')
            # Valid form, but credentials did not match any account.
            messages.error(request, 'Incorrect Email or Password')
        else:
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Register(View):
    """Registration form; creates a user account then redirects to login.

    Fixes: per-request context instead of the shared class-level dict;
    drops an unused ``user`` binding; replaces a list comprehension used
    purely for its side effects with a plain loop.
    """
    template_name = 'custom_admin/account/register.html'
    form_class = RegisterForm

    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST, request=request)
        context = {'form': form}
        if form.is_valid():
            try:
                User.objects.create_user(email=request.POST['email'],
                                         password=request.POST['password'])
            except ValidationError as e:
                # Surface the first message of each model-validation error.
                for error_list in e.message_dict.values():
                    messages.error(request, error_list[0])
            else:
                return redirect('login')
        else:
            context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class Logout(LoginRequiredMixin, UserPassesTestMixin, View):
    """Log the current superuser out and send them to the login page."""
    login_url = reverse_lazy('login')

    def test_func(self):
        current_user = self.request.user
        return current_user.is_superuser

    def handle_no_permission(self):
        req = self.request
        messages.error(req, 'Permission denied!!!')
        return redirect('login')

    def get(self, request):
        logout(request)
        return HttpResponseRedirect(reverse('login'))
class BlogList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated list of every BlogPost; superusers only."""
    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')
    queryset = BlogPost.objects.all()
    paginate_by = 10
    context_object_name = 'blog_post'

    def test_func(self):
        # Restrict the listing to superusers.
        current_user = self.request.user
        return current_user.is_superuser

    def handle_no_permission(self):
        req = self.request
        messages.error(req, 'Permission denied!!!')
        return redirect('login')
class BlogCreate(LoginRequiredMixin, UserPassesTestMixin, View):
    """Superuser-only view that renders and processes the blog-creation form.

    Fix: the original kept a class-level ``context`` dict, which is shared
    by every instance (and therefore every concurrent request); template
    context is now built per-request.  Leftover debug ``print`` calls were
    removed.
    """
    template_name = 'custom_admin/blog/create.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostCreateForm

    def test_func(self):
        # UserPassesTestMixin hook: only superusers may create posts.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request):
        # 'ckeditor' tells the template to load the rich-text editor assets.
        return render(request, self.template_name, {'ckeditor': True})

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST, request.FILES)
        context = {'form': form}
        if form.is_valid():
            BlogPost.objects.create(
                created_by=request.user,
                title_image=form.cleaned_data.get('title_image', ''),
                title=form.cleaned_data.get('title'),
                description=form.cleaned_data.get('bp_description'),
                slug=slugify(form.cleaned_data.get('title'))
            )
            messages.success(self.request, 'Blog has been created successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Superuser-only view to edit an existing BlogPost.

    Fix: the original stashed the post being edited in a class-level
    ``context`` dict during GET and read it back in POST, so concurrent
    editors could clobber each other's state (and a POST without a prior
    GET in the same process would use a stale post).  The post is now
    re-fetched from the URL ``pk`` on every request.
    """
    template_name = 'custom_admin/blog/edit.html'
    login_url = reverse_lazy('login')
    form_class = BlogPostEditForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        context = {
            'ckeditor': True,
            'blog': BlogPost.objects.get(pk=kwargs['pk']),
        }
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        blog = BlogPost.objects.get(pk=kwargs['pk'])
        form = self.form_class(request.POST, request.FILES, pk=blog.id)
        context = {'blog': blog, 'form': form}
        if form.is_valid():
            blog.title_image = form.cleaned_data.get('title_image', '') or blog.title_image
            blog.title = form.cleaned_data.get('title')
            blog.is_verified = form.cleaned_data.get('is_verified')
            # Stamp published_on the first time the post becomes verified;
            # never overwrite an existing publication date.
            if blog.is_verified and not blog.published_on:
                blog.published_on = datetime.now()
            blog.description = form.cleaned_data.get('bp_description')
            blog.slug = slugify(form.cleaned_data.get('title'))
            blog.save()
            messages.success(self.request, 'Blog has been updated successfully.')
            return HttpResponseRedirect(reverse('blog-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
class BlogDelete(LoginRequiredMixin, UserPassesTestMixin, View):
    """Delete the BlogPost identified by the URL ``pk``, then return to the list.

    NOTE(review): deletion is performed on GET, which is unsafe against CSRF
    and link prefetching -- consider switching to POST.
    """
    template_name = 'custom_admin/blog/list.html'
    login_url = reverse_lazy('login')

    def test_func(self):
        # Only superusers may delete posts.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        doomed = BlogPost.objects.get(pk=kwargs['pk'])
        doomed.delete()
        messages.success(self.request, 'Blog has been deleted successfully.')
        return HttpResponseRedirect(reverse('blog-list'))
class UserList(LoginRequiredMixin, UserPassesTestMixin, ListView):
    """Paginated listing of every user account; superusers only."""
    template_name = 'custom_admin/user/list.html'
    login_url = reverse_lazy('login')
    queryset = User.objects.all()
    paginate_by = 10
    context_object_name = 'user_list'

    def test_func(self):
        # Restrict the listing to superusers.
        return self.request.user.is_superuser

    def handle_no_permission(self):
        req = self.request
        messages.error(req, 'Permission denied!!!')
        return redirect('login')
class UserEdit(LoginRequiredMixin, UserPassesTestMixin, View):
    """Superuser-only view to edit a user's profile and permission flags.

    Fix: the original kept a class-level ``context`` dict shared across all
    requests; context is now built per-request.  Leftover debug ``print``
    calls were removed.
    """
    template_name = 'custom_admin/user/edit.html'
    login_url = reverse_lazy('login')
    form_class = UserEditForm

    def test_func(self):
        return self.request.user.is_superuser

    def handle_no_permission(self):
        messages.error(self.request, 'Permission denied!!!')
        return redirect('login')

    def get(self, request, **kwargs):
        context = {'user': User.objects.get(pk=kwargs['pk'])}
        return render(request, self.template_name, context)

    def post(self, request, *args, **kwargs):
        user = User.objects.get(pk=kwargs['pk'])
        form = self.form_class(request.POST, request.FILES, pk=user.id)
        context = {'user': user, 'form': form}
        if form.is_valid():
            # Keep the existing avatar when no new file was uploaded.
            user.avatar = form.cleaned_data.get('avatar') or user.avatar
            user.first_name = form.cleaned_data.get('first_name', '')
            user.last_name = form.cleaned_data.get('last_name', '')
            user.phone = form.cleaned_data.get('phone', '')
            user.is_superuser = form.cleaned_data.get('is_superuser', False)
            user.is_staff = form.cleaned_data.get('is_staff', False)
            user.is_active = form.cleaned_data.get('is_active', False)
            user.save()
            messages.success(self.request, 'User has been updated successfully.')
            return HttpResponseRedirect(reverse('user-list'))
        context['error'] = Util.form_validation_error(request, form)
        return render(request, self.template_name, context)
| true | true |
f7218c9e437eabf2dfc69680b59fad493a030b44 | 1,925 | py | Python | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | src/braille/braille_translator.py | stuart-stanley/dotspicejar | bcf0c4656764011744581c5ea052b47ee70a34f1 | [
"MIT"
] | null | null | null | from .braille_cell import BrailleCell
from .braille_string import BrailleString
class BrailleTranslator(object):
_simple_cells = None
def __init__(self, text):
self.__raw_text = text
if BrailleTranslator._simple_cells is None:
self.__setup_class_simple_cells()
@property
def as_grade_1(self):
cell_list = []
for c in self.__raw_text:
cell = self._simple_cells[c]
cell_list.append(cell)
bs = BrailleString(self.__raw_text, cell_list)
return bs
def __setup_class_simple_cells(self):
cd = {}
cd['a'] = BrailleCell('a', '*..', '...')
cd['b'] = BrailleCell('b', '**.', '...')
cd['c'] = BrailleCell('c', '*..', '*..')
cd['d'] = BrailleCell('d', '*..', '**.')
cd['e'] = BrailleCell('e', '*..', '.*.')
cd['f'] = BrailleCell('f', '**.', '*..')
cd['g'] = BrailleCell('g', '**.', '**.')
cd['h'] = BrailleCell('h', '**.', '.*.')
cd['i'] = BrailleCell('i', '.*.', '*..')
cd['j'] = BrailleCell('j', '.*.', '**.')
cd['k'] = BrailleCell('k', '*.*', '...')
cd['l'] = BrailleCell('l', '***', '...')
cd['m'] = BrailleCell('m', '*.*', '*..')
cd['n'] = BrailleCell('n', '*.*', '**.')
cd['o'] = BrailleCell('o', '*.*', '.*.')
cd['p'] = BrailleCell('p', '***', '*..')
cd['q'] = BrailleCell('q', '***', '**.')
cd['r'] = BrailleCell('r', '***', '.*.')
cd['s'] = BrailleCell('s', '.**', '*..')
cd['t'] = BrailleCell('t', '.**', '**.')
cd['u'] = BrailleCell('u', '*.*', '..*')
cd['v'] = BrailleCell('v', '***', '..*')
cd['w'] = BrailleCell('w', '.*.', '***')
cd['x'] = BrailleCell('x', '*.*', '*.*')
cd['y'] = BrailleCell('y', '*.*', '***')
cd['z'] = BrailleCell('z', '*.*', '.**')
BrailleTranslator._simple_cells = cd
| 37.745098 | 54 | 0.424935 | from .braille_cell import BrailleCell
from .braille_string import BrailleString
class BrailleTranslator(object):
    """Translate text into grade-1 (letter-for-letter) braille."""

    # Shared, lazily-initialised map from lowercase letter to BrailleCell.
    _simple_cells = None

    # Dot patterns for each letter: (char, top-row dots, bottom-row dots).
    _LETTER_PATTERNS = (
        ('a', '*..', '...'), ('b', '**.', '...'), ('c', '*..', '*..'),
        ('d', '*..', '**.'), ('e', '*..', '.*.'), ('f', '**.', '*..'),
        ('g', '**.', '**.'), ('h', '**.', '.*.'), ('i', '.*.', '*..'),
        ('j', '.*.', '**.'), ('k', '*.*', '...'), ('l', '***', '...'),
        ('m', '*.*', '*..'), ('n', '*.*', '**.'), ('o', '*.*', '.*.'),
        ('p', '***', '*..'), ('q', '***', '**.'), ('r', '***', '.*.'),
        ('s', '.**', '*..'), ('t', '.**', '**.'), ('u', '*.*', '..*'),
        ('v', '***', '..*'), ('w', '.*.', '***'), ('x', '*.*', '*.*'),
        ('y', '*.*', '***'), ('z', '*.*', '.**'),
    )

    def __init__(self, text):
        self.__raw_text = text
        # Build the shared table on first instantiation only.
        if BrailleTranslator._simple_cells is None:
            self.__setup_class_simple_cells()

    @property
    def as_grade_1(self):
        """Return a BrailleString mapping each character to its braille cell.

        NOTE(review): any character outside a-z (spaces, uppercase,
        punctuation) raises KeyError -- confirm inputs are lowercase letters.
        """
        cells = [self._simple_cells[ch] for ch in self.__raw_text]
        return BrailleString(self.__raw_text, cells)

    def __setup_class_simple_cells(self):
        # Populate the class-wide letter -> BrailleCell table once.
        table = {}
        for letter, top, bottom in self._LETTER_PATTERNS:
            table[letter] = BrailleCell(letter, top, bottom)
        BrailleTranslator._simple_cells = table
| true | true |
f7218cb7844b332e36d07bd50f2d78b34959e42b | 56,514 | py | Python | src/doc/common/builder.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | null | null | null | src/doc/common/builder.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | null | null | null | src/doc/common/builder.py | bopopescu/sage-5 | 9d85b34956ca2edd55af307f99c5d3859acd30bf | [
"BSL-1.0"
] | null | null | null | #!/usr/bin/env python
"""
The documentation builder
It is the starting point for building documentation, and is
responsible to figure out what to build and with which options. The
actual documentation build for each individual document is then done
in a subprocess call to sphinx, see :func:`builder_helper`.
* The builder can be configured in build_options.py
* The sphinx subprocesses are configured in conf.py
"""
import logging, optparse, os, shutil, subprocess, sys, re
import sphinx.cmdline
import sphinx.util.console
import sphinx.ext.intersphinx
# We remove the current directory from sys.path right away so that we
# import sage from the proper installed location instead of this build dir.
try:
    sys.path.remove(os.path.realpath(os.getcwd()))
except ValueError:
    # The current directory was not on sys.path; nothing to do.
    pass

from sage.misc.cachefunc import cached_method
from sage.misc.misc import sage_makedirs as mkdir
from sage.env import SAGE_DOC, SAGE_SRC

# Load the options, including
#   SAGE_DOC, LANGUAGES, SPHINXOPTS, PAPER, OMIT,
#   PAPEROPTS, ALLSPHINXOPTS, NUM_THREADS, WEBSITESPHINXOPTS
# from build_options.py.  These names become module-level globals here.
execfile(os.path.join(SAGE_DOC, 'common' , 'build_options.py'))
##########################################
# Parallel Building Ref Manual #
##########################################
def build_ref_doc(args):
    """Worker entry point for building one reference-manual component.

    ``args`` is a flat tuple ``(doc, lang, format, kwds, *build_args)`` as
    packed by the reference builder for use with ``multiprocessing.Pool``.
    """
    doc, lang, fmt, kwds = args[0], args[1], args[2], args[3]
    build_args = args[4:]
    if fmt == 'inventory':
        # The inventory pass must not consume its own (stale) inventory.
        kwds['use_multidoc_inventory'] = False
    sub_builder = ReferenceSubBuilder(doc, lang)
    getattr(sub_builder, fmt)(*build_args, **kwds)
##########################################
# Builders #
##########################################
def builder_helper(type):
    """
    Returns a function which builds the documentation for
    output type ``type``.

    The returned function is attached to :class:`DocBuilder` as e.g.
    ``html``, ``latex``, ...  It assembles a sphinx-build command line and
    runs custom-sphinx-build.py in-process via ``execfile`` (so the Sphinx
    intersphinx cache survives between builds).
    """
    def f(self, *args, **kwds):
        output_dir = self._output_dir(type)

        options = ALLSPHINXOPTS

        if self.name == 'website':
            # WEBSITESPHINXOPTS is either empty or " -A hide_pdf_links=1 "
            options += WEBSITESPHINXOPTS

        if kwds.get('use_multidoc_inventory', True):
            options += ' -D multidoc_first_pass=0'
        else:
            # First pass: build without consulting the multidoc inventory.
            options += ' -D multidoc_first_pass=1'

        build_command = '-b %s -d %s %s %s %s'%(type, self._doctrees_dir(),
                                                options, self.dir,
                                                output_dir)
        logger.debug(build_command)

        # Execute custom-sphinx-build.py; it reads its arguments from
        # sys.argv, which we overwrite here.
        sys.argv = [os.path.join(SAGE_DOC, 'common', 'custom-sphinx-build.py')]
        sys.argv.extend(build_command.split())
        try:
            execfile(sys.argv[0])
        except Exception:
            # Log the full traceback but keep the overall build going.
            import traceback
            logger.error(traceback.format_exc())

        # Print message about location of output:
        # - by default if html output
        # - if verbose and if not pdf output
        # - if pdf: print custom message here if verbose, and print
        #   full message below (see pdf method) after 'make all-pdf'
        #   is done running
        if 'output/html' in output_dir:
            logger.warning("Build finished. The built documents can be found in %s",
                           output_dir)
        elif 'output/pdf' not in output_dir:
            logger.info("Build finished. The built documents can be found in %s",
                        output_dir)
        else:
            logger.info("LaTeX file written to %s; now making PDF.",
                        output_dir)

    # Marker used by DocBuilder._output_formats to discover output methods.
    f.is_output_format = True
    return f
class DocBuilder(object):
    """
    Builds a single document (a subdirectory of SAGE_DOC such as
    'tutorial') in one language.  The concrete output methods (``html``,
    ``latex``, ...) are generated at class-creation time by
    :func:`builder_helper`.
    """
    def __init__(self, name, lang='en'):
        """
        INPUT:

        - ``name`` - the name of a subdirectory in SAGE_DOC, such as
          'tutorial' or 'bordeaux_2008'

        - ``lang`` - (default "en") the language of the document.
        """
        doc = name.split(os.path.sep)

        # A leading path component naming a language overrides ``lang``.
        if doc[0] in LANGUAGES:
            lang = doc[0]
            doc.pop(0)

        self.name = os.path.join(*doc)
        self.lang = lang
        self.dir = os.path.join(SAGE_DOC, self.lang, self.name)

        # Make sure the static and templates directories are there,
        # since Sphinx expects them to exist.
        mkdir(os.path.join(self.dir, "static"))
        mkdir(os.path.join(self.dir, "templates"))

    def _output_dir(self, type):
        """
        Returns the directory where the output of type type is stored.
        If the directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.DocBuilder('tutorial')
            sage: b._output_dir('html')
            '.../doc/output/html/en/tutorial'
        """
        d = os.path.join(SAGE_DOC, "output", type, self.lang, self.name)
        mkdir(d)
        return d

    def _doctrees_dir(self):
        """
        Returns the directory where the doctrees are stored. If the
        directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.DocBuilder('tutorial')
            sage: b._doctrees_dir()
            '.../doc/output/doctrees/en/tutorial'
        """
        d = os.path.join(SAGE_DOC, "output", 'doctrees', self.lang, self.name)
        mkdir(d)
        return d

    def _output_formats(self):
        """
        Returns a list of the possible output formats.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.DocBuilder('tutorial')
            sage: b._output_formats()
            ['changes', 'html', 'htmlhelp', 'inventory', 'json', 'latex', 'linkcheck', 'pickle', 'web']
        """
        # Go through all the attributes of self and check to see which
        # ones have an 'is_output_format' attribute.  These are the ones
        # created with builder_helper.
        output_formats = []
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                output_formats.append(attr)
        output_formats.sort()
        return output_formats

    def pdf(self):
        """
        Builds the PDF files for this document. This is done by first
        (re)-building the LaTeX output, going into that LaTeX
        directory, and running 'make all-pdf' there.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.DocBuilder('tutorial')
            sage: b.pdf() #not tested
        """
        self.latex()
        tex_dir = self._output_dir('latex')
        pdf_dir = self._output_dir('pdf')
        # The directories come from SAGE_DOC (trusted), but note the command
        # is run through the shell and relies on the surrounding quotes.
        if subprocess.call("cd '%s' && $MAKE all-pdf && mv -f *.pdf '%s'"%(tex_dir, pdf_dir), shell=True):
            raise RuntimeError("failed to run $MAKE all-pdf in %s"%tex_dir)
        logger.warning("Build finished. The built documents can be found in %s", pdf_dir)

    def clean(self, *args):
        # Remove the doctrees plus either the requested output formats or,
        # with no arguments, every known output format.
        shutil.rmtree(self._doctrees_dir())
        output_formats = list(args) if args else self._output_formats()
        for format in output_formats:
            shutil.rmtree(self._output_dir(format), ignore_errors=True)

    # Output methods generated by builder_helper; each shells out to Sphinx
    # with the matching builder name.
    html = builder_helper('html')
    pickle = builder_helper('pickle')
    web = pickle
    json = builder_helper('json')
    htmlhelp = builder_helper('htmlhelp')
    latex = builder_helper('latex')
    changes = builder_helper('changes')
    linkcheck = builder_helper('linkcheck')
    # the customized builder which only generates the object.inv
    # intersphinx inventory files (used for the first reference pass)
    inventory = builder_helper('inventory')
##########################################
# Parallel Building Ref Manual #
##########################################
def build_other_doc(args):
    """Worker entry point for building one non-reference document.

    ``args`` is a flat tuple ``(document, method_name, kwds, *build_args)``
    as packed by AllBuilder._wrapper for use with ``multiprocessing.Pool``.
    """
    document, method_name, kwds = args[0], args[1], args[2]
    build_args = args[3:]
    logger.warning("\nBuilding %s.\n" % document)
    doc_builder = get_builder(document)
    getattr(doc_builder, method_name)(*build_args, **kwds)
class AllBuilder(object):
    """
    A class used to build all of the documentation.
    """
    def __getattr__(self, attr):
        """
        For any attributes not explicitly defined, we just go through
        all of the documents and call their attr. For example,
        'AllBuilder().json()' will go through all of the documents
        and call the json() method on their builders.
        """
        from functools import partial
        return partial(self._wrapper, attr)

    def _wrapper(self, name, *args, **kwds):
        """
        This is the function which goes through all of the documents
        and does the actual building.
        """
        import time
        start = time.time()
        docs = self.get_all_documents()
        refs = [x for x in docs if x.endswith('reference')]
        others = [x for x in docs if not x.endswith('reference')]

        # Build the reference manual twice to resolve references. That is,
        # build once with the inventory builder to construct the intersphinx
        # inventory files, and then build the second time for real. So the
        # first build should be as fast as possible;
        logger.warning("\nBuilding reference manual, first pass.\n")
        for document in refs:
            getattr(get_builder(document), 'inventory')(*args, **kwds)

        logger.warning("Building reference manual, second pass.\n")
        for document in refs:
            getattr(get_builder(document), name)(*args, **kwds)

        # build the other documents in parallel
        from multiprocessing import Pool
        pool = Pool(NUM_THREADS, maxtasksperchild=1)
        L = [(doc, name, kwds) + args for doc in others]
        # map_async handles KeyboardInterrupt correctly. Plain map and
        # apply_async does not, so don't use it.  The huge get() timeout
        # likewise makes Ctrl-C propagate promptly.
        pool.map_async(build_other_doc, L, 1).get(99999)
        pool.close()
        pool.join()
        logger.warning("Elapsed time: %.1f seconds."%(time.time()-start))
        logger.warning("Done building the documentation!")

    def get_all_documents(self):
        """
        Returns a list of all of the documents. A document is a directory within one of
        the language subdirectories of SAGE_DOC specified by the global LANGUAGES
        variable.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: documents = builder.AllBuilder().get_all_documents()
            sage: 'en/tutorial' in documents
            True
            sage: documents[0] == 'en/reference'
            True
        """
        documents = []
        for lang in LANGUAGES:
            for document in os.listdir(os.path.join(SAGE_DOC, lang)):
                # Skip documents listed in OMIT and stray non-directories.
                if (document not in OMIT
                    and os.path.isdir(os.path.join(SAGE_DOC, lang, document))):
                    documents.append(os.path.join(lang, document))

        # Ensure that the reference guide is compiled first so that links from
        # the other document to it are correctly resolved.
        if 'en/reference' in documents:
            documents.remove('en/reference')
            documents.insert(0, 'en/reference')
        return documents
class WebsiteBuilder(DocBuilder):
    """
    Builds the top-level 'website' index page and mirrors its output one
    directory up, so it sits next to the other built documents.
    """
    def html(self):
        """
        After we've finished building the website index page, we copy
        everything one directory up. Then we call
        :meth:`create_html_redirects`.
        """
        DocBuilder.html(self)
        html_output_dir = self._output_dir('html')
        for f in os.listdir(html_output_dir):
            src = os.path.join(html_output_dir, f)
            dst = os.path.join(html_output_dir, '..', f)
            if os.path.isdir(src):
                # Remove any stale copy of the directory before recopying.
                shutil.rmtree(dst, ignore_errors=True)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
        self.create_html_redirects()

    def create_html_redirects(self):
        """
        Writes a number of small HTML files; these are files which
        used to contain the main content of the reference manual
        before splitting the manual into multiple
        documents. After the split, those files have moved, so in each
        old location, write a file which redirects to the new version.
        (This is so old URLs to pieces of the reference manual still
        open the correct files.)
        """
        # The simple html template which will cause a redirect to the
        # correct file
        html_template = """<html><head>
            <meta HTTP-EQUIV="REFRESH" content="0; url=%s">
            </head><body></body></html>"""

        reference_dir = os.path.abspath(os.path.join(self._output_dir('html'),
                                                     '..', 'reference'))
        reference_builder = ReferenceBuilder('reference')
        refdir = os.path.join(os.environ['SAGE_DOC'], 'en', 'reference')
        for document in reference_builder.get_all_documents(refdir):
            # path is the directory above reference dir
            path = os.path.abspath(os.path.join(reference_dir, '..'))

            # the name of the subdocument
            document_name = document.split('/')[1]

            # the sage directory within a subdocument, for example
            # /path/to/.../output/html/en/reference/algebras/sage
            sage_directory = os.path.join(path, document, 'sage')

            # Walk through all of the files in the sage_directory
            for dirpath, dirnames, filenames in os.walk(sage_directory):
                # a string like reference/algebras/sage/algebras
                short_path = dirpath[len(path)+1:]

                # a string like sage/algebras
                shorter_path = os.path.join(*short_path.split(os.sep)[2:])

                # Make the shorter path directory
                try:
                    os.makedirs(os.path.join(reference_dir, shorter_path))
                except OSError:
                    # Directory already exists; that is fine.
                    pass

                for filename in filenames:
                    if not filename.endswith('html'):
                        continue

                    # the name of the html file we are going to create
                    redirect_filename = os.path.join(reference_dir, shorter_path, filename)

                    # the number of levels up we need to use in the relative url
                    levels_up = len(shorter_path.split(os.sep))

                    # the relative url that we will redirect to
                    redirect_url = "/".join(['..']*levels_up + [document_name, shorter_path, filename])

                    # write the html file which performs the redirect
                    with open(redirect_filename, 'w') as f:
                        f.write(html_template % redirect_url)

    def clean(self):
        """
        When we clean the output for the website index, we need to
        remove all of the HTML that were placed in the parent
        directory.
        """
        html_output_dir = self._output_dir('html')
        parent_dir = os.path.realpath(os.path.join(html_output_dir, '..'))
        for filename in os.listdir(html_output_dir):
            parent_filename = os.path.join(parent_dir, filename)
            if not os.path.exists(parent_filename):
                continue
            if os.path.isdir(parent_filename):
                shutil.rmtree(parent_filename, ignore_errors=True)
            else:
                os.unlink(parent_filename)
        DocBuilder.clean(self)
class ReferenceBuilder(AllBuilder):
"""
This class builds the reference manual. It uses DocBuilder to
build the top-level page and ReferenceSubBuilder for each
sub-component.
"""
def __init__(self, name, lang='en'):
    """
    Record the reference manual's name and language.  The name may be
    prefixed by a language directory (e.g. 'en/reference'), in case it's
    not identical to 'reference'.
    """
    AllBuilder.__init__(self)
    pieces = name.split(os.path.sep)
    if pieces[0] in LANGUAGES:
        # A leading language component overrides the ``lang`` argument.
        lang = pieces.pop(0)
    self.name = pieces[0]
    self.lang = lang
def _output_dir(self, type, lang='en'):
    """
    Return (creating it if necessary) the directory where output of the
    given ``type`` and language ``lang`` is stored.

    EXAMPLES::

        sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
        sage: b = builder.ReferenceBuilder('reference')
        sage: b._output_dir('html')
        '.../doc/output/html/en/reference'
    """
    target = os.path.join(SAGE_DOC, "output", type, lang, self.name)
    mkdir(target)
    return target
def _wrapper(self, format, *args, **kwds):
"""
Builds reference manuals. For each language, it builds the
top-level document and its components.
"""
for lang in LANGUAGES:
refdir = os.path.join(SAGE_DOC, lang, self.name)
if not os.path.exists(refdir):
continue
output_dir = self._output_dir(format, lang)
from multiprocessing import Pool
pool = Pool(NUM_THREADS, maxtasksperchild=1)
L = [(doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)]
# (See comment in AllBuilder._wrapper about using map instead of apply.)
pool.map_async(build_ref_doc, L, 1).get(99999)
pool.close()
pool.join()
# The html refman must be build at the end to ensure correct
# merging of indexes and inventories.
# Sphinx is run here in the current process (not in a
# subprocess) and the IntersphinxCache gets populated to be
# used for the second pass of the reference manual and for
# the other documents.
getattr(DocBuilder(self.name, lang), format)(*args, **kwds)
# PDF: we need to build master index file which lists all
# of the PDF file. So we create an html file, based on
# the file index.html from the "website" target.
if format == 'pdf':
# First build the website page. (This only takes a
# few seconds.)
getattr(get_builder('website'), 'html')()
# Copy the relevant pieces of
# output/html/en/website/_static to output_dir.
# (Don't copy all of _static to save some space: we
# don't need all of the MathJax stuff, and in
# particular we don't need the fonts.)
website_dir = os.path.join(SAGE_DOC, 'output', 'html',
'en', 'website')
static_files = ['COPYING.txt', 'basic.css', 'blank.gif',
'default.css', 'doctools.js', 'favicon.ico',
'file.png', 'jquery.js', 'minus.png',
'pdf.png', 'plus.png', 'pygments.css',
'sage.css', 'sageicon.png', 'sagelogo.png',
'searchtools.js', 'sidebar.js', 'underscore.js']
mkdir(os.path.join(output_dir, '_static'))
for f in static_files:
try:
shutil.copyfile(os.path.join(website_dir, '_static', f),
os.path.join(output_dir, '_static', f))
except IOError: # original file does not exist
pass
# Now modify website's index.html page and write it
# to output_dir.
f = open(os.path.join(website_dir, 'index.html'))
html = f.read().replace('Documentation', 'Reference')
f.close()
html_output_dir = os.path.dirname(website_dir)
html = html.replace('http://www.sagemath.org',
os.path.join(html_output_dir, 'index.html'))
# From index.html, we want the preamble and the tail.
html_end_preamble = html.find('<h1>Sage Reference')
html_bottom = html.rfind('</table>') + len('</table>')
# For the content, we modify doc/en/reference/index.rst,
# which has two parts: the body and the table of contents.
f = open(os.path.join(SAGE_DOC, lang, 'reference', 'index.rst'))
rst = f.read()
f.close()
# Replace rst links with html links. There are two forms:
#
# `blah`__ followed by __ LINK
#
# :doc:`blah <module/index>`
#
# Change the first form to
#
# <a href="LINK">blah</a>
#
# Change the second form to
#
# <a href="module/module.pdf">blah <img src="_static/pdf.png" /></a>
#
rst = re.sub('`([^`]*)`__\.\n\n__ (.*)',
r'<a href="\2">\1</a>.', rst)
rst = re.sub(r':doc:`([^<]*?)\s+<(.*)/index>`',
r'<a href="\2/\2.pdf">\1 <img src="_static/pdf.png" /></a>',
rst)
# Get rid of todolist and miscellaneous rst markup.
rst = rst.replace('.. toctree::', '')
rst = rst.replace(':maxdepth: 2', '')
rst = rst.replace('todolist', '')
start = rst.find('=\n') + 1
end = rst.find('Table of Contents')
# Body: add paragraph <p> markup.
rst_body = rst[start:end]
rst_body = rst_body.replace('\n\n', '</p>\n<p>')
start = rst.find('Table of Contents') + 2*len('Table of Contents') + 1
# Don't include the indices.
end = rst.find('Indices and Tables')
# TOC: change * to <li>, change rst headers to html headers.
rst_toc = rst[start:end]
rst_toc = rst_toc.replace('*', '<li>')
rst_toc = re.sub('\n([A-Z][a-zA-Z, ]*)\n-*\n',
'</ul>\n\n\n<h2>\\1</h2>\n\n<ul>\n', rst_toc)
# Now write the file.
new_index = open(os.path.join(output_dir, 'index.html'), 'w')
new_index.write(html[:html_end_preamble])
new_index.write('<h1>' + rst[:rst.find('\n')] +
' (PDF version)'+ '</h1>')
new_index.write(rst_body)
new_index.write('<h2>Table of Contents</h2>\n\n<ul>')
new_index.write(rst_toc)
new_index.write('</ul>\n\n')
new_index.write(html[html_bottom:])
new_index.close()
logger.warning('''
PDF documents have been created in subdirectories of
%s
Alternatively, you can open
%s
for a webpage listing all of the documents.''' % (output_dir,
os.path.join(output_dir,
'index.html')))
def get_all_documents(self, refdir):
"""
Returns a list of all reference manual components to build.
We add a component name if it's a subdirectory of the manual's
directory and contains a file named 'index.rst'.
We return the largest component (most subdirectory entries)
first since they will take the longest to build.
EXAMPLES::
sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
sage: b = builder.ReferenceBuilder('reference')
sage: refdir = os.path.join(os.environ['SAGE_DOC'], 'en', b.name)
sage: sorted(b.get_all_documents(refdir))
['reference/algebras', 'reference/arithgroup', ..., 'reference/tensor']
"""
documents = []
for doc in os.listdir(refdir):
directory = os.path.join(refdir, doc)
if os.path.exists(os.path.join(directory, 'index.rst')):
n = len(os.listdir(directory))
documents.append((-n, os.path.join(self.name, doc)))
return [ doc[1] for doc in sorted(documents) ]
class ReferenceSubBuilder(DocBuilder):
    """
    This class builds sub-components of the reference manual.  It is
    responsible for making sure the auto generated ReST files for the
    Sage library are up to date.

    When building any output, we must first go through and check
    to see if we need to update any of the autogenerated ReST
    files.  There are two cases where this would happen:

    1. A new module gets added to one of the toctrees.

    2. The actual module gets updated and possibly contains a new
       title.
    """
    def __init__(self, *args, **kwds):
        DocBuilder.__init__(self, *args, **kwds)
        self._wrap_builder_helpers()

    def _wrap_builder_helpers(self):
        # Replace every output-format method (html, pdf, ...) on this
        # instance by a partial of self._wrapper, so that each build
        # first refreshes the auto-generated ReST files.
        from functools import partial, update_wrapper
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                f = partial(self._wrapper, attr)
                f.is_output_format = True
                update_wrapper(f, getattr(self, attr))
                setattr(self, attr, f)

    def _wrapper(self, build_type, *args, **kwds):
        """
        This is the wrapper around the builder_helper methods that
        goes through and makes sure things are up to date.
        """
        # Delete the auto-generated .rst files, if the inherited
        # and/or underscored members options have changed.
        global options
        inherit_prev = self.get_cache().get('option_inherited')
        underscore_prev = self.get_cache().get('option_underscore')
        if (inherit_prev is None or inherit_prev != options.inherited or
            underscore_prev is None or underscore_prev != options.underscore):
            logger.info("Detected change(s) in inherited and/or underscored members option(s).")
            self.clean_auto()
            # Drop the cached_method cache so get_cache() re-reads from disk.
            self.get_cache.clear_cache()

        # After "sage -clone", refresh the .rst file mtimes in
        # environment.pickle.
        if options.update_mtimes:
            logger.info("Checking for .rst file mtimes to update...")
            self.update_mtimes()

        # Update the .rst files for modified Python modules.
        logger.info("Updating .rst files with modified modules...")
        for module_name in self.get_modified_modules():
            self.write_auto_rest_file(module_name.replace(os.path.sep, '.'))

        # Write the .rst files for newly included modules.
        logger.info("Writing .rst files for newly-included modules...")
        for module_name in self.get_newly_included_modules(save=True):
            self.write_auto_rest_file(module_name)

        # Copy over the custom .rst files from _sage.
        _sage = os.path.join(self.dir, '_sage')
        if os.path.exists(_sage):
            logger.info("Copying over custom .rst files from %s ...", _sage)
            shutil.copytree(_sage, os.path.join(self.dir, 'sage'))

        # Finally run the real (un-wrapped) builder.
        getattr(DocBuilder, build_type)(self, *args, **kwds)

    def cache_filename(self):
        """
        Returns the filename where the pickle of the dictionary of
        already generated ReST files is stored.
        """
        return os.path.join(self._doctrees_dir(), 'reference.pickle')

    @cached_method
    def get_cache(self):
        """
        Retrieve the cache of already generated ReST files.  If it
        doesn't exist, then we just return an empty dictionary.  If it
        is corrupted, return an empty dictionary.
        """
        filename = self.cache_filename()
        if not os.path.exists(filename):
            return {}
        import cPickle
        file = open(self.cache_filename(), 'rb')
        try:
            cache = cPickle.load(file)
        except StandardError:
            # A corrupt cache simply means everything gets regenerated.
            logger.debug("Cache file '%s' is corrupted; ignoring it..."% filename)
            cache = {}
        else:
            logger.debug("Loaded .rst file cache: %s", filename)
        finally:
            file.close()
        return cache

    def save_cache(self):
        """
        Save the cache of already generated ReST files.
        """
        cache = self.get_cache()

        # Also record the option values the cache was built with, so
        # that a later run can detect option changes (see _wrapper).
        global options
        cache['option_inherited'] = options.inherited
        cache['option_underscore'] = options.underscore

        import cPickle
        file = open(self.cache_filename(), 'wb')
        cPickle.dump(cache, file)
        file.close()
        logger.debug("Saved .rst file cache: %s", self.cache_filename())

    def get_sphinx_environment(self):
        """
        Returns the Sphinx environment for this project, or None (by
        falling off the end) if the environment pickle cannot be opened.
        """
        from sphinx.environment import BuildEnvironment
        # Minimal stand-in for a Sphinx config object; frompickle only
        # needs a 'values' attribute.
        class Foo(object):
            pass
        config = Foo()
        config.values = []

        env_pickle = os.path.join(self._doctrees_dir(), 'environment.pickle')
        try:
            env = BuildEnvironment.frompickle(config, env_pickle)
            logger.debug("Opened Sphinx environment: %s", env_pickle)
            return env
        except IOError as err:
            logger.debug("Failed to open Sphinx environment: %s", err)
            pass

    def update_mtimes(self):
        """
        Updates the modification times for ReST files in the Sphinx
        environment for this project.
        """
        env = self.get_sphinx_environment()
        if env is not None:
            import time
            for doc in env.all_docs:
                env.all_docs[doc] = time.time()
            logger.info("Updated %d .rst file mtimes", len(env.all_docs))
            # This is the only place we need to save (as opposed to
            # load) Sphinx's pickle, so we do it right here.
            env_pickle = os.path.join(self._doctrees_dir(),
                                      'environment.pickle')

            # When cloning a new branch (see
            # SAGE_LOCAL/bin/sage-clone), we hard link the doc output.
            # To avoid making unlinked, potentially inconsistent
            # copies of the environment, we *don't* use
            # env.topickle(env_pickle), which first writes a temporary
            # file.  We adapt sphinx.environment's
            # BuildEnvironment.topickle:
            import cPickle, types

            # remove unpicklable attributes
            env.set_warnfunc(None)
            del env.config.values
            picklefile = open(env_pickle, 'wb')
            # remove potentially pickling-problematic values from config
            for key, val in vars(env.config).items():
                if key.startswith('_') or isinstance(val, (types.ModuleType,
                                                           types.FunctionType,
                                                           types.ClassType)):
                    del env.config[key]
            try:
                cPickle.dump(env, picklefile, cPickle.HIGHEST_PROTOCOL)
            finally:
                picklefile.close()

            logger.debug("Saved Sphinx environment: %s", env_pickle)

    def get_modified_modules(self):
        """
        Returns an iterator for all the modules that have been modified
        since the documentation was last built.
        """
        env = self.get_sphinx_environment()
        if env is None:
            logger.debug("Stopped check for modified modules.")
            return
        try:
            added, changed, removed = env.get_outdated_files(False)
            logger.info("Sphinx found %d modified modules", len(changed))
        except OSError as err:
            logger.debug("Sphinx failed to determine modified modules: %s", err)
            # Can't tell what changed, so start over from scratch.
            self.clean_auto()
            return
        for name in changed:
            # Only pay attention to files in a directory sage/... In
            # particular, don't treat a file like 'sagetex.rst' in
            # doc/en/reference/misc as an autogenerated file: see
            # #14199.
            if name.startswith('sage' + os.sep):
                yield name

    def print_modified_modules(self):
        """
        Prints a list of all the modules that have been modified since
        the documentation was last built.
        """
        for module_name in self.get_modified_modules():
            print module_name

    def get_all_rst_files(self, exclude_sage=True):
        """
        Returns an iterator for all rst files which are not
        autogenerated (i.e. not below the auto-generated sage/ tree,
        unless exclude_sage is False).
        """
        for directory, subdirs, files in os.walk(self.dir):
            if exclude_sage and directory.startswith(os.path.join(self.dir, 'sage')):
                continue
            for filename in files:
                if not filename.endswith('.rst'):
                    continue
                yield os.path.join(directory, filename)

    def get_all_included_modules(self):
        """
        Returns an iterator for all modules which are included in the
        reference manual.
        """
        for filename in self.get_all_rst_files():
            for module in self.get_modules(filename):
                yield module

    def get_newly_included_modules(self, save=False):
        """
        Returns an iterator for all modules that appear in the
        toctrees that don't appear in the cache.
        """
        cache = self.get_cache()
        new_modules = 0
        for module in self.get_all_included_modules():
            if module not in cache:
                cache[module] = True
                new_modules += 1
                yield module
        logger.info("Found %d newly included modules", new_modules)
        if save:
            self.save_cache()

    def print_newly_included_modules(self):
        """
        Prints all of the modules that appear in the toctrees that
        don't appear in the cache.
        """
        for module_name in self.get_newly_included_modules():
            print module_name

    def get_modules(self, filename):
        """
        Given a filename for a ReST file, return an iterator for
        all of the autogenerated ReST files that it includes.
        """
        # Create the regular expression used to detect an autogenerated file.
        auto_re = re.compile('^\s*(..\/)*(sage(nb)?\/[\w\/]*)\s*$')

        # Read the lines.
        f = open(filename)
        lines = f.readlines()
        f.close()

        for line in lines:
            match = auto_re.match(line)
            if match:
                yield match.group(2).replace(os.path.sep, '.')

    def get_module_docstring_title(self, module_name):
        """
        Returns the title of the module from its docstring.
        """
        # Try to import the module.
        try:
            __import__(module_name)
        except ImportError as err:
            logger.error("Warning: Could not import %s %s", module_name, err)
            return "UNABLE TO IMPORT MODULE"
        module = sys.modules[module_name]

        # Get the docstring.
        doc = module.__doc__
        if doc is None:
            doc = module.doc if hasattr(module, 'doc') else ""

        # Extract the title: the first nonempty line after the opening
        # newline (or the whole docstring if it is a single line).
        i = doc.find('\n')
        if i != -1:
            return doc[i+1:].lstrip().splitlines()[0]
        else:
            return doc

    def auto_rest_filename(self, module_name):
        """
        Returns the name of the file associated to a given module

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: import builder
            sage: builder.ReferenceSubBuilder("reference").auto_rest_filename("sage.combinat.partition")
            '.../doc/en/reference/sage/combinat/partition.rst'
        """
        return self.dir + os.path.sep + module_name.replace('.',os.path.sep) + '.rst'

    def write_auto_rest_file(self, module_name):
        """
        Writes the autogenerated ReST file for module_name.
        """
        if not module_name.startswith('sage'):
            return
        filename = self.auto_rest_filename(module_name)
        mkdir(os.path.dirname(filename))

        outfile = open(filename, 'w')

        title = self.get_module_docstring_title(module_name)

        if title == '':
            logger.error("Warning: Missing title for %s", module_name)
            title = "MISSING TITLE"

        # Don't doctest the autogenerated file.
        outfile.write(".. nodoctest\n\n")
        # Now write the actual content.
        outfile.write(".. _%s:\n\n"%module_name)
        outfile.write(title + '\n')
        outfile.write('='*len(title) + "\n\n")
        outfile.write('.. This file has been autogenerated.\n\n')

        global options
        inherited = ':inherited-members:' if options.inherited else ''

        automodule = '''
.. automodule:: %s
   :members:
   :undoc-members:
   :show-inheritance:
   %s
'''
        outfile.write(automodule % (module_name, inherited))

        outfile.close()

    def clean_auto(self):
        """
        Remove the cache file for the autogenerated files as well as
        the files themselves.
        """
        if os.path.exists(self.cache_filename()):
            os.unlink(self.cache_filename())
            logger.debug("Deleted .rst cache file: %s", self.cache_filename())

        import shutil
        try:
            shutil.rmtree(os.path.join(self.dir, 'sage'))
            logger.debug("Deleted auto-generated .rst files in: %s",
                         os.path.join(self.dir, 'sage'))
        except OSError:
            # Directory did not exist; nothing to clean.
            pass

    def get_unincluded_modules(self):
        """
        Returns an iterator for all the modules in the Sage library
        which are not included in the reference manual.
        """
        # Make a dictionary of the included modules.
        included_modules = {}
        for module_name in self.get_all_included_modules():
            included_modules[module_name] = True

        base_path = os.path.join(SAGE_SRC, 'sage')
        for directory, subdirs, files in os.walk(base_path):
            for filename in files:
                if not (filename.endswith('.py') or
                        filename.endswith('.pyx')):
                    continue

                path = os.path.join(directory, filename)

                # Create the module name.
                module_name = path[len(base_path):].replace(os.path.sep, '.')
                module_name = 'sage' + module_name
                module_name = module_name[:-4] if module_name.endswith('pyx') else module_name[:-3]

                # Exclude __init__ and *.all modules -- they don't
                # belong in the manual.
                if module_name.endswith('__init__') or module_name.endswith('all'):
                    continue

                if module_name not in included_modules:
                    yield module_name

    def print_unincluded_modules(self):
        """
        Prints all of the modules which are not included in the Sage
        reference manual.
        """
        for module_name in self.get_unincluded_modules():
            print module_name

    def print_included_modules(self):
        """
        Prints all of the modules that are included in the Sage reference
        manual.
        """
        for module_name in self.get_all_included_modules():
            print module_name
def get_builder(name):
    """
    Returns an appropriate *Builder object for the document ``name``.
    DocBuilder and its subclasses do all the real work in building the
    documentation.
    """
    if name == 'all':
        return AllBuilder()
    elif name.endswith('reference'):
        # The top level of the reference manual ('reference' or
        # 'LANG/reference').  This must be tested before the substring
        # check below, which matches the individual components.
        return ReferenceBuilder(name)
    elif 'reference' in name:
        # An individual component, e.g. 'reference/algebras'.
        return ReferenceSubBuilder(name)
    elif name.endswith('website'):
        return WebsiteBuilder(name)
    elif name in get_documents() or name in AllBuilder().get_all_documents():
        return DocBuilder(name)
    else:
        print "'%s' is not a recognized document. Type 'sage -docbuild -D' for a list"%name
        print "of documents, or 'sage -docbuild --help' for more help."
        sys.exit(1)
def format_columns(lst, align='<', cols=None, indent=4, pad=3, width=80):
    """
    Utility function that formats a list as a simple table and returns
    a Unicode string representation.  The number of columns is
    computed from the other options, unless it's passed as a keyword
    argument.  For help on Python's string formatter, see

    http://docs.python.org/library/string.html#format-string-syntax
    """
    # Can we generalize this (efficiently) to other / multiple inputs
    # and generators?
    cell = max(map(len, lst)) + pad
    if cols is None:
        import math
        cols = math.trunc((width - indent) / cell)
    margin = " " * indent
    text = margin
    for pos, entry in enumerate(lst):
        if pos and pos % cols == 0:
            # Start a new indented row after every `cols` entries.
            text += "\n" + margin
        text += "{0:{1}{2}}".format(entry, align, cell)
    text += "\n"
    return unicode(text)
def help_usage(s=u"", compact=False):
    """
    Appends and returns a brief usage message for the Sage
    documentation builder.  Unless 'compact' is True, a final newline
    character is appended.
    """
    usage = "sage -docbuild [OPTIONS] DOCUMENT (FORMAT | COMMAND)"
    if compact:
        return s + usage
    return s + usage + "\n"
def help_description(s=u"", compact=False):
    """
    Appends and returns a brief description of the Sage documentation
    builder.  If 'compact' is False, the function adds a final newline
    character.
    """
    s += "Build or return information about Sage documentation.\n"
    s += "    DOCUMENT    name of the document to build\n"
    s += "    FORMAT      document output format\n"
    s += "    COMMAND     document-specific command\n"
    s += "A DOCUMENT and either a FORMAT or a COMMAND are required,\n"
    s += "unless a list of one or more of these is requested."
    if not compact:
        s += "\n"
    return s
def help_examples(s=u""):
    """
    Appends and returns some usage examples for the Sage documentation
    builder.
    """
    s += "Examples:\n"
    s += "    sage -docbuild -FDC all\n"
    s += "    sage -docbuild constructions pdf\n"
    s += "    sage -docbuild reference html -jv3\n"
    s += "    sage -docbuild --mathjax tutorial html\n"
    s += "    sage -docbuild reference print_unincluded_modules\n"
    s += "    sage -docbuild developer -j html --sphinx-opts -q,-aE --verbose 2"
    return s
def get_documents():
    """
    Returns a list of document names the Sage documentation builder
    will accept as command-line arguments.  The default-language
    prefix 'en/' is stripped from the names.
    """
    names = AllBuilder().get_all_documents()
    return [doc[len('en/'):] if doc.startswith('en/') else doc
            for doc in names]
def help_documents(s=u""):
    """
    Appends and returns a tabular list of documents, including a
    shortcut 'all' for all documents, available to the Sage
    documentation builder.
    """
    docs = get_documents()
    parts = [s,
             "DOCUMENTs:\n",
             format_columns(docs + ['all (!)']),
             "(!) Builds everything.\n\n"]
    if 'reference' in docs:
        parts.append("Other valid document names take the form 'reference/DIR', where\n")
        parts.append("DIR is a subdirectory of SAGE_DOC/en/reference/.\n")
        parts.append("This builds just the specified part of the reference manual.\n")
    return u"".join(parts)
def get_formats():
    """
    Returns a list of output formats the Sage documentation builder
    will accept on the command-line, with 'html' and 'pdf' listed
    first.
    """
    builder = DocBuilder('en/tutorial')
    rest = [fmt for fmt in builder._output_formats() if fmt != 'html']
    return ['html', 'pdf'] + rest
def help_formats(s=u""):
    """
    Appends and returns a tabular list of output formats available to
    the Sage documentation builder.
    """
    return s + "FORMATs:\n" + format_columns(get_formats())
def help_commands(name='all', s=u""):
    """
    Appends and returns a tabular list of commands, if any, the Sage
    documentation builder can run on the indicated document.  The
    default is to list all commands for all documents.
    """
    # To do: Generate the lists dynamically, using class attributes,
    # as with the Builders above.
    commands = {
        'reference': ['print_included_modules',
                      'print_modified_modules (*)',
                      'print_unincluded_modules',
                      'print_newly_included_modules (*)'],
    }
    for doc, cmds in commands.items():
        if name in ('all', doc):
            s += "COMMANDs for the DOCUMENT '" + doc + "':\n"
            s += format_columns(cmds)
            s += "(*) Since the last build.\n"
    return s
def help_message_long(option, opt_str, value, parser):
    """
    Prints an extended help message for the Sage documentation builder
    and exits.

    This is an optparse callback; the (option, opt_str, value, parser)
    signature is dictated by optparse, but only parser is used.
    """
    help_funcs = [ help_usage, help_description, help_documents,
                   help_formats, help_commands, parser.format_option_help,
                   help_examples ]
    for f in help_funcs:
        print f()
    sys.exit(0)
def help_message_short(option=None, opt_str=None, value=None, parser=None,
                       error=False):
    """
    Prints a help message for the Sage documentation builder.  The
    message includes command-line usage and a list of options.  The
    message is printed only on the first call.  If error is True
    during this call, the message is printed only if the user hasn't
    requested a list (e.g., documents, formats, commands).

    The (option, opt_str, value, parser) signature follows optparse's
    callback protocol; only ``parser`` is actually used.
    """
    if hasattr(parser.values, 'printed_help'):
        # Help was already printed once; never print it again.
        return
    # On an error, stay quiet if the user just asked for a list (see
    # help_wrapper): the printed list is the real answer.
    if not (error and hasattr(parser.values, 'printed_list')):
        parser.print_help()
    # Record that help was handled, even when printing was suppressed.
    parser.values.printed_help = 1
def help_wrapper(option, opt_str, value, parser):
    """
    A helper wrapper for command-line options to the Sage
    documentation builder that print lists, such as document names,
    formats, and document-specific commands.

    This is an optparse callback; which list gets printed is selected
    by the destination of the triggering option.
    """
    if option.dest == 'commands':
        print help_commands(value),
    if option.dest == 'documents':
        print help_documents(),
    if option.dest == 'formats':
        print help_formats(),
    # Remember that a list was printed so a later error does not also
    # dump the full help text (see help_message_short).
    setattr(parser.values, 'printed_list', 1)
class IndentedHelpFormatter2(optparse.IndentedHelpFormatter, object):
    """
    Custom help formatter class for optparse's OptionParser.
    """
    def format_description(self, description):
        """
        Returns a formatted description, preserving any original
        explicit new line characters.
        """
        if not description:
            return ""
        formatted = [self._format_text(chunk)
                     for chunk in description.split('\n')]
        return "\n".join(formatted) + "\n"

    def format_heading(self, heading):
        """
        Returns a formatted heading using the superclass' formatter.
        A heading of 'options' (in any case) is converted to ALL CAPS,
        so it matches the 'OPTIONS' token in the builder's usage
        message.
        """
        if heading.lower() == 'options':
            heading = "OPTIONS"
        return super(IndentedHelpFormatter2, self).format_heading(heading)
def setup_parser():
    """
    Sets up and returns a command-line OptionParser instance for the
    Sage documentation builder.
    """
    # Documentation: http://docs.python.org/library/optparse.html
    parser = optparse.OptionParser(add_help_option=False,
                                   usage=help_usage(compact=True),
                                   formatter=IndentedHelpFormatter2(),
                                   description=help_description(compact=True))

    # Standard options. Note: We use explicit option.dest names
    # to avoid ambiguity.
    standard = optparse.OptionGroup(parser, "Standard")
    standard.add_option("-h", "--help",
                        action="callback", callback=help_message_short,
                        help="show a help message and exit")
    standard.add_option("-H", "--help-all",
                        action="callback", callback=help_message_long,
                        help="show an extended help message and exit")
    standard.add_option("-D", "--documents", dest="documents",
                        action="callback", callback=help_wrapper,
                        help="list all available DOCUMENTs")
    standard.add_option("-F", "--formats", dest="formats",
                        action="callback", callback=help_wrapper,
                        help="list all output FORMATs")
    standard.add_option("-C", "--commands", dest="commands",
                        type="string", metavar="DOC",
                        action="callback", callback=help_wrapper,
                        help="list all COMMANDs for DOCUMENT DOC; use 'all' to list all")
    standard.add_option("-i", "--inherited", dest="inherited",
                        default=False, action="store_true",
                        help="include inherited members in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-u", "--underscore", dest="underscore",
                        default=False, action="store_true",
                        help="include variables prefixed with '_' in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-j", "--mathjax", "--jsmath", dest="mathjax",
                        action="store_true",
                        help="render math using MathJax; FORMATs: html, json, pickle, web")
    standard.add_option("--no-pdf-links", dest="no_pdf_links",
                        action="store_true",
                        help="do not include PDF links in DOCUMENT 'website'; FORMATs: html, json, pickle, web")
    standard.add_option("--warn-links", dest="warn_links",
                        default=False, action="store_true",
                        help="issue a warning whenever a link is not properly resolved; equivalent to '--sphinx-opts -n' (sphinx option: nitpicky)")
    standard.add_option("--check-nested", dest="check_nested",
                        action="store_true",
                        help="check picklability of nested classes in DOCUMENT 'reference'")
    standard.add_option("-N", "--no-colors", dest="color", default=True,
                        action="store_false",
                        help="do not color output; does not affect children")
    standard.add_option("-q", "--quiet", dest="verbose",
                        action="store_const", const=0,
                        help="work quietly; same as --verbose=0")
    standard.add_option("-v", "--verbose", dest="verbose",
                        type="int", default=1, metavar="LEVEL",
                        action="store",
                        help="report progress at LEVEL=0 (quiet), 1 (normal), 2 (info), or 3 (debug); does not affect children")
    parser.add_option_group(standard)

    # Advanced options.
    advanced = optparse.OptionGroup(parser, "Advanced",
                                    "Use these options with care.")
    advanced.add_option("-S", "--sphinx-opts", dest="sphinx_opts",
                        type="string", metavar="OPTS",
                        action="store",
                        help="pass comma-separated OPTS to sphinx-build")
    advanced.add_option("-U", "--update-mtimes", dest="update_mtimes",
                        default=False, action="store_true",
                        help="before building reference manual, update modification times for auto-generated ReST files")
    parser.add_option_group(advanced)

    return parser
def setup_logger(verbose=1, color=True):
    """
    Sets up and returns a Python Logger instance for the Sage
    documentation builder.

    - ``verbose`` -- 0 (quiet/ERROR), 1 (normal/WARNING), 2 (INFO) or
      3 (DEBUG; also switches to a detailed, colorized record format)
    - ``color`` -- whether to colorize the debug fields (only honored
      when stdout is a color-capable terminal)
    """
    # Set up colors. Adapted from sphinx.cmdline.
    import sphinx.util.console as c
    if not color or not sys.stdout.isatty() or not c.color_terminal():
        c.nocolor()

    # Available colors: black, darkgray, (dark)red, dark(green),
    # brown, yellow, (dark)blue, purple, fuchsia, turquoise, teal,
    # lightgray, white.  Available styles: reset, bold, faint,
    # standout, underline, blink.

    # Set up log record formats.
    format_std = "%(message)s"
    formatter = logging.Formatter(format_std)

    # format_debug = "%(module)s #%(lineno)s %(funcName)s() %(message)s"
    fields = ['%(module)s', '#%(lineno)s', '%(funcName)s()', '%(message)s']
    colors = ['darkblue', 'darkred', 'brown', 'reset']
    styles = ['reset', 'reset', 'reset', 'reset']
    # Join the colorized fields with single spaces.  (The previous
    # index loop's separator test `i != len(fields)` was always true,
    # so it also appended a stray trailing space after the last field.)
    format_debug = " ".join(
        c.colorize(style, c.colorize(color_name, field))
        for field, color_name, style in zip(fields, colors, styles))

    # Documentation: http://docs.python.org/library/logging.html
    logger = logging.getLogger('doc.common.builder')

    # Note: There's also Handler.setLevel().  The argument is the
    # lowest severity message that the respective logger or handler
    # will pass on.  The default levels are DEBUG, INFO, WARNING,
    # ERROR, and CRITICAL.  We use "WARNING" for normal verbosity and
    # "ERROR" for quiet operation.  It's possible to define custom
    # levels.  See the documentation for details.
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    if verbose == 1:
        logger.setLevel(logging.WARNING)
    if verbose == 2:
        logger.setLevel(logging.INFO)
    if verbose == 3:
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(format_debug)

    # NOTE(review): a handler is added on every call, so calling this
    # twice would duplicate output; the builder calls it exactly once.
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
class IntersphinxCache:
    """
    Replace sphinx.ext.intersphinx.fetch_inventory by an in-memory
    cached version.
    """
    def __init__(self):
        # Map (uri, inv) pairs to previously fetched inventories.
        self.inventories = {}
        # Keep the genuine fetcher around, then install ourselves in
        # its place.
        self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
        sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory

    def fetch_inventory(self, app, uri, inv):
        """
        Return the result of ``sphinx.ext.intersphinx.fetch_inventory()``
        from a cache if possible. Otherwise, call
        ``sphinx.ext.intersphinx.fetch_inventory()`` and cache the result.
        """
        key = (uri, inv)
        if key not in self.inventories:
            self.inventories[key] = self.real_fetch_inventory(app, uri, inv)
        return self.inventories[key]
if __name__ == '__main__':
    # Parse the command-line.
    parser = setup_parser()
    options, args = parser.parse_args()

    # Get the name and type (target format) of the document we are
    # trying to build.
    try:
        name, type = args
    except ValueError:
        help_message_short(parser=parser, error=True)
        sys.exit(1)

    # Set up module-wide logging.
    logger = setup_logger(options.verbose, options.color)

    # Process selected options.
    #
    # MathJax: this check usually has no practical effect, since
    # SAGE_DOC_MATHJAX is set to "True" by the script sage-env.
    # To disable MathJax, set SAGE_DOC_MATHJAX to "no" or "False".
    if options.mathjax or (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
                           and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
        os.environ['SAGE_DOC_MATHJAX'] = 'True'

    if options.check_nested:
        os.environ['SAGE_CHECK_NESTED'] = 'True'

    if options.underscore:
        os.environ['SAGE_DOC_UNDERSCORE'] = "True"

    # ALLSPHINXOPTS / WEBSITESPHINXOPTS come from build_options.py
    # (pulled in by execfile at the top of this module).
    if options.sphinx_opts:
        ALLSPHINXOPTS += options.sphinx_opts.replace(',', ' ') + " "
    if options.no_pdf_links:
        WEBSITESPHINXOPTS = " -A hide_pdf_links=1 "
    if options.warn_links:
        ALLSPHINXOPTS += "-n "

    # Make sure common/static exists.
    mkdir(os.path.join(SAGE_DOC, 'common', 'static'))

    import sage.all
    # Minimize GAP/libGAP RAM usage when we build the docs
    set_gap_memory_pool_size(1)  # 1 MB

    # Set up Intersphinx cache
    C = IntersphinxCache()

    # Get the builder and build.
    getattr(get_builder(name), type)()
| 38.894701 | 148 | 0.58313 |
"""
The documentation builder
It is the starting point for building documentation, and is
responsible to figure out what to build and with which options. The
actual documentation build for each individual document is then done
in a subprocess call to sphinx, see :func:`builder_helper`.
* The builder can be configured in build_options.py
* The sphinx subprocesses are configured in conf.py
"""
import logging, optparse, os, shutil, subprocess, sys, re
import sphinx.cmdline
import sphinx.util.console
import sphinx.ext.intersphinx
try:
sys.path.remove(os.path.realpath(os.getcwd()))
except ValueError:
pass
from sage.misc.cachefunc import cached_method
from sage.misc.misc import sage_makedirs as mkdir
from sage.env import SAGE_DOC, SAGE_SRC
execfile(os.path.join(SAGE_DOC, 'common' , 'build_options.py'))
sage: b._doctrees_dir()
'.../doc/output/doctrees/en/tutorial'
"""
d = os.path.join(SAGE_DOC, "output", 'doctrees', self.lang, self.name)
mkdir(d)
return d
def _output_formats(self):
"""
Returns a list of the possible output formats.
EXAMPLES::
sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
sage: b = builder.DocBuilder('tutorial')
sage: b._output_formats()
['changes', 'html', 'htmlhelp', 'inventory', 'json', 'latex', 'linkcheck', 'pickle', 'web']
"""
output_formats = []
for attr in dir(self):
if hasattr(getattr(self, attr), 'is_output_format'):
output_formats.append(attr)
output_formats.sort()
return output_formats
def pdf(self):
"""
Builds the PDF files for this document. This is done by first
(re)-building the LaTeX output, going into that LaTeX
directory, and running 'make all-pdf' there.
EXAMPLES::
sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
sage: b = builder.DocBuilder('tutorial')
sage: b.pdf() #not tested
"""
self.latex()
tex_dir = self._output_dir('latex')
pdf_dir = self._output_dir('pdf')
if subprocess.call("cd '%s' && $MAKE all-pdf && mv -f *.pdf '%s'"%(tex_dir, pdf_dir), shell=True):
raise RuntimeError("failed to run $MAKE all-pdf in %s"%tex_dir)
logger.warning("Build finished. The built documents can be found in %s", pdf_dir)
def clean(self, *args):
shutil.rmtree(self._doctrees_dir())
output_formats = list(args) if args else self._output_formats()
for format in output_formats:
shutil.rmtree(self._output_dir(format), ignore_errors=True)
html = builder_helper('html')
pickle = builder_helper('pickle')
web = pickle
json = builder_helper('json')
htmlhelp = builder_helper('htmlhelp')
latex = builder_helper('latex')
changes = builder_helper('changes')
linkcheck = builder_helper('linkcheck')
inventory = builder_helper('inventory')
pool.join()
logger.warning("Elapsed time: %.1f seconds."%(time.time()-start))
logger.warning("Done building the documentation!")
def get_all_documents(self):
"""
Returns a list of all of the documents. A document is a directory within one of
the language subdirectories of SAGE_DOC specified by the global LANGUAGES
variable.
EXAMPLES::
sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
sage: documents = builder.AllBuilder().get_all_documents()
sage: 'en/tutorial' in documents
True
sage: documents[0] == 'en/reference'
True
"""
documents = []
for lang in LANGUAGES:
for document in os.listdir(os.path.join(SAGE_DOC, lang)):
if (document not in OMIT
and os.path.isdir(os.path.join(SAGE_DOC, lang, document))):
documents.append(os.path.join(lang, document))
# Ensure that the reference guide is compiled first so that links from
# the other document to it are correctly resolved.
if 'en/reference' in documents:
documents.remove('en/reference')
documents.insert(0, 'en/reference')
return documents
class WebsiteBuilder(DocBuilder):
    # Builder for the top-level 'website' document: the index page that
    # links to all of the other documents.
    def html(self):
        """
        After we've finished building the website index page, we copy
        everything one directory up.  Then we call
        :meth:`create_html_redirects`.
        """
        DocBuilder.html(self)
        html_output_dir = self._output_dir('html')
        # Copy every generated file/directory one level up, so the
        # website index sits at the root of the HTML output tree.
        for f in os.listdir(html_output_dir):
            src = os.path.join(html_output_dir, f)
            dst = os.path.join(html_output_dir, '..', f)
            if os.path.isdir(src):
                # copytree requires that the destination not exist yet.
                shutil.rmtree(dst, ignore_errors=True)
                shutil.copytree(src, dst)
            else:
                shutil.copy2(src, dst)
        self.create_html_redirects()

    def create_html_redirects(self):
        """
        Writes a number of small HTML files; these are files which
        used to contain the main content of the reference manual
        before splitting the manual into multiple
        documents. After the split, those files have moved, so in each
        old location, write a file which redirects to the new version.
        (This is so old URLs to pieces of the reference manual still
        open the correct files.)
        """
        # Template for a page that immediately redirects to the new URL.
        html_template = """<html><head>
            <meta HTTP-EQUIV="REFRESH" content="0; url=%s">
            </head><body></body></html>"""

        reference_dir = os.path.abspath(os.path.join(self._output_dir('html'),
                                                     '..', 'reference'))
        reference_builder = ReferenceBuilder('reference')
        refdir = os.path.join(os.environ['SAGE_DOC'], 'en', 'reference')
        for document in reference_builder.get_all_documents(refdir):
            # ``document`` has the form 'reference/<component>'.
            path = os.path.abspath(os.path.join(reference_dir, '..'))
            document_name = document.split('/')[1]
            sage_directory = os.path.join(path, document, 'sage')
            for dirpath, dirnames, filenames in os.walk(sage_directory):
                # Path of the new location relative to the HTML root.
                short_path = dirpath[len(path)+1:]
                # Drop the leading 'reference/<component>' pieces to get
                # the path of the OLD location within reference/.
                shorter_path = os.path.join(*short_path.split(os.sep)[2:])
                # The old directory may already exist from a previous run.
                try:
                    os.makedirs(os.path.join(reference_dir, shorter_path))
                except OSError:
                    pass
                for filename in filenames:
                    if not filename.endswith('html'):
                        continue
                    # Write a redirect page at the old location pointing
                    # at the file's new home.
                    redirect_filename = os.path.join(reference_dir, shorter_path, filename)
                    levels_up = len(shorter_path.split(os.sep))
                    redirect_url = "/".join(['..']*levels_up + [document_name, shorter_path, filename])
                    with open(redirect_filename, 'w') as f:
                        f.write(html_template % redirect_url)

    def clean(self):
        """
        When we clean the output for the website index, we need to
        remove all of the HTML files that were placed in the parent
        directory by :meth:`html` above.
        """
        html_output_dir = self._output_dir('html')
        parent_dir = os.path.realpath(os.path.join(html_output_dir, '..'))
        for filename in os.listdir(html_output_dir):
            parent_filename = os.path.join(parent_dir, filename)
            if not os.path.exists(parent_filename):
                continue
            if os.path.isdir(parent_filename):
                shutil.rmtree(parent_filename, ignore_errors=True)
            else:
                os.unlink(parent_filename)
        DocBuilder.clean(self)
class ReferenceBuilder(AllBuilder):
    """
    This class builds the reference manual.  It uses DocBuilder to
    build the top-level page and ReferenceSubBuilder for each
    sub-component.
    """
    def __init__(self, name, lang='en'):
        """
        Records the reference manual's name, in case it's not
        identical to 'reference'.
        """
        AllBuilder.__init__(self)
        doc = name.split(os.path.sep)
        # An explicit language prefix (e.g. 'en/reference') overrides
        # the ``lang`` argument.
        if doc[0] in LANGUAGES:
            lang = doc[0]
            doc.pop(0)
        self.name = doc[0]
        self.lang = lang

    def _output_dir(self, type, lang='en'):
        """
        Returns the directory where the output of type type is stored.
        If the directory does not exist, then it will automatically be
        created.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.ReferenceBuilder('reference')
            sage: b._output_dir('html')
            '.../doc/output/html/en/reference'
        """
        d = os.path.join(SAGE_DOC, "output", type, lang, self.name)
        mkdir(d)
        return d

    def _wrapper(self, format, *args, **kwds):
        """
        Builds reference manuals.  For each language, it builds the
        top-level document and its components.
        """
        for lang in LANGUAGES:
            refdir = os.path.join(SAGE_DOC, lang, self.name)
            if not os.path.exists(refdir):
                continue
            output_dir = self._output_dir(format, lang)
            # Build each component of the manual in a pool of worker
            # processes; build_ref_doc unpacks each tuple in L.
            from multiprocessing import Pool
            pool = Pool(NUM_THREADS, maxtasksperchild=1)
            L = [(doc, lang, format, kwds) + args for doc in self.get_all_documents(refdir)]
            pool.map_async(build_ref_doc, L, 1).get(99999)
            pool.close()
            pool.join()
            # Build the top-level reference document itself.
            getattr(DocBuilder(self.name, lang), format)(*args, **kwds)
            if format == 'pdf':
                # For PDF output, generate a local HTML index page that
                # links to the individual PDF documents, derived from the
                # website's index page and reference/index.rst.
                getattr(get_builder('website'), 'html')()
                # Copy the static files the index needs (we don't need
                # all of the website's static content, e.g. MathJax).
                website_dir = os.path.join(SAGE_DOC, 'output', 'html',
                                           'en', 'website')
                static_files = ['COPYING.txt', 'basic.css', 'blank.gif',
                                'default.css', 'doctools.js', 'favicon.ico',
                                'file.png', 'jquery.js', 'minus.png',
                                'pdf.png', 'plus.png', 'pygments.css',
                                'sage.css', 'sageicon.png', 'sagelogo.png',
                                'searchtools.js', 'sidebar.js', 'underscore.js']
                mkdir(os.path.join(output_dir, '_static'))
                for f in static_files:
                    try:
                        shutil.copyfile(os.path.join(website_dir, '_static', f),
                                        os.path.join(output_dir, '_static', f))
                    except IOError: # original file does not exist
                        pass
                # Now modify website's index.html page and write it
                # to output_dir.
                f = open(os.path.join(website_dir, 'index.html'))
                html = f.read().replace('Documentation', 'Reference')
                f.close()
                html_output_dir = os.path.dirname(website_dir)
                html = html.replace('http://www.sagemath.org',
                                    os.path.join(html_output_dir, 'index.html'))
                # Split the page around its body: keep everything before
                # the main heading and everything after the last table.
                html_end_preamble = html.find('<h1>Sage Reference')
                html_bottom = html.rfind('</table>') + len('</table>')
                # Read the index.rst and convert it to crude HTML.
                f = open(os.path.join(SAGE_DOC, lang, 'reference', 'index.rst'))
                rst = f.read()
                f.close()
                # Replace rst links with html links.
                rst = re.sub('`([^`]*)`__\.\n\n__ (.*)',
                             r'<a href="\2">\1</a>.', rst)
                # Turn :doc: toctree entries into links to the PDFs.
                rst = re.sub(r':doc:`([^<]*?)\s+<(.*)/index>`',
                             r'<a href="\2/\2.pdf">\1 <img src="_static/pdf.png" /></a>',
                             rst)
                # Remove rst markup that has no HTML equivalent here.
                rst = rst.replace('.. toctree::', '')
                rst = rst.replace(':maxdepth: 2', '')
                rst = rst.replace('todolist', '')
                # Body: the text between the title underline and the TOC.
                start = rst.find('=\n') + 1
                end = rst.find('Table of Contents')
                rst_body = rst[start:end]
                rst_body = rst_body.replace('\n\n', '</p>\n<p>')
                # Skip the 'Table of Contents' heading and its underline
                # (hence twice its length, plus the newline).
                start = rst.find('Table of Contents') + 2*len('Table of Contents') + 1
                end = rst.find('Indices and Tables')
                # TOC: change * to <li>, change rst headers to html headers.
                rst_toc = rst[start:end]
                rst_toc = rst_toc.replace('*', '<li>')
                rst_toc = re.sub('\n([A-Z][a-zA-Z, ]*)\n-*\n',
                                 '</ul>\n\n\n<h2>\\1</h2>\n\n<ul>\n', rst_toc)
                # Now write the file.
                new_index = open(os.path.join(output_dir, 'index.html'), 'w')
                new_index.write(html[:html_end_preamble])
                new_index.write('<h1>' + rst[:rst.find('\n')] +
                                ' (PDF version)'+ '</h1>')
                new_index.write(rst_body)
                new_index.write('<h2>Table of Contents</h2>\n\n<ul>')
                new_index.write(rst_toc)
                new_index.write('</ul>\n\n')
                new_index.write(html[html_bottom:])
                new_index.close()
                logger.warning('''
PDF documents have been created in subdirectories of
  %s
Alternatively, you can open
  %s
for a webpage listing all of the documents.''' % (output_dir,
                                                  os.path.join(output_dir,
                                                               'index.html')))

    def get_all_documents(self, refdir):
        """
        Returns a list of all reference manual components to build.
        We add a component name if it's a subdirectory of the manual's
        directory and contains a file named 'index.rst'.

        We return the largest component (most subdirectory entries)
        first since they will take the longest to build.

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: b = builder.ReferenceBuilder('reference')
            sage: refdir = os.path.join(os.environ['SAGE_DOC'], 'en', b.name)
            sage: sorted(b.get_all_documents(refdir))
            ['reference/algebras', 'reference/arithgroup', ..., 'reference/tensor']
        """
        documents = []
        for doc in os.listdir(refdir):
            directory = os.path.join(refdir, doc)
            if os.path.exists(os.path.join(directory, 'index.rst')):
                # Negate the entry count so that sorted() puts the
                # largest components first.
                n = len(os.listdir(directory))
                documents.append((-n, os.path.join(self.name, doc)))
        return [ doc[1] for doc in sorted(documents) ]
class ReferenceSubBuilder(DocBuilder):
    """
    This class builds sub-components of the reference manual. It is
    responsible for making sure the auto generated ReST files for the
    Sage library are up to date.

    When building any output, we must first go through and check
    to see if we need to update any of the autogenerated ReST
    files.  There are two cases where this would happen:

    1. A new module gets added to one of the toctrees.

    2. The actual module gets updated and possibly contains a new
       title.
    """
    def __init__(self, *args, **kwds):
        DocBuilder.__init__(self, *args, **kwds)
        self._wrap_builder_helpers()

    def _wrap_builder_helpers(self):
        # Rebind every output-format method (html, pdf, ...) to first go
        # through self._wrapper, which refreshes the auto-generated
        # ReST files before building.
        from functools import partial, update_wrapper
        for attr in dir(self):
            if hasattr(getattr(self, attr), 'is_output_format'):
                f = partial(self._wrapper, attr)
                f.is_output_format = True
                update_wrapper(f, getattr(self, attr))
                setattr(self, attr, f)

    def _wrapper(self, build_type, *args, **kwds):
        """
        This is the wrapper around the builder_helper methods that
        goes through and makes sure things are up to date.
        """
        # Delete the auto-generated .rst files, if the inherited
        # and/or underscored members options have changed.
        global options
        inherit_prev = self.get_cache().get('option_inherited')
        underscore_prev = self.get_cache().get('option_underscore')
        if (inherit_prev is None or inherit_prev != options.inherited or
            underscore_prev is None or underscore_prev != options.underscore):
            logger.info("Detected change(s) in inherited and/or underscored members option(s).")
            self.clean_auto()
            self.get_cache.clear_cache()
        # After "sage -clone", refresh the .rst file mtimes in
        # environment.pickle.
        if options.update_mtimes:
            logger.info("Checking for .rst file mtimes to update...")
            self.update_mtimes()
        # Update the .rst files for modified Python modules.
        logger.info("Updating .rst files with modified modules...")
        for module_name in self.get_modified_modules():
            self.write_auto_rest_file(module_name.replace(os.path.sep, '.'))
        # Write the .rst files for newly included modules.
        logger.info("Writing .rst files for newly-included modules...")
        for module_name in self.get_newly_included_modules(save=True):
            self.write_auto_rest_file(module_name)
        # Copy over the custom .rst files from _sage, if present.
        _sage = os.path.join(self.dir, '_sage')
        if os.path.exists(_sage):
            logger.info("Copying over custom .rst files from %s ...", _sage)
            shutil.copytree(_sage, os.path.join(self.dir, 'sage'))
        # Dispatch to the real builder method (html, pdf, ...).
        getattr(DocBuilder, build_type)(self, *args, **kwds)

    def cache_filename(self):
        """
        Returns the filename where the pickle of the dictionary of
        already generated ReST files is stored.
        """
        return os.path.join(self._doctrees_dir(), 'reference.pickle')

    @cached_method
    def get_cache(self):
        """
        Retrieve the cache of already generated ReST files.  If it
        doesn't exist, then we just return an empty dictionary.  If it
        is corrupted, return an empty dictionary.
        """
        filename = self.cache_filename()
        if not os.path.exists(filename):
            return {}
        import cPickle
        file = open(self.cache_filename(), 'rb')
        try:
            cache = cPickle.load(file)
        except StandardError:
            # A corrupted cache just means everything gets regenerated.
            logger.debug("Cache file '%s' is corrupted; ignoring it..."% filename)
            cache = {}
        else:
            logger.debug("Loaded .rst file cache: %s", filename)
        finally:
            file.close()
        return cache

    def save_cache(self):
        """
        Save the cache of already generated ReST files.
        """
        cache = self.get_cache()
        # Remember the option values the cache was built with, so that
        # _wrapper can detect when they change.
        global options
        cache['option_inherited'] = options.inherited
        cache['option_underscore'] = options.underscore
        import cPickle
        file = open(self.cache_filename(), 'wb')
        cPickle.dump(cache, file)
        file.close()
        logger.debug("Saved .rst file cache: %s", self.cache_filename())

    def get_sphinx_environment(self):
        """
        Returns the Sphinx environment for this project, or None if it
        cannot be opened (e.g. on a first build).
        """
        from sphinx.environment import BuildEnvironment
        class Foo(object):
            pass
        # Minimal stand-in for the Sphinx config object; presumably
        # frompickle only needs the ``values`` attribute.
        config = Foo()
        config.values = []
        env_pickle = os.path.join(self._doctrees_dir(), 'environment.pickle')
        try:
            env = BuildEnvironment.frompickle(config, env_pickle)
            logger.debug("Opened Sphinx environment: %s", env_pickle)
            return env
        except IOError as err:
            logger.debug("Failed to open Sphinx environment: %s", err)
            pass

    def update_mtimes(self):
        """
        Updates the modification times for ReST files in the Sphinx
        environment for this project.
        """
        env = self.get_sphinx_environment()
        if env is not None:
            import time
            for doc in env.all_docs:
                env.all_docs[doc] = time.time()
            logger.info("Updated %d .rst file mtimes", len(env.all_docs))
            env_pickle = os.path.join(self._doctrees_dir(),
                                      'environment.pickle')
            # When cloning a new branch (see
            # SAGE_LOCAL/bin/sage-clone), we hard link the doc output.
            # To avoid making unlinked, potentially inconsistent
            # copies of the environment, we *don't* use
            # BuildEnvironment.topickle:
            import cPickle, types
            # remove unpicklable attributes
            env.set_warnfunc(None)
            del env.config.values
            picklefile = open(env_pickle, 'wb')
            # remove potentially pickling-problematic values from config
            for key, val in vars(env.config).items():
                if key.startswith('_') or isinstance(val, (types.ModuleType,
                                                           types.FunctionType,
                                                           types.ClassType)):
                    del env.config[key]
            try:
                cPickle.dump(env, picklefile, cPickle.HIGHEST_PROTOCOL)
            finally:
                picklefile.close()
            logger.debug("Saved Sphinx environment: %s", env_pickle)

    def get_modified_modules(self):
        """
        Returns an iterator for all the modules that have been modified
        since the documentation was last built.
        """
        env = self.get_sphinx_environment()
        if env is None:
            logger.debug("Stopped check for modified modules.")
            return
        try:
            added, changed, removed = env.get_outdated_files(False)
            logger.info("Sphinx found %d modified modules", len(changed))
        except OSError as err:
            logger.debug("Sphinx failed to determine modified modules: %s", err)
            # If Sphinx cannot tell, regenerate everything.
            self.clean_auto()
            return
        for name in changed:
            # Only pay attention to files in a directory sage/... In
            # particular, don't treat a file like 'sagetex.rst' in the
            # top level of the document as an auto-generated module file.
            if name.startswith('sage' + os.sep):
                yield name

    def print_modified_modules(self):
        """
        Prints a list of all the modules that have been modified since
        the documentation was last built.
        """
        for module_name in self.get_modified_modules():
            print module_name

    def get_all_rst_files(self, exclude_sage=True):
        """
        Returns an iterator for all rst files which are not
        autogenerated (i.e. not below the sage/ output directory,
        unless ``exclude_sage`` is False).
        """
        for directory, subdirs, files in os.walk(self.dir):
            if exclude_sage and directory.startswith(os.path.join(self.dir, 'sage')):
                continue
            for filename in files:
                if not filename.endswith('.rst'):
                    continue
                yield os.path.join(directory, filename)

    def get_all_included_modules(self):
        """
        Returns an iterator for all modules which are included in the
        reference manual (i.e. listed in some toctree).
        """
        for filename in self.get_all_rst_files():
            for module in self.get_modules(filename):
                yield module

    def get_newly_included_modules(self, save=False):
        """
        Returns an iterator for all modules that appear in the
        toctrees that don't appear in the cache.  With ``save=True``,
        the updated cache is written back to disk afterwards.
        """
        cache = self.get_cache()
        new_modules = 0
        for module in self.get_all_included_modules():
            if module not in cache:
                cache[module] = True
                new_modules += 1
                yield module
        logger.info("Found %d newly included modules", new_modules)
        if save:
            self.save_cache()

    def print_newly_included_modules(self):
        """
        Prints all of the modules that appear in the toctrees that
        don't appear in the cache.
        """
        for module_name in self.get_newly_included_modules():
            print module_name

    def get_modules(self, filename):
        """
        Given a filename for a ReST file, return an iterator for
        all of the autogenerated ReST files that it includes.
        """
        # Matches toctree entries such as 'sage/rings/integer'
        # (optionally prefixed by '../' components).
        auto_re = re.compile('^\s*(..\/)*(sage(nb)?\/[\w\/]*)\s*$')
        f = open(filename)
        lines = f.readlines()
        f.close()
        for line in lines:
            match = auto_re.match(line)
            if match:
                yield match.group(2).replace(os.path.sep, '.')

    def get_module_docstring_title(self, module_name):
        """
        Returns the title of the module from its docstring.
        """
        # Try to import the module.
        try:
            __import__(module_name)
        except ImportError as err:
            logger.error("Warning: Could not import %s %s", module_name, err)
            return "UNABLE TO IMPORT MODULE"
        module = sys.modules[module_name]
        # Get the docstring (falling back on a 'doc' attribute).
        doc = module.__doc__
        if doc is None:
            doc = module.doc if hasattr(module, 'doc') else ""
        # Extract the title: skip past the first newline (triple-quoted
        # docstrings conventionally begin with one) and take the first
        # non-blank line after it.
        i = doc.find('\n')
        if i != -1:
            return doc[i+1:].lstrip().splitlines()[0]
        else:
            return doc

    def auto_rest_filename(self, module_name):
        """
        Returns the name of the file associated to a given module

        EXAMPLES::

            sage: import os, sys; sys.path.append(os.environ['SAGE_DOC']+'/common/'); import builder
            sage: import builder
            sage: builder.ReferenceSubBuilder("reference").auto_rest_filename("sage.combinat.partition")
            '.../doc/en/reference/sage/combinat/partition.rst'
        """
        return self.dir + os.path.sep + module_name.replace('.',os.path.sep) + '.rst'

    def write_auto_rest_file(self, module_name):
        """
        Writes the autogenerated ReST file for module_name.
        """
        if not module_name.startswith('sage'):
            return
        filename = self.auto_rest_filename(module_name)
        mkdir(os.path.dirname(filename))
        outfile = open(filename, 'w')
        title = self.get_module_docstring_title(module_name)
        if title == '':
            logger.error("Warning: Missing title for %s", module_name)
            title = "MISSING TITLE"
        # Don't doctest the autogenerated file.
        outfile.write(".. nodoctest\n\n")
        # Now write the actual content.
        outfile.write(".. _%s:\n\n"%module_name)
        outfile.write(title + '\n')
        outfile.write('='*len(title) + "\n\n")
        outfile.write('.. This file has been autogenerated.\n\n')
        # Honor the --inherited command-line option.
        global options
        inherited = ':inherited-members:' if options.inherited else ''
        automodule = '''
.. automodule:: %s
   :members:
   :undoc-members:
   :show-inheritance:
   %s
'''
        outfile.write(automodule % (module_name, inherited))
        outfile.close()

    def clean_auto(self):
        """
        Remove the cache file for the autogenerated files as well as
        the files themselves.
        """
        if os.path.exists(self.cache_filename()):
            os.unlink(self.cache_filename())
            logger.debug("Deleted .rst cache file: %s", self.cache_filename())
        import shutil
        try:
            shutil.rmtree(os.path.join(self.dir, 'sage'))
            logger.debug("Deleted auto-generated .rst files in: %s",
                         os.path.join(self.dir, 'sage'))
        except OSError:
            # Nothing to delete.
            pass

    def get_unincluded_modules(self):
        """
        Returns an iterator for all the modules in the Sage library
        which are not included in the reference manual.
        """
        # Make a dictionary of the included modules.
        included_modules = {}
        for module_name in self.get_all_included_modules():
            included_modules[module_name] = True
        # Walk the Sage library sources looking for .py/.pyx files.
        base_path = os.path.join(SAGE_SRC, 'sage')
        for directory, subdirs, files in os.walk(base_path):
            for filename in files:
                if not (filename.endswith('.py') or
                        filename.endswith('.pyx')):
                    continue
                path = os.path.join(directory, filename)
                # Create the dotted module name from the file path.
                module_name = path[len(base_path):].replace(os.path.sep, '.')
                module_name = 'sage' + module_name
                module_name = module_name[:-4] if module_name.endswith('pyx') else module_name[:-3]
                # Exclude __init__ and all* modules -- we don't want
                # them in the manual.
                if module_name.endswith('__init__') or module_name.endswith('all'):
                    continue
                if module_name not in included_modules:
                    yield module_name

    def print_unincluded_modules(self):
        """
        Prints all of the modules which are not included in the Sage
        reference manual.
        """
        for module_name in self.get_unincluded_modules():
            print module_name

    def print_included_modules(self):
        """
        Prints all of the modules that are included in the Sage reference
        manual.
        """
        for module_name in self.get_all_included_modules():
            print module_name
def get_builder(name):
    """
    Returns an appropriate *Builder object for the document ``name``.
    DocBuilder and its subclasses do all the real work in building the
    documentation.
    """
    # NOTE: the order of these tests is significant: 'reference' itself
    # must match before 'reference/<sub-document>' names, and both
    # before the generic document lookup below.
    if name == 'all':
        return AllBuilder()
    elif name.endswith('reference'):
        return ReferenceBuilder(name)
    elif 'reference' in name:
        return ReferenceSubBuilder(name)
    elif name.endswith('website'):
        return WebsiteBuilder(name)
    elif name in get_documents() or name in AllBuilder().get_all_documents():
        return DocBuilder(name)
    else:
        # Unknown document: print usage hints and abort.
        print "'%s' is not a recognized document. Type 'sage -docbuild -D' for a list"%name
        print "of documents, or 'sage -docbuild --help' for more help."
        sys.exit(1)
def format_columns(lst, align='<', cols=None, indent=4, pad=3, width=80):
    """
    Utility function that formats a list as a simple table and returns
    a Unicode string representation.  The number of columns is
    computed from the other options, unless it's passed as a keyword
    argument.  For help on Python's string formatter, see

    http://docs.python.org/library/string.html#format-string-syntax

    INPUT:

    - ``lst`` -- list of strings to lay out
    - ``align`` -- format-spec alignment character ('<', '>' or '^')
    - ``cols`` -- number of columns, or None to compute it from ``width``
    - ``indent`` -- number of spaces at the start of each row
    - ``pad`` -- minimum number of spaces between columns
    - ``width`` -- target total line width
    """
    if not lst:
        # max() below would raise ValueError on an empty sequence; an
        # empty list simply formats as an empty table.
        return u""
    size = max(map(len, lst)) + pad
    if cols is None:
        import math
        cols = math.trunc((width - indent) / size)
    # Guard against entries wider than the available width, which would
    # otherwise make cols == 0 and crash the modulus operation below.
    cols = max(cols, 1)
    s = " " * indent
    for i, entry in enumerate(lst):
        if i != 0 and i % cols == 0:
            # Start a new row.
            s += "\n" + " " * indent
        s += "{0:{1}{2}}".format(entry, align, size)
    s += "\n"
    if str is bytes:
        # Python 2: preserve the historical unicode return type.
        s = unicode(s)  # noqa: F821
    return s
def help_usage(s=u"", compact=False):
    """
    Append and return a brief usage message for the Sage
    documentation builder.  A trailing newline is added unless
    ``compact`` is True.
    """
    s += "sage -docbuild [OPTIONS] DOCUMENT (FORMAT | COMMAND)"
    return s if compact else s + "\n"
def help_description(s=u"", compact=False):
    """
    Append and return a brief description of the Sage documentation
    builder.  A trailing newline is added unless ``compact`` is True.
    """
    lines = ["Build or return information about Sage documentation.",
             " DOCUMENT name of the document to build",
             " FORMAT document output format",
             " COMMAND document-specific command",
             "A DOCUMENT and either a FORMAT or a COMMAND are required,",
             "unless a list of one or more of these is requested."]
    s += "\n".join(lines)
    if not compact:
        s += "\n"
    return s
def help_examples(s=u""):
    """
    Append and return some usage examples for the Sage documentation
    builder.
    """
    examples = [
        "Examples:",
        " sage -docbuild -FDC all",
        " sage -docbuild constructions pdf",
        " sage -docbuild reference html -jv3",
        " sage -docbuild --mathjax tutorial html",
        " sage -docbuild reference print_unincluded_modules",
        " sage -docbuild developer -j html --sphinx-opts -q,-aE --verbose 2",
    ]
    return s + "\n".join(examples)
def get_documents():
    """
    Return the list of document names the Sage documentation builder
    will accept as command-line arguments.  English documents are
    listed without their 'en/' prefix.
    """
    documents = []
    for doc in AllBuilder().get_all_documents():
        documents.append(doc[3:] if doc.startswith('en/') else doc)
    return documents
def help_documents(s=u""):
    """
    Append and return a tabular list of documents, including the
    shortcut 'all' for all documents, available to the Sage
    documentation builder.
    """
    docs = get_documents()
    s += "DOCUMENTs:\n"
    s += format_columns(docs + ['all (!)'])
    s += "(!) Builds everything.\n\n"
    extra = ""
    if 'reference' in docs:
        extra = ("Other valid document names take the form 'reference/DIR', where\n"
                 "DIR is a subdirectory of SAGE_DOC/en/reference/.\n"
                 "This builds just the specified part of the reference manual.\n")
    return s + extra
def get_formats():
    """
    Return a list of output formats the Sage documentation builder
    will accept on the command-line, with 'html' and 'pdf' listed
    first.
    """
    formats = DocBuilder('en/tutorial')._output_formats()
    formats.remove('html')
    formats.insert(0, 'pdf')
    formats.insert(0, 'html')
    return formats
def help_formats(s=u""):
    """
    Append and return a tabular list of output formats available to
    the Sage documentation builder.
    """
    return s + "FORMATs:\n" + format_columns(get_formats())
def help_commands(name='all', s=u""):
    """
    Append and return a tabular list of commands, if any, the Sage
    documentation builder can run on the indicated document.  The
    default is to list all commands for all documents.
    """
    # Currently only the reference manual has document-specific commands.
    command_dict = { 'reference' : [
        'print_included_modules', 'print_modified_modules (*)',
        'print_unincluded_modules', 'print_newly_included_modules (*)',
        ] }
    for doc, commands in command_dict.items():
        if name in ('all', doc):
            s += "COMMANDs for the DOCUMENT '" + doc + "':\n"
            s += format_columns(commands)
            s += "(*) Since the last build.\n"
    return s
def help_message_long(option, opt_str, value, parser):
"""
Prints an extended help message for the Sage documentation builder
and exits.
"""
help_funcs = [ help_usage, help_description, help_documents,
help_formats, help_commands, parser.format_option_help,
help_examples ]
for f in help_funcs:
print f()
sys.exit(0)
def help_message_short(option=None, opt_str=None, value=None, parser=None,
                       error=False):
    """
    Print a help message for the Sage documentation builder.

    The message includes command-line usage and a list of options.  The
    message is printed only on the first call.  If ``error`` is True
    during this call, the message is printed only if the user hasn't
    requested a list (e.g., documents, formats, commands).
    """
    if not hasattr(parser.values, 'printed_help'):
        # On an error, stay quiet if the user already asked for a list
        # (-D/-F/-C); help_wrapper records that request as printed_list.
        if error:
            if not hasattr(parser.values, 'printed_list'):
                parser.print_help()
        else:
            parser.print_help()
        # Mark the help as printed so repeated calls are no-ops.
        setattr(parser.values, 'printed_help', 1)
def help_wrapper(option, opt_str, value, parser):
"""
A helper wrapper for command-line options to the Sage
documentation builder that print lists, such as document names,
formats, and document-specific commands.
"""
if option.dest == 'commands':
print help_commands(value),
if option.dest == 'documents':
print help_documents(),
if option.dest == 'formats':
print help_formats(),
setattr(parser.values, 'printed_list', 1)
class IndentedHelpFormatter2(optparse.IndentedHelpFormatter, object):
    """
    Custom help formatter class for optparse's OptionParser.
    """
    def format_description(self, description):
        """
        Return a formatted description, preserving any original
        explicit new line characters.
        """
        if not description:
            return ""
        formatted = [self._format_text(chunk)
                     for chunk in description.split('\n')]
        return "\n".join(formatted) + "\n"

    def format_heading(self, heading):
        """
        Return a formatted heading using the superclass' formatter.

        If the heading is 'options', up to case, convert it to ALL
        CAPS so it matches the token 'OPTIONS' in the builder's usage
        message.
        """
        if heading.lower() == 'options':
            heading = "OPTIONS"
        return super(IndentedHelpFormatter2, self).format_heading(heading)
def setup_parser():
    """
    Sets up and returns a command-line OptionParser instance for the
    Sage documentation builder.
    """
    # We disable optparse's built-in -h so that -h/-H can route through
    # our own short/long help callbacks below.
    parser = optparse.OptionParser(add_help_option=False,
                                   usage=help_usage(compact=True),
                                   formatter=IndentedHelpFormatter2(),
                                   description=help_description(compact=True))
    # Standard options for everyday use.
    standard = optparse.OptionGroup(parser, "Standard")
    standard.add_option("-h", "--help",
                        action="callback", callback=help_message_short,
                        help="show a help message and exit")
    standard.add_option("-H", "--help-all",
                        action="callback", callback=help_message_long,
                        help="show an extended help message and exit")
    standard.add_option("-D", "--documents", dest="documents",
                        action="callback", callback=help_wrapper,
                        help="list all available DOCUMENTs")
    standard.add_option("-F", "--formats", dest="formats",
                        action="callback", callback=help_wrapper,
                        help="list all output FORMATs")
    standard.add_option("-C", "--commands", dest="commands",
                        type="string", metavar="DOC",
                        action="callback", callback=help_wrapper,
                        help="list all COMMANDs for DOCUMENT DOC; use 'all' to list all")
    standard.add_option("-i", "--inherited", dest="inherited",
                        default=False, action="store_true",
                        help="include inherited members in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-u", "--underscore", dest="underscore",
                        default=False, action="store_true",
                        help="include variables prefixed with '_' in reference manual; may be slow, may fail for PDF output")
    standard.add_option("-j", "--mathjax", "--jsmath", dest="mathjax",
                        action="store_true",
                        help="render math using MathJax; FORMATs: html, json, pickle, web")
    standard.add_option("--no-pdf-links", dest="no_pdf_links",
                        action="store_true",
                        help="do not include PDF links in DOCUMENT 'website'; FORMATs: html, json, pickle, web")
    standard.add_option("--warn-links", dest="warn_links",
                        default=False, action="store_true",
                        help="issue a warning whenever a link is not properly resolved; equivalent to '--sphinx-opts -n' (sphinx option: nitpicky)")
    standard.add_option("--check-nested", dest="check_nested",
                        action="store_true",
                        help="check picklability of nested classes in DOCUMENT 'reference'")
    standard.add_option("-N", "--no-colors", dest="color", default=True,
                        action="store_false",
                        help="do not color output; does not affect children")
    standard.add_option("-q", "--quiet", dest="verbose",
                        action="store_const", const=0,
                        help="work quietly; same as --verbose=0")
    standard.add_option("-v", "--verbose", dest="verbose",
                        type="int", default=1, metavar="LEVEL",
                        action="store",
                        help="report progress at LEVEL=0 (quiet), 1 (normal), 2 (info), or 3 (debug); does not affect children")
    parser.add_option_group(standard)
    # Advanced options, passed through to Sphinx or affecting caching.
    advanced = optparse.OptionGroup(parser, "Advanced",
                                    "Use these options with care.")
    advanced.add_option("-S", "--sphinx-opts", dest="sphinx_opts",
                        type="string", metavar="OPTS",
                        action="store",
                        help="pass comma-separated OPTS to sphinx-build")
    advanced.add_option("-U", "--update-mtimes", dest="update_mtimes",
                        default=False, action="store_true",
                        help="before building reference manual, update modification times for auto-generated ReST files")
    parser.add_option_group(advanced)
    return parser
def setup_logger(verbose=1, color=True):
    """
    Set up and return a Python Logger instance for the Sage
    documentation builder.  The optional arguments set the logger's
    level and message format.
    """
    # Set up colors. Adapted from sphinx.cmdline.
    import sphinx.util.console as c
    if not color or not sys.stdout.isatty() or not c.color_terminal():
        c.nocolor()
    # Available colors: black, darkgray, (dark)red, dark(green),
    # brown, yellow, (dark)blue, purple, fuchsia, turquoise, teal,
    # lightgray, white.  Available styles: reset, bold, faint,
    # standout, underline, blink.
    # Set up log record formats.
    format_std = "%(message)s"
    formatter = logging.Formatter(format_std)
    # Debug format: colorized "<module> #<lineno> <funcName>() <message>".
    # NOTE: the '#' below is part of the log format string, not a comment.
    fields = ['%(module)s', '#%(lineno)s', '%(funcName)s()', '%(message)s']
    colors = ['darkblue', 'darkred', 'brown', 'reset']
    styles = ['reset', 'reset', 'reset', 'reset']
    format_debug = ""
    for i in xrange(len(fields)):
        format_debug += c.colorize(styles[i], c.colorize(colors[i], fields[i]))
        # Separate fields with a space (the original `i != len(fields)`
        # test was always true, leaving a stray trailing space).
        if i != len(fields) - 1:
            format_debug += " "
    # Documentation: http://docs.python.org/library/logging.html
    logger = logging.getLogger('doc.common.builder')
    # Translate the builder's verbosity level to a logging level; level
    # 3 also switches to the detailed debug format.
    if verbose == 0:
        logger.setLevel(logging.ERROR)
    if verbose == 1:
        logger.setLevel(logging.WARNING)
    if verbose == 2:
        logger.setLevel(logging.INFO)
    if verbose == 3:
        logger.setLevel(logging.DEBUG)
        formatter = logging.Formatter(format_debug)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    return logger
class IntersphinxCache:
"""
Replace sphinx.ext.intersphinx.fetch_inventory by an in-memory
cached version.
"""
def __init__(self):
self.inventories = {}
self.real_fetch_inventory = sphinx.ext.intersphinx.fetch_inventory
sphinx.ext.intersphinx.fetch_inventory = self.fetch_inventory
def fetch_inventory(self, app, uri, inv):
"""
Return the result of ``sphinx.ext.intersphinx.fetch_inventory()``
from a cache if possible. Otherwise, call
``sphinx.ext.intersphinx.fetch_inventory()`` and cache the result.
"""
t = (uri, inv)
try:
return self.inventories[t]
except KeyError:
i = self.real_fetch_inventory(app, uri, inv)
self.inventories[t] = i
return i
if __name__ == '__main__':
# Parse the command-line.
parser = setup_parser()
options, args = parser.parse_args()
# Get the name and type (target format) of the document we are
# trying to build.
try:
name, type = args
except ValueError:
help_message_short(parser=parser, error=True)
sys.exit(1)
# Set up module-wide logging.
logger = setup_logger(options.verbose, options.color)
# Process selected options.
#
# MathJax: this check usually has no practical effect, since
# SAGE_DOC_MATHJAX is set to "True" by the script sage-env.
# To disable MathJax, set SAGE_DOC_MATHJAX to "no" or "False".
if options.mathjax or (os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'no'
and os.environ.get('SAGE_DOC_MATHJAX', 'no') != 'False'):
os.environ['SAGE_DOC_MATHJAX'] = 'True'
if options.check_nested:
os.environ['SAGE_CHECK_NESTED'] = 'True'
if options.underscore:
os.environ['SAGE_DOC_UNDERSCORE'] = "True"
if options.sphinx_opts:
ALLSPHINXOPTS += options.sphinx_opts.replace(',', ' ') + " "
if options.no_pdf_links:
WEBSITESPHINXOPTS = " -A hide_pdf_links=1 "
if options.warn_links:
ALLSPHINXOPTS += "-n "
# Make sure common/static exists.
mkdir(os.path.join(SAGE_DOC, 'common', 'static'))
import sage.all
# Minimize GAP/libGAP RAM usage when we build the docs
set_gap_memory_pool_size(1) # 1 MB
# Set up Intersphinx cache
C = IntersphinxCache()
# Get the builder and build.
getattr(get_builder(name), type)()
| false | true |
f7218cb7a745b7fd90503d36440f0281125e16d4 | 3,402 | py | Python | cogs/error.py | Py-Verse/PyBot | dfbb029925f4d207eaabbb4d02884c27fb3c4164 | [
"MIT"
] | 8 | 2021-03-07T08:52:31.000Z | 2021-04-24T21:44:36.000Z | cogs/error.py | Developing-Studio/ci-PyBot | 4eb5aa44c0e469e2ec4f4fb51094229c3bee9441 | [
"MIT"
] | 1 | 2021-03-07T10:21:08.000Z | 2021-03-07T10:32:08.000Z | cogs/error.py | Developing-Studio/ci-PyBot | 4eb5aa44c0e469e2ec4f4fb51094229c3bee9441 | [
"MIT"
] | 4 | 2021-03-07T10:30:51.000Z | 2021-03-11T14:30:14.000Z | import math
import os
import sys
import traceback
import discord
from discord.ext import commands
class Errors(commands.Cog):
"""
Error handler
"""
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print("Error cog loaded successfully")
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, "on_error"):
return
# get the original exception
error = getattr(error, "original", error)
if isinstance(error, commands.BotMissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Missing Permissions",
description=f"I am missing **{fmt}** permissions to run this command :(",
color=0xFF0000,
)
return
if isinstance(error, commands.DisabledCommand):
await ctx.send("This command has been disabled.")
return
if isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
title="Cooldown",
description=f"This command is on cooldown, please retry in {math.ceil(error.retry_after)}s.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Insufficient Permission(s)",
description=f"You need the **{fmt}** permission(s) to use this command.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.UserInputError):
embed = discord.Embed(
title="Error",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.NoPrivateMessage):
try:
await ctx.author.send("This command cannot be used in direct messages.")
except discord.Forbidden:
raise error
return
if isinstance(error, commands.CheckFailure):
embed = discord.Embed(
title="Permissions Not Satisfied",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.CommandNotFound):
return
print("Ignoring exception in command {}:".format(ctx.command), file=sys.stderr)
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
def setup(bot):
bot.add_cog(Errors(bot))
| 30.648649 | 109 | 0.531452 | import math
import os
import sys
import traceback
import discord
from discord.ext import commands
class Errors(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print("Error cog loaded successfully")
@commands.Cog.listener()
async def on_command_error(self, ctx, error):
if hasattr(ctx.command, "on_error"):
return
error = getattr(error, "original", error)
if isinstance(error, commands.BotMissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Missing Permissions",
description=f"I am missing **{fmt}** permissions to run this command :(",
color=0xFF0000,
)
return
if isinstance(error, commands.DisabledCommand):
await ctx.send("This command has been disabled.")
return
if isinstance(error, commands.CommandOnCooldown):
embed = discord.Embed(
title="Cooldown",
description=f"This command is on cooldown, please retry in {math.ceil(error.retry_after)}s.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.MissingPermissions):
missing = [
perm.replace("_", " ").replace("guild", "server").title()
for perm in error.missing_perms
]
if len(missing) > 2:
fmt = "{}, and {}".format("**, **".join(missing[:-1]), missing[-1])
else:
fmt = " and ".join(missing)
embed = discord.Embed(
title="Insufficient Permission(s)",
description=f"You need the **{fmt}** permission(s) to use this command.",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.UserInputError):
embed = discord.Embed(
title="Error",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.NoPrivateMessage):
try:
await ctx.author.send("This command cannot be used in direct messages.")
except discord.Forbidden:
raise error
return
if isinstance(error, commands.CheckFailure):
embed = discord.Embed(
title="Permissions Not Satisfied",
color=0xFF0000,
)
await ctx.send(embed=embed)
return
if isinstance(error, commands.CommandNotFound):
return
print("Ignoring exception in command {}:".format(ctx.command), file=sys.stderr)
traceback.print_exception(
type(error), error, error.__traceback__, file=sys.stderr
)
def setup(bot):
bot.add_cog(Errors(bot))
| true | true |
f7218d6bb7dd8dbb82f3a28fabfbe622d4a4680d | 220 | py | Python | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | userlogin_test.py | kilonzijnr/passstore | e1f73d2599bbbd209e0242416c706c4ce259d3a5 | [
"MIT"
] | null | null | null | import unittest
from userlogin import User
class TestUser(unittest.TestCase):
"""
Test class to define test cases for the User class
Args:
unittest.TestCase: TestCase class creates test cases
""" | 24.444444 | 60 | 0.709091 | import unittest
from userlogin import User
class TestUser(unittest.TestCase): | true | true |
f7218df71a44862b66afa5b5c925534e4b131f25 | 3,289 | py | Python | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | 2 | 2020-03-18T17:13:00.000Z | 2020-03-25T02:34:03.000Z | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | null | null | null | computer_vision/learning-opencv-practical/image-process-100ask/Question_31_40/answers/answer_40.py | magic428/subjects_notes | 6930adbb3f445c11ca9d024abb12a53d6aca19e7 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read image
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape
# RGB > YCbCr
Y = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]
Cb = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.
Cr = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.
YCC = np.zeros_like(img, dtype=np.float32)
YCC[..., 0] = Y
YCC[..., 1] = Cb
YCC[..., 2] = Cr
# DCT
T = 8
K = 8
X = np.zeros((H, W, C), dtype=np.float64)
Q1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
(12, 12, 14, 19, 26, 58, 60, 55),
(14, 13, 16, 24, 40, 57, 69, 56),
(14, 17, 22, 29, 51, 87, 80, 62),
(18, 22, 37, 56, 68, 109, 103, 77),
(24, 35, 55, 64, 81, 104, 113, 92),
(49, 64, 78, 87, 103, 121, 120, 101),
(72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
Q2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),
(18, 21, 26, 66, 99, 99, 99, 99),
(24, 26, 56, 99, 99, 99, 99, 99),
(47, 66, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)
def w(x, y, u, v):
cu = 1.
cv = 1.
if u == 0:
cu /= np.sqrt(2)
if v == 0:
cv /= np.sqrt(2)
theta = np.pi / (2 * T)
return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))
for yi in range(0, H, T):
for xi in range(0, W, T):
for v in range(T):
for u in range(T):
for y in range(T):
for x in range(T):
for c in range(C):
X[v+yi, u+xi, c] += YCC[y+yi, x+xi, c] * w(x,y,u,v)
X[yi:yi+T, xi:xi+T, 0] = np.round(X[yi:yi+T, xi:xi+T, 0] / Q1) * Q1
X[yi:yi+T, xi:xi+T, 1] = np.round(X[yi:yi+T, xi:xi+T, 1] / Q2) * Q2
X[yi:yi+T, xi:xi+T, 2] = np.round(X[yi:yi+T, xi:xi+T, 2] / Q2) * Q2
# IDCT
IYCC = np.zeros((H, W, 3), dtype=np.float64)
for yi in range(0, H, T):
for xi in range(0, W, T):
for y in range(T):
for x in range(T):
for v in range(K):
for u in range(K):
IYCC[y+yi, x+xi] += X[v+yi, u+xi] * w(x,y,u,v)
# YCbCr > RGB
out = np.zeros_like(img, dtype=np.float32)
out[..., 2] = IYCC[..., 0] + (IYCC[..., 2] - 128.) * 1.4020
out[..., 1] = IYCC[..., 0] - (IYCC[..., 1] - 128.) * 0.3441 - (IYCC[..., 2] - 128.) * 0.7139
out[..., 0] = IYCC[..., 0] + (IYCC[..., 1] - 128.) * 1.7718
out[out>255] = 255
out = out.astype(np.uint8)
# MSE
v_max = 255.
mse = np.sum(np.power(np.abs(img.astype(np.float32) - out.astype(np.float32)), 2)) / (H * W * C)
psnr = 10 * np.log10(v_max ** 2 / mse)
print("PSNR >>", psnr)
bitrate = 1. * T * K ** 2 / (T ** 2)
print("bitrate >>", bitrate)
# Save result
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
| 32.245098 | 97 | 0.419884 | import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("imori.jpg").astype(np.float32)
H, W, C = img.shape
Y = 0.2990 * img[..., 2] + 0.5870 * img[..., 1] + 0.1140 * img[..., 0]
Cb = -0.1687 * img[..., 2] - 0.3313 * img[..., 1] + 0.5 * img[..., 0] + 128.
Cr = 0.5 * img[..., 2] - 0.4187 * img[..., 1] - 0.0813 * img[..., 0] + 128.
YCC = np.zeros_like(img, dtype=np.float32)
YCC[..., 0] = Y
YCC[..., 1] = Cb
YCC[..., 2] = Cr
T = 8
K = 8
X = np.zeros((H, W, C), dtype=np.float64)
Q1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),
(12, 12, 14, 19, 26, 58, 60, 55),
(14, 13, 16, 24, 40, 57, 69, 56),
(14, 17, 22, 29, 51, 87, 80, 62),
(18, 22, 37, 56, 68, 109, 103, 77),
(24, 35, 55, 64, 81, 104, 113, 92),
(49, 64, 78, 87, 103, 121, 120, 101),
(72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)
Q2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),
(18, 21, 26, 66, 99, 99, 99, 99),
(24, 26, 56, 99, 99, 99, 99, 99),
(47, 66, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99),
(99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)
def w(x, y, u, v):
cu = 1.
cv = 1.
if u == 0:
cu /= np.sqrt(2)
if v == 0:
cv /= np.sqrt(2)
theta = np.pi / (2 * T)
return (( 2 * cu * cv / T) * np.cos((2*x+1)*u*theta) * np.cos((2*y+1)*v*theta))
for yi in range(0, H, T):
for xi in range(0, W, T):
for v in range(T):
for u in range(T):
for y in range(T):
for x in range(T):
for c in range(C):
X[v+yi, u+xi, c] += YCC[y+yi, x+xi, c] * w(x,y,u,v)
X[yi:yi+T, xi:xi+T, 0] = np.round(X[yi:yi+T, xi:xi+T, 0] / Q1) * Q1
X[yi:yi+T, xi:xi+T, 1] = np.round(X[yi:yi+T, xi:xi+T, 1] / Q2) * Q2
X[yi:yi+T, xi:xi+T, 2] = np.round(X[yi:yi+T, xi:xi+T, 2] / Q2) * Q2
IYCC = np.zeros((H, W, 3), dtype=np.float64)
for yi in range(0, H, T):
for xi in range(0, W, T):
for y in range(T):
for x in range(T):
for v in range(K):
for u in range(K):
IYCC[y+yi, x+xi] += X[v+yi, u+xi] * w(x,y,u,v)
out = np.zeros_like(img, dtype=np.float32)
out[..., 2] = IYCC[..., 0] + (IYCC[..., 2] - 128.) * 1.4020
out[..., 1] = IYCC[..., 0] - (IYCC[..., 1] - 128.) * 0.3441 - (IYCC[..., 2] - 128.) * 0.7139
out[..., 0] = IYCC[..., 0] + (IYCC[..., 1] - 128.) * 1.7718
out[out>255] = 255
out = out.astype(np.uint8)
v_max = 255.
mse = np.sum(np.power(np.abs(img.astype(np.float32) - out.astype(np.float32)), 2)) / (H * W * C)
psnr = 10 * np.log10(v_max ** 2 / mse)
print("PSNR >>", psnr)
bitrate = 1. * T * K ** 2 / (T ** 2)
print("bitrate >>", bitrate)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("out.jpg", out)
| true | true |
f72190bc142f0445507b2063ace8933a5d98baaf | 2,238 | py | Python | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | examples/visexp.py | BatsiBoy/PyFrac | a898f6111295fa9196c382613639fc84e73d6035 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Name: Fractal Example - Exponential Curves
#Author: Sean Pope
#Example use of the fractal engine and coefficient block.
#Creates random coefficient blocks and draws frames to create a simple animation.
#This one is optimized for the exponential variation.
import matplotlib.pyplot as plt
import PyFrac as pf
plt.style.use('dark_background') #Mostly just used for the black background.
ax = plt.subplot(111,frameon=False) #Create a figure and axes for drawing.
ax.axes.get_xaxis().set_visible(False) #Hide axis
ax.axes.get_yaxis().set_visible(False)
plt.xlim(-1,1) #This function looks best in the biunit square.
plt.ylim(-1,1)
def quitloop(*args): #Closes the event loop when no longer needed.
global run
run = 0
return
fig = plt.gcf() #Get the figure that pyplot spawned.
fig.canvas.mpl_connect('close_event', quitloop) #If the window is closed, exit loop.
fig.canvas.mpl_connect('key_press_event', quitloop) #If a button is pressed, close.
mng = plt.get_current_fig_manager() #Grab the figure window
mng.full_screen_toggle() #Maximize the image to fill the screen.
""" Runtime variables """
run = 1 #Set to continue drawing frames, unset to terminate
framecount = 0 #Used to set frames drawn per coefficient block
frameclear = 0 #Starts deleting frames when set
coeffs = pf.coeffs.rand(0.9,0.2)
""" Main event loop. """
while(run):
framecount += 1
if framecount == 40: #Draws a new coefficient set if the current image is done.
frameclear = 1
coeffs = pf.coeffs.rand(0.9,0.2)
framecount -= 40 #Reset frame counter.
fractal = pf.engine.fractpoints(coeffs, 200, pf.variations.exponential) #Run the engine to get a figure.
plt.scatter(fractal['x'], fractal['y'], #Get the x,y coordinates for each point
marker='.', alpha=0.8, #Use small pixel markers with low opacity
c=fractal['color'], cmap='plasma', #Map the color row to this colormap.
s=25, edgecolor='none'
)
if frameclear:
del ax.collections[0] #Remove the oldest frame.
plt.pause(.01) #This pause draws the frame before looping.
plt.close(fig) | 34.430769 | 109 | 0.683199 |
import matplotlib.pyplot as plt
import PyFrac as pf
plt.style.use('dark_background')
ax = plt.subplot(111,frameon=False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.xlim(-1,1)
plt.ylim(-1,1)
def quitloop(*args):
global run
run = 0
return
fig = plt.gcf()
fig.canvas.mpl_connect('close_event', quitloop)
fig.canvas.mpl_connect('key_press_event', quitloop)
mng = plt.get_current_fig_manager()
mng.full_screen_toggle()
run = 1
framecount = 0
frameclear = 0
coeffs = pf.coeffs.rand(0.9,0.2)
while(run):
framecount += 1
if framecount == 40:
frameclear = 1
coeffs = pf.coeffs.rand(0.9,0.2)
framecount -= 40
fractal = pf.engine.fractpoints(coeffs, 200, pf.variations.exponential)
plt.scatter(fractal['x'], fractal['y'],
marker='.', alpha=0.8,
c=fractal['color'], cmap='plasma',
s=25, edgecolor='none'
)
if frameclear:
del ax.collections[0]
plt.pause(.01)
plt.close(fig) | true | true |
f721923f0db0c229c58f961a74feaeb820d768fc | 306 | py | Python | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 6 | 2020-03-24T15:58:42.000Z | 2020-04-18T13:32:41.000Z | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 1 | 2022-01-13T03:51:17.000Z | 2022-01-13T03:51:17.000Z | src/basic/011_thirds/use_requests.py | hbulpf/pydemo | 2989cc50781230718e46dcac5dc0ca70630ebffe | [
"Apache-2.0"
] | 1 | 2020-02-01T09:36:05.000Z | 2020-02-01T09:36:05.000Z | import requests
r = requests.get('https://www.baidu.com/')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
r = requests.get('https://www.baidu.com/', params={'wd': 'python'})
print(f'url:{r.url}')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
print(f'encoding:{r.encoding}') | 27.818182 | 67 | 0.679739 | import requests
r = requests.get('https://www.baidu.com/')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
r = requests.get('https://www.baidu.com/', params={'wd': 'python'})
print(f'url:{r.url}')
print(f'status_code:{r.status_code}')
print(f'text:{r.text}')
print(f'encoding:{r.encoding}') | true | true |
f721925123231063587335f88669b985aa41c584 | 489 | py | Python | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | 3 | 2017-01-26T11:29:18.000Z | 2018-02-02T14:54:03.000Z | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | null | null | null | examples/loadsheet.py | Daviid1010/ethercalc-python | af79cb5c69e2caa0b7f1d88b14be5ca60e7d6a0b | [
"BSD-2-Clause"
] | 6 | 2016-05-11T15:42:59.000Z | 2022-02-25T19:50:34.000Z | #!/usr/bin/env python3
import ethercalc
import argparse
import pprint
import sys
parser = argparse.ArgumentParser(description="Dump ethercalc sheet")
parser.add_argument("sheet", metavar='sheet', help="sheet name")
parser.add_argument("-f", "--format", dest="format",
help="format", default="socialcalc")
args = parser.parse_args()
data = sys.stdin.buffer.read()
e = ethercalc.EtherCalc("http://localhost:8000")
a = e.update(data, format=args.format, id=args.sheet)
| 28.764706 | 68 | 0.715746 |
import ethercalc
import argparse
import pprint
import sys
parser = argparse.ArgumentParser(description="Dump ethercalc sheet")
parser.add_argument("sheet", metavar='sheet', help="sheet name")
parser.add_argument("-f", "--format", dest="format",
help="format", default="socialcalc")
args = parser.parse_args()
data = sys.stdin.buffer.read()
e = ethercalc.EtherCalc("http://localhost:8000")
a = e.update(data, format=args.format, id=args.sheet)
| true | true |
f72194e07175df8c6208e51d9aafe054145aca68 | 200 | py | Python | drone_squadron/api/thruster_api.py | OrderAndCh4oS/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | 1 | 2020-05-20T09:44:37.000Z | 2020-05-20T09:44:37.000Z | drone_squadron/api/thruster_api.py | sarcoma/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | 1 | 2021-06-01T22:30:10.000Z | 2021-06-01T22:30:10.000Z | drone_squadron/api/thruster_api.py | OrderAndCh4oS/drone_squadron_api_prototype | 4d7c22cebb03576986d443634b17910cb460a60f | [
"MIT"
] | null | null | null | from drone_squadron.api.base_api import BaseApi
from drone_squadron.crud.thruster_crud import ThrusterCrud
class ThrusterApi(BaseApi):
def __init__(self):
super().__init__(ThrusterCrud)
| 25 | 58 | 0.79 | from drone_squadron.api.base_api import BaseApi
from drone_squadron.crud.thruster_crud import ThrusterCrud
class ThrusterApi(BaseApi):
def __init__(self):
super().__init__(ThrusterCrud)
| true | true |
f7219557f313231bf047af09d8d81a13981c3f2b | 368 | py | Python | durgo_sdk/integrations/django/middleware.py | safwanrahman/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T21:56:45.000Z | 2020-08-12T21:56:45.000Z | durgo_sdk/integrations/django/middleware.py | Alig1493/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | null | null | null | durgo_sdk/integrations/django/middleware.py | Alig1493/durgo-python | 79b740e0500e1ba2bce7edcb47996587a9449964 | [
"BSD-3-Clause"
] | 1 | 2020-03-21T18:30:28.000Z | 2020-03-21T18:30:28.000Z | from django.utils import timezone
class DurgoMiddleware:
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
start_time = timezone.now()
response = self.get_response(request)
end_time = timezone.now()
return response
| 21.647059 | 52 | 0.673913 | from django.utils import timezone
class DurgoMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
start_time = timezone.now()
response = self.get_response(request)
end_time = timezone.now()
return response
| true | true |
f72196382b201f0b3ce9c05e95a1507ab101ac39 | 367 | py | Python | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | 4 | 2019-02-18T08:58:19.000Z | 2021-11-05T01:20:32.000Z | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | null | null | null | test/testDepthfilling.py | zer01ike/HoleFilling | b1591485f37975c0793839880dbb6185a132d3f9 | [
"Apache-2.0"
] | 6 | 2018-05-21T10:08:20.000Z | 2021-11-05T01:20:35.000Z | from DepthFilling import DepthFilling
import cv2
DepthedImg = cv2.imread('../DataSet/Sequence/Warped/depth_0_w.bmp', 0)
DF = DepthFilling.DepthFilling(DepthedImg,63,63)
#depth_filled = DF.testKmeans(DepthedImg)
depth_filled = DF.depthfill()
cv2.imshow('depth', depth_filled)
cv2.imwrite('depthfill_book_0.bmp',depth_filled)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 28.230769 | 70 | 0.792916 | from DepthFilling import DepthFilling
import cv2
DepthedImg = cv2.imread('../DataSet/Sequence/Warped/depth_0_w.bmp', 0)
DF = DepthFilling.DepthFilling(DepthedImg,63,63)
depth_filled = DF.depthfill()
cv2.imshow('depth', depth_filled)
cv2.imwrite('depthfill_book_0.bmp',depth_filled)
cv2.waitKey(0)
cv2.destroyAllWindows()
| true | true |
f7219684ce3f2077f43b7fa0f52973b32fe1628b | 1,607 | py | Python | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/recorder/test_util.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Test util methods."""
from unittest.mock import MagicMock, patch
import pytest
from openpeerpower.components.recorder import util
from openpeerpower.components.recorder.const import DATA_INSTANCE
from tests.common import get_test_open_peer_power, init_recorder_component
@pytest.fixture
def opp_recorder():
"""Open Peer Power fixture with in-memory recorder."""
opp = get_test_open_peer_power()
def setup_recorder(config=None):
"""Set up with params."""
init_recorder_component(opp, config)
opp.start()
opp.block_till_done()
opp.data[DATA_INSTANCE].block_till_done()
return opp
yield setup_recorder
opp.stop()
def test_recorder_bad_commit(opp_recorder):
"""Bad _commit should retry 3 times."""
opp = opp_recorder()
def work(session):
"""Bad work."""
session.execute("select * from notthere")
with patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock, util.session_scope(opp=opp) as session:
res = util.commit(session, work)
assert res is False
assert e_mock.call_count == 3
def test_recorder_bad_execute(opp_recorder):
"""Bad execute, retry 3 times."""
from sqlalchemy.exc import SQLAlchemyError
opp_recorder()
def to_native():
"""Rasie exception."""
raise SQLAlchemyError()
mck1 = MagicMock()
mck1.to_native = to_native
with pytest.raises(SQLAlchemyError), patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock:
util.execute((mck1,))
assert e_mock.call_count == 2
| 25.109375 | 74 | 0.684505 | from unittest.mock import MagicMock, patch
import pytest
from openpeerpower.components.recorder import util
from openpeerpower.components.recorder.const import DATA_INSTANCE
from tests.common import get_test_open_peer_power, init_recorder_component
@pytest.fixture
def opp_recorder():
opp = get_test_open_peer_power()
def setup_recorder(config=None):
init_recorder_component(opp, config)
opp.start()
opp.block_till_done()
opp.data[DATA_INSTANCE].block_till_done()
return opp
yield setup_recorder
opp.stop()
def test_recorder_bad_commit(opp_recorder):
opp = opp_recorder()
def work(session):
session.execute("select * from notthere")
with patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock, util.session_scope(opp=opp) as session:
res = util.commit(session, work)
assert res is False
assert e_mock.call_count == 3
def test_recorder_bad_execute(opp_recorder):
from sqlalchemy.exc import SQLAlchemyError
opp_recorder()
def to_native():
raise SQLAlchemyError()
mck1 = MagicMock()
mck1.to_native = to_native
with pytest.raises(SQLAlchemyError), patch(
"openpeerpower.components.recorder.time.sleep"
) as e_mock:
util.execute((mck1,))
assert e_mock.call_count == 2
| true | true |
f72196e96928e506436940a1aaab2796da44a560 | 31,440 | py | Python | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | 02 Main/mainRUN.py | dengniewei/Face-Recognition-Class-Attendance-System | 58aa85ff3b378991da3ccebd69e6ace5ec2af93f | [
"MIT"
] | null | null | null | # 导入必要的模块
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QInputDialog
from PyQt5.QtGui import QImage, QIcon, QPixmap
from PyQt5.QtCore import QTimer, QDateTime, QCoreApplication, QThread
import sys, os
import cv2, imutils
# 导入UI主界面
import main
# 导入信息采集框界面
import infoUI
# 导入打印中文脚本
import ChinesePutText
# 导入人脸识别检测包
from imutils.video import VideoStream
import numpy as np
import pickle
# 导入眨眼检测必要的包
from scipy.spatial import distance as dist
from imutils import face_utils
from datetime import datetime
import dlib
# 导入数据库操作包
import pymysql
# 定义活体检测-眨眼检测类
class BlinksDetectThread(QThread):
    """Background thread for blink-based liveness detection.

    Reads frames from the webcam, locates the 68 dlib facial landmarks,
    computes the eye aspect ratio (EAR) of both eyes and registers a
    blink whenever the EAR stays below ``EYE_AR_THRESH`` for at least
    ``EYE_AR_CONSEC_FRAMES`` consecutive frames.  Emits ``trigger`` once
    per processed face so the GUI can react.
    """

    trigger = QtCore.pyqtSignal()  # emitted after each processed face

    def __init__(self):
        super(BlinksDetectThread, self).__init__()
        # An EAR below this threshold counts as a closed eye; a blink is
        # registered once the eye has been closed for this many frames.
        self.EYE_AR_THRESH = 0.25
        self.EYE_AR_CONSEC_FRAMES = 3
        # Frame counter for the current eye closure and total blink count.
        self.COUNTER = 0
        self.TOTAL = 0
        # Scratch values kept as attributes so they can be inspected.
        self.A = 0
        self.B = 0
        self.C = 0
        self.leftEye = 0
        self.rightEye = 0
        self.leftEAR = 0
        self.rightEAR = 0
        self.ear = 0
        # 1 while the detection loop should keep running, 0 to stop it.
        self.BlinksFlag = 1
        # Kept for backward compatibility with code that inspects cap3.
        self.cap3 = cv2.VideoCapture()
        # Single shared camera stream; created in run(), released on exit.
        # (The original code created a new VideoStream for every frame,
        # leaking the capture device once per iteration.)
        self._vs = None

    def eye_aspect_ratio(self, eye):
        """Return the eye aspect ratio (EAR) for one eye.

        ``eye`` holds the six (x, y) landmark points of a single eye.
        The EAR is the mean of the two vertical landmark distances
        divided by the horizontal distance; it drops sharply when the
        eye closes.
        """
        # Euclidean distances between the two vertical landmark pairs.
        self.A = dist.euclidean(eye[1], eye[5])
        self.B = dist.euclidean(eye[2], eye[4])
        # Euclidean distance between the horizontal landmark pair.
        self.C = dist.euclidean(eye[0], eye[3])
        # Eye aspect ratio.
        ear = (self.A + self.B) / (2.0 * self.C)
        return ear

    def run(self):
        if self.BlinksFlag != 1:
            return
        # Load dlib's HOG face detector and the 68-landmark predictor.
        print("[INFO] loading facial landmark predictor...")
        shape_predictor_path = "shape_predictor_68_face_landmarks.dat"
        detector = dlib.get_frontal_face_detector()
        predictor = dlib.shape_predictor(shape_predictor_path)
        # Landmark index ranges of the left and right eye.
        (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
        (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
        # Open the camera ONCE, before the frame loop.
        self._vs = VideoStream(src=cv2.CAP_DSHOW).start()
        while self.BlinksFlag == 1:
            # Grab a frame, keep the GUI responsive, resize and convert
            # to grayscale for detection.
            frame3 = self._vs.read()
            QApplication.processEvents()
            frame3 = imutils.resize(frame3, width=900)
            gray = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)
            # Detect faces in the grayscale frame.
            rects = detector(gray, 0)
            for rect in rects:
                # Facial landmarks as a NumPy array of (x, y) points.
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)
                # Per-eye EAR, then the average of both eyes.
                self.leftEye = shape[lStart:lEnd]
                self.rightEye = shape[rStart:rEnd]
                self.leftEAR = self.eye_aspect_ratio(self.leftEye)
                self.rightEAR = self.eye_aspect_ratio(self.rightEye)
                self.ear = (self.leftEAR + self.rightEAR) / 2.0
                if self.ear < self.EYE_AR_THRESH:
                    # Eye currently closed: extend the closure streak.
                    self.COUNTER += 1
                else:
                    # Eye open again: count a blink if the closure lasted
                    # long enough, then reset the streak counter.
                    if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:
                        self.TOTAL += 1
                    self.COUNTER = 0
                self.trigger.emit()
                if self.TOTAL == 1:
                    print("活体!眨眼次数为: {}".format(self.TOTAL))
        # Loop finished: release the camera stream.
        if self._vs is not None:
            self._vs.stop()
            self._vs = None

    def terminate(self):
        """Ask the detection loop in run() to stop.

        run() releases the camera itself once the loop exits.  (The
        original implementation created a brand-new VideoStream here and
        stopped that, which never affected the running stream.)
        """
        self.BlinksFlag = 0
#########################################################################################
class MainWindow(QWidget):
# 类构造函数
    def __init__(self):
        """Build the main window: load the UI, wire up all button
        callbacks, start the clock timer and initialise camera state."""
        # super() returns the parent object; __init__ is its constructor.
        super().__init__()
        self.ui = main.Ui_Form()
        self.ui.setupUi(self)
        # Window title and icon.
        self.setWindowTitle('人脸识别考勤系统')
        self.setWindowIcon(QIcon('fcblogo.jpg'))
        # label_time shows the system time, refreshed by a timer.
        timer = QTimer(self)
        timer.timeout.connect(self.showTimeText)
        timer.start()
        # Initialise the camera.
        # self.url = 0  # Calling the camera this way errors out and hangs.
        self.url = cv2.CAP_DSHOW  # defaults to camera 0; use cv2.CAP_DSHOW + 1 for camera 1
        self.cap = cv2.VideoCapture()
        # Static background image shown before the camera opens.
        pixmap = QPixmap('background1.png')
        self.ui.label_camera.setPixmap(pixmap)
        # Camera toggle button callback.
        self.ui.bt_openCamera.clicked.connect(self.openCamera)
        # "Start attendance" button callback.
        self.ui.bt_startCheck.clicked.connect(self.autoControl)
        # Liveness (blink) detection button callback.
        self.ui.bt_blinks.clicked.connect(self.BlinksThread)
        # "Exit system" button: quit the application when pressed.
        self.ui.bt_exit.clicked.connect(QCoreApplication.instance().quit)
        # Keep a handle to the information-gathering button.
        self.bt_gathering = self.ui.bt_gathering
        # Flag distinguishing plain camera view (0) from recognition mode (1).
        self.switch_bt = 0
        global flag2
        flag2 = 0
        # Names recorded during attendance so far.
        self.record_name1 = ([])
        # "Update face database" button callback.
        self.ui.bt_generator.clicked.connect(self.trainModel)
        # "Check class size" button callback.
        self.ui.bt_check.clicked.connect(self.checkNums)
        # "Leave of absence" button callback.
        self.ui.bt_leave.clicked.connect(self.leaveButton)
        # "Supplementary sign-in" button callback.
        self.ui.bt_Supplement.clicked.connect(self.supplymentButton)
        # Show a clear button inside the input fields.
        self.ui.lineEdit.setClearButtonEnabled(True)
        self.ui.lineEdit_2.setClearButtonEnabled(True)
        # "View results" (absent/late) button callback.
        self.ui.bt_view.clicked.connect(self.showLateAbsentee)
        self.checkTime, ok = QInputDialog.getText(self, '考勤时间设定', '请输入考勤时间(格式为00:00:00):')
# 显示系统时间以及相关文字提示函数
def showTimeText(self):
# 设置宽度
self.ui.label_time.setFixedWidth(200)
# 设置显示文本格式
self.ui.label_time.setStyleSheet(
# "QLabel{background:white;}" 此处设置背景色
# "QLabel{color:rgb(300,300,300,120); font-size:14px; font-weight:bold; font-family:宋体;}"
"QLabel{font-size:14px; font-weight:bold; font-family:宋体;}"
)
datetime = QDateTime.currentDateTime().toString()
self.ui.label_time.setText("" + datetime)
# 显示“人脸识别考勤系统”文字
self.ui.label_title.setFixedWidth(400)
self.ui.label_title.setStyleSheet(
"QLabel{font-size:30px; font-weight:bold; font-family:宋体;}")
self.ui.label_title.setText("人脸识别考勤系统")
def openCamera(self, url):
# 判断摄像头是否打开,如果打开则为true,反之为false
flag = self.cap.isOpened()
if flag == False:
self.ui.label_logo.clear()
self.cap.open(self.url)
self.showCamera()
elif flag == True:
self.cap.release()
self.ui.label_logo.clear()
self.ui.label_camera.clear()
self.ui.bt_openCamera.setText(u'打开相机')
# 进入考勤模式,通过switch_bt进行控制的函数
def autoControl(self):
if self.switch_bt == 0:
self.switch_bt = 1
flag2 = 1
self.ui.bt_startCheck.setText(u'退出考勤')
self.showCamera()
elif self.switch_bt == 1:
self.switch_bt = 0
flag2 = 0
self.ui.bt_startCheck.setText(u'开始考勤')
self.showCamera()
def BlinksThread(self):
bt_text = self.ui.bt_blinks.text()
if bt_text == '活体检测':
# 初始化眨眼检测线程
self.startThread = BlinksDetectThread()
self.startThread.start() # 启动线程
self.ui.bt_blinks.setText('停止检测')
else:
self.ui.bt_blinks.setText('活体检测')
# self.startThread.terminate() # 停止线程
def showCamera(self):
# 如果按键按下
if self.switch_bt == 0:
self.ui.label_logo.clear()
self.ui.bt_openCamera.setText(u'关闭相机')
while (self.cap.isOpened()):
# 以BGR格式读取图像
ret, self.image = self.cap.read(cv2.CAP_DSHOW)
QApplication.processEvents() # 这句代码告诉QT处理来处理任何没有被处理的事件,并且将控制权返回给调用者,让代码变的没有那么卡
# 将图像转换为RGB格式
show = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) # 这里指的是显示原图
# opencv 读取图片的样式,不能通过Qlabel进行显示,需要转换为Qimage QImage(uchar * data, int width,
self.showImage = QImage(show.data, show.shape[1], show.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
# 因为最后会存留一张图像在lable上,需要对lable进行清理
self.ui.label_camera.clear()
self.ui.bt_openCamera.setText(u'打开相机')
elif self.switch_bt == 1:
self.ui.label_logo.clear()
self.ui.bt_startCheck.setText(u'退出考勤')
# OpenCV深度学习人脸检测器的路径
detector = "face_detection_model"
# OpenCV深度学习面部嵌入模型的路径
embedding_model = "face_detection_model/openface_nn4.small2.v1.t7"
# 训练模型以识别面部的路径
recognizer_path = "output/recognizer.pickle"
# 标签编码器的路径
le_path = "output/le.pickle"
# 置信度
confidence_default = 0.5
# 从磁盘加载序列化面部检测器
protoPath = os.path.sep.join([detector, "deploy.prototxt"])
modelPath = os.path.sep.join([detector, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
# 从磁盘加载我们的序列化面嵌入模型
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
# 加载实际的人脸识别模型和标签
recognizer = pickle.loads(open(recognizer_path, "rb").read())
le = pickle.loads(open(le_path, "rb").read())
# 循环来自视频文件流的帧
while (self.cap.isOpened()):
# 从线程视频流中抓取帧
ret, frame = self.cap.read()
QApplication.processEvents()
# 调整框架的大小以使其宽度为900像素(同时保持纵横比),然后抓取图像尺寸
frame = imutils.resize(frame, width=900)
(h, w) = frame.shape[:2]
# 从图像构造一个blob
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
# 应用OpenCV的基于深度学习的人脸检测器来定位输入图像中的人脸
detector.setInput(imageBlob)
detections = detector.forward()
# 保存识别到的人脸
face_names = []
# 循环检测
for i in np.arange(0, detections.shape[2]):
# 提取与预测相关的置信度(即概率)
confidence = detections[0, 0, i, 2]
# 用于更新相机开关按键信息
flag = self.cap.isOpened()
if flag == False:
self.ui.bt_openCamera.setText(u'打开相机')
elif flag == True:
self.ui.bt_openCamera.setText(u'关闭相机')
# 过滤弱检测
if confidence > confidence_default:
# 计算面部边界框的(x,y)坐标
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# 提取面部ROI
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
# 确保面部宽度和高度足够大
if fW < 20 or fH < 20:
continue
# 为面部ROI构造一个blob,然后通过我们的面部嵌入模型传递blob以获得面部的128-d量化
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
# 执行分类识别面部
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
# 绘制面部的边界框以及相关的概率
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
frame = cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
face_names.append(name)
bt_liveness = self.ui.bt_blinks.text()
if bt_liveness == '停止检测':
ChineseText = ChinesePutText.put_chinese_text('microsoft.ttf')
frame = ChineseText.draw_text(frame, (330, 80), ' 请眨眨眼睛 ', 25, (55, 255, 55))
# 显示输出框架
show_video = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # 这里指的是显示原图
# opencv读取图片的样式,不能通过Qlabel进行显示,需要转换为Qimage。
# QImage(uchar * data, int width, int height, int bytesPerLine, Format format)
self.showImage = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
self.set_name = set(face_names)
self.set_names = tuple(self.set_name)
self.recordNames()
# 因为最后一张画面会显示在GUI中,此处实现清除。
self.ui.label_camera.clear()
def recordNames(self):
if self.set_name.issubset(self.record_name1): # 如果self.set_names是self.record_names 的子集返回ture
pass # record_name1是要写进数据库中的名字信息 set_name是从摄像头中读出人脸的tuple形式
else:
self.different_name1 = self.set_name.difference(self.record_name1) # 获取到self.set_name有而self.record_name无的名字
self.record_name1 = self.set_name.union(self.record_name1) # 把self.record_name变成两个集合的并集
# different_name是为了获取到之前没有捕捉到的人脸,并再次将record_name1进行更新
# 将集合变成tuple,并统计人数
self.write_data = tuple(self.different_name1)
names_num = len(self.write_data)
# 显示签到人数
self.ui.lcd_2.display(len(self.record_name1))
if names_num > 0:
# 将签到信息写入数据库
self.lineTextInfo2 = []
# 打开数据库连接
db2 = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor2 = db2.cursor()
# 获取系统时间,保存到秒
import datetime
currentTime2 = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
results2 = self.useIDGetInfo(self.write_data[0])
# 判断是否迟到
import datetime
self.ymd = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.ymd2 = datetime.datetime.now().strftime("%H:%M:%S")
compareResult2 = self.compare_time('{}'.format(self.ymd2), '{}'.format(self.checkTime))
# 82800表示23个小时,在compare_time()函数中,如果第一个时间小于第二个时间,则为第一个时间加24h后再减去第二时间;
# 而正常的结果应该为'正常'.
if compareResult2 <= 82800:
self.description2 = '迟到'
else:
self.description2 = '正常'
self.lineTextInfo2.append((results2[0], results2[1], results2[2], currentTime2, self.description2))
print(self.lineTextInfo2)
# 写入数据库
try:
# 如果存在数据,先删除再写入。前提是设置唯一索引字段或者主键。
insert_sql2 = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users2 = self.lineTextInfo2
cursor2.executemany(insert_sql2, users2)
except Exception as e:
print(e)
print("SQL execute failed!")
else:
print("SQL execute success!")
QMessageBox.information(self, "Tips", "签到成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
# 提交到数据库执行
db2.commit()
cursor2.close()
db2.close()
# 比较时间大小,判断是否迟到
def compare_time(self, time1, time2):
import datetime
s_time = datetime.datetime.strptime(time1, '%H:%M:%S')
e_time = datetime.datetime.strptime(time2, '%H:%M:%S')
delta = s_time - e_time
return delta.seconds
# 查询班级人数
def checkNums(self):
# 选择的班级
input_Class = self.ui.comboBox.currentText()
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 查询语句,实现通过ID关键字检索个人信息的功能
sql = "select * from studentnums where class = {}".format(input_Class)
# 执行查询
if input_Class != '':
try:
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
self.nums = []
for i in results:
self.nums.append(i[1])
except:
print("Error: unable to fetch data")
# 用于查询每班的实到人数
sql2 = "select * from checkin where class = {}".format(input_Class)
# 执行查询
if input_Class != '':
try:
cursor.execute(sql2)
# 获取所有记录列表
results2 = cursor.fetchall()
self.nums2 = []
for i in results2:
self.nums2.append(i[2])
except:
print("Error: unable to fetch data")
# lcd控件显示人数
self.ui.lcd_1.display(self.nums[0])
self.ui.lcd_2.display(len(self.nums2))
# 关闭数据库连接
db.close()
# 请假/补签登记
def leaveButton(self):
self.leaveStudents(1)
def supplymentButton(self):
self.leaveStudents(2)
def leaveStudents(self, button):
self.lineTextInfo = []
# 为防止输入为空卡死,先进行是否输入数据的判断
if self.ui.lineEdit.isModified() or self.ui.lineEdit_2.isModified():
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 获取系统时间,保存到秒
currentTime = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if button == 1:
self.description = '请假'
self.lineTextID = self.ui.lineEdit.text()
results = self.useIDGetInfo(self.lineTextID)
elif button == 2:
self.description = '漏签补签'
self.lineTextID = self.ui.lineEdit_2.text()
results = self.useIDGetInfo(self.lineTextID)
self.lineTextInfo.append((results[0], results[1], results[2], currentTime, self.description))
# 写入数据库
try:
# 如果存在数据,先删除再写入。前提是设置唯一索引字段或者主键。
insert_sql = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users = self.lineTextInfo
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "{} 登记成功,请勿重复操作!".format(self.description), QMessageBox.Yes | QMessageBox.No)
# 提交到数据库执行
db.commit()
cursor.close()
db.close()
else:
QMessageBox.warning(self, "warning", "学号不能为空,请输入后重试!", QMessageBox.Yes | QMessageBox.No)
# 输入框清零
self.ui.lineEdit.clear()
self.ui.lineEdit_2.clear()
# 使用ID当索引找到其它信息
def useIDGetInfo(self, ID):
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 查询语句,实现通过ID关键字检索个人信息的功能
sql = "select * from students where ID = {}".format(ID)
# 执行查询
if ID != '':
try:
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
self.checkInfo = []
for i in results:
self.checkInfo.append(i[1])
self.checkInfo.append(i[0])
self.checkInfo.append(i[2])
return self.checkInfo
except:
print("Error: unable to fetch data")
# 显示迟到和未到
def showLateAbsentee(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
# 一定要注意字符串在检索时要加''!
sql1 = "select name from checkin where Description = '{}'".format('迟到')
sql2 = "select name from students"
try:
cursor.execute(sql1)
results = cursor.fetchall()
self.lateNums = []
for x in results:
self.lateNums.append(x[0])
self.lateNums.sort()
# print(self.lateNums)
except:
print("Error: unable to fetch latedata")
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.allNums = []
for i in results2:
self.allNums.append(i[0])
self.allNums.sort()
print(self.allNums)
except:
print("Error: unable to fetch absenteedata")
db.commit()
cursor.close()
db.close()
# 集合运算,算出未到的和迟到的
self.AbsenteeNums = set(set(self.allNums) - set(self.lateNums))
self.AbsenteeNums = list(self.AbsenteeNums)
self.AbsenteeNums.sort()
# 在控件中显示未到的同学
rowLate = len(self.lateNums)
rowAbsentee = len(self.AbsenteeNums)
model1 = QtGui.QStandardItemModel(rowLate, 0)
# 设置数据行、列标题
model1.setHorizontalHeaderLabels(['姓名'])
# 设置填入数据内容
for row in range(rowLate):
item = QtGui.QStandardItem(self.lateNums[row])
# 设置每个位置的文本值
model1.setItem(row, 0, item)
# 指定显示的tableView控件,实例化表格视图
View1 = self.ui.tableView_escape
View1.setModel(model1)
# 迟到显示
model2 = QtGui.QStandardItemModel(rowAbsentee, 0)
# 设置数据行、列标题
model2.setHorizontalHeaderLabels(['姓名'])
# 设置填入数据内容
for row in range(rowAbsentee):
item = QtGui.QStandardItem(self.AbsenteeNums[row])
# 设置每个位置的文本值
model2.setItem(row, 0, item)
# 指定显示的tableView控件,实例化表格视图
View2 = self.ui.tableView_late
View2.setModel(model2)
# 训练人脸识别模型
def trainModel(self):
import GeneratorModel
GeneratorModel.Generator()
GeneratorModel.TrainModel()
print('Model have been trained!')
##########################################################################################
class infoDialog(QWidget):
def __init__(self):
# super()构造器方法返回父级的对象。__init__()方法是构造器的一个方法。
super().__init__()
self.Dialog = infoUI.Ui_Form()
self.Dialog.setupUi(self)
# 设置窗口名称和图标
self.setWindowTitle('个人信息采集')
self.setWindowIcon(QIcon('fcblogo.jpg'))
# 设置单张图片背景
pixmap = QPixmap('background2.png')
self.Dialog.label_capture.setPixmap(pixmap)
# 设置信息采集按键连接函数
self.Dialog.bt_collectInfo.clicked.connect(self.openCam)
# 设置拍照按键连接函数
self.Dialog.bt_takephoto.clicked.connect(self.takePhoto)
# 设置查询信息按键连接函数
self.Dialog.bt_checkInfo.clicked.connect(self.checkInfo)
# 设置写入信息按键连接函数
self.Dialog.bt_changeInfo.clicked.connect(self.changeInfo)
# 初始化信息导入列表
self.users = []
# 初始化摄像头
self.url2 = cv2.CAP_DSHOW
self.cap2 = cv2.VideoCapture()
# 初始化保存人脸数目
self.photos = 0
def handle_click(self):
if not self.isVisible():
self.show()
def handle_close(self):
self.close()
def openCam(self):
# 判断摄像头是否打开,如果打开则为true,反之为false
flagCam = self.cap2.isOpened()
if flagCam == False:
# 通过对话框设置被采集人学号
self.text, self.ok = QInputDialog.getText(self, '创建个人图像数据库', '请输入学号:')
if self.ok and self.text != '':
self.Dialog.label_capture.clear()
self.cap2.open(self.url2)
self.showCapture()
elif flagCam == True:
self.cap2.release()
self.Dialog.label_capture.clear()
self.Dialog.bt_collectInfo.setText(u'采集人像')
def showCapture(self):
self.Dialog.bt_collectInfo.setText(u'停止采集')
self.Dialog.label_capture.clear()
# 导入opencv人脸检测xml文件
cascade = 'haarcascades_cuda/haarcascade_frontalface_default.xml'
# 加载 Haar级联人脸检测库
detector = cv2.CascadeClassifier(cascade)
print("[INFO] starting video stream...")
# 循环来自视频文件流的帧
while self.cap2.isOpened():
ret, frame2 = self.cap2.read()
QApplication.processEvents()
self.orig = frame2.copy()
frame2 = imutils.resize(frame2, width=500)
rects = detector.detectMultiScale(cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY), scaleFactor=1.1,
minNeighbors=5, minSize=(30, 30))
for (x, y, w, h) in rects:
cv2.rectangle(frame2, (x, y), (x + w, y + h), (0, 255, 0), 2)
frame2 = cv2.putText(frame2, "Have token {}/20 faces".format(self.photos), (50, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
(200, 100, 50), 2)
# 显示输出框架
show_video2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB) # 这里指的是显示原图
# opencv读取图片的样式,不能通过Qlabel进行显示,需要转换为Qimage。
# QImage(uchar * data, int width, int height, int bytesPerLine, Format format)
self.showImage2 = QImage(show_video2.data, show_video2.shape[1], show_video2.shape[0], QImage.Format_RGB888)
self.Dialog.label_capture.setPixmap(QPixmap.fromImage(self.showImage2))
# 因为最后一张画面会显示在GUI中,此处实现清除。
self.Dialog.label_capture.clear()
# 创建文件夹
def mkdir(self, path):
# 去除首位空格
path = path.strip()
# 去除尾部 \ 符号
path = path.rstrip("\\")
# 判断路径是否存在, 存在=True; 不存在=False
isExists = os.path.exists(path)
# 判断结果
if not isExists:
# 如果不存在则创建目录
os.makedirs(path)
return True
def takePhoto(self):
self.photos += 1
self.filename = "D:\\Github\\class-attendance-system-based-on-face-recognition\\02 Main\\dataset\\{}\\".format(self.text)
self.mkdir(self.filename)
photo_save_path = os.path.join(os.path.dirname(os.path.abspath('__file__')), '{}'.format(self.filename))
self.showImage2.save(photo_save_path + datetime.now().strftime("%Y%m%d%H%M%S") + ".png")
# p = os.path.sep.join([output, "{}.png".format(str(total).zfill(5))])
# cv2.imwrite(p, self.showImage2)
if self.photos == 20:
QMessageBox.information(self, "Information", self.tr("采集成功!"), QMessageBox.Yes | QMessageBox.No)
# 数据库查询
def checkInfo(self):
# 键入ID
self.input_ID = self.Dialog.lineEdit_ID.text()
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 查询语句,实现通过ID关键字检索个人信息的功能
sql = "SELECT * FROM STUDENTS WHERE ID = {}".format(self.input_ID)
# 执行查询
if self.input_ID != '':
try:
cursor.execute(sql)
# 获取所有记录列表
results = cursor.fetchall()
self.lists = []
for i in results:
self.lists.append(i[0])
self.lists.append(i[1])
self.lists.append(i[2])
self.lists.append(i[3])
self.lists.append(i[4])
except:
print("Error: unable to fetch data")
# 设置显示数据层次结构,5行2列(包含行表头)
self.model = QtGui.QStandardItemModel(5, 0)
# 设置数据行、列标题
self.model.setHorizontalHeaderLabels(['值'])
self.model.setVerticalHeaderLabels(['学号', '姓名', '班级', '性别', '生日'])
# 设置填入数据内容
nums = len(self.lists)
if nums == 0:
QMessageBox.warning(self, "warning", "人脸数据库中无此人信息,请马上录入!", QMessageBox.Yes | QMessageBox.No)
for row in range(nums):
item = QtGui.QStandardItem(self.lists[row])
# 设置每个位置的文本值
self.model.setItem(row, 0, item)
# 指定显示的tableView控件,实例化表格视图
self.View = self.Dialog.tableView
self.View.setModel(self.model)
# 关闭数据库连接
db.close()
# 将采集信息写入数据库
def userInfo(self):
ID = self.Dialog.lineEdit_ID.text()
Name = self.Dialog.lineEdit_Name.text()
Class = self.Dialog.lineEdit_Class.text()
Sex = self.Dialog.lineEdit_Sex.text()
Birth = self.Dialog.lineEdit_Birth.text()
self.users.append((ID, Name, Class, Sex, Birth))
return self.users
def changeInfo(self):
# 打开数据库连接
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
# 使用cursor()方法获取操作游标
cursor = db.cursor()
# 写入数据库
try:
# 如果存在数据,先删除再写入。前提是设置唯一索引字段或者主键。
insert_sql = "replace into students(ID, Name, Class, Sex, Birthday) values(%s, %s, %s, %s, %s)"
users = self.userInfo()
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "录入成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
# 提交到数据库执行
db.commit()
# 关闭数据库
cursor.close()
# 关闭数据库连接
db.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
# 创建并显示窗口
mainWindow = MainWindow()
infoWindow = infoDialog()
mainWindow.ui.bt_gathering.clicked.connect(infoWindow.handle_click)
mainWindow.show()
sys.exit(app.exec_()) | 39.847909 | 132 | 0.535401 |
from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox, QInputDialog
from PyQt5.QtGui import QImage, QIcon, QPixmap
from PyQt5.QtCore import QTimer, QDateTime, QCoreApplication, QThread
import sys, os
import cv2, imutils
import main
import infoUI
import ChinesePutText
from imutils.video import VideoStream
import numpy as np
import pickle
from scipy.spatial import distance as dist
from imutils import face_utils
from datetime import datetime
import dlib
import pymysql
class BlinksDetectThread(QThread):
trigger = QtCore.pyqtSignal()
def __init__(self):
super(BlinksDetectThread, self).__init__()
self.EYE_AR_THRESH = 0.25
self.EYE_AR_CONSEC_FRAMES = 3
self.COUNTER = 0
self.TOTAL = 0
self.A = 0
self.B = 0
self.C = 0
self.leftEye = 0
self.rightEye = 0
self.leftEAR = 0
self.rightEAR = 0
self.ear = 0
self.BlinksFlag = 1
self.cap3 = cv2.VideoCapture()
def eye_aspect_ratio(self, eye):
self.A = dist.euclidean(eye[1], eye[5])
self.B = dist.euclidean(eye[2], eye[4])
self.C = dist.euclidean(eye[0], eye[3])
ear = (self.A + self.B) / (2.0 * self.C)
return ear
def run(self):
if self.BlinksFlag == 1:
print("[INFO] loading facial landmark predictor...")
shape_predictor_path = "shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(shape_predictor_path)
(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]
self.cap3.open(cv2.CAP_DSHOW)
while self.BlinksFlag == 1:
vs = VideoStream(src=cv2.CAP_DSHOW).start()
frame3 = vs.read()
QApplication.processEvents()
frame3 = imutils.resize(frame3, width=900)
gray = cv2.cvtColor(frame3, cv2.COLOR_BGR2GRAY)
rects = detector(gray, 0)
for rect in rects:
shape = predictor(gray, rect)
shape = face_utils.shape_to_np(shape)
self.leftEye = shape[lStart:lEnd]
self.rightEye = shape[rStart:rEnd]
self.leftEAR = self.eye_aspect_ratio(self.leftEye)
self.rightEAR = self.eye_aspect_ratio(self.rightEye)
self.ear = (self.leftEAR + self.rightEAR) / 2.0
if self.ear < self.EYE_AR_THRESH:
self.COUNTER += 1
else:
if self.COUNTER >= self.EYE_AR_CONSEC_FRAMES:
self.TOTAL += 1
self.COUNTER = 0
self.trigger.emit()
if self.TOTAL == 1:
print("活体!眨眼次数为: {}".format(self.TOTAL))
def terminate(self):
self.BlinksFlag = 0
if flag2 == 0:
VideoStream(src=cv2.CAP_DSHOW).stop()
self.ui.bt_openCamera.setText(u'打开相机')
elif self.switch_bt == 1:
self.ui.label_logo.clear()
self.ui.bt_startCheck.setText(u'退出考勤')
detector = "face_detection_model"
embedding_model = "face_detection_model/openface_nn4.small2.v1.t7"
recognizer_path = "output/recognizer.pickle"
le_path = "output/le.pickle"
confidence_default = 0.5
protoPath = os.path.sep.join([detector, "deploy.prototxt"])
modelPath = os.path.sep.join([detector, "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(embedding_model)
recognizer = pickle.loads(open(recognizer_path, "rb").read())
le = pickle.loads(open(le_path, "rb").read())
while (self.cap.isOpened()):
ret, frame = self.cap.read()
QApplication.processEvents()
frame = imutils.resize(frame, width=900)
(h, w) = frame.shape[:2]
imageBlob = cv2.dnn.blobFromImage(
cv2.resize(frame, (300, 300)), 1.0, (300, 300),
(104.0, 177.0, 123.0), swapRB=False, crop=False)
detector.setInput(imageBlob)
detections = detector.forward()
face_names = []
for i in np.arange(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
flag = self.cap.isOpened()
if flag == False:
self.ui.bt_openCamera.setText(u'打开相机')
elif flag == True:
self.ui.bt_openCamera.setText(u'关闭相机')
if confidence > confidence_default:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
face = frame[startY:endY, startX:endX]
(fH, fW) = face.shape[:2]
if fW < 20 or fH < 20:
continue
faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)
embedder.setInput(faceBlob)
vec = embedder.forward()
preds = recognizer.predict_proba(vec)[0]
j = np.argmax(preds)
proba = preds[j]
name = le.classes_[j]
text = "{}: {:.2f}%".format(name, proba * 100)
y = startY - 10 if startY - 10 > 10 else startY + 10
cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 0, 255), 2)
frame = cv2.putText(frame, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
face_names.append(name)
bt_liveness = self.ui.bt_blinks.text()
if bt_liveness == '停止检测':
ChineseText = ChinesePutText.put_chinese_text('microsoft.ttf')
frame = ChineseText.draw_text(frame, (330, 80), ' 请眨眨眼睛 ', 25, (55, 255, 55))
show_video = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
self.showImage = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888)
self.ui.label_camera.setPixmap(QPixmap.fromImage(self.showImage))
self.set_name = set(face_names)
self.set_names = tuple(self.set_name)
self.recordNames()
self.ui.label_camera.clear()
def recordNames(self):
if self.set_name.issubset(self.record_name1):
pass
else:
self.different_name1 = self.set_name.difference(self.record_name1)
self.record_name1 = self.set_name.union(self.record_name1)
self.write_data = tuple(self.different_name1)
names_num = len(self.write_data)
self.ui.lcd_2.display(len(self.record_name1))
if names_num > 0:
self.lineTextInfo2 = []
db2 = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor2 = db2.cursor()
import datetime
currentTime2 = str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
results2 = self.useIDGetInfo(self.write_data[0])
import datetime
self.ymd = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.ymd2 = datetime.datetime.now().strftime("%H:%M:%S")
compareResult2 = self.compare_time('{}'.format(self.ymd2), '{}'.format(self.checkTime))
if compareResult2 <= 82800:
self.description2 = '迟到'
else:
self.description2 = '正常'
self.lineTextInfo2.append((results2[0], results2[1], results2[2], currentTime2, self.description2))
print(self.lineTextInfo2)
try:
insert_sql2 = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users2 = self.lineTextInfo2
cursor2.executemany(insert_sql2, users2)
except Exception as e:
print(e)
print("SQL execute failed!")
else:
print("SQL execute success!")
QMessageBox.information(self, "Tips", "签到成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
db2.commit()
cursor2.close()
db2.close()
def compare_time(self, time1, time2):
import datetime
s_time = datetime.datetime.strptime(time1, '%H:%M:%S')
e_time = datetime.datetime.strptime(time2, '%H:%M:%S')
delta = s_time - e_time
return delta.seconds
def checkNums(self):
input_Class = self.ui.comboBox.currentText()
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql = "select * from studentnums where class = {}".format(input_Class)
if input_Class != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.nums = []
for i in results:
self.nums.append(i[1])
except:
print("Error: unable to fetch data")
sql2 = "select * from checkin where class = {}".format(input_Class)
if input_Class != '':
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.nums2 = []
for i in results2:
self.nums2.append(i[2])
except:
print("Error: unable to fetch data")
self.ui.lcd_1.display(self.nums[0])
self.ui.lcd_2.display(len(self.nums2))
db.close()
def leaveButton(self):
self.leaveStudents(1)
def supplymentButton(self):
self.leaveStudents(2)
def leaveStudents(self, button):
self.lineTextInfo = []
if self.ui.lineEdit.isModified() or self.ui.lineEdit_2.isModified():
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
currentTime = str(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
if button == 1:
self.description = '请假'
self.lineTextID = self.ui.lineEdit.text()
results = self.useIDGetInfo(self.lineTextID)
elif button == 2:
self.description = '漏签补签'
self.lineTextID = self.ui.lineEdit_2.text()
results = self.useIDGetInfo(self.lineTextID)
self.lineTextInfo.append((results[0], results[1], results[2], currentTime, self.description))
try:
insert_sql = "replace into checkin(Name, ID, Class, Time, Description) values(%s, %s, %s, %s, %s)"
users = self.lineTextInfo
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "{} 登记成功,请勿重复操作!".format(self.description), QMessageBox.Yes | QMessageBox.No)
db.commit()
cursor.close()
db.close()
else:
QMessageBox.warning(self, "warning", "学号不能为空,请输入后重试!", QMessageBox.Yes | QMessageBox.No)
self.ui.lineEdit.clear()
self.ui.lineEdit_2.clear()
def useIDGetInfo(self, ID):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql = "select * from students where ID = {}".format(ID)
if ID != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.checkInfo = []
for i in results:
self.checkInfo.append(i[1])
self.checkInfo.append(i[0])
self.checkInfo.append(i[2])
return self.checkInfo
except:
print("Error: unable to fetch data")
def showLateAbsentee(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
sql1 = "select name from checkin where Description = '{}'".format('迟到')
sql2 = "select name from students"
try:
cursor.execute(sql1)
results = cursor.fetchall()
self.lateNums = []
for x in results:
self.lateNums.append(x[0])
self.lateNums.sort()
except:
print("Error: unable to fetch latedata")
try:
cursor.execute(sql2)
results2 = cursor.fetchall()
self.allNums = []
for i in results2:
self.allNums.append(i[0])
self.allNums.sort()
print(self.allNums)
except:
print("Error: unable to fetch absenteedata")
db.commit()
cursor.close()
db.close()
self.AbsenteeNums = set(set(self.allNums) - set(self.lateNums))
self.AbsenteeNums = list(self.AbsenteeNums)
self.AbsenteeNums.sort()
rowLate = len(self.lateNums)
rowAbsentee = len(self.AbsenteeNums)
model1 = QtGui.QStandardItemModel(rowLate, 0)
model1.setHorizontalHeaderLabels(['姓名'])
for row in range(rowLate):
item = QtGui.QStandardItem(self.lateNums[row])
model1.setItem(row, 0, item)
View1 = self.ui.tableView_escape
View1.setModel(model1)
model2 = QtGui.QStandardItemModel(rowAbsentee, 0)
model2.setHorizontalHeaderLabels(['姓名'])
for row in range(rowAbsentee):
item = QtGui.QStandardItem(self.AbsenteeNums[row])
model2.setItem(row, 0, item)
View2 = self.ui.tableView_late
View2.setModel(model2)
def trainModel(self):
import GeneratorModel
GeneratorModel.Generator()
GeneratorModel.TrainModel()
print('Model have been trained!')
)
if self.input_ID != '':
try:
cursor.execute(sql)
results = cursor.fetchall()
self.lists = []
for i in results:
self.lists.append(i[0])
self.lists.append(i[1])
self.lists.append(i[2])
self.lists.append(i[3])
self.lists.append(i[4])
except:
print("Error: unable to fetch data")
self.model = QtGui.QStandardItemModel(5, 0)
self.model.setHorizontalHeaderLabels(['值'])
self.model.setVerticalHeaderLabels(['学号', '姓名', '班级', '性别', '生日'])
nums = len(self.lists)
if nums == 0:
QMessageBox.warning(self, "warning", "人脸数据库中无此人信息,请马上录入!", QMessageBox.Yes | QMessageBox.No)
for row in range(nums):
item = QtGui.QStandardItem(self.lists[row])
self.model.setItem(row, 0, item)
self.View = self.Dialog.tableView
self.View.setModel(self.model)
db.close()
def userInfo(self):
ID = self.Dialog.lineEdit_ID.text()
Name = self.Dialog.lineEdit_Name.text()
Class = self.Dialog.lineEdit_Class.text()
Sex = self.Dialog.lineEdit_Sex.text()
Birth = self.Dialog.lineEdit_Birth.text()
self.users.append((ID, Name, Class, Sex, Birth))
return self.users
def changeInfo(self):
db = pymysql.connect("localhost", "root", "mysql105", "facerecognition")
cursor = db.cursor()
try:
insert_sql = "replace into students(ID, Name, Class, Sex, Birthday) values(%s, %s, %s, %s, %s)"
users = self.userInfo()
cursor.executemany(insert_sql, users)
except Exception as e:
print(e)
print("sql execute failed")
else:
print("sql execute success")
QMessageBox.warning(self, "warning", "录入成功,请勿重复操作!", QMessageBox.Yes | QMessageBox.No)
db.commit()
cursor.close()
db.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = MainWindow()
infoWindow = infoDialog()
mainWindow.ui.bt_gathering.clicked.connect(infoWindow.handle_click)
mainWindow.show()
sys.exit(app.exec_()) | true | true |
f721978ae0032f3792c8f2bb1e955820288a7de7 | 33,135 | py | Python | src/urllib3/response.py | imkaka/urllib3 | c96cf403fb4f24d414f40faf4691174e4c54ea0b | [
"MIT"
] | null | null | null | src/urllib3/response.py | imkaka/urllib3 | c96cf403fb4f24d414f40faf4691174e4c54ea0b | [
"MIT"
] | 1 | 2022-01-04T12:19:09.000Z | 2022-01-04T12:19:09.000Z | src/urllib3/response.py | sethmlarson/urllib3 | d4c25791cd5002a5234d882a28040db94ca38595 | [
"MIT"
] | null | null | null | import io
import json as _json
import logging
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
try:
try:
import brotlicffi as brotli # type: ignore[import]
except ImportError:
import brotli # type: ignore[import]
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if TYPE_CHECKING:
from typing_extensions import Literal
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
def decompress(self, data: bytes) -> bytes:
raise NotImplementedError()
def flush(self) -> bytes:
raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
    """Decoder for ``Content-Encoding: deflate``.

    Servers disagree on whether "deflate" means a zlib-wrapped stream
    (RFC 1950) or a raw DEFLATE stream (RFC 1951).  The first chunks are
    tried with the zlib wrapper; if that fails we restart with a raw-stream
    decompressor, replaying every byte buffered so far.
    """

    def __init__(self) -> None:
        self._first_try = True
        self._data = b""
        self._obj = zlib.decompressobj()

    def decompress(self, data: bytes) -> bytes:
        if not data:
            return data
        if not self._first_try:
            # Format already settled; decode directly.
            return self._obj.decompress(data)
        # Still probing: keep the raw input so it can be replayed on failure.
        self._data += data
        try:
            out = self._obj.decompress(data)
            if out:
                # zlib-wrapped stream confirmed; the replay buffer is no
                # longer needed.
                self._first_try = False
                self._data = None  # type: ignore[assignment]
            return out
        except zlib.error:
            # Fall back to a raw DEFLATE stream and replay everything seen.
            self._first_try = False
            self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._data)
            finally:
                self._data = None  # type: ignore[assignment]

    def flush(self) -> bytes:
        return self._obj.flush()
class GzipDecoderState:
    """State labels for GzipDecoder's multi-member stream handling."""
    FIRST_MEMBER = 0  # still decoding the first gzip member
    OTHER_MEMBERS = 1  # at least one member completed; trailing garbage tolerated
    SWALLOW_DATA = 2  # a decode error occurred; discard all further input
class GzipDecoder(ContentDecoder):
    """Decoder for ``Content-Encoding: gzip``, including multi-member streams."""

    def __init__(self) -> None:
        # wbits = 16 + MAX_WBITS selects gzip-header mode in zlib.
        self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
        self._state = GzipDecoderState.FIRST_MEMBER

    def decompress(self, data: bytes) -> bytes:
        output = bytearray()
        if not data or self._state == GzipDecoderState.SWALLOW_DATA:
            return bytes(output)
        while True:
            try:
                output += self._obj.decompress(data)
            except zlib.error:
                prior = self._state
                # Stop decoding permanently after the first error.
                self._state = GzipDecoderState.SWALLOW_DATA
                if prior == GzipDecoderState.OTHER_MEMBERS:
                    # Garbage after a complete member is accepted, matching
                    # the behavior of other gzip clients.
                    return bytes(output)
                raise
            data = self._obj.unused_data
            if not data:
                return bytes(output)
            # Leftover input means another gzip member follows: start a
            # fresh decompressor for it.
            self._state = GzipDecoderState.OTHER_MEMBERS
            self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)

    def flush(self) -> bytes:
        return self._obj.flush()
# Only define the brotli decoder when a brotli implementation was importable.
if brotli is not None:

    class BrotliDecoder(ContentDecoder):
        """Decoder for ``Content-Encoding: br``."""
        # Supports both 'brotlipy' and 'Brotli' packages
        # since they share an import name. The top branches
        # are for 'brotlipy' and bottom branches for 'Brotli'
        def __init__(self) -> None:
            self._obj = brotli.Decompressor()
            # Bind a per-instance ``decompress`` to whichever method the
            # installed package exposes ('decompress' vs 'process'); this
            # shadows ContentDecoder.decompress on the instance.
            if hasattr(self._obj, "decompress"):
                setattr(self, "decompress", self._obj.decompress)
            else:
                setattr(self, "decompress", self._obj.process)
        def flush(self) -> bytes:
            # The 'Brotli' package has no flush(); it emits output eagerly.
            if hasattr(self._obj, "flush"):
                return self._obj.flush()  # type: ignore[no-any-return]
            return b""
class MultiDecoder(ContentDecoder):
    """Composite decoder for a comma-separated Content-Encoding list.

    Per RFC 7231 the header lists codings in the order they were applied,
    so decoding must run through them in reverse.
    """

    def __init__(self, modes: str) -> None:
        # One decoder per listed coding, kept in header (application) order.
        self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]

    def flush(self) -> bytes:
        # _decoders[0] is the one applied last by decompress(), so it is the
        # decoder that may still hold buffered output.
        return self._decoders[0].flush()

    def decompress(self, data: bytes) -> bytes:
        # Undo the codings in reverse of the order they were applied.
        for decoder in reversed(self._decoders):
            data = decoder.decompress(data)
        return data
def _get_decoder(mode: str) -> ContentDecoder:
    """Return the ContentDecoder for a Content-Encoding token (or list)."""
    # A comma means several codings were applied; delegate to MultiDecoder.
    if "," in mode:
        return MultiDecoder(mode)
    if mode == "gzip":
        return GzipDecoder()
    if brotli is not None and mode == "br":
        return BrotliDecoder()
    # Any remaining mode is treated as deflate; callers filter against
    # CONTENT_DECODERS before calling (see BaseHTTPResponse._init_decoder).
    return DeflateDecoder()
class BaseHTTPResponse(io.IOBase):
    """Shared base for HTTP response objects.

    Handles header storage, redirect detection, lazy content decoding and the
    ``io`` / ``http.client`` compatibility shims; subclasses implement the
    actual body reading (the methods below that raise NotImplementedError).
    """
    CONTENT_DECODERS = ["gzip", "deflate"]
    if brotli is not None:
        CONTENT_DECODERS += ["br"]
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]
    DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] = (IOError, zlib.error)
    if brotli is not None:
        DECODER_ERROR_CLASSES += (brotli.error,)
    def __init__(
        self,
        *,
        headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
        status: int,
        version: int,
        reason: Optional[str],
        decode_content: bool,
        request_url: Optional[str],
        retries: Optional[Retry] = None,
    ) -> None:
        # Reuse an HTTPHeaderDict as-is; otherwise coerce the given mapping.
        if isinstance(headers, HTTPHeaderDict):
            self.headers = headers
        else:
            self.headers = HTTPHeaderDict(headers)  # type: ignore[arg-type]
        self.status = status
        self.version = version
        self.reason = reason
        self.decode_content = decode_content
        self.request_url: Optional[str]
        self.retries = retries
        self.chunked = False
        tr_enc = self.headers.get("transfer-encoding", "").lower()
        # Don't incur the penalty of creating a list and then discarding it
        encodings = (enc.strip() for enc in tr_enc.split(","))
        if "chunked" in encodings:
            self.chunked = True
        # Created lazily in _init_decoder() from the Content-Encoding header.
        self._decoder: Optional[ContentDecoder] = None
    def get_redirect_location(self) -> Union[Optional[str], "Literal[False]"]:
        """
        Should we redirect and where to?
        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get("location")
        return False
    @property
    def data(self) -> bytes:
        """Full (decoded) response body; implemented by subclasses."""
        raise NotImplementedError()
    def json(self) -> Any:
        """
        Parses the body of the HTTP response as JSON.
        To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.
        This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.
        Read more :ref:`here <json>`.
        """
        data = self.data.decode("utf-8")
        return _json.loads(data)
    @property
    def url(self) -> Optional[str]:
        """Source URL of this response; implemented by subclasses."""
        raise NotImplementedError()
    @property
    def closed(self) -> bool:
        """Whether the underlying body stream is closed; implemented by subclasses."""
        raise NotImplementedError()
    @property
    def connection(self) -> Optional[HTTPConnection]:
        """Connection this response was read from, if any; implemented by subclasses."""
        raise NotImplementedError()
    def stream(
        self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
    ) -> Iterator[bytes]:
        """Iterate over body chunks; implemented by subclasses."""
        raise NotImplementedError()
    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        """Read (part of) the body; implemented by subclasses."""
        raise NotImplementedError()
    def read_chunked(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
    ) -> Iterator[bytes]:
        """Iterate over transfer-encoding chunks; implemented by subclasses."""
        raise NotImplementedError()
    def release_conn(self) -> None:
        """Return the connection to its pool; implemented by subclasses."""
        raise NotImplementedError()
    def drain_conn(self) -> None:
        """Discard remaining body data; implemented by subclasses."""
        raise NotImplementedError()
    def close(self) -> None:
        """Close the response; implemented by subclasses."""
        raise NotImplementedError()
    def _init_decoder(self) -> None:
        """
        Set-up the _decoder attribute if necessary.
        """
        # Note: content-encoding value should be case-insensitive, per RFC 7230
        # Section 3.2
        content_encoding = self.headers.get("content-encoding", "").lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
            elif "," in content_encoding:
                # Multiple codings: only build a decoder if every useful token
                # is one we support.
                encodings = [
                    e.strip()
                    for e in content_encoding.split(",")
                    if e.strip() in self.CONTENT_DECODERS
                ]
                if encodings:
                    self._decoder = _get_decoder(content_encoding)
    def _decode(
        self, data: bytes, decode_content: Optional[bool], flush_decoder: bool
    ) -> bytes:
        """
        Decode the data passed in and potentially flush the decoder.
        """
        if not decode_content:
            return data
        try:
            if self._decoder:
                data = self._decoder.decompress(data)
        except self.DECODER_ERROR_CLASSES as e:
            content_encoding = self.headers.get("content-encoding", "").lower()
            raise DecodeError(
                "Received response with content-encoding: %s, but "
                "failed to decode it." % content_encoding,
                e,
            ) from e
        if flush_decoder:
            data += self._flush_decoder()
        return data
    def _flush_decoder(self) -> bytes:
        """
        Flushes the decoder. Should only be called if the decoder is actually
        being used.
        """
        if self._decoder:
            return self._decoder.decompress(b"") + self._decoder.flush()
        return b""
    # Compatibility methods for `io` module
    def readable(self) -> bool:
        return True
    def readinto(self, b: bytearray) -> int:
        # Fill ``b`` with up to len(b) bytes; returns the number read (0 at EOF).
        temp = self.read(len(b))
        if len(temp) == 0:
            return 0
        else:
            b[: len(temp)] = temp
            return len(temp)
    # Compatibility methods for http.client.HTTPResponse
    def getheaders(self) -> List[Tuple[str, str]]:
        return list(self.headers.items())
    def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:
        return self.headers.get(name, default)
    # Compatibility method for http.cookiejar
    def info(self) -> HTTPHeaderDict:
        return self.headers
    def geturl(self) -> Optional[Union[str, "Literal[False]"]]:
        return self.url
class HTTPResponse(BaseHTTPResponse):
    """
    HTTP Response container.
    Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed. This
    class is also compatible with the Python standard library's :mod:`io`
    module, and can hence be treated as a readable object in the context of that
    framework.
    Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
    :param preload_content:
        If True, the response's body will be preloaded during construction.
    :param decode_content:
        If True, will attempt to decode the body based on the
        'content-encoding' header.
    :param original_response:
        When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    :param retries:
        The retries contains the last :class:`~urllib3.util.retry.Retry` that
        was used during the request.
    :param enforce_content_length:
        Enforce content length checking. Body returned by server must match
        value of Content-Length header, if present. Otherwise, raise error.
    """
    def __init__(
        self,
        body: _TYPE_BODY = "",
        headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
        status: int = 0,
        version: int = 0,
        reason: Optional[str] = None,
        preload_content: bool = True,
        decode_content: bool = True,
        original_response: Optional[_HttplibHTTPResponse] = None,
        pool: Optional["HTTPConnectionPool"] = None,
        connection: Optional[HTTPConnection] = None,
        msg: Optional[_HttplibHTTPMessage] = None,
        retries: Optional[Retry] = None,
        enforce_content_length: bool = False,
        request_method: Optional[str] = None,
        request_url: Optional[str] = None,
        auto_close: bool = True,
    ) -> None:
        super().__init__(
            headers=headers,
            status=status,
            version=version,
            reason=reason,
            decode_content=decode_content,
            request_url=request_url,
            retries=retries,
        )
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
        self._body = None
        self._fp: Optional[_HttplibHTTPResponse] = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self.msg = msg
        # If the request was redirected, report the final URL from the retry
        # history rather than the one originally requested.
        if self.retries is not None and self.retries.history:
            self._request_url = self.retries.history[-1].redirect_location
        else:
            self._request_url = request_url
        if body and isinstance(body, (str, bytes)):
            self._body = body
        self._pool = pool
        self._connection = connection
        # A file-like body becomes the stream we read from on demand.
        if hasattr(body, "read"):
            self._fp = body  # type: ignore[assignment]
        # Are we using the chunked-style of transfer encoding?
        self.chunk_left: Optional[int] = None
        # Determine length of response
        self.length_remaining = self._init_length(request_method)
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)
    def release_conn(self) -> None:
        """Return the held connection to its pool, if both exist."""
        if not self._pool or not self._connection:
            return None
        self._pool._put_conn(self._connection)
        self._connection = None
    def drain_conn(self) -> None:
        """
        Read and discard any remaining HTTP response data in the response connection.
        Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
        """
        try:
            self.read()
        except (HTTPError, OSError, BaseSSLError, HTTPException):
            pass
    @property
    def data(self) -> bytes:
        """Body bytes, reading (and caching) the stream on first access."""
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body  # type: ignore[return-value]
        if self._fp:
            return self.read(cache_content=True)
        return None  # type: ignore[return-value]
    @property
    def connection(self) -> Optional[HTTPConnection]:
        """The connection this response is being read from, if any."""
        return self._connection
    def isclosed(self) -> bool:
        """Whether the underlying file-like body is closed."""
        return is_fp_closed(self._fp)
    def tell(self) -> int:
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``urllib3.response.HTTPResponse.read``
        if bytes are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read
    def _init_length(self, request_method: Optional[str]) -> Optional[int]:
        """
        Set initial length value for Response content if available.
        """
        length: Optional[int]
        content_length: Optional[str] = self.headers.get("content-length")
        if content_length is not None:
            if self.chunked:
                # This Response will fail with an IncompleteRead if it can't be
                # received as chunked. This method falls back to attempt reading
                # the response before raising an exception.
                log.warning(
                    "Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked."
                )
                return None
            try:
                # RFC 7230 section 3.3.2 specifies multiple content lengths can
                # be sent in a single Content-Length header
                # (e.g. Content-Length: 42, 42). This line ensures the values
                # are all valid ints and that as long as the `set` length is 1,
                # all values are the same. Otherwise, the header is invalid.
                lengths = {int(val) for val in content_length.split(",")}
                if len(lengths) > 1:
                    raise InvalidHeader(
                        "Content-Length contained multiple "
                        "unmatching values (%s)" % content_length
                    )
                length = lengths.pop()
            except ValueError:
                length = None
            else:
                if length < 0:
                    length = None
        else:  # if content_length is None
            length = None
        # Convert status to int for comparison
        # In some cases, httplib returns a status of "_UNKNOWN"
        try:
            status = int(self.status)
        except ValueError:
            status = 0
        # Check for responses that shouldn't include a body
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0
        return length
    @contextmanager
    def _error_catcher(self) -> Generator[None, None, None]:
        """
        Catch low-level python exceptions, instead re-raising urllib3
        variants, so that low-level exceptions are not leaked in the
        high-level api.
        On exit, release the connection back to the pool.
        """
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout as e:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # there is yet no clean way to get at it from this context.
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except BaseSSLError as e:
                # FIXME: Is there a better way to differentiate between SSLErrors?
                if "read operation timed out" not in str(e):
                    # SSL errors related to framing/MAC get wrapped and reraised here
                    raise SSLError(e) from e
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e  # type: ignore[arg-type]
            except (HTTPException, OSError) as e:
                # This includes IncompleteRead.
                raise ProtocolError(f"Connection broken: {e!r}", e) from e
            # If no exception is thrown, we should avoid cleaning up
            # unnecessarily.
            clean_exit = True
        finally:
            # If we didn't terminate cleanly, we need to throw away our
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore so close it now to ensure that the connection is
                # released back to the pool.
                if self._original_response:
                    self._original_response.close()
                # Closing the response may not actually be sufficient to close
                # everything, so if we have a hold of the connection close that
                # too.
                if self._connection:
                    self._connection.close()
            # If we hold the original response but it's closed now, we should
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()
    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        """
        Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return None  # type: ignore[return-value]
        flush_decoder = False
        fp_closed = getattr(self._fp, "closed", False)
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read() if not fp_closed else b""
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt) if not fp_closed else b""
                if (
                    amt != 0 and not data
                ):  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do
                    # not properly close the connection in all cases. There is
                    # no harm in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True
                    if (
                        self.enforce_content_length
                        and self.length_remaining is not None
                        and self.length_remaining != 0
                    ):
                        # This is an edge case that httplib failed to cover due
                        # to concerns of backward compatibility. We're
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)
            data = self._decode(data, decode_content, flush_decoder)
            if cache_content:
                self._body = data
        return data
    def stream(
        self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.
        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        if self.chunked and self.supports_chunked_reads():
            yield from self.read_chunked(amt, decode_content=decode_content)
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data
    @classmethod
    def from_httplib(
        ResponseCls: Type["HTTPResponse"], r: _HttplibHTTPResponse, **response_kw: Any
    ) -> "HTTPResponse":
        """
        Given an :class:`http.client.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.
        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            headers = HTTPHeaderDict(headers.items())  # type: ignore[assignment]
        resp = ResponseCls(
            body=r,
            headers=headers,  # type: ignore[arg-type]
            status=r.status,
            version=r.version,
            reason=r.reason,
            original_response=r,
            **response_kw,
        )
        return resp
    # Overrides from io.IOBase
    def close(self) -> None:
        """Close the body stream and any held connection."""
        if not self.closed and self._fp:
            self._fp.close()
        if self._connection:
            self._connection.close()
        if not self.auto_close:
            io.IOBase.close(self)
    @property
    def closed(self) -> bool:
        """Whether this response (or its underlying stream) is closed."""
        if not self.auto_close:
            return io.IOBase.closed.__get__(self)  # type: ignore[no-any-return, attr-defined]
        elif self._fp is None:
            return True
        elif hasattr(self._fp, "isclosed"):
            return self._fp.isclosed()
        elif hasattr(self._fp, "closed"):
            return self._fp.closed
        else:
            return True
    def fileno(self) -> int:
        """File descriptor of the underlying stream, or raise OSError."""
        if self._fp is None:
            raise OSError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise OSError(
                "The file-like object this HTTPResponse is wrapped "
                "around has no file descriptor"
            )
    def flush(self) -> None:
        """Flush the underlying stream if it is open and supports flushing."""
        if (
            self._fp is not None
            and hasattr(self._fp, "flush")
            and not getattr(self._fp, "closed", False)
        ):
            return self._fp.flush()
    def supports_chunked_reads(self) -> bool:
        """
        Checks if the underlying file-like object looks like a
        :class:`http.client.HTTPResponse` object. We do this by testing for
        the fp attribute. If it is present we assume it returns raw chunks as
        processed by read_chunked().
        """
        return hasattr(self._fp, "fp")
    def _update_chunk_length(self) -> None:
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return None
        line = self._fp.fp.readline()  # type: ignore[union-attr]
        line = line.split(b";", 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise InvalidChunkLength(self, line) from None
    def _handle_chunk(self, amt: Optional[int]) -> bytes:
        """Read up to ``amt`` bytes from the current chunk (the whole chunk if ``amt`` is None)."""
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            returned_chunk = chunk
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif self.chunk_left is not None and amt < self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk  # type: ignore[no-any-return]
    def read_chunked(
        self, amt: Optional[int] = None, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """
        Similar to :meth:`HTTPResponse.read`, but with an additional
        parameter: ``decode_content``.
        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.
        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks."
            )
        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return None
            # If a response is already read and closed
            # then return immediately.
            if self._fp.fp is None:  # type: ignore[union-attr]
                return None
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded
            if decode_content:
                # On CPython and PyPy, we should never need to flush the
                # decoder. However, on Jython we *might* need to, so
                # lets defensively do it anyway.
                decoded = self._flush_decoder()
                if decoded:  # Platform-specific: Jython.
                    yield decoded
            # Chunk content ends with \r\n: discard it.
            while self._fp is not None:
                line = self._fp.fp.readline()
                if not line:
                    # Some sites may not end with '\r\n'.
                    break
                if line == b"\r\n":
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()
    @property
    def url(self) -> Optional[str]:
        """
        Returns the URL that was the source of this response.
        If the request that generated this response redirected, this method
        will return the final redirect location.
        """
        return self._request_url
    @url.setter
    def url(self, url: str) -> None:
        # Allow callers (e.g. redirect handling) to overwrite the source URL.
        self._request_url = url
    def __iter__(self) -> Iterator[bytes]:
        """Yield the decoded body split into newline-terminated lines."""
        buffer: List[bytes] = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunks = chunk.split(b"\n")
                yield b"".join(buffer) + chunks[0] + b"\n"
                for x in chunks[1:-1]:
                    yield x + b"\n"
                if chunks[-1]:
                    buffer = [chunks[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
| 34.952532 | 110 | 0.582979 | import io
import json as _json
import logging
import zlib
from contextlib import contextmanager
from http.client import HTTPMessage as _HttplibHTTPMessage
from http.client import HTTPResponse as _HttplibHTTPResponse
from socket import timeout as SocketTimeout
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterator,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
try:
try:
import brotlicffi as brotli
except ImportError:
import brotli
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .connection import _TYPE_BODY, BaseSSLError, HTTPConnection, HTTPException
from .exceptions import (
BodyNotHttplibCompatible,
DecodeError,
HTTPError,
IncompleteRead,
InvalidChunkLength,
InvalidHeader,
ProtocolError,
ReadTimeoutError,
ResponseNotChunked,
SSLError,
)
from .util.response import is_fp_closed, is_response_to_head
from .util.retry import Retry
if TYPE_CHECKING:
from typing_extensions import Literal
from .connectionpool import HTTPConnectionPool
log = logging.getLogger(__name__)
class ContentDecoder:
def decompress(self, data: bytes) -> bytes:
raise NotImplementedError()
def flush(self) -> bytes:
raise NotImplementedError()
class DeflateDecoder(ContentDecoder):
def __init__(self) -> None:
self._first_try = True
self._data = b""
self._obj = zlib.decompressobj()
def decompress(self, data: bytes) -> bytes:
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
decompressed = self._obj.decompress(data)
if decompressed:
self._first_try = False
self._data = None
return decompressed
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
def flush(self) -> bytes:
return self._obj.flush()
class GzipDecoderState:
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
class GzipDecoder(ContentDecoder):
def __init__(self) -> None:
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
self._state = GzipDecoderState.FIRST_MEMBER
def decompress(self, data: bytes) -> bytes:
ret = bytearray()
if self._state == GzipDecoderState.SWALLOW_DATA or not data:
return bytes(ret)
while True:
try:
ret += self._obj.decompress(data)
except zlib.error:
previous_state = self._state
self._state = GzipDecoderState.SWALLOW_DATA
if previous_state == GzipDecoderState.OTHER_MEMBERS:
return bytes(ret)
raise
data = self._obj.unused_data
if not data:
return bytes(ret)
self._state = GzipDecoderState.OTHER_MEMBERS
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
def flush(self) -> bytes:
return self._obj.flush()
if brotli is not None:
class BrotliDecoder(ContentDecoder):
def __init__(self) -> None:
self._obj = brotli.Decompressor()
if hasattr(self._obj, "decompress"):
setattr(self, "decompress", self._obj.decompress)
else:
setattr(self, "decompress", self._obj.process)
def flush(self) -> bytes:
if hasattr(self._obj, "flush"):
return self._obj.flush()
return b""
class MultiDecoder(ContentDecoder):
def __init__(self, modes: str) -> None:
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
def flush(self) -> bytes:
return self._decoders[0].flush()
def decompress(self, data: bytes) -> bytes:
for d in reversed(self._decoders):
data = d.decompress(data)
return data
def _get_decoder(mode: str) -> ContentDecoder:
if "," in mode:
return MultiDecoder(mode)
if mode == "gzip":
return GzipDecoder()
if brotli is not None and mode == "br":
return BrotliDecoder()
return DeflateDecoder()
class BaseHTTPResponse(io.IOBase):
CONTENT_DECODERS = ["gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
DECODER_ERROR_CLASSES: Tuple[Type[Exception], ...] = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
def __init__(
self,
*,
headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
status: int,
version: int,
reason: Optional[str],
decode_content: bool,
request_url: Optional[str],
retries: Optional[Retry] = None,
) -> None:
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.decode_content = decode_content
self.request_url: Optional[str]
self.retries = retries
self.chunked = False
tr_enc = self.headers.get("transfer-encoding", "").lower()
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
self._decoder: Optional[ContentDecoder] = None
def get_redirect_location(self) -> Union[Optional[str], "Literal[False]"]:
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
@property
def data(self) -> bytes:
raise NotImplementedError()
def json(self) -> Any:
data = self.data.decode("utf-8")
return _json.loads(data)
@property
def url(self) -> Optional[str]:
raise NotImplementedError()
@property
def closed(self) -> bool:
raise NotImplementedError()
@property
def connection(self) -> Optional[HTTPConnection]:
raise NotImplementedError()
def stream(
self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
) -> Iterator[bytes]:
raise NotImplementedError()
def read(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
cache_content: bool = False,
) -> bytes:
raise NotImplementedError()
def read_chunked(
self,
amt: Optional[int] = None,
decode_content: Optional[bool] = None,
) -> Iterator[bytes]:
raise NotImplementedError()
def release_conn(self) -> None:
raise NotImplementedError()
def drain_conn(self) -> None:
raise NotImplementedError()
def close(self) -> None:
raise NotImplementedError()
def _init_decoder(self) -> None:
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if encodings:
self._decoder = _get_decoder(content_encoding)
def _decode(
self, data: bytes, decode_content: Optional[bool], flush_decoder: bool
) -> bytes:
if not decode_content:
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
) from e
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self) -> bytes:
if self._decoder:
return self._decoder.decompress(b"") + self._decoder.flush()
return b""
# Compatibility methods for `io` module
def readable(self) -> bool:
return True
def readinto(self, b: bytearray) -> int:
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
# Compatibility methods for http.client.HTTPResponse
def getheaders(self) -> List[Tuple[str, str]]:
return list(self.headers.items())
def getheader(self, name: str, default: Optional[str] = None) -> Optional[str]:
return self.headers.get(name, default)
# Compatibility method for http.cookiejar
def info(self) -> HTTPHeaderDict:
return self.headers
def geturl(self) -> Optional[Union[str, "Literal[False]"]]:
return self.url
class HTTPResponse(BaseHTTPResponse):
    """Concrete HTTP response backed by an ``http.client`` response object.

    Adds content decoding, optional Content-Length enforcement, chunked
    transfer-encoding support, and connection-pool bookkeeping on top of
    ``BaseHTTPResponse``.
    """

    def __init__(
        self,
        body: _TYPE_BODY = "",
        headers: Optional[Union[Mapping[str, str], Mapping[bytes, bytes]]] = None,
        status: int = 0,
        version: int = 0,
        reason: Optional[str] = None,
        preload_content: bool = True,
        decode_content: bool = True,
        original_response: Optional[_HttplibHTTPResponse] = None,
        pool: Optional["HTTPConnectionPool"] = None,
        connection: Optional[HTTPConnection] = None,
        msg: Optional[_HttplibHTTPMessage] = None,
        retries: Optional[Retry] = None,
        enforce_content_length: bool = False,
        request_method: Optional[str] = None,
        request_url: Optional[str] = None,
        auto_close: bool = True,
    ) -> None:
        super().__init__(
            headers=headers,
            status=status,
            version=version,
            reason=reason,
            decode_content=decode_content,
            request_url=request_url,
            retries=retries,
        )
        self.enforce_content_length = enforce_content_length
        self.auto_close = auto_close
        self._body = None
        self._fp: Optional[_HttplibHTTPResponse] = None
        self._original_response = original_response
        self._fp_bytes_read = 0
        self.msg = msg
        # Prefer the final redirect location as the request URL, if any.
        if self.retries is not None and self.retries.history:
            self._request_url = self.retries.history[-1].redirect_location
        else:
            self._request_url = request_url
        # A str/bytes body is stored directly; NOTE(review): a str body is
        # kept as str here even though ``data`` is typed bytes — confirm
        # callers only pass bytes when they rely on ``.data``.
        if body and isinstance(body, (str, bytes)):
            self._body = body
        self._pool = pool
        self._connection = connection
        # A file-like body becomes the raw stream we read from.
        if hasattr(body, "read"):
            self._fp = body  # type: ignore[assignment]
        # Are we using the chunked-style of transfer encoding?
        self.chunk_left: Optional[int] = None
        # Determine length of response
        self.length_remaining = self._init_length(request_method)
        # If requested, preload the body.
        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def release_conn(self) -> None:
        """Return the underlying connection to its pool, if we hold both."""
        if not self._pool or not self._connection:
            return None
        self._pool._put_conn(self._connection)
        self._connection = None

    def drain_conn(self) -> None:
        """Consume and discard any remaining body; errors are best-effort."""
        try:
            self.read()
        except (HTTPError, OSError, BaseSSLError, HTTPException):
            pass

    @property
    def data(self) -> bytes:
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body  # type: ignore[return-value]
        if self._fp:
            return self.read(cache_content=True)
        return None  # type: ignore[return-value]

    @property
    def connection(self) -> Optional[HTTPConnection]:
        # The connection this response is reading from, if not yet released.
        return self._connection

    def isclosed(self) -> bool:
        # True once the underlying file-like body is closed.
        return is_fp_closed(self._fp)

    def tell(self) -> int:
        # Number of raw (pre-decode) bytes read from the stream so far.
        return self._fp_bytes_read

    def _init_length(self, request_method: Optional[str]) -> Optional[int]:
        """Work out the expected body length from headers and status.

        Returns the remaining byte count, or None when unknown (e.g.
        chunked transfer or a malformed Content-Length).
        """
        length: Optional[int]
        content_length: Optional[str] = self.headers.get("content-length")
        if content_length is not None:
            if self.chunked:
                # This Response will fail with an IncompleteRead if it can't be
                # fully read as chunked; RFC 7230 says chunked wins here.
                log.warning(
                    "Received response with both Content-Length and "
                    "Transfer-Encoding set. This is expressly forbidden "
                    "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
                    "attempting to process response as Transfer-Encoding: "
                    "chunked."
                )
                return None
            try:
                # A proxy may fold duplicate headers into "5, 5"; identical
                # values are fine, conflicting values are an error.
                lengths = {int(val) for val in content_length.split(",")}
                if len(lengths) > 1:
                    raise InvalidHeader(
                        "Content-Length contained multiple "
                        "unmatching values (%s)" % content_length
                    )
                length = lengths.pop()
            except ValueError:
                length = None
            else:
                if length < 0:
                    length = None
        else:
            length = None
        # Statuses that never carry a body always have length 0.
        try:
            status = int(self.status)
        except ValueError:
            status = 0
        if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
            length = 0
        return length

    @contextmanager
    def _error_catcher(self) -> Generator[None, None, None]:
        """Translate low-level read errors into urllib3 exceptions.

        On any failure the raw response and connection are closed; on a
        clean, fully-read exit the connection is returned to the pool.
        """
        clean_exit = False
        try:
            try:
                yield
            except SocketTimeout as e:
                # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
                # it isn't easy to reach from here.
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e
            except BaseSSLError as e:
                # SSL read timeouts surface as BaseSSLError with this text.
                if "read operation timed out" not in str(e):
                    raise SSLError(e) from e
                raise ReadTimeoutError(self._pool, None, "Read timed out.") from e
            except (HTTPException, OSError) as e:
                raise ProtocolError(f"Connection broken: {e!r}", e) from e
            clean_exit = True
        finally:
            # Clean-up in all cases: decide what happens to the raw
            # connection.
            if not clean_exit:
                # The response may not be closed but we're not going to use it
                # anymore, so close it now to ensure the connection is released.
                if self._original_response:
                    self._original_response.close()
                if self._connection:
                    self._connection.close()
            # If the response is fully consumed on a clean exit,
            # return the connection back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def read(
        self,
        amt: Optional[int] = None,
        decode_content: Optional[bool] = None,
        cache_content: bool = False,
    ) -> bytes:
        """Read up to *amt* bytes (all remaining when None) and decode them.

        Raises ``IncompleteRead`` if enforce_content_length is set and the
        stream ends before Content-Length bytes were received.
        """
        self._init_decoder()
        if decode_content is None:
            decode_content = self.decode_content
        if self._fp is None:
            return None  # type: ignore[return-value]
        flush_decoder = False
        fp_closed = getattr(self._fp, "closed", False)
        with self._error_catcher():
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read() if not fp_closed else b""
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt) if not fp_closed else b""
                if (
                    amt != 0 and not data
                ):  # an empty read with amt != 0 means end-of-stream
                    self._fp.close()
                    flush_decoder = True
                    if (
                        self.enforce_content_length
                        and self.length_remaining is not None
                        and self.length_remaining != 0
                    ):
                        # This is an edge case that httplib failed to cover;
                        # addressing it here to make sure IncompleteRead is
                        # raised during streaming, so all calls with incorrect
                        # Content-Length are caught.
                        raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
        if data:
            self._fp_bytes_read += len(data)
            if self.length_remaining is not None:
                self.length_remaining -= len(data)
        data = self._decode(data, decode_content, flush_decoder)
        if cache_content:
            self._body = data
        return data

    def stream(
        self, amt: Optional[int] = 2 ** 16, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """Yield the body in chunks of up to *amt* bytes (default 64 KiB)."""
        if self.chunked and self.supports_chunked_reads():
            yield from self.read_chunked(amt, decode_content=decode_content)
        else:
            while not is_fp_closed(self._fp):
                data = self.read(amt=amt, decode_content=decode_content)
                if data:
                    yield data

    @classmethod
    def from_httplib(
        ResponseCls: Type["HTTPResponse"], r: _HttplibHTTPResponse, **response_kw: Any
    ) -> "HTTPResponse":
        """Build an HTTPResponse wrapping an ``http.client`` response *r*."""
        headers = r.msg
        if not isinstance(headers, HTTPHeaderDict):
            headers = HTTPHeaderDict(headers.items())  # type: ignore[assignment]
        resp = ResponseCls(
            body=r,
            headers=headers,  # type: ignore[arg-type]
            status=r.status,
            version=r.version,
            reason=r.reason,
            original_response=r,
            **response_kw,
        )
        return resp

    # Overrides from io.IOBase
    def close(self) -> None:
        """Close the body stream and, if held, the connection."""
        if not self.closed and self._fp:
            self._fp.close()
        if self._connection:
            self._connection.close()
        if not self.auto_close:
            # With auto_close off, mark the io.IOBase state closed explicitly.
            io.IOBase.close(self)

    @property
    def closed(self) -> bool:
        # Probe whichever "closed" signal the wrapped object exposes.
        if not self.auto_close:
            return io.IOBase.closed.__get__(self)  # type: ignore[no-any-return, attr-defined]
        elif self._fp is None:
            return True
        elif hasattr(self._fp, "isclosed"):
            return self._fp.isclosed()
        elif hasattr(self._fp, "closed"):
            return self._fp.closed
        else:
            return True

    def fileno(self) -> int:
        """Return the wrapped stream's file descriptor, or raise OSError."""
        if self._fp is None:
            raise OSError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise OSError(
                "The file-like object this HTTPResponse is wrapped "
                "around has no file descriptor"
            )

    def flush(self) -> None:
        # io.IOBase compatibility: delegate to the wrapped stream if possible.
        if (
            self._fp is not None
            and hasattr(self._fp, "flush")
            and not getattr(self._fp, "closed", False)
        ):
            return self._fp.flush()

    def supports_chunked_reads(self) -> bool:
        # We need direct access to the raw socket file object (``fp``) of
        # the http.client response to parse chunk framing ourselves.
        return hasattr(self._fp, "fp")

    def _update_chunk_length(self) -> None:
        # First, we'll figure out length of a chunk and then
        # we'll try to read it from socket.
        if self.chunk_left is not None:
            return None
        line = self._fp.fp.readline()  # type: ignore[union-attr]
        # Drop any chunk extensions after ";" — only the hex size matters.
        line = line.split(b";", 1)[0]
        try:
            self.chunk_left = int(line, 16)
        except ValueError:
            # Invalid chunked protocol response, abort.
            self.close()
            raise InvalidChunkLength(self, line) from None

    def _handle_chunk(self, amt: Optional[int]) -> bytes:
        """Read up to *amt* bytes from the current chunk (all of it if None).

        Keeps ``self.chunk_left`` in sync and consumes the trailing CRLF
        whenever a chunk is fully read.
        """
        returned_chunk = None
        if amt is None:
            chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            returned_chunk = chunk
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        elif self.chunk_left is not None and amt < self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self.chunk_left = self.chunk_left - amt
            returned_chunk = value
        elif amt == self.chunk_left:
            value = self._fp._safe_read(amt)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
            returned_chunk = value
        else:  # amt > self.chunk_left
            returned_chunk = self._fp._safe_read(self.chunk_left)  # type: ignore[union-attr]
            self._fp._safe_read(2)  # type: ignore[union-attr] # Toss the CRLF at the end of the chunk.
            self.chunk_left = None
        return returned_chunk  # type: ignore[no-any-return]

    def read_chunked(
        self, amt: Optional[int] = None, decode_content: Optional[bool] = None
    ) -> Generator[bytes, None, None]:
        """Yield decoded chunks of a Transfer-Encoding: chunked body."""
        self._init_decoder()
        # FIXME: Rewrite this method and make it a class with a better structured logic.
        if not self.chunked:
            raise ResponseNotChunked(
                "Response is not chunked. "
                "Header 'transfer-encoding: chunked' is missing."
            )
        if not self.supports_chunked_reads():
            raise BodyNotHttplibCompatible(
                "Body should be http.client.HTTPResponse like. "
                "It should have have an fp attribute which returns raw chunks."
            )
        with self._error_catcher():
            # Don't bother reading the body of a HEAD request.
            if self._original_response and is_response_to_head(self._original_response):
                self._original_response.close()
                return None
            # If the socket is already gone there is nothing to stream.
            if self._fp.fp is None:
                return None
            while True:
                self._update_chunk_length()
                if self.chunk_left == 0:
                    # Terminating zero-length chunk: body is complete.
                    break
                chunk = self._handle_chunk(amt)
                decoded = self._decode(
                    chunk, decode_content=decode_content, flush_decoder=False
                )
                if decoded:
                    yield decoded
            if decode_content:
                # Flush whatever the decoder still holds after the last chunk.
                decoded = self._flush_decoder()
                if decoded:
                    yield decoded
            # Consume (and discard) trailer headers up to the final CRLF.
            while self._fp is not None:
                line = self._fp.fp.readline()
                if not line:
                    break
                if line == b"\r\n":
                    break
            # We read everything; close the "file".
            if self._original_response:
                self._original_response.close()

    @property
    def url(self) -> Optional[str]:
        # URL of the request, updated to the final location on redirects.
        return self._request_url

    @url.setter
    def url(self, url: str) -> None:
        self._request_url = url

    def __iter__(self) -> Iterator[bytes]:
        """Iterate over the decoded body line by line (lines keep their \\n)."""
        buffer: List[bytes] = []
        for chunk in self.stream(decode_content=True):
            if b"\n" in chunk:
                chunks = chunk.split(b"\n")
                # First piece completes whatever was buffered so far.
                yield b"".join(buffer) + chunks[0] + b"\n"
                for x in chunks[1:-1]:
                    yield x + b"\n"
                if chunks[-1]:
                    # Partial trailing line: hold it for the next chunk.
                    buffer = [chunks[-1]]
                else:
                    buffer = []
            else:
                buffer.append(chunk)
        if buffer:
            yield b"".join(buffer)
| true | true |
f7219c3cecb551332ea0053120d9d5497f55a298 | 4,400 | py | Python | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | null | null | null | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | 5 | 2020-02-29T01:15:24.000Z | 2020-02-29T21:55:03.000Z | pongcontroller.py | afghanimah/Pong | ad799bae29ed5f5cff2f2f70a7e42a5f02df7336 | [
"MIT"
] | null | null | null | from pyglet.window import key
import random
from pygletplus.controller import Controller
class PongController(Controller):
def __init__(self, scene):
super().__init__(scene)
self.keys = scene.keys
self.player = scene.player
self.cpu = scene.cpu
self.ball = scene.ball
self.close = scene.close
def update(self, dt):
if self.scene.paused:
return
self.player.update(dt)
self.cpu.follow(self.ball.sprite.x, self.ball.sprite.y)
self.cpu.update(dt)
self.ball.update(dt)
self.window_bound()
self.bounce_ball()
def on_key_press(self, symbol, _):
if symbol == key.ESCAPE:
self.close()
if symbol == key.SPACE:
self.scene.paused = not self.scene.paused
# player movement (decouple from player class):
if symbol == key.UP:
self.player.vy += self.player.speed
elif symbol == key.DOWN:
self.player.vy -= self.player.speed
def on_key_release(self, symbol, _):
if symbol == key.UP:
self.player.vy -= self.player.speed
elif symbol == key.DOWN:
self.player.vy += self.player.speed
@staticmethod
def bound_x(e, mini, maxi):
mini += e.sprite.width / 2
maxi -= e.sprite.width / 2
if e.sprite.x < mini:
e.sprite.x = mini
elif e.sprite.x > maxi:
e.sprite.x = maxi
@staticmethod
def bound_y(e, mini, maxi):
mini += e.sprite.height / 2
maxi -= e.sprite.height / 2
if e.sprite.y < mini:
e.sprite.y = mini
elif e.sprite.y > maxi:
e.sprite.y = maxi
def window_bound(self):
self.bound_x(self.player, 0, self.scene.width)
self.bound_y(self.player, 0, self.scene.height)
self.bound_x(self.cpu, 0, self.scene.width)
self.bound_y(self.cpu, 0, self.scene.height)
def bounce_ball(self):
x_min = self.scene.ball_img.anchor_x
x_max = self.scene.width - self.scene.ball_img.anchor_x
y_min = self.scene.ball_img.anchor_y
y_max = self.scene.height - self.scene.ball_img.anchor_y
# bounce off top and bottom walls of window
if self.ball.sprite.y < y_min:
self.ball.sprite.y = y_min
self.ball.vy *= -1
self.scene.bounce_sound.play()
elif self.ball.sprite.y > y_max:
self.ball.sprite.y = y_max
self.ball.vy *= -1
self.scene.bounce_sound.play()
# score a point if touch left or right walls of window
if self.ball.sprite.x < x_min:
self.ball.sprite.x = self.scene.width / 2 - 200
self.ball.sprite.y = self.scene.height / 2
self.ball.vx = random.randint(300, 350)
self.ball.vy = random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
self.scene.cpu_score += 1
self.scene.cpu_label.text = str(self.scene.cpu_score)
self.scene.point_sound.play()
elif self.ball.sprite.x > x_max:
self.ball.sprite.x = self.scene.width / 2 + 200
self.ball.sprite.y = self.scene.height / 2
self.ball.vx = -random.randint(300, 350)
self.ball.vy = -random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
self.scene.player_score += 1
self.scene.player_label.text = str(self.scene.player_score)
self.scene.point_sound.play()
if (self.player.sprite.x < self.ball.sprite.x < self.player.sprite.x + self.scene.paddle_img.anchor_x and
self.player.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
self.player.sprite.y + self.scene.paddle_img.anchor_y):
self.ball.sprite.x = self.player.sprite.x + self.scene.paddle_img.anchor_x
self.ball.vx *= -1
self.scene.bounce_sound.play()
elif (self.cpu.sprite.x > self.ball.sprite.x > self.cpu.sprite.x - self.scene.paddle_img.anchor_x and
self.cpu.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
self.cpu.sprite.y + self.scene.paddle_img.anchor_y):
self.ball.sprite.x = self.cpu.sprite.x - self.scene.ball_img.anchor_x
self.ball.vx *= -1
self.scene.bounce_sound.play()
| 39.285714 | 113 | 0.588636 | from pyglet.window import key
import random
from pygletplus.controller import Controller
class PongController(Controller):
def __init__(self, scene):
super().__init__(scene)
self.keys = scene.keys
self.player = scene.player
self.cpu = scene.cpu
self.ball = scene.ball
self.close = scene.close
def update(self, dt):
if self.scene.paused:
return
self.player.update(dt)
self.cpu.follow(self.ball.sprite.x, self.ball.sprite.y)
self.cpu.update(dt)
self.ball.update(dt)
self.window_bound()
self.bounce_ball()
def on_key_press(self, symbol, _):
if symbol == key.ESCAPE:
self.close()
if symbol == key.SPACE:
self.scene.paused = not self.scene.paused
if symbol == key.UP:
self.player.vy += self.player.speed
elif symbol == key.DOWN:
self.player.vy -= self.player.speed
def on_key_release(self, symbol, _):
if symbol == key.UP:
self.player.vy -= self.player.speed
elif symbol == key.DOWN:
self.player.vy += self.player.speed
@staticmethod
def bound_x(e, mini, maxi):
mini += e.sprite.width / 2
maxi -= e.sprite.width / 2
if e.sprite.x < mini:
e.sprite.x = mini
elif e.sprite.x > maxi:
e.sprite.x = maxi
@staticmethod
def bound_y(e, mini, maxi):
mini += e.sprite.height / 2
maxi -= e.sprite.height / 2
if e.sprite.y < mini:
e.sprite.y = mini
elif e.sprite.y > maxi:
e.sprite.y = maxi
def window_bound(self):
self.bound_x(self.player, 0, self.scene.width)
self.bound_y(self.player, 0, self.scene.height)
self.bound_x(self.cpu, 0, self.scene.width)
self.bound_y(self.cpu, 0, self.scene.height)
def bounce_ball(self):
x_min = self.scene.ball_img.anchor_x
x_max = self.scene.width - self.scene.ball_img.anchor_x
y_min = self.scene.ball_img.anchor_y
y_max = self.scene.height - self.scene.ball_img.anchor_y
if self.ball.sprite.y < y_min:
self.ball.sprite.y = y_min
self.ball.vy *= -1
self.scene.bounce_sound.play()
elif self.ball.sprite.y > y_max:
self.ball.sprite.y = y_max
self.ball.vy *= -1
self.scene.bounce_sound.play()
if self.ball.sprite.x < x_min:
self.ball.sprite.x = self.scene.width / 2 - 200
self.ball.sprite.y = self.scene.height / 2
self.ball.vx = random.randint(300, 350)
self.ball.vy = random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
self.scene.cpu_score += 1
self.scene.cpu_label.text = str(self.scene.cpu_score)
self.scene.point_sound.play()
elif self.ball.sprite.x > x_max:
self.ball.sprite.x = self.scene.width / 2 + 200
self.ball.sprite.y = self.scene.height / 2
self.ball.vx = -random.randint(300, 350)
self.ball.vy = -random.randint(300, 350) * (-1 if random.randint(0, 1) == 0 else 1)
self.scene.player_score += 1
self.scene.player_label.text = str(self.scene.player_score)
self.scene.point_sound.play()
if (self.player.sprite.x < self.ball.sprite.x < self.player.sprite.x + self.scene.paddle_img.anchor_x and
self.player.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
self.player.sprite.y + self.scene.paddle_img.anchor_y):
self.ball.sprite.x = self.player.sprite.x + self.scene.paddle_img.anchor_x
self.ball.vx *= -1
self.scene.bounce_sound.play()
elif (self.cpu.sprite.x > self.ball.sprite.x > self.cpu.sprite.x - self.scene.paddle_img.anchor_x and
self.cpu.sprite.y - self.scene.paddle_img.anchor_y < self.ball.sprite.y <
self.cpu.sprite.y + self.scene.paddle_img.anchor_y):
self.ball.sprite.x = self.cpu.sprite.x - self.scene.ball_img.anchor_x
self.ball.vx *= -1
self.scene.bounce_sound.play()
| true | true |
f7219cec0e09ba36054e4f7cf2c47cdd0bc5592a | 397 | py | Python | greaterwms/wsgi.py | chinxianjun2016/GreaterWMS | aacd0e15e0114f103eb57002e93670c008cce63b | [
"Apache-2.0"
] | 1 | 2021-02-17T14:04:29.000Z | 2021-02-17T14:04:29.000Z | greaterwms/wsgi.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | greaterwms/wsgi.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | """
WSGI config for greaterwms project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
# Module-level WSGI callable picked up by WSGI servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'greaterwms.settings')
application = get_wsgi_application()
| true | true |
f7219e0b94e3c48818431af7be65a1ddd8fdbbac | 2,454 | py | Python | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 6 | 2016-11-14T13:31:46.000Z | 2022-02-17T20:39:42.000Z | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 23 | 2016-10-21T11:18:34.000Z | 2021-12-08T17:33:01.000Z | tests/settings.py | hugorodgerbrown/django-onfido | 9e534f4725b61d982ffb2cd6a018ed1fffc353b6 | [
"MIT"
] | 7 | 2016-11-14T18:19:09.000Z | 2021-10-01T11:34:48.000Z | from os import getenv, path
from django.core.exceptions import ImproperlyConfigured
# Test-only Django settings: DEBUG is mandatory (enforced at the bottom).
DEBUG = True
TEMPLATE_DEBUG = True
USE_TZ = True
USE_L10N = True
# Throwaway on-disk SQLite database for the test run.
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "onfido.db"}}
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "onfido",
    "tests.test_app",
)
MIDDLEWARE = [
    # default django middleware
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
]
PROJECT_DIR = path.abspath(path.join(path.dirname(__file__)))
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [path.join(PROJECT_DIR, "templates")],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.contrib.messages.context_processors.messages",
                "django.contrib.auth.context_processors.auth",
                "django.template.context_processors.request",
            ]
        },
    }
]
# Custom user model supplied by the test app.
AUTH_USER_MODEL = "test_app.User"
STATIC_URL = "/static/"
# Hard-coded secret is acceptable here because these settings are test-only.
SECRET_KEY = "onfido"  # noqa: S105
ALLOWED_HOSTS = [
    "127.0.0.1",
    ".ngrok.io",
]
# Console-only logging; everything at DEBUG except as overridden below.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"simple": {"format": "%(levelname)s %(message)s"}},
    "handlers": {
        "console": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "simple",
        }
    },
    "loggers": {
        "": {"handlers": ["console"], "propagate": True, "level": "DEBUG"},
        # 'django': {
        #     'handlers': ['console'],
        #     'propagate': True,
        #     'level': 'WARNING',
        # },
        "onfido": {
            "handlers": ["console"],
            "level": "DEBUG",
            "propagate": False,
        },
    },
}
ROOT_URLCONF = "tests.urls"
if not DEBUG:
    raise ImproperlyConfigured("This settings file can only be used with DEBUG=True")
# False by default, but if True this will run the integration tests in test_integration
TEST_INTEGRATION = bool(getenv("ONFIDO_TEST_INTEGRATION", False))
from django.core.exceptions import ImproperlyConfigured
DEBUG = True
TEMPLATE_DEBUG = True
USE_TZ = True
USE_L10N = True
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "onfido.db"}}
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"onfido",
"tests.test_app",
)
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
PROJECT_DIR = path.abspath(path.join(path.dirname(__file__)))
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [path.join(PROJECT_DIR, "templates")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.messages.context_processors.messages",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.request",
]
},
}
]
AUTH_USER_MODEL = "test_app.User"
STATIC_URL = "/static/"
SECRET_KEY = "onfido"
ALLOWED_HOSTS = [
"127.0.0.1",
".ngrok.io",
]
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"simple": {"format": "%(levelname)s %(message)s"}},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "simple",
}
},
"loggers": {
"": {"handlers": ["console"], "propagate": True, "level": "DEBUG"},
"onfido": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
ROOT_URLCONF = "tests.urls"
if not DEBUG:
raise ImproperlyConfigured("This settings file can only be used with DEBUG=True")
TEST_INTEGRATION = bool(getenv("ONFIDO_TEST_INTEGRATION", False))
| true | true |
f7219f3f9ed21cb04bfe7f510681ceaf677c32c5 | 4,351 | py | Python | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | null | null | null | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | null | null | null | src/dataload/contrib/docm/__init__.py | IsmailM/myvariant.info | 5af6ad68fc2c1eb539ab9e683a34bafd51ed5cb1 | [
"Apache-2.0"
] | 1 | 2018-11-17T09:16:59.000Z | 2018-11-17T09:16:59.000Z | __METADATA__ = {
"src_name": 'DOCM',
"src_url": 'http://docm.genome.wustl.edu/',
"version": None,
"field": "docm"
}
def load_data():
    '''DOCM data are pre-loaded in our db; there is no live loader here.'''
    raise NotImplementedError
def get_mapping():
    """Return the Elasticsearch field mapping for the "docm" document field.

    Behaviour-identical rewrite of the original literal: recurring option
    bundles are produced by small local factories so every property still
    receives its own, independent mapping dict.
    """
    def lowercase():
        # String field analyzed with the custom lowercase analyzer.
        return {"type": "string", "analyzer": "string_lowercase"}

    def unindexed():
        # String field that is stored but not searchable.
        return {"type": "string", "index": "no"}

    properties = {
        "domain": {"type": "string"},
        "all_domains": {"type": "string"},
        "ref": lowercase(),
        "alt": lowercase(),
        "primary": {"type": "byte"},  # just 0 or 1
        "transcript_species": unindexed(),
        "ensembl_gene_id": lowercase(),
        "transcript_version": unindexed(),
        "transcript_source": unindexed(),
        "source": lowercase(),
        "pubmed_id": {"type": "string", "index": "not_analyzed"},
        "type": lowercase(),
        "doid": lowercase(),
        "c_position": lowercase(),
        "hg19": {
            "properties": {
                "start": {"type": "long"},
                "end": {"type": "long"},
            }
        },
        "strand": {"type": "byte", "index": "no"},
        "deletion_substructures": unindexed(),
        "genename_source": unindexed(),
        "default_gene_name": lowercase(),
        "aa_change": lowercase(),
        "url": unindexed(),
        "transcript_status": lowercase(),
        "trv_type": lowercase(),
        "disease": lowercase(),
        "transcript_name": lowercase(),
        "chrom": lowercase(),  # actual value is an integer, mapped as string
        "transcript_error": unindexed(),
        "genename": {
            "type": "string",
            "analyzer": "string_lowercase",
            "include_in_all": True,
        },
        "ucsc_cons": {"type": "double"},
    }
    return {"docm": {"properties": properties}}
| 30.858156 | 79 | 0.290738 | __METADATA__ = {
"src_name": 'DOCM',
"src_url": 'http://docm.genome.wustl.edu/',
"version": None,
"field": "docm"
}
def load_data():
raise NotImplementedError
def get_mapping():
mapping = {
"docm": {
"properties": {
"domain": {
"type": "string"
},
"all_domains": {
"type": "string"
},
"ref": {
"type": "string",
"analyzer": "string_lowercase"
},
"alt": {
"type": "string",
"analyzer": "string_lowercase"
},
"primary": {
"type": "byte"
},
"transcript_species": {
"type": "string",
"index": "no"
},
"ensembl_gene_id": {
"type": "string",
"analyzer": "string_lowercase"
},
"transcript_version": {
"type": "string",
"index": "no"
},
"transcript_source": {
"type": "string",
"index": "no"
},
"source": {
"type": "string",
"analyzer": "string_lowercase"
},
"pubmed_id": {
"type": "string",
"index": "not_analyzed"
},
"type": {
"type": "string",
"analyzer": "string_lowercase"
},
"doid": {
"type": "string",
"analyzer": "string_lowercase"
},
"c_position": {
"type": "string",
"analyzer": "string_lowercase"
},
"hg19": {
"properties": {
"start": {
"type": "long"
},
"end": {
"type": "long"
}
}
},
"strand": {
"type": "byte",
"index": "no"
},
"deletion_substructures": {
"type": "string",
"index": "no"
},
"genename_source": {
"type": "string",
"index": "no"
},
"default_gene_name": {
"type": "string",
"analyzer": "string_lowercase"
},
"aa_change": {
"type": "string",
"analyzer": "string_lowercase"
},
"url": {
"type": "string",
"index": "no"
},
"transcript_status": {
"type": "string",
"analyzer": "string_lowercase"
},
"trv_type": {
"type": "string",
"analyzer": "string_lowercase"
},
"disease": {
"type": "string",
"analyzer": "string_lowercase"
},
"transcript_name": {
"type": "string",
"analyzer": "string_lowercase"
},
"chrom": {
"type": "string",
"analyzer": "string_lowercase"
},
"transcript_error": {
"type": "string",
"index": "no"
},
"genename": {
"type": "string",
"analyzer": "string_lowercase",
"include_in_all": True
},
"ucsc_cons": {
"type": "double"
}
}
}
}
return mapping
| true | true |
f721a0175b21509fd3c11cdf9bddad74e4242372 | 12,176 | py | Python | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | yolov3_tf2/models.py | AVsolutionsai/YOLOv3_custom | d974e8305310cef31621b20128ba29c3b09ce2af | [
"MIT",
"OLDAP-2.2.1",
"Unlicense"
] | null | null | null | from absl import flags
from absl.flags import FLAGS
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .batch_norm import BatchNormalization
from .utils import broadcast_iou
# Post-processing defaults used by yolo_nms below.
# NOTE(review): the absl flags defined just after carry different defaults
# (0.5) and are not read by this module's NMS code, which uses these
# module-level constants instead — confirm which configuration is intended.
yolo_max_boxes = 100
yolo_iou_threshold = 0.1
yolo_score_threshold = 0.1
# customize your model through the following parameters
flags.DEFINE_integer('yolo_max_boxes', 100, 'maximum number of detections at one time')
flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')
flags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')
# Anchor boxes (width, height) in pixels, normalized by the 416px input size.
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                         (59, 119), (116, 90), (156, 198), (373, 326)],
                        np.float32) / 416
# Which anchors each of the three output scales uses (coarse -> fine).
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
yolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),
                              (81, 82), (135, 169), (344, 319)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    """Conv2D + optional BatchNorm + LeakyReLU, Darknet-style.

    With batch_norm=False the conv keeps its bias and no activation is
    applied (used for the raw prediction layer).
    """
    if strides == 1:
        padding = 'same'
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size,
               strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
def DarknetResidual(x, filters):
    """Residual unit: 1x1 bottleneck then 3x3 conv, added to the input."""
    shortcut = x
    out = DarknetConv(x, filters // 2, 1)
    out = DarknetConv(out, filters, 3)
    return Add()([shortcut, out])
def DarknetBlock(x, filters, blocks):
    """Downsample once (stride-2 conv), then stack *blocks* residual units."""
    out = DarknetConv(x, filters, 3, strides=2)
    for _ in range(blocks):
        out = DarknetResidual(out, filters)
    return out
def Darknet(name=None):
    """Darknet-53 backbone; outputs the two skip feature maps plus the top."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 32, 3)
    x = DarknetBlock(x, 64, 1)
    x = DarknetBlock(x, 128, 2)  # skip connection
    x = x_36 = DarknetBlock(x, 256, 8)  # skip connection
    x = x_61 = DarknetBlock(x, 512, 8)
    x = DarknetBlock(x, 1024, 4)
    # x_36 and x_61 feed the finer-scale detection heads later.
    return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
    """Tiny-YOLO backbone; outputs one skip feature map plus the top."""
    x = inputs = Input([None, None, 3])
    x = DarknetConv(x, 16, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 32, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 64, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 128, 3)
    x = MaxPool2D(2, 2, 'same')(x)
    x = x_8 = DarknetConv(x, 256, 3)  # skip connection
    x = MaxPool2D(2, 2, 'same')(x)
    x = DarknetConv(x, 512, 3)
    # Stride-1 pool keeps spatial size while adding receptive field.
    x = MaxPool2D(2, 1, 'same')(x)
    x = DarknetConv(x, 1024, 3)
    return tf.keras.Model(inputs, (x_8, x), name=name)
def YoloConv(filters, name=None):
    """Detection-neck block: optional upsample+concat, then a 5-conv stack.

    Called with a tuple (features, skip_features) for the finer scales, or
    a single tensor for the coarsest scale.
    """
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # concat with skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
        # Alternating 1x1 / 3x3 convs refine the merged features.
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloConvTiny(filters, name=None):
    """Tiny variant of YoloConv: upsample+concat (tuple input) or one 1x1 conv."""
    def yolo_conv(x_in):
        if isinstance(x_in, tuple):
            inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
            x, x_skip = inputs
            # concat with skip connection
            x = DarknetConv(x, filters, 1)
            x = UpSampling2D(2)(x)
            x = Concatenate()([x, x_skip])
        else:
            x = inputs = Input(x_in.shape[1:])
            x = DarknetConv(x, filters, 1)
        return Model(inputs, x, name=name)(x_in)
    return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
    """Detection head for one scale.

    A 3x3 conv followed by a 1x1 conv (no batch norm) producing
    `anchors * (classes + 5)` channels, reshaped to
    (batch, grid, grid, anchors, classes + 5).
    """
    def yolo_output(x_in):
        x = inputs = Input(x_in.shape[1:])
        x = DarknetConv(x, filters * 2, 3)
        x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
        # Split the channel dimension into (anchor, box+obj+class) axes.
        x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
                                            anchors, classes + 5)))(x)
        return tf.keras.Model(inputs, x, name=name)(x_in)
    return yolo_output
def yolo_boxes(pred, anchors, classes):
    """Decode one scale's raw head output.

    Returns (bbox, objectness, class_probs, pred_box): bbox is the decoded
    x1y1x2y2 box (fraction of the input image, given anchors normalized by
    416), while pred_box keeps the raw (sigmoid-xy, wh) values for the loss.
    """
    # pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...classes))
    grid_size = tf.shape(pred)[1]
    box_xy, box_wh, objectness, class_probs = tf.split(
        pred, (2, 2, 1, classes), axis=-1)
    box_xy = tf.sigmoid(box_xy)
    objectness = tf.sigmoid(objectness)
    class_probs = tf.sigmoid(class_probs)
    pred_box = tf.concat((box_xy, box_wh), axis=-1)  # original xywh for loss
    # !!! grid[x][y] == (y, x)
    grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
    grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)  # [gx, gy, 1, 2]
    # Shift cell-relative xy to absolute coordinates; scale wh by the anchors.
    box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
        tf.cast(grid_size, tf.float32)
    box_wh = tf.exp(box_wh) * anchors
    box_x1y1 = box_xy - box_wh / 2
    box_x2y2 = box_xy + box_wh / 2
    bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
    return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
    """Flatten each scale's (bbox, objectness, class_probs) triple and run
    class-aware combined non-max suppression over all scales.

    The thresholds come from the module-level yolo_* constants; `anchors`
    and `masks` are accepted for signature compatibility but unused here.
    """
    def _flatten(tensor):
        # (batch, grid, grid, anchors, d) -> (batch, grid*grid*anchors, d)
        return tf.reshape(tensor, (tf.shape(tensor)[0], -1, tf.shape(tensor)[-1]))

    bbox = tf.concat([_flatten(out[0]) for out in outputs], axis=1)
    confidence = tf.concat([_flatten(out[1]) for out in outputs], axis=1)
    class_probs = tf.concat([_flatten(out[2]) for out in outputs], axis=1)

    scores = confidence * class_probs
    boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
        boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
        scores=tf.reshape(
            scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
        max_output_size_per_class=yolo_max_boxes,
        max_total_size=yolo_max_boxes,
        iou_threshold=yolo_iou_threshold,
        score_threshold=yolo_score_threshold
    )
    return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
           masks=yolo_anchor_masks, classes=80, training=False):
    """Full YOLOv3: Darknet-53 backbone, three neck/head pairs and, unless
    `training`, per-scale box decoding followed by combined NMS.

    When training, the raw (undecoded) head outputs are returned, which is
    what YoloLoss expects.
    """
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_36, x_61, x = Darknet(name='yolo_darknet')(x)
    x = YoloConv(512, name='yolo_conv_0')(x)
    output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConv(256, name='yolo_conv_1')((x, x_61))
    output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
    x = YoloConv(128, name='yolo_conv_2')((x, x_36))
    output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
    if training:
        return Model(inputs, (output_0, output_1, output_2), name='yolov3')
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
                     name='yolo_boxes_2')(output_2)
    # Each boxes_i[:3] drops the raw pred_box element before NMS.
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
    return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
               masks=yolo_tiny_anchor_masks, classes=80, training=False):
    """Tiny YOLOv3: DarknetTiny backbone with two detection scales; unless
    `training`, decodes boxes per scale and applies combined NMS."""
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    x = inputs = Input([size, size, channels], name='input')
    x_8, x = DarknetTiny(name='yolo_darknet')(x)
    x = YoloConvTiny(256, name='yolo_conv_0')(x)
    output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
    x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
    output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
    if training:
        # NOTE(review): the training model is named 'yolov3' while the
        # inference model below is 'yolov3_tiny' — confirm this mismatch
        # is intended before relying on model names anywhere.
        return Model(inputs, (output_0, output_1), name='yolov3')
    boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
                     name='yolo_boxes_0')(output_0)
    boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
                     name='yolo_boxes_1')(output_1)
    outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
                     name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
    return Model(inputs, outputs, name='yolov3_tiny')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
    """Build the per-scale YOLOv3 loss (xy + wh + objectness + class).

    `anchors` are this scale's normalized anchors; predictions whose best
    IoU with any true box exceeds `ignore_thresh` are excluded from the
    no-object penalty.
    """
    def yolo_loss(y_true, y_pred):
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
        # NOTE: yolo_boxes returns (bbox, objectness, class_probs, raw_xywh),
        # so pred_box below holds decoded x1y1x2y2 boxes (used for IoU) and
        # pred_xywh the raw (sigmoid-xy, wh) the regression loss compares.
        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
            y_pred, anchors, classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]
        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
        true_box, true_obj, true_class_idx = tf.split(
            y_true, (4, 1, 1), axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]
        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
        # 3. inverting the pred box equations
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
            tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / anchors)
        # zero-size true boxes produce -inf above; zero them out
        true_wh = tf.where(tf.math.is_inf(true_wh),
                           tf.zeros_like(true_wh), true_wh)
        # 4. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        best_iou = tf.map_fn(
            lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
                x[1], tf.cast(x[2], tf.bool))), axis=-1),
            (pred_box, true_box, obj_mask),
            tf.float32)
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
        # 5. calculate all losses
        xy_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + \
            (1 - obj_mask) * ignore_mask * obj_loss
        # TODO: use binary_crossentropy instead
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class)
        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
        return xy_loss + wh_loss + obj_loss + class_loss
    return yolo_loss
| 37.235474 | 87 | 0.604221 | from absl import flags
from absl.flags import FLAGS
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Add,
Concatenate,
Conv2D,
Input,
Lambda,
LeakyReLU,
MaxPool2D,
UpSampling2D,
ZeroPadding2D,
)
from tensorflow.keras.regularizers import l2
from tensorflow.keras.losses import (
binary_crossentropy,
sparse_categorical_crossentropy
)
from .batch_norm import BatchNormalization
from .utils import broadcast_iou
# Post-processing defaults read directly by yolo_nms below.
yolo_max_boxes = 100
yolo_iou_threshold = 0.1
yolo_score_threshold = 0.1
# NOTE(review): these flags are defined but yolo_nms reads the module-level
# constants above, not FLAGS — confirm which source of truth is intended.
flags.DEFINE_integer('yolo_max_boxes', 100, 'maximum number of detections at one time')
flags.DEFINE_float('yolo_iou_threshold', 0.5, 'iou threshold')
flags.DEFINE_float('yolo_score_threshold', 0.5, 'score threshold')
# COCO anchor boxes in pixels, normalized by the 416x416 training resolution.
yolo_anchors = np.array([(10, 13), (16, 30), (33, 23), (30, 61), (62, 45),
                         (59, 119), (116, 90), (156, 198), (373, 326)],
                        np.float32) / 416
yolo_anchor_masks = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
yolo_tiny_anchors = np.array([(10, 14), (23, 27), (37, 58),
                              (81, 82), (135, 169), (344, 319)],
                             np.float32) / 416
yolo_tiny_anchor_masks = np.array([[3, 4, 5], [0, 1, 2]])
def DarknetConv(x, filters, size, strides=1, batch_norm=True):
    """Conv2D + optional BatchNorm + LeakyReLU building block.

    When striding, Darknet pads only the top and left edge so the output
    aligns with the reference implementation.
    """
    if strides == 1:
        padding = 'same'
    else:
        x = ZeroPadding2D(((1, 0), (1, 0)))(x)  # top left half-padding
        padding = 'valid'
    x = Conv2D(filters=filters, kernel_size=size,
               strides=strides, padding=padding,
               use_bias=not batch_norm, kernel_regularizer=l2(0.0005))(x)
    if batch_norm:
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.1)(x)
    return x
def DarknetResidual(x, filters):
    """Residual unit: 1x1 bottleneck (filters // 2) then 3x3 conv, added to the input."""
    shortcut = x
    x = DarknetConv(x, filters // 2, 1)
    x = DarknetConv(x, filters, 3)
    return Add()([shortcut, x])
def DarknetBlock(x, filters, blocks):
x = DarknetConv(x, filters, 3, strides=2)
for _ in range(blocks):
x = DarknetResidual(x, filters)
return x
def Darknet(name=None):
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 32, 3)
x = DarknetBlock(x, 64, 1)
x = DarknetBlock(x, 128, 2)
x = x_36 = DarknetBlock(x, 256, 8)
x = x_61 = DarknetBlock(x, 512, 8)
x = DarknetBlock(x, 1024, 4)
return tf.keras.Model(inputs, (x_36, x_61, x), name=name)
def DarknetTiny(name=None):
x = inputs = Input([None, None, 3])
x = DarknetConv(x, 16, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 32, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 64, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 128, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = x_8 = DarknetConv(x, 256, 3)
x = MaxPool2D(2, 2, 'same')(x)
x = DarknetConv(x, 512, 3)
x = MaxPool2D(2, 1, 'same')(x)
x = DarknetConv(x, 1024, 3)
return tf.keras.Model(inputs, (x_8, x), name=name)
def YoloConv(filters, name=None):
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloConvTiny(filters, name=None):
def yolo_conv(x_in):
if isinstance(x_in, tuple):
inputs = Input(x_in[0].shape[1:]), Input(x_in[1].shape[1:])
x, x_skip = inputs
x = DarknetConv(x, filters, 1)
x = UpSampling2D(2)(x)
x = Concatenate()([x, x_skip])
else:
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters, 1)
return Model(inputs, x, name=name)(x_in)
return yolo_conv
def YoloOutput(filters, anchors, classes, name=None):
def yolo_output(x_in):
x = inputs = Input(x_in.shape[1:])
x = DarknetConv(x, filters * 2, 3)
x = DarknetConv(x, anchors * (classes + 5), 1, batch_norm=False)
x = Lambda(lambda x: tf.reshape(x, (-1, tf.shape(x)[1], tf.shape(x)[2],
anchors, classes + 5)))(x)
return tf.keras.Model(inputs, x, name=name)(x_in)
return yolo_output
def yolo_boxes(pred, anchors, classes):
grid_size = tf.shape(pred)[1]
box_xy, box_wh, objectness, class_probs = tf.split(
pred, (2, 2, 1, classes), axis=-1)
box_xy = tf.sigmoid(box_xy)
objectness = tf.sigmoid(objectness)
class_probs = tf.sigmoid(class_probs)
pred_box = tf.concat((box_xy, box_wh), axis=-1)
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
box_xy = (box_xy + tf.cast(grid, tf.float32)) / \
tf.cast(grid_size, tf.float32)
box_wh = tf.exp(box_wh) * anchors
box_x1y1 = box_xy - box_wh / 2
box_x2y2 = box_xy + box_wh / 2
bbox = tf.concat([box_x1y1, box_x2y2], axis=-1)
return bbox, objectness, class_probs, pred_box
def yolo_nms(outputs, anchors, masks, classes):
b, c, t = [], [], []
for o in outputs:
b.append(tf.reshape(o[0], (tf.shape(o[0])[0], -1, tf.shape(o[0])[-1])))
c.append(tf.reshape(o[1], (tf.shape(o[1])[0], -1, tf.shape(o[1])[-1])))
t.append(tf.reshape(o[2], (tf.shape(o[2])[0], -1, tf.shape(o[2])[-1])))
bbox = tf.concat(b, axis=1)
confidence = tf.concat(c, axis=1)
class_probs = tf.concat(t, axis=1)
scores = confidence * class_probs
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(bbox, (tf.shape(bbox)[0], -1, 1, 4)),
scores=tf.reshape(
scores, (tf.shape(scores)[0], -1, tf.shape(scores)[-1])),
max_output_size_per_class=yolo_max_boxes,
max_total_size=yolo_max_boxes,
iou_threshold=yolo_iou_threshold,
score_threshold=yolo_score_threshold
)
return boxes, scores, classes, valid_detections
def YoloV3(size=None, channels=3, anchors=yolo_anchors,
masks=yolo_anchor_masks, classes=80, training=False):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
x = inputs = Input([size, size, channels], name='input')
x_36, x_61, x = Darknet(name='yolo_darknet')(x)
x = YoloConv(512, name='yolo_conv_0')(x)
output_0 = YoloOutput(512, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConv(256, name='yolo_conv_1')((x, x_61))
output_1 = YoloOutput(256, len(masks[1]), classes, name='yolo_output_1')(x)
x = YoloConv(128, name='yolo_conv_2')((x, x_36))
output_2 = YoloOutput(128, len(masks[2]), classes, name='yolo_output_2')(x)
if training:
return Model(inputs, (output_0, output_1, output_2), name='yolov3')
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
boxes_2 = Lambda(lambda x: yolo_boxes(x, anchors[masks[2]], classes),
name='yolo_boxes_2')(output_2)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3], boxes_2[:3]))
return Model(inputs, outputs, name='yolov3')
def YoloV3Tiny(size=None, channels=3, anchors=yolo_tiny_anchors,
masks=yolo_tiny_anchor_masks, classes=80, training=False):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
x = inputs = Input([size, size, channels], name='input')
x_8, x = DarknetTiny(name='yolo_darknet')(x)
x = YoloConvTiny(256, name='yolo_conv_0')(x)
output_0 = YoloOutput(256, len(masks[0]), classes, name='yolo_output_0')(x)
x = YoloConvTiny(128, name='yolo_conv_1')((x, x_8))
output_1 = YoloOutput(128, len(masks[1]), classes, name='yolo_output_1')(x)
if training:
return Model(inputs, (output_0, output_1), name='yolov3')
boxes_0 = Lambda(lambda x: yolo_boxes(x, anchors[masks[0]], classes),
name='yolo_boxes_0')(output_0)
boxes_1 = Lambda(lambda x: yolo_boxes(x, anchors[masks[1]], classes),
name='yolo_boxes_1')(output_1)
outputs = Lambda(lambda x: yolo_nms(x, anchors, masks, classes),
name='yolo_nms')((boxes_0[:3], boxes_1[:3]))
return Model(inputs, outputs, name='yolov3_tiny')
def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
def yolo_loss(y_true, y_pred):
pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
y_pred, anchors, classes)
pred_xy = pred_xywh[..., 0:2]
pred_wh = pred_xywh[..., 2:4]
true_box, true_obj, true_class_idx = tf.split(
y_true, (4, 1, 1), axis=-1)
true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
true_wh = true_box[..., 2:4] - true_box[..., 0:2]
box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]
grid_size = tf.shape(y_true)[1]
grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
tf.cast(grid, tf.float32)
true_wh = tf.math.log(true_wh / anchors)
true_wh = tf.where(tf.math.is_inf(true_wh),
tf.zeros_like(true_wh), true_wh)
obj_mask = tf.squeeze(true_obj, -1)
best_iou = tf.map_fn(
lambda x: tf.reduce_max(broadcast_iou(x[0], tf.boolean_mask(
x[1], tf.cast(x[2], tf.bool))), axis=-1),
(pred_box, true_box, obj_mask),
tf.float32)
ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)
xy_loss = obj_mask * box_loss_scale * \
tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
wh_loss = obj_mask * box_loss_scale * \
tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
obj_loss = binary_crossentropy(true_obj, pred_obj)
obj_loss = obj_mask * obj_loss + \
(1 - obj_mask) * ignore_mask * obj_loss
class_loss = obj_mask * sparse_categorical_crossentropy(
true_class_idx, pred_class)
xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))
return xy_loss + wh_loss + obj_loss + class_loss
return yolo_loss
| true | true |
f721a01f25bf915b93bced32999e9d5635c07fda | 5,196 | py | Python | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 16 | 2017-06-30T20:05:05.000Z | 2022-03-08T21:03:19.000Z | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 342 | 2017-06-23T21:37:40.000Z | 2022-03-30T16:44:16.000Z | data_steward/cdr_cleaner/cleaning_rules/null_person_birthdate.py | lrwb-aou/curation | e80447e56d269dc2c9c8bc79e78218d4b0dc504c | [
"MIT"
] | 33 | 2017-07-01T00:12:20.000Z | 2022-01-26T18:06:53.000Z | """
Null Person Table Birth Date Fields
In the person table, the fields month_of_birth, day_of_birth, and birth_datetime should be nulled.
The year_of_birth field should remain unchanged.
Original Issue: DC-1356
"""
# Python imports
import logging
# Project imports
import constants.bq_utils as bq_consts
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, PERSON
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
# Update statement nulling the granular birth-date columns of the person
# table; year_of_birth is deliberately left unchanged (DC-1356).
NULL_DATE_QUERY = JINJA_ENV.from_string("""
UPDATE `{{project_id}}.{{dataset_id}}.{{person_table}}`
SET
birth_datetime = NULL,
month_of_birth = NULL,
day_of_birth = NULL
WHERE TRUE
""")
class NullPersonBirthdate(BaseCleaningRule):
    """Cleaning rule that nulls granular birth-date fields in the person table.

    month_of_birth, day_of_birth and birth_datetime are set to NULL;
    year_of_birth is intentionally left unchanged (DC-1356).
    """

    def __init__(self, project_id, dataset_id, sandbox_dataset_id):
        """
        Initialize the class with proper information.

        Set the issue numbers, description and affected datasets. As other
        tickets may affect this SQL, append them to the list of Jira Issues.
        DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
        """
        desc = 'Set Patient Birthdate Fields to NULL'
        super().__init__(
            issue_numbers=['DC1356'],
            description=desc,
            affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
            # Fix: affected_tables is a list of table names. Passing the
            # bare string PERSON would let consumers iterate it
            # character-by-character instead of table-by-table.
            affected_tables=[PERSON],
            project_id=project_id,
            dataset_id=dataset_id,
            sandbox_dataset_id=sandbox_dataset_id)

    def setup_rule(self, client, *args, **keyword_args):
        """
        Load required resources prior to executing cleaning rule queries.

        This rule needs no static tables or other preloaded resources.

        :param client: a BigQuery client object (unused)
        """
        pass

    def get_query_specs(self, *args, **keyword_args):
        """
        Interface to return a list of query dictionaries.

        :returns: a list of query dictionaries. Each dictionary specifies
            the query to execute and how to execute. The dictionaries are
            stored in list order and returned in list order to maintain
            an ordering.
        """
        update_query = {
            cdr_consts.QUERY:
                NULL_DATE_QUERY.render(project_id=self.project_id,
                                       dataset_id=self.dataset_id,
                                       person_table=PERSON)
        }
        return [update_query]

    def setup_validation(self, client, *args, **keyword_args):
        """
        Run required steps for validation setup.

        Not implemented for this rule.
        """
        raise NotImplementedError("Please fix me.")

    def validate_rule(self, client, *args, **keyword_args):
        """
        Validates the cleaning rule which deletes or updates the data from the tables.

        Not implemented for this rule. Once implemented it should raise a
        RuntimeError if the validation fails.
        """
        raise NotImplementedError("Please fix me.")

    def get_sandbox_tablenames(self):
        """Return the sandbox table names this rule would write to."""
        return [self.sandbox_table_for(PERSON)]
if __name__ == '__main__':
    import cdr_cleaner.args_parser as parser
    import cdr_cleaner.clean_cdr_engine as clean_engine

    ARGS = parser.parse_args()
    pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
    if ARGS.list_queries:
        # Dry run: log the rendered queries without touching any dataset.
        clean_engine.add_console_logging()
        query_list = clean_engine.get_query_list(ARGS.project_id,
                                                 ARGS.dataset_id,
                                                 ARGS.sandbox_dataset_id,
                                                 [(NullPersonBirthdate,)])
        for query in query_list:
            LOGGER.info(query)
    else:
        # Apply the cleaning rule to the target dataset.
        clean_engine.add_console_logging(ARGS.console_log)
        clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
                                   ARGS.sandbox_dataset_id,
                                   [(NullPersonBirthdate,)])
| 37.381295 | 105 | 0.659738 |
import logging
import constants.bq_utils as bq_consts
from cdr_cleaner.cleaning_rules.base_cleaning_rule import BaseCleaningRule
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, PERSON
from utils import pipeline_logging
LOGGER = logging.getLogger(__name__)
NULL_DATE_QUERY = JINJA_ENV.from_string("""
UPDATE `{{project_id}}.{{dataset_id}}.{{person_table}}`
SET
birth_datetime = NULL,
month_of_birth = NULL,
day_of_birth = NULL
WHERE TRUE
""")
class NullPersonBirthdate(BaseCleaningRule):
def __init__(self, project_id, dataset_id, sandbox_dataset_id):
desc = 'Set Patient Birthdate Fields to NULL'
super().__init__(issue_numbers=['DC1356'],
description=desc,
affected_datasets=[cdr_consts.CONTROLLED_TIER_DEID],
affected_tables=PERSON,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id)
def setup_rule(self, client, *args, **keyword_args):
pass
def get_query_specs(self, *args, **keyword_args):
update_query = dict()
update_query[cdr_consts.QUERY] = NULL_DATE_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
person_table=PERSON)
return [update_query]
def setup_validation(self, client, *args, **keyword_args):
raise NotImplementedError("Please fix me.")
def validate_rule(self, client, *args, **keyword_args):
raise NotImplementedError("Please fix me.")
def get_sandbox_tablenames(self):
return [self.sandbox_table_for(PERSON)]
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(ARGS.project_id,
ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(NullPersonBirthdate,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(NullPersonBirthdate,)])
| true | true |
f721a117918ff0bd279746d0e2b01e1cd2ecaeab | 183 | py | Python | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 122 | 2019-10-21T16:22:27.000Z | 2022-03-31T06:07:45.000Z | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 40 | 2019-11-08T14:38:51.000Z | 2022-03-15T13:07:38.000Z | benchmark/pysam_fasta_random_access.py | DishSri1/pyfastx | 4bfa6662fb50b7244565ad00ef6e99962b4f3169 | [
"MIT"
] | 8 | 2020-01-20T01:31:51.000Z | 2021-07-30T10:28:35.000Z | import sys
import pysam
# Benchmark: random access into a FASTA file with pysam.
# argv: <id list file> <fasta file>
idfile, fafile = sys.argv[1:]
fa = pysam.FastaFile(fafile)
with open(idfile) as fh:
    for line in fh:
        # One sequence id per line; print the full sequence.
        print(str(fa[line.strip()]))
| 14.076923 | 29 | 0.666667 | import sys
import pysam
idfile, fafile = sys.argv[1:]
fa = pysam.FastaFile(fafile)
with open(idfile) as fh:
for line in fh:
seqid = line.strip()
s = str(fa[seqid])
print(s)
| true | true |
f721a16e8f02f666fcdc92caae18ad6f00ef9e1f | 12,817 | py | Python | tests/utils/log/elasticmock/fake_elasticsearch.py | wileeam/airflow | f46be8152a4d89c57db4ca46f5b3339e4876b723 | [
"Apache-2.0"
] | 1 | 2020-02-17T17:40:14.000Z | 2020-02-17T17:40:14.000Z | tests/utils/log/elasticmock/fake_elasticsearch.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | 2 | 2021-05-12T12:41:51.000Z | 2021-09-29T17:47:43.000Z | tests/utils/log/elasticmock/fake_elasticsearch.py | devlocalca/airflow | 58c3542ed25061320ce61dbe0adf451a44c738dd | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Marcos Cardoso
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import query_params
from elasticsearch.exceptions import NotFoundError
from .utilities import get_random_id
# pylint: disable=redefined-builtin
# noinspection PyShadowingBuiltins
class FakeElasticsearch(Elasticsearch):
__documents_dict = None
    def __init__(self):
        # In-memory store: index name -> list of document envelope dicts.
        self.__documents_dict = {}
    @query_params()
    def ping(self, params=None):
        """Pretend the cluster is always reachable."""
        return True
@query_params()
def info(self, params=None):
return {
'status': 200,
'cluster_name': 'elasticmock',
'version':
{
'lucene_version': '4.10.4',
'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4',
'number': '1.7.5',
'build_timestamp': '2016-02-02T09:55:30Z',
'build_snapshot': False
},
'name': 'Nightwatch',
'tagline': 'You Know, for Search'
}
@query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
def index(self, index, doc_type, body, id=None, params=None):
if index not in self.__documents_dict:
self.__documents_dict[index] = list()
if id is None:
id = get_random_id()
version = 1
self.__documents_dict[index].append({
'_type': doc_type,
'_id': id,
'_source': body,
'_index': index,
'_version': version
})
return {
'_type': doc_type,
'_id': id,
'created': True,
'_version': version,
'_index': index
}
@query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
def exists(self, index, doc_type, id, params=None):
result = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_id') == id and document.get('_type') == doc_type:
result = True
break
return result
    @query_params('_source', '_source_exclude', '_source_include', 'fields',
                  'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
                  'version_type')
    def get(self, index, id, doc_type='_all', params=None):
        """Return the stored document envelope for (index, doc_type, id).

        Raises NotFoundError(404) with an ES-style JSON body when the index
        or document is absent.
        """
        result = None
        if index in self.__documents_dict:
            result = self.find_document(doc_type, id, index, result)
        if result:
            result['found'] = True  # mutates the stored document in place
        else:
            error_data = {
                '_index': index,
                '_type': doc_type,
                '_id': id,
                'found': False
            }
            raise NotFoundError(404, json.dumps(error_data))
        return result
def find_document(self, doc_type, id, index, result):
for document in self.__documents_dict[index]:
if document.get('_id') == id:
if doc_type == '_all' or document.get('_type') == doc_type:
result = document
break
return result
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get_source(self, index, doc_type, id, params=None):
document = self.get(index=index, doc_type=doc_type, id=id, params=params)
return document.get('_source')
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def count(self, index=None, doc_type=None, body=None, params=None):
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
i = 0
for searchable_index in searchable_indexes:
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types\
and document.get('_type') not in searchable_doc_types:
continue
i += 1
result = {
'count': i,
'_shards': {
'successful': 1,
'failed': 0,
'total': 1
}
}
return result
    @query_params('_source', '_source_exclude', '_source_include',
                  'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
                  'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
                  'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
                  'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
                  'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
                  'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
                  'track_scores', 'version')
    def search(self, index=None, doc_type=None, body=None, params=None):
        """Return an ES-style search response for documents matching `body`.

        Matching is delegated to _find_match (single bool.must clause,
        match_phrase only); every hit receives a fixed _score of 1.0.
        """
        searchable_indexes = self._normalize_index_to_list(index)
        matches = self._find_match(index, doc_type, body)
        result = {
            'hits': {
                'total': len(matches),
                'max_score': 1.0
            },
            '_shards': {
                # Simulate indexes with 1 shard each
                'successful': len(searchable_indexes),
                'failed': 0,
                'total': len(searchable_indexes)
            },
            'took': 1,
            'timed_out': False
        }
        hits = []
        for match in matches:
            match['_score'] = 1.0  # written onto the stored document itself
            hits.append(match)
        result['hits']['hits'] = hits
        return result
@query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
'timeout', 'version', 'version_type')
def delete(self, index, doc_type, id, params=None):
found = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_type') == doc_type and document.get('_id') == id:
found = True
self.__documents_dict[index].remove(document)
break
result_dict = {
'found': found,
'_index': index,
'_type': doc_type,
'_id': id,
'_version': 1,
}
if found:
return result_dict
else:
raise NotFoundError(404, json.dumps(result_dict))
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'preference', 'routing')
def suggest(self, body, index=None, params=None):
if index is not None and index not in self.__documents_dict:
raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))
result_dict = {}
for key, value in body.items():
text = value.get('text')
suggestion = int(text) + 1 if isinstance(text, int) \
else '{0}_suggestion'.format(text)
result_dict[key] = [
{
'text': text,
'length': 1,
'options': [
{
'text': suggestion,
'freq': 1,
'score': 1.0
}
],
'offset': 0
}
]
return result_dict
def _find_match(self, index, doc_type, body): # pylint: disable=unused-argument
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
must = body['query']['bool']['must'][0] # only support one must
matches = []
for searchable_index in searchable_indexes:
self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
return matches
def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types and document.get('_type') not in searchable_doc_types:
continue
if 'match_phrase' in must:
self.match_must_phrase(document, matches, must)
else:
matches.append(document)
@staticmethod
def match_must_phrase(document, matches, must):
for query_id in must['match_phrase']:
query_val = must['match_phrase'][query_id]
if query_id in document['_source']:
if query_val in document['_source'][query_id]:
# use in as a proxy for match_phrase
matches.append(document)
def _normalize_index_to_list(self, index):
# Ensure to have a list of index
if index is None:
searchable_indexes = self.__documents_dict.keys()
elif isinstance(index, str):
searchable_indexes = [index]
elif isinstance(index, list):
searchable_indexes = index
else:
# Is it the correct exception to use ?
raise ValueError("Invalid param 'index'")
# Check index(es) exists
for searchable_index in searchable_indexes:
if searchable_index not in self.__documents_dict:
raise NotFoundError(404,
'IndexMissingException[[{0}] missing]'
.format(searchable_index))
return searchable_indexes
@staticmethod
def _normalize_doc_type_to_list(doc_type):
# Ensure to have a list of index
if doc_type is None:
searchable_doc_types = []
elif isinstance(doc_type, str):
searchable_doc_types = [doc_type]
elif isinstance(doc_type, list):
searchable_doc_types = doc_type
else:
# Is it the correct exception to use ?
raise ValueError("Invalid param 'index'")
return searchable_doc_types
# pylint: enable=redefined-builtin
| 37.920118 | 105 | 0.581727 |
import json
from elasticsearch import Elasticsearch
from elasticsearch.client.utils import query_params
from elasticsearch.exceptions import NotFoundError
from .utilities import get_random_id
class FakeElasticsearch(Elasticsearch):
__documents_dict = None
def __init__(self):
self.__documents_dict = {}
@query_params()
def ping(self, params=None):
return True
@query_params()
def info(self, params=None):
return {
'status': 200,
'cluster_name': 'elasticmock',
'version':
{
'lucene_version': '4.10.4',
'build_hash': '00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4',
'number': '1.7.5',
'build_timestamp': '2016-02-02T09:55:30Z',
'build_snapshot': False
},
'name': 'Nightwatch',
'tagline': 'You Know, for Search'
}
@query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
def index(self, index, doc_type, body, id=None, params=None):
if index not in self.__documents_dict:
self.__documents_dict[index] = list()
if id is None:
id = get_random_id()
version = 1
self.__documents_dict[index].append({
'_type': doc_type,
'_id': id,
'_source': body,
'_index': index,
'_version': version
})
return {
'_type': doc_type,
'_id': id,
'created': True,
'_version': version,
'_index': index
}
@query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
def exists(self, index, doc_type, id, params=None):
result = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_id') == id and document.get('_type') == doc_type:
result = True
break
return result
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get(self, index, id, doc_type='_all', params=None):
result = None
if index in self.__documents_dict:
result = self.find_document(doc_type, id, index, result)
if result:
result['found'] = True
else:
error_data = {
'_index': index,
'_type': doc_type,
'_id': id,
'found': False
}
raise NotFoundError(404, json.dumps(error_data))
return result
def find_document(self, doc_type, id, index, result):
for document in self.__documents_dict[index]:
if document.get('_id') == id:
if doc_type == '_all' or document.get('_type') == doc_type:
result = document
break
return result
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get_source(self, index, doc_type, id, params=None):
document = self.get(index=index, doc_type=doc_type, id=id, params=params)
return document.get('_source')
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def count(self, index=None, doc_type=None, body=None, params=None):
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
i = 0
for searchable_index in searchable_indexes:
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types\
and document.get('_type') not in searchable_doc_types:
continue
i += 1
result = {
'count': i,
'_shards': {
'successful': 1,
'failed': 0,
'total': 1
}
}
return result
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
searchable_indexes = self._normalize_index_to_list(index)
matches = self._find_match(index, doc_type, body)
result = {
'hits': {
'total': len(matches),
'max_score': 1.0
},
'_shards': {
'successful': len(searchable_indexes),
'failed': 0,
'total': len(searchable_indexes)
},
'took': 1,
'timed_out': False
}
hits = []
for match in matches:
match['_score'] = 1.0
hits.append(match)
result['hits']['hits'] = hits
return result
@query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
'timeout', 'version', 'version_type')
def delete(self, index, doc_type, id, params=None):
found = False
if index in self.__documents_dict:
for document in self.__documents_dict[index]:
if document.get('_type') == doc_type and document.get('_id') == id:
found = True
self.__documents_dict[index].remove(document)
break
result_dict = {
'found': found,
'_index': index,
'_type': doc_type,
'_id': id,
'_version': 1,
}
if found:
return result_dict
else:
raise NotFoundError(404, json.dumps(result_dict))
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'preference', 'routing')
def suggest(self, body, index=None, params=None):
if index is not None and index not in self.__documents_dict:
raise NotFoundError(404, 'IndexMissingException[[{0}] missing]'.format(index))
result_dict = {}
for key, value in body.items():
text = value.get('text')
suggestion = int(text) + 1 if isinstance(text, int) \
else '{0}_suggestion'.format(text)
result_dict[key] = [
{
'text': text,
'length': 1,
'options': [
{
'text': suggestion,
'freq': 1,
'score': 1.0
}
],
'offset': 0
}
]
return result_dict
def _find_match(self, index, doc_type, body):
searchable_indexes = self._normalize_index_to_list(index)
searchable_doc_types = self._normalize_doc_type_to_list(doc_type)
must = body['query']['bool']['must'][0]
matches = []
for searchable_index in searchable_indexes:
self.find_document_in_searchable_index(matches, must, searchable_doc_types, searchable_index)
return matches
def find_document_in_searchable_index(self, matches, must, searchable_doc_types, searchable_index):
for document in self.__documents_dict[searchable_index]:
if searchable_doc_types and document.get('_type') not in searchable_doc_types:
continue
if 'match_phrase' in must:
self.match_must_phrase(document, matches, must)
else:
matches.append(document)
@staticmethod
def match_must_phrase(document, matches, must):
for query_id in must['match_phrase']:
query_val = must['match_phrase'][query_id]
if query_id in document['_source']:
if query_val in document['_source'][query_id]:
matches.append(document)
def _normalize_index_to_list(self, index):
if index is None:
searchable_indexes = self.__documents_dict.keys()
elif isinstance(index, str):
searchable_indexes = [index]
elif isinstance(index, list):
searchable_indexes = index
else:
raise ValueError("Invalid param 'index'")
for searchable_index in searchable_indexes:
if searchable_index not in self.__documents_dict:
raise NotFoundError(404,
'IndexMissingException[[{0}] missing]'
.format(searchable_index))
return searchable_indexes
@staticmethod
def _normalize_doc_type_to_list(doc_type):
if doc_type is None:
searchable_doc_types = []
elif isinstance(doc_type, str):
searchable_doc_types = [doc_type]
elif isinstance(doc_type, list):
searchable_doc_types = doc_type
else:
raise ValueError("Invalid param 'index'")
return searchable_doc_types
| true | true |
f721a1a1b37e686e4f48a58bde1c7698c1b3c997 | 6,863 | py | Python | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | secret/gama/genetic_programming/compilers/scikitlearn.py | israel-cj/GAMA-GEISHA | 210101df0e280d5c2eb5d325fc26d551bba74ed6 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
import logging
import os
import time
from typing import Callable, Tuple, Optional, Sequence
import stopit
from sklearn.base import TransformerMixin, is_classifier
from sklearn.model_selection import ShuffleSplit, cross_validate, check_cv
from sklearn.pipeline import Pipeline
from gama.utilities.evaluation_library import Evaluation
from gama.utilities.generic.stopwatch import Stopwatch
import numpy as np
from gama.utilities.metrics import Metric
from gama.genetic_programming.components import Individual, PrimitiveNode, Fitness
log = logging.getLogger(__name__)
def primitive_node_to_sklearn(primitive_node: PrimitiveNode) -> object:
hyperparameters = {
terminal.output: terminal.value for terminal in primitive_node._terminals
}
return primitive_node._primitive.identifier(**hyperparameters)
def compile_individual(
individual: Individual,
parameter_checks=None,
preprocessing_steps: Sequence[Tuple[str, TransformerMixin]] = None,
) -> Pipeline:
steps = [
(str(i), primitive_node_to_sklearn(primitive))
for i, primitive in enumerate(individual.primitives)
]
if preprocessing_steps:
steps = steps + list(reversed(preprocessing_steps))
return Pipeline(list(reversed(steps)))
def object_is_valid_pipeline(o):
""" Determines if object behaves like a scikit-learn pipeline. """
return (
o is not None
and hasattr(o, "fit")
and hasattr(o, "predict")
and hasattr(o, "steps")
)
def evaluate_pipeline(
pipeline, x, y_train, timeout: float, metrics: Tuple[Metric], cv=5, subsample=None,
) -> Tuple:
""" Score `pipeline` with k-fold CV according to `metrics` on (a subsample of) X, y
Returns
-------
Tuple:
prediction: np.ndarray if successful, None if not
scores: tuple with one float per metric, each value is -inf on fail.
estimators: list of fitted pipelines if successful, None if not
error: None if successful, otherwise an Exception
"""
if not object_is_valid_pipeline(pipeline):
raise TypeError(f"Pipeline must not be None and requires fit, predict, steps.")
if not timeout > 0:
raise ValueError(f"`timeout` must be greater than 0, is {timeout}.")
prediction, estimators = None, None
# default score for e.g. timeout or failure
scores = tuple([float("-inf")] * len(metrics))
with stopit.ThreadingTimeout(timeout) as c_mgr:
try:
if isinstance(subsample, int) and subsample < len(y_train):
sampler = ShuffleSplit(n_splits=1, train_size=subsample, random_state=0)
idx, _ = next(sampler.split(x))
x, y_train = x.iloc[idx, :], y_train[idx]
splitter = check_cv(cv, y_train, is_classifier(pipeline))
result = cross_validate(
pipeline,
x,
y_train,
cv=splitter,
return_estimator=True,
scoring=[m.name for m in metrics],
error_score="raise",
)
scores = tuple([np.mean(result[f"test_{m.name}"]) for m in metrics])
estimators = result["estimator"]
for (estimator, (_, test)) in zip(estimators, splitter.split(x, y_train)):
if any([m.requires_probabilities for m in metrics]):
fold_pred = estimator.predict_proba(x.iloc[test, :])
else:
fold_pred = estimator.predict(x.iloc[test, :])
if prediction is None:
if fold_pred.ndim == 2:
prediction = np.empty(shape=(len(y_train), fold_pred.shape[1]))
else:
prediction = np.empty(shape=(len(y_train),))
prediction[test] = fold_pred
# prediction, scores, estimators = cross_val_predict_score(
# pipeline, x, y_train, cv=cv, metrics=metrics
# )
except stopit.TimeoutException:
# This exception is handled by the ThreadingTimeout context manager.
raise
except KeyboardInterrupt:
raise
except Exception as e:
return prediction, scores, estimators, e
if c_mgr.state == c_mgr.INTERRUPTED:
# A TimeoutException was raised, but not by the context manager.
# This indicates that the outer context manager (the ea) timed out.
raise stopit.utils.TimeoutException()
if not c_mgr:
# For now we treat an eval timeout the same way as
# e.g. NaN exceptions and use the default score.
return prediction, scores, estimators, stopit.TimeoutException()
return prediction, tuple(scores), estimators, None
def evaluate_individual(
individual: Individual,
evaluate_pipeline: Callable,
timeout: float = 1e6,
deadline: Optional[float] = None,
add_length_to_score: bool = True,
**kwargs,
) -> Evaluation:
""" Evaluate the pipeline specified by individual, and record
Parameters
----------
individual: Individual
Blueprint for the pipeline to evaluate.
evaluate_pipeline: Callable
Function which takes the pipeline and produces validation predictions,
scores, estimators and errors.
timeout: float (default=1e6)
Maximum time in seconds that the evaluation is allowed to take.
Don't depend on high accuracy.
A shorter timeout is imposed if `deadline` is in less than `timeout` seconds.
deadline: float, optional
A time in seconds since epoch.
Cut off evaluation at `deadline` even if `timeout` seconds have not yet elapsed.
add_length_to_score: bool (default=True)
Add the length of the individual to the score result of the evaluation.
**kwargs: Dict, optional (default=None)
Passed to `evaluate_pipeline` function.
Returns
-------
Evaluation
"""
result = Evaluation(individual, pid=os.getpid())
result.start_time = datetime.now()
if deadline is not None:
time_to_deadline = deadline - time.time()
timeout = min(timeout, time_to_deadline)
with Stopwatch() as wall_time, Stopwatch(time.process_time) as process_time:
evaluation = evaluate_pipeline(individual.pipeline, timeout=timeout, **kwargs)
result._predictions, result.score, result._estimators, error = evaluation
if error is not None:
result.error = f"{type(error)} {str(error)}"
result.duration = wall_time.elapsed_time
if add_length_to_score:
result.score = result.score + (-len(individual.primitives),)
individual.fitness = Fitness(
result.score,
result.start_time,
wall_time.elapsed_time,
process_time.elapsed_time,
)
return result | 37.298913 | 88 | 0.652047 | from datetime import datetime
import logging
import os
import time
from typing import Callable, Tuple, Optional, Sequence
import stopit
from sklearn.base import TransformerMixin, is_classifier
from sklearn.model_selection import ShuffleSplit, cross_validate, check_cv
from sklearn.pipeline import Pipeline
from gama.utilities.evaluation_library import Evaluation
from gama.utilities.generic.stopwatch import Stopwatch
import numpy as np
from gama.utilities.metrics import Metric
from gama.genetic_programming.components import Individual, PrimitiveNode, Fitness
log = logging.getLogger(__name__)
def primitive_node_to_sklearn(primitive_node: PrimitiveNode) -> object:
hyperparameters = {
terminal.output: terminal.value for terminal in primitive_node._terminals
}
return primitive_node._primitive.identifier(**hyperparameters)
def compile_individual(
individual: Individual,
parameter_checks=None,
preprocessing_steps: Sequence[Tuple[str, TransformerMixin]] = None,
) -> Pipeline:
steps = [
(str(i), primitive_node_to_sklearn(primitive))
for i, primitive in enumerate(individual.primitives)
]
if preprocessing_steps:
steps = steps + list(reversed(preprocessing_steps))
return Pipeline(list(reversed(steps)))
def object_is_valid_pipeline(o):
return (
o is not None
and hasattr(o, "fit")
and hasattr(o, "predict")
and hasattr(o, "steps")
)
def evaluate_pipeline(
pipeline, x, y_train, timeout: float, metrics: Tuple[Metric], cv=5, subsample=None,
) -> Tuple:
if not object_is_valid_pipeline(pipeline):
raise TypeError(f"Pipeline must not be None and requires fit, predict, steps.")
if not timeout > 0:
raise ValueError(f"`timeout` must be greater than 0, is {timeout}.")
prediction, estimators = None, None
scores = tuple([float("-inf")] * len(metrics))
with stopit.ThreadingTimeout(timeout) as c_mgr:
try:
if isinstance(subsample, int) and subsample < len(y_train):
sampler = ShuffleSplit(n_splits=1, train_size=subsample, random_state=0)
idx, _ = next(sampler.split(x))
x, y_train = x.iloc[idx, :], y_train[idx]
splitter = check_cv(cv, y_train, is_classifier(pipeline))
result = cross_validate(
pipeline,
x,
y_train,
cv=splitter,
return_estimator=True,
scoring=[m.name for m in metrics],
error_score="raise",
)
scores = tuple([np.mean(result[f"test_{m.name}"]) for m in metrics])
estimators = result["estimator"]
for (estimator, (_, test)) in zip(estimators, splitter.split(x, y_train)):
if any([m.requires_probabilities for m in metrics]):
fold_pred = estimator.predict_proba(x.iloc[test, :])
else:
fold_pred = estimator.predict(x.iloc[test, :])
if prediction is None:
if fold_pred.ndim == 2:
prediction = np.empty(shape=(len(y_train), fold_pred.shape[1]))
else:
prediction = np.empty(shape=(len(y_train),))
prediction[test] = fold_pred
except stopit.TimeoutException:
raise
except KeyboardInterrupt:
raise
except Exception as e:
return prediction, scores, estimators, e
if c_mgr.state == c_mgr.INTERRUPTED:
raise stopit.utils.TimeoutException()
if not c_mgr:
return prediction, scores, estimators, stopit.TimeoutException()
return prediction, tuple(scores), estimators, None
def evaluate_individual(
individual: Individual,
evaluate_pipeline: Callable,
timeout: float = 1e6,
deadline: Optional[float] = None,
add_length_to_score: bool = True,
**kwargs,
) -> Evaluation:
result = Evaluation(individual, pid=os.getpid())
result.start_time = datetime.now()
if deadline is not None:
time_to_deadline = deadline - time.time()
timeout = min(timeout, time_to_deadline)
with Stopwatch() as wall_time, Stopwatch(time.process_time) as process_time:
evaluation = evaluate_pipeline(individual.pipeline, timeout=timeout, **kwargs)
result._predictions, result.score, result._estimators, error = evaluation
if error is not None:
result.error = f"{type(error)} {str(error)}"
result.duration = wall_time.elapsed_time
if add_length_to_score:
result.score = result.score + (-len(individual.primitives),)
individual.fitness = Fitness(
result.score,
result.start_time,
wall_time.elapsed_time,
process_time.elapsed_time,
)
return result | true | true |
f721a1be56454def41dd34025c62ee217a56159a | 70,696 | py | Python | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 5 | 2022-01-05T00:41:46.000Z | 2022-03-21T07:22:58.000Z | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 25 | 2021-04-17T09:26:47.000Z | 2022-01-02T20:06:55.000Z | venv/Lib/site-packages/networkx/algorithms/shortest_paths/weighted.py | amelliaaas/tugastkc4 | f442382c72379e911f3780543b95345a3b1c9407 | [
"Apache-2.0"
] | 20 | 2021-11-07T13:55:56.000Z | 2021-12-02T10:54:01.000Z | """
Shortest path algorithms for weighed graphs.
"""
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors
__all__ = [
"dijkstra_path",
"dijkstra_path_length",
"bidirectional_dijkstra",
"single_source_dijkstra",
"single_source_dijkstra_path",
"single_source_dijkstra_path_length",
"multi_source_dijkstra",
"multi_source_dijkstra_path",
"multi_source_dijkstra_path_length",
"all_pairs_dijkstra",
"all_pairs_dijkstra_path",
"all_pairs_dijkstra_path_length",
"dijkstra_predecessor_and_distance",
"bellman_ford_path",
"bellman_ford_path_length",
"single_source_bellman_ford",
"single_source_bellman_ford_path",
"single_source_bellman_ford_path_length",
"all_pairs_bellman_ford_path",
"all_pairs_bellman_ford_path_length",
"bellman_ford_predecessor_and_distance",
"negative_edge_cycle",
"goldberg_radzik",
"johnson",
]
def _weight_function(G, weight):
"""Returns a function that returns the weight of an edge.
The returned function is specifically suitable for input to
functions :func:`_dijkstra` and :func:`_bellman_ford_relaxation`.
Parameters
----------
G : NetworkX graph.
weight : string or function
If it is callable, `weight` itself is returned. If it is a string,
it is assumed to be the name of the edge attribute that represents
the weight of an edge. In that case, a function is returned that
gets the edge weight according to the specified edge attribute.
Returns
-------
function
This function returns a callable that accepts exactly three inputs:
a node, an node adjacent to the first one, and the edge attribute
dictionary for the eedge joining those nodes. That function returns
a number representing the weight of an edge.
If `G` is a multigraph, and `weight` is not callable, the
minimum edge weight over all parallel edges is returned. If any edge
does not have an attribute with key `weight`, it is assumed to
have weight one.
"""
if callable(weight):
return weight
# If the weight keyword argument is not callable, we assume it is a
# string representing the edge attribute containing the weight of
# the edge.
if G.is_multigraph():
return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())
return lambda u, v, data: data.get(weight, 1)
def dijkstra_path(G, source, target, weight="weight"):
"""Returns the shortest weighted path from source to target in G.
Uses Dijkstra's Method to compute the shortest weighted path
between two nodes in a graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node
target : node
Ending node
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
path : list
List of nodes in a shortest path.
Raises
------
NodeNotFound
If `source` is not in `G`.
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G = nx.path_graph(5)
>>> print(nx.dijkstra_path(G, 0, 4))
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The weight function can be used to hide edges by returning None.
So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
will find the shortest red path.
The weight function can be used to include node weights.
>>> def func(u, v, d):
... node_u_wt = G.nodes[u].get("node_weight", 1)
... node_v_wt = G.nodes[v].get("node_weight", 1)
... edge_wt = d.get("weight", 1)
... return node_u_wt / 2 + node_v_wt / 2 + edge_wt
In this example we take the average of start and end node
weights of an edge and add it to the weight of the edge.
The function :func:`single_source_dijkstra` computes both
path and length-of-path if you need both, use that.
See Also
--------
bidirectional_dijkstra
bellman_ford_path
single_source_dijkstra
"""
(length, path) = single_source_dijkstra(G, source, target=target, weight=weight)
return path
def dijkstra_path_length(G, source, target, weight="weight"):
"""Returns the shortest weighted path length in G from source to target.
Uses Dijkstra's Method to compute the shortest weighted path length
between two nodes in a graph.
Parameters
----------
G : NetworkX graph
source : node label
starting node for path
target : node label
ending node for path
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
length : number
Shortest path length.
Raises
------
NodeNotFound
If `source` is not in `G`.
NetworkXNoPath
If no path exists between source and target.
Examples
--------
>>> G = nx.path_graph(5)
>>> print(nx.dijkstra_path_length(G, 0, 4))
4
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The weight function can be used to hide edges by returning None.
So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
will find the shortest red path.
The function :func:`single_source_dijkstra` computes both
path and length-of-path if you need both, use that.
See Also
--------
bidirectional_dijkstra
bellman_ford_path_length
single_source_dijkstra
"""
if source == target:
return 0
weight = _weight_function(G, weight)
length = _dijkstra(G, source, weight, target=target)
try:
return length[target]
except KeyError as e:
raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from e
def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"):
"""Find shortest weighted paths in G from a source node.
Compute shortest path between source and all other reachable
nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node
Starting node for path.
cutoff : integer or float, optional
Length (sum of edge weights) at which the search is stopped.
If cutoff is provided, only return paths with summed weight <= cutoff.
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
paths : dictionary
Dictionary of shortest path lengths keyed by target.
Raises
------
NodeNotFound
If `source` is not in `G`.
Examples
--------
>>> G = nx.path_graph(5)
>>> path = nx.single_source_dijkstra_path(G, 0)
>>> path[4]
[0, 1, 2, 3, 4]
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The weight function can be used to hide edges by returning None.
So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
will find the shortest red path.
See Also
--------
single_source_dijkstra, single_source_bellman_ford
"""
return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
"""Find shortest weighted path lengths in G from a source node.
Compute the shortest path length between source and all other
reachable nodes for a weighted graph.
Parameters
----------
G : NetworkX graph
source : node label
Starting node for path
cutoff : integer or float, optional
Length (sum of edge weights) at which the search is stopped.
If cutoff is provided, only return paths with summed weight <= cutoff.
weight : string or function
If this is a string, then edge weights will be accessed via the
edge attribute with this key (that is, the weight of the edge
joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
such edge attribute exists, the weight of the edge is assumed to
be one.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly three
positional arguments: the two endpoints of an edge and the
dictionary of edge attributes for that edge. The function must
return a number.
Returns
-------
length : dict
Dict keyed by node to shortest path length from source.
Raises
------
NodeNotFound
If `source` is not in `G`.
Examples
--------
>>> G = nx.path_graph(5)
>>> length = nx.single_source_dijkstra_path_length(G, 0)
>>> length[4]
4
>>> for node in [0, 1, 2, 3, 4]:
... print(f"{node}: {length[node]}")
0: 0
1: 1
2: 2
3: 3
4: 4
Notes
-----
Edge weight attributes must be numerical.
Distances are calculated as sums of weighted edges traversed.
The weight function can be used to hide edges by returning None.
So ``weight = lambda u, v, d: 1 if d['color']=="red" else None``
will find the shortest red path.
See Also
--------
single_source_dijkstra, single_source_bellman_ford_path_length
"""
return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"):
    """Compute shortest weighted paths and distances from ``source``.

    Runs Dijkstra's algorithm from ``source`` and reports, for the
    given ``target`` or for every reachable node, both the distance and
    one shortest path.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Node where all reported paths begin.

    target : node label, optional
        If given, the search stops at this node and only its distance
        and path are returned.

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    distance, path : pair of dictionaries, or numeric and list.
        With ``target=None``: a tuple ``(dist, paths)`` of dicts keyed
        by node, giving each node's distance from ``source`` and one
        shortest path to it.  With a ``target``: the pair
        ``(distance, path)`` for that single node.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length, path = nx.single_source_dijkstra(G, 0)
    >>> print(length[4])
    4
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 3
    4: 4
    >>> path[4]
    [0, 1, 2, 3, 4]
    >>> length, path = nx.single_source_dijkstra(G, 0, 1)
    >>> length
    1
    >>> path
    [0, 1]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  A weight function may hide edges by
    returning None.  The algorithm is not guaranteed to work with
    negative weights or imprecise floating-point weights (overflow and
    round-off can cause problems).  Based on the Python cookbook recipe
    (119466) at https://code.activestate.com/recipes/119466/

    See Also
    --------
    single_source_dijkstra_path
    single_source_dijkstra_path_length
    single_source_bellman_ford
    """
    # A single-source search is a multi-source search over {source}.
    return multi_source_dijkstra(G, {source}, cutoff=cutoff, target=target, weight=weight)
def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"):
    """Find a shortest weighted path in ``G`` from the nearest of a set
    of source nodes to every other reachable node.

    Parameters
    ----------
    G : NetworkX graph

    sources : non-empty set of nodes
        Starting nodes.  With a single-element set all paths begin at
        that node; with several, each returned path begins at whichever
        source is closest to its target.

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    paths : dictionary
        Dict keyed by target node to a shortest path reaching it.

    Raises
    ------
    ValueError
        If `sources` is empty.
    NodeNotFound
        If any of `sources` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = nx.multi_source_dijkstra_path(G, {0, 4})
    >>> path[1]
    [0, 1]
    >>> path[3]
    [4, 3]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  A weight function may hide edges by
    returning None.

    See Also
    --------
    multi_source_dijkstra, multi_source_bellman_ford
    """
    # Only the path dict (second element of the pair) is wanted here.
    return multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight)[1]
def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"):
    """Find shortest weighted path lengths in ``G`` from a set of
    source nodes to every other reachable node.

    Each node's length is measured from whichever source is nearest.

    Parameters
    ----------
    G : NetworkX graph

    sources : non-empty set of nodes
        Starting nodes.  With a single-element set all paths begin at
        that node; with several, each length is measured from the
        closest of the sources.

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    length : dict
        Dict keyed by node to the shortest path length to the nearest
        source.

    Raises
    ------
    ValueError
        If `sources` is empty.
    NodeNotFound
        If any of `sources` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = nx.multi_source_dijkstra_path_length(G, {0, 4})
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 1
    4: 0

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  A weight function may hide edges by
    returning None.

    See Also
    --------
    multi_source_dijkstra
    """
    if not sources:
        raise ValueError("sources must not be empty")
    # Normalize the weight argument (attribute key or callable) into a
    # callable before handing it to the core search.
    weight_fn = _weight_function(G, weight)
    return _dijkstra_multisource(G, sources, weight_fn, cutoff=cutoff)
def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"):
    """Find shortest weighted paths and lengths from a set of source
    nodes.

    Uses Dijkstra's algorithm to compute shortest paths and lengths
    between one of the nodes in ``sources`` and the given ``target``,
    or all other reachable nodes if no target is given.

    Parameters
    ----------
    G : NetworkX graph

    sources : non-empty set of nodes
        Starting nodes.  With a single-element set all paths begin at
        that node; with several, each computed path begins at whichever
        source is closest to its target.

    target : node label, optional
        If given, the search stops at this node and only its distance
        and path are returned.

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    distance, path : pair of dictionaries, or numeric and list
        With ``target=None``: a tuple of two dicts keyed by node, the
        first giving distances from the nearest source and the second
        giving a path from that source.  With a ``target``: the pair
        ``(distance, path)`` for that single node.

    Raises
    ------
    ValueError
        If `sources` is empty.
    NodeNotFound
        If any of `sources` is not in `G`.
    NetworkXNoPath
        If `target` is given but cannot be reached from any source.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length, path = nx.multi_source_dijkstra(G, {0, 4})
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 1
    4: 0
    >>> path[1]
    [0, 1]
    >>> path[3]
    [4, 3]
    >>> length, path = nx.multi_source_dijkstra(G, {0, 4}, 1)
    >>> length
    1
    >>> path
    [0, 1]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  A weight function may hide edges by
    returning None.  The algorithm is not guaranteed to work with
    negative weights or imprecise floating-point weights.  Based on the
    Python cookbook recipe (119466) at
    https://code.activestate.com/recipes/119466/

    See Also
    --------
    multi_source_dijkstra_path
    multi_source_dijkstra_path_length
    """
    if not sources:
        raise ValueError("sources must not be empty")
    if target in sources:
        # The cheapest way to reach a source node is to start there.
        return (0, [target])
    weight_fn = _weight_function(G, weight)
    # Seed every source with the trivial path to itself; the core
    # search extends these lists in place.
    paths = {s: [s] for s in sources}
    dist = _dijkstra_multisource(
        G, sources, weight_fn, paths=paths, cutoff=cutoff, target=target
    )
    if target is None:
        return (dist, paths)
    try:
        return (dist[target], paths[target])
    except KeyError as e:
        raise nx.NetworkXNoPath(f"No path to {target}.") from e
def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None):
    """Run Dijkstra's algorithm from a single source node.

    Thin convenience wrapper: identical to calling
    :func:`_dijkstra_multisource` with ``sources=[source]`` and all
    other arguments forwarded unchanged.
    """
    options = dict(pred=pred, paths=paths, cutoff=cutoff, target=target)
    return _dijkstra_multisource(G, [source], weight, **options)
def _dijkstra_multisource(
    G, sources, weight, pred=None, paths=None, cutoff=None, target=None
):
    """Uses Dijkstra's algorithm to find shortest weighted paths

    Parameters
    ----------
    G : NetworkX graph

    sources : non-empty iterable of nodes
        Starting nodes for paths. If this is just an iterable containing
        a single node, then all paths computed by this function will
        start from that node. If there are two or more nodes in this
        iterable, the computed paths may begin from any one of the start
        nodes.

    weight: function
        Function with (u, v, data) input that returns that edges weight
        or None to indicate a hidden edge

    pred: dict of lists, optional(default=None)
        dict to store a list of predecessors keyed by that node
        If None, predecessors are not stored.

    paths: dict, optional (default=None)
        dict to store the path list from source to each node, keyed by node.
        If None, paths are not stored.

    target : node label, optional
        Ending node for path. Search is halted when target is found.

    cutoff : integer or float, optional
        Length (sum of edge weights) at which the search is stopped.
        If cutoff is provided, only return paths with summed weight <= cutoff.

    Returns
    -------
    distance : dictionary
        A mapping from node to shortest distance to that node from one
        of the source nodes.

    Raises
    ------
    NodeNotFound
        If any of `sources` is not in `G`.

    Notes
    -----
    The optional predecessor and path dictionaries can be accessed by
    the caller through the original pred and paths objects passed
    as arguments. No need to explicitly return pred or paths.

    """
    # Successor view: for undirected graphs the adjacency dict plays
    # the role of the successor dict.
    G_succ = G._succ if G.is_directed() else G._adj

    # Bind the heap operations to local names (fast LOAD_FAST lookups
    # inside the hot loop below).
    push = heappush
    pop = heappop
    dist = {}  # dictionary of final distances
    # seen holds the best *tentative* distance found so far for nodes
    # that are on the fringe but not yet settled into dist.
    seen = {}
    # fringe is heapq with 3-tuples (distance,c,node)
    # use the count c to avoid comparing nodes (may not be able to)
    c = count()
    fringe = []
    for source in sources:
        if source not in G:
            raise nx.NodeNotFound(f"Source {source} not in G")
        seen[source] = 0
        push(fringe, (0, next(c), source))
    while fringe:
        (d, _, v) = pop(fringe)
        if v in dist:
            continue  # already searched this node.
        # First pop of v settles it: d is v's final shortest distance.
        dist[v] = d
        if v == target:
            break
        for u, e in G_succ[v].items():
            cost = weight(v, u, e)
            if cost is None:
                # A None weight hides the edge from the search.
                continue
            vu_dist = dist[v] + cost
            if cutoff is not None:
                if vu_dist > cutoff:
                    continue
            if u in dist:
                # u is already settled; a strictly shorter path to it
                # now would contradict Dijkstra's invariant and can
                # only happen with negative weights.
                u_dist = dist[u]
                if vu_dist < u_dist:
                    raise ValueError("Contradictory paths found:", "negative weights?")
                elif pred is not None and vu_dist == u_dist:
                    # Equal-length alternative: record v as another
                    # predecessor of u.
                    pred[u].append(v)
            elif u not in seen or vu_dist < seen[u]:
                # New node, or a strictly better tentative distance:
                # (re-)push u; the stale heap entry (if any) is skipped
                # later by the "v in dist" check above.
                seen[u] = vu_dist
                push(fringe, (vu_dist, next(c), u))
                if paths is not None:
                    paths[u] = paths[v] + [u]
                if pred is not None:
                    pred[u] = [v]
            elif vu_dist == seen[u]:
                # Tie with the best tentative distance: only the
                # predecessor list (if requested) needs updating.
                if pred is not None:
                    pred[u].append(v)

    # The optional predecessor and path dictionaries can be accessed
    # by the caller via the pred and paths objects passed as arguments.
    return dist
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
    """Compute weighted shortest path lengths and predecessors.

    Runs Dijkstra's algorithm from ``source`` and returns, for each
    reached node, the list of its predecessors on shortest paths and
    its distance from the source.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for the search.

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    pred, distance : dictionaries
        Two dicts keyed by node: predecessor lists and distances from
        the source.  A predecessor list has more than one entry only
        when multiple shortest paths reach that node.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> pred, dist = nx.dijkstra_predecessor_and_distance(G, 0, 1)
    >>> sorted(pred.items())
    [(0, []), (1, [0])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1)]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.
    """
    weight_fn = _weight_function(G, weight)
    # Seed the predecessor map: the source has no predecessor.  The
    # search fills this dict in place.
    pred = {source: []}
    dist = _dijkstra(G, source, weight_fn, pred=pred, cutoff=cutoff)
    return (pred, dist)
def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
    """Find shortest weighted paths and lengths between all nodes.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key; missing attributes default to a weight of one.  If a
        function, it is called with the two edge endpoints and the
        edge-attribute dict and must return a number, or None to make
        the edge invisible to the search.

    Yields
    ------
    (node, (distance, path)) : (node obj, (dict, dict))
        One pair per source node: a dict of distances keyed by target
        and a dict of paths keyed by target.  Apply ``dict()`` to the
        iterator to obtain a dict keyed by source node.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> len_path = dict(nx.all_pairs_dijkstra(G))
    >>> print(len_path[3][0][1])
    2
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"3 - {node}: {len_path[3][0][node]}")
    3 - 0: 3
    3 - 1: 2
    3 - 2: 1
    3 - 3: 0
    3 - 4: 1
    >>> len_path[3][1][1]
    [3, 2, 1]
    >>> for n, (dist, path) in nx.all_pairs_dijkstra(G):
    ...     print(path[1])
    [0, 1]
    [1]
    [2, 1]
    [3, 2, 1]
    [4, 3, 2, 1]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  The yielded dicts only contain keys for
    reachable nodes.
    """
    # Each single-source call already returns the (dist, path) pair.
    for node in G:
        yield node, single_source_dijkstra(G, node, cutoff=cutoff, weight=weight)
def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
    """Compute shortest path lengths between all node pairs of a
    weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    distance : iterator
        Yields ``(source, length_dict)`` pairs where ``length_dict`` is
        keyed by target with shortest path length as the value.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.all_pairs_dijkstra_path_length(G))
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"1 - {node}: {length[1][node]}")
    1 - 0: 1
    1 - 1: 0
    1 - 2: 1
    1 - 3: 2
    1 - 4: 3
    >>> length[3][2]
    1
    >>> length[2][2]
    0

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.  The yielded dicts only contain keys for
    reachable node pairs.
    """
    for node in G:
        yield node, single_source_dijkstra_path_length(
            G, node, cutoff=cutoff, weight=weight
        )
def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"):
    """Compute shortest paths between all node pairs of a weighted
    graph.

    Parameters
    ----------
    G : NetworkX graph

    cutoff : integer or float, optional
        Abandon any path whose summed edge weight exceeds this value.

    weight : string or function
        If a string, edge weights are read from the edge attribute with
        this key (``G.edges[u, v][weight]``), defaulting to 1 when the
        attribute is absent.  If a function, it is called with the two
        edge endpoints and the edge-attribute dict and must return a
        number, or None to make the edge invisible to the search.

    Returns
    -------
    distance : dictionary
        Yields ``(source, path_dict)`` pairs where ``path_dict`` is
        keyed by target with a shortest path as the value.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = dict(nx.all_pairs_dijkstra_path(G))
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.

    See Also
    --------
    floyd_warshall, all_pairs_bellman_ford_path
    """
    # NOTE: the per-source searches are independent, so this loop is
    # trivially parallelizable.
    for node in G:
        yield node, single_source_dijkstra_path(G, node, cutoff=cutoff, weight=weight)
def bellman_ford_predecessor_and_distance(
    G, source, target=None, weight="weight", heuristic=False
):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.

    The algorithm has a running time of $O(mn)$ where $n$ is the number of
    nodes and $m$ is the number of edges.  It is slower than Dijkstra but
    can handle negative edge weights.

    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.

    source: node label
        Starting node for path

    target : node label, optional
        Ending node for path

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.

        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a hopefully negligible cost.

    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle.  Note: any negative weight edge in an
        undirected graph is a negative cost cycle.

    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> pred, dist = nx.bellman_ford_predecessor_and_distance(G, 0, 1)
    >>> sorted(pred.items())
    [(0, []), (1, [0]), (2, [1]), (3, [2]), (4, [3])]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]

    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> G[1][2]["weight"] = -7
    >>> nx.bellman_ford_predecessor_and_distance(G, 0)
    Traceback (most recent call last):
        ...
    networkx.exception.NetworkXUnbounded: Negative cost cycle detected.

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    The dictionaries returned only have keys for nodes reachable from
    the source.

    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.

    In NetworkX v2.1 and prior, the source node had predecessor `[None]`.
    In NetworkX v2.2 this changed to the source node having predecessor `[]`
    """
    if source not in G:
        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
    # Normalize the weight argument (attribute key or callable) into a
    # callable exactly once.  (Previously this conversion was performed
    # a second time further down, which was redundant.)
    weight = _weight_function(G, weight)
    # A negative self-loop is the smallest possible negative cycle, and
    # the relaxation loop below would not otherwise detect it.
    if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    dist = {source: 0}
    pred = {source: []}

    if len(G) == 1:
        # Single-node graph: nothing to relax.
        return pred, dist

    dist = _bellman_ford(
        G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
    )
    return (pred, dist)
def _bellman_ford(
    G, source, weight, pred=None, paths=None, dist=None, target=None, heuristic=True
):
    """Relaxation loop for Bellman–Ford algorithm.

    This is an implementation of the SPFA variant.
    See https://en.wikipedia.org/wiki/Shortest_Path_Faster_Algorithm

    Parameters
    ----------
    G : NetworkX graph

    source: list
        List of source nodes. The shortest path from any of the source
        nodes will be found if multiple sources are provided.

    weight : function
        The weight of an edge is the value returned by the function. The
        function must accept exactly three positional arguments: the two
        endpoints of an edge and the dictionary of edge attributes for
        that edge. The function must return a number.

    pred: dict of lists, optional (default=None)
        dict to store a list of predecessors keyed by that node
        If None, predecessors are not stored

    paths: dict, optional (default=None)
        dict to store the path list from source to each node, keyed by node
        If None, paths are not stored

    dist: dict, optional (default=None)
        dict to store distance from source to the keyed node
        If None, returned dist dict contents default to 0 for every node in the
        source list

    target: node label, optional
        Ending node for path. Path lengths to other destinations may (and
        probably will) be incorrect.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a hopefully negligible cost.

    Returns
    -------
    Returns a dict keyed by node to the distance from the source.
    Dicts for paths and pred are in the mutated input dicts by those names.

    Raises
    ------
    NodeNotFound
        If any of `source` is not in `G`.

    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle. Note: any negative weight edge in an
        undirected graph is a negative cost cycle
    """
    for s in source:
        if s not in G:
            raise nx.NodeNotFound(f"Source {s} not in G")

    # Sources start with no predecessors and distance 0 unless the
    # caller supplied pre-seeded dicts.
    if pred is None:
        pred = {v: [] for v in source}

    if dist is None:
        dist = {v: 0 for v in source}

    # Heuristic Storage setup. Note: use None because nodes cannot be None
    # pred_edge[v] is the single node whose relaxation last lowered v;
    # recent_update[v] is the most recently relaxed edge on v's update
    # path.  Together they let the heuristic spot a node reappearing on
    # its own update path, which implies a negative cycle.
    nonexistent_edge = (None, None)
    pred_edge = {v: None for v in source}
    recent_update = {v: nonexistent_edge for v in source}

    G_succ = G.succ if G.is_directed() else G.adj
    inf = float("inf")
    n = len(G)

    # count[v] tracks how many times v has been (re-)queued; a simple
    # path has at most n-1 edges, so the n-th requeue proves a negative
    # cycle even when the heuristic is disabled.
    count = {}
    q = deque(source)
    in_q = set(source)
    while q:
        u = q.popleft()
        in_q.remove(u)

        # Skip relaxations if any of the predecessors of u is in the queue.
        # (u's own distance is still going to improve, so relaxing its
        # out-edges now would be wasted work.)
        if all(pred_u not in in_q for pred_u in pred[u]):
            dist_u = dist[u]
            for v, e in G_succ[u].items():
                dist_v = dist_u + weight(u, v, e)

                if dist_v < dist.get(v, inf):
                    # In this conditional branch we are updating the path with v.
                    # If it happens that some earlier update also added node v
                    # that implies the existence of a negative cycle since
                    # after the update node v would lie on the update path twice.
                    # The update path is stored up to one of the source nodes,
                    # therefore u is always in the dict recent_update
                    if heuristic:
                        if v in recent_update[u]:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")

                        # Transfer the recent update info from u to v if the
                        # same source node is the head of the update path.
                        # If the source node is responsible for the cost update,
                        # then clear the history and use it instead.
                        if v in pred_edge and pred_edge[v] == u:
                            recent_update[v] = recent_update[u]
                        else:
                            recent_update[v] = (u, v)

                    if v not in in_q:
                        q.append(v)
                        in_q.add(v)
                        count_v = count.get(v, 0) + 1
                        if count_v == n:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")

                        count[v] = count_v
                    dist[v] = dist_v
                    pred[v] = [u]
                    pred_edge[v] = u

                elif dist.get(v) is not None and dist_v == dist.get(v):
                    # Equal-length alternative: record u as another
                    # predecessor of v.
                    pred[v].append(u)

    if paths is not None:
        # Materialize one shortest path per destination (or just the
        # target) from the predecessor lists.
        sources = set(source)
        dsts = [target] if target is not None else pred
        for dst in dsts:
            gen = _build_paths_from_predecessors(sources, dst, pred)
            paths[dst] = next(gen)

    return dist
def bellman_ford_path(G, source, target, weight="weight"):
    """Return a shortest path from ``source`` to ``target`` in the
    weighted graph ``G``, computed with the Bellman-Ford algorithm.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Node where the path begins.

    target : node
        Node where the path ends.

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    path : list
        Nodes of a shortest path, in order from source to target.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.
    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> print(nx.bellman_ford_path(G, 0, 4))
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.

    See Also
    --------
    dijkstra_path, bellman_ford_path_length
    """
    # Only the path (second element of the pair) is wanted here.
    return single_source_bellman_ford(G, source, target=target, weight=weight)[1]
def bellman_ford_path_length(G, source, target, weight="weight"):
    """Return the shortest path length from ``source`` to ``target`` in
    a weighted graph, computed with the Bellman-Ford algorithm.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Node where the path begins.

    target : node label
        Node where the path ends.

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    length : number
        Shortest path length.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.
    NetworkXNoPath
        If no path exists between source and target.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> print(nx.bellman_ford_path_length(G, 0, 4))
    4

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.

    See Also
    --------
    dijkstra_path_length, bellman_ford_path
    """
    if source == target:
        # A node is at distance zero from itself.
        return 0

    weight_fn = _weight_function(G, weight)
    dists = _bellman_ford(G, [source], weight_fn, target=target)
    try:
        return dists[target]
    except KeyError as e:
        raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from e
def single_source_bellman_ford_path(G, source, weight="weight"):
    """Compute a shortest path from ``source`` to every other reachable
    node in a weighted graph, using the Bellman-Ford algorithm.

    Parameters
    ----------
    G : NetworkX graph

    source : node
        Node where every returned path begins.

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight

    Returns
    -------
    paths : dictionary
        Dict keyed by target node to a shortest path reaching it.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = nx.single_source_bellman_ford_path(G, 0)
    >>> path[4]
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weights must be numeric; a distance is the sum of the weights
    of the traversed edges.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford
    """
    # Only the path dict (second element of the pair) is wanted here.
    return single_source_bellman_ford(G, source, weight=weight)[1]
def single_source_bellman_ford_path_length(G, source, weight="weight"):
    """Compute the shortest path length between source and all other
    reachable nodes for a weighted graph.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for path

    weight: string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    length : dict
        Dict keyed by node to the shortest path length from `source`.
        (The previous docstring described this as an iterator of
        ``(target, length)`` pairs, but the function returns the
        distance dict produced by the relaxation loop.)

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.single_source_bellman_ford_path_length(G, 0))
    >>> length[4]
    4
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"{node}: {length[node]}")
    0: 0
    1: 1
    2: 2
    3: 3
    4: 4

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra, single_source_bellman_ford
    """
    # Normalize the weight argument into a callable, then run the
    # relaxation loop from the single source.
    weight = _weight_function(G, weight)
    return _bellman_ford(G, [source], weight)
def single_source_bellman_ford(G, source, target=None, weight="weight"):
    """Compute shortest paths and lengths in a weighted graph G.

    Uses Bellman-Ford algorithm for shortest paths.

    Parameters
    ----------
    G : NetworkX graph

    source : node label
        Starting node for path

    target : node label, optional
        Ending node for path

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    Returns
    -------
    distance, path : pair of dictionaries, or numeric and list
        If target is None, returns a tuple of two dictionaries keyed by node.
        The first dictionary stores distance from one of the source nodes.
        The second stores the path from one of the sources to that node.
        If target is not None, returns a tuple of (distance, path) where
        distance is the distance from source to target and path is a list
        representing the path from source to target.

    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.

    NetworkXNoPath
        If `target` is given but not reachable from `source`.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length, path = nx.single_source_bellman_ford(G, 0)
    >>> print(length[4])
    4
    >>> path[4]
    [0, 1, 2, 3, 4]
    >>> length, path = nx.single_source_bellman_ford(G, 0, 1)
    >>> length
    1
    >>> path
    [0, 1]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    single_source_dijkstra
    single_source_bellman_ford_path
    single_source_bellman_ford_path_length
    """
    if source == target:
        # BUGFIX: validate membership even on the trivial fast path, so a
        # missing source raises NodeNotFound instead of silently returning
        # a zero-length path through a node the graph does not contain.
        if source not in G:
            raise nx.NodeNotFound(f"Node {source} not found in graph")
        return (0, [source])

    weight = _weight_function(G, weight)
    paths = {source: [source]}  # dictionary of paths, filled in place
    dist = _bellman_ford(G, [source], weight, paths=paths, target=target)
    if target is None:
        # Full single-source result: distances and paths for every
        # reachable node.
        return (dist, paths)
    try:
        return (dist[target], paths[target])
    except KeyError as e:
        # target exists but was never relaxed -> unreachable from source.
        msg = f"Node {target} not reachable from {source}"
        raise nx.NetworkXNoPath(msg) from e
def all_pairs_bellman_ford_path_length(G, weight="weight"):
    """Compute shortest path lengths between all node pairs via Bellman-Ford.

    Parameters
    ----------
    G : NetworkX graph

    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    distance : iterator
        (source, dictionary) iterator with dictionary keyed by target and
        shortest path length as the key value.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length = dict(nx.all_pairs_bellman_ford_path_length(G))
    >>> for node in [0, 1, 2, 3, 4]:
    ...     print(f"1 - {node}: {length[1][node]}")
    1 - 0: 1
    1 - 1: 0
    1 - 2: 1
    1 - 3: 2
    1 - 4: 3
    >>> length[3][2]
    1
    >>> length[2][2]
    0

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionary returned only has keys for reachable node pairs.
    """
    # One independent single-source run per node, yielded lazily.
    for node in G:
        lengths = single_source_bellman_ford_path_length(G, node, weight=weight)
        yield (node, dict(lengths))
def all_pairs_bellman_ford_path(G, weight="weight"):
    """Compute shortest paths between all node pairs via Bellman-Ford.

    Parameters
    ----------
    G : NetworkX graph

    weight : string, optional (default='weight')
        Edge data key corresponding to the edge weight.

    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest paths.

    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> path = dict(nx.all_pairs_bellman_ford_path(G))
    >>> print(path[0][4])
    [0, 1, 2, 3, 4]

    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.

    See Also
    --------
    floyd_warshall, all_pairs_dijkstra_path
    """
    # Each source is handled independently, so this loop is trivially
    # parallelizable if it ever becomes a bottleneck.
    for node in G:
        yield (node, single_source_bellman_ford_path(G, node, weight=weight))
def goldberg_radzik(G, source, weight="weight"):
    """Compute shortest path lengths and predecessors on shortest paths
    in weighted graphs.
    The algorithm has a running time of $O(mn)$ where $n$ is the number of
    nodes and $m$ is the number of edges. It is slower than Dijkstra but
    can handle negative edge weights.
    Parameters
    ----------
    G : NetworkX graph
        The algorithm works for all types of graphs, including directed
        graphs and multigraphs.
    source: node label
        Starting node for path
    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.
    Returns
    -------
    pred, dist : dictionaries
        Returns two dictionaries keyed by node to predecessor in the
        path and to the distance from the source respectively.
    Raises
    ------
    NodeNotFound
        If `source` is not in `G`.
    NetworkXUnbounded
        If the (di)graph contains a negative cost (di)cycle, the
        algorithm raises an exception to indicate the presence of the
        negative cost (di)cycle. Note: any negative weight edge in an
        undirected graph is a negative cost cycle.
    Examples
    --------
    >>> G = nx.path_graph(5, create_using=nx.DiGraph())
    >>> pred, dist = nx.goldberg_radzik(G, 0)
    >>> sorted(pred.items())
    [(0, None), (1, 0), (2, 1), (3, 2), (4, 3)]
    >>> sorted(dist.items())
    [(0, 0), (1, 1), (2, 2), (3, 3), (4, 4)]
    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> G[1][2]["weight"] = -7
    >>> nx.goldberg_radzik(G, 0)
    Traceback (most recent call last):
    ...
    networkx.exception.NetworkXUnbounded: Negative cost cycle detected.
    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    The dictionaries returned only have keys for nodes reachable from
    the source.
    In the case where the (di)graph is not connected, if a component
    not containing the source contains a negative cost (di)cycle, it
    will not be detected.
    """
    if source not in G:
        raise nx.NodeNotFound(f"Node {source} is not found in the graph")
    weight = _weight_function(G, weight)
    # A negative self-loop is by itself a negative cycle; reject up front.
    if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
    # Trivial graph: the source is the only node.
    if len(G) == 1:
        return {source: None}, {source: 0}
    if G.is_directed():
        G_succ = G.succ
    else:
        G_succ = G.adj
    inf = float("inf")
    # d: tentative distances; pred: shortest-path tree (None marks the root).
    d = {u: inf for u in G}
    d[source] = 0
    pred = {source: None}
    def topo_sort(relabeled):
        """Topologically sort nodes relabeled in the previous round and detect
        negative cycles.
        """
        # List of nodes to scan in this round. Denoted by A in Goldberg and
        # Radzik's paper.
        to_scan = []
        # In the DFS in the loop below, neg_count records for each node the
        # number of edges of negative reduced costs on the path from a DFS root
        # to the node in the DFS forest. The reduced cost of an edge (u, v) is
        # defined as d[u] + weight[u][v] - d[v].
        #
        # neg_count also doubles as the DFS visit marker array.
        neg_count = {}
        for u in relabeled:
            # Skip visited nodes.
            if u in neg_count:
                continue
            d_u = d[u]
            # Skip nodes without out-edges of negative reduced costs.
            if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
                continue
            # Nonrecursive DFS that inserts nodes reachable from u via edges of
            # nonpositive reduced costs into to_scan in (reverse) topological
            # order.
            stack = [(u, iter(G_succ[u].items()))]
            in_stack = {u}
            neg_count[u] = 0
            while stack:
                u, it = stack[-1]
                try:
                    v, e = next(it)
                except StopIteration:
                    # All of u's edges consumed: u is finished; emit it
                    # (post-order, reversed below into topological order).
                    to_scan.append(u)
                    stack.pop()
                    in_stack.remove(u)
                    continue
                t = d[u] + weight(u, v, e)
                d_v = d[v]
                if t <= d_v:
                    # Nonpositive reduced cost: tighten d[v] and descend.
                    is_neg = t < d_v
                    d[v] = t
                    pred[v] = u
                    if v not in neg_count:
                        neg_count[v] = neg_count[u] + int(is_neg)
                        stack.append((v, iter(G_succ[v].items())))
                        in_stack.add(v)
                    elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
                        # (u, v) is a back edge, and the cycle formed by the
                        # path v to u and (u, v) contains at least one edge of
                        # negative reduced cost. The cycle must be of negative
                        # cost.
                        raise nx.NetworkXUnbounded("Negative cost cycle detected.")
        to_scan.reverse()
        return to_scan
    def relax(to_scan):
        """Relax out-edges of relabeled nodes."""
        relabeled = set()
        # Scan nodes in to_scan in topological order and relax incident
        # out-edges. Add the relabled nodes to labeled.
        for u in to_scan:
            d_u = d[u]
            for v, e in G_succ[u].items():
                w_e = weight(u, v, e)
                if d_u + w_e < d[v]:
                    d[v] = d_u + w_e
                    pred[v] = u
                    relabeled.add(v)
        return relabeled
    # Set of nodes relabled in the last round of scan operations. Denoted by B
    # in Goldberg and Radzik's paper.
    relabeled = {source}
    # Alternate topo-sort and relax passes until a pass improves nothing.
    while relabeled:
        to_scan = topo_sort(relabeled)
        relabeled = relax(to_scan)
    # Keep distances only for nodes actually reached (keys of pred).
    d = {u: d[u] for u in pred}
    return pred, d
def negative_edge_cycle(G, weight="weight", heuristic=True):
    """Returns True if there exists a negative edge cycle anywhere in G.

    Parameters
    ----------
    G : NetworkX graph

    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.

    heuristic : bool
        Determines whether to use a heuristic to early detect negative
        cycles at a negligible cost. In case of graphs with a negative cycle,
        the performance of detection increases by at least an order of magnitude.

    Returns
    -------
    negative_cycle : bool
        True if a negative edge cycle exists, otherwise False.

    Examples
    --------
    >>> G = nx.cycle_graph(5, create_using=nx.DiGraph())
    >>> print(nx.negative_edge_cycle(G))
    False
    >>> G[1][2]["weight"] = -7
    >>> print(nx.negative_edge_cycle(G))
    True

    Notes
    -----
    Edge weight attributes must be numerical. The graph is temporarily
    augmented with an auxiliary node connected to every existing node so
    that a single Bellman-Ford run can reach every component; the
    auxiliary node is removed again before returning.
    """
    # Pick a temporary label guaranteed not to collide with existing nodes.
    probe = -1
    while probe in G:
        probe -= 1
    # Materialize the edge list BEFORE adding: add_edges_from inserts the
    # probe node into G, which would break iteration over G's nodes.
    G.add_edges_from([(probe, n) for n in G])
    try:
        bellman_ford_predecessor_and_distance(
            G, probe, weight=weight, heuristic=heuristic
        )
    except nx.NetworkXUnbounded:
        # Bellman-Ford diverges exactly when a negative cycle is present.
        return True
    finally:
        # Always restore the caller's graph, whatever the outcome.
        G.remove_node(probe)
    return False
def bidirectional_dijkstra(G, source, target, weight="weight"):
    r"""Dijkstra's algorithm for shortest paths using bidirectional search.
    Parameters
    ----------
    G : NetworkX graph
    source : node
        Starting node.
    target : node
        Ending node.
    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.
    Returns
    -------
    length, path : number and list
        length is the distance from source to target.
        path is a list of nodes on a path from source to target.
    Raises
    ------
    NodeNotFound
        If either `source` or `target` is not in `G`.
    NetworkXNoPath
        If no path exists between source and target.
    Examples
    --------
    >>> G = nx.path_graph(5)
    >>> length, path = nx.bidirectional_dijkstra(G, 0, 4)
    >>> print(length)
    4
    >>> print(path)
    [0, 1, 2, 3, 4]
    Notes
    -----
    Edge weight attributes must be numerical.
    Distances are calculated as sums of weighted edges traversed.
    In practice bidirectional Dijkstra is much more than twice as fast as
    ordinary Dijkstra.
    Ordinary Dijkstra expands nodes in a sphere-like manner from the
    source. The radius of this sphere will eventually be the length
    of the shortest path. Bidirectional Dijkstra will expand nodes
    from both the source and the target, making two spheres of half
    this radius. Volume of the first sphere is `\pi*r*r` while the
    others are `2*\pi*r/2*r/2`, making up half the volume.
    This algorithm is not guaranteed to work if edge weights
    are negative or are floating point numbers
    (overflows and roundoff errors can cause problems).
    See Also
    --------
    shortest_path
    shortest_path_length
    """
    if source not in G or target not in G:
        msg = f"Either source {source} or target {target} is not in G"
        raise nx.NodeNotFound(msg)
    if source == target:
        return (0, [source])
    weight = _weight_function(G, weight)
    push = heappush
    pop = heappop
    # Init:  [Forward, Backward]
    dists = [{}, {}]  # dictionary of final distances
    paths = [{source: [source]}, {target: [target]}]  # dictionary of paths
    fringe = [[], []]  # heap of (distance, node) for choosing node to expand
    seen = [{source: 0}, {target: 0}]  # dict of distances to seen nodes
    # Monotone counter breaks ties between equal-distance heap entries.
    c = count()
    # initialize fringe heap
    push(fringe[0], (0, next(c), source))
    push(fringe[1], (0, next(c), target))
    # neighs for extracting correct neighbor information
    if G.is_directed():
        neighs = [G._succ, G._pred]
    else:
        neighs = [G._adj, G._adj]
    # variables to hold shortest discovered path
    # finaldist = 1e30000
    finalpath = []
    # `dir` (shadows the builtin) selects the active search direction.
    dir = 1
    while fringe[0] and fringe[1]:
        # choose direction
        # dir == 0 is forward direction and dir == 1 is back
        dir = 1 - dir
        # extract closest to expand
        (dist, _, v) = pop(fringe[dir])
        if v in dists[dir]:
            # Shortest path to v has already been found
            continue
        # update distance
        dists[dir][v] = dist  # equal to seen[dir][v]
        if v in dists[1 - dir]:
            # if we have scanned v in both directions we are done
            # we have now discovered the shortest path
            # NOTE(review): finaldist/finalpath appear to be guaranteed
            # assigned here, because v being finalized in both directions
            # implies v entered seen[0] and seen[1], which triggered the
            # meet-update below -- confirm before restructuring.
            return (finaldist, finalpath)
        for w, d in neighs[dir][v].items():
            if dir == 0:  # forward
                vwLength = dists[dir][v] + weight(v, w, d)
            else:  # back, must remember to change v,w->w,v
                vwLength = dists[dir][v] + weight(w, v, d)
            if w in dists[dir]:
                if vwLength < dists[dir][w]:
                    # Finalized distance beaten: impossible without
                    # negative weights, which this algorithm rejects.
                    raise ValueError("Contradictory paths found: negative weights?")
            elif w not in seen[dir] or vwLength < seen[dir][w]:
                # relaxing
                seen[dir][w] = vwLength
                push(fringe[dir], (vwLength, next(c), w))
                paths[dir][w] = paths[dir][v] + [w]
                if w in seen[0] and w in seen[1]:
                    # see if this path is better than the already
                    # discovered shortest path
                    totaldist = seen[0][w] + seen[1][w]
                    if finalpath == [] or finaldist > totaldist:
                        finaldist = totaldist
                        revpath = paths[1][w][:]
                        revpath.reverse()
                        # Splice forward and reversed backward halves,
                        # dropping the duplicated meeting node w.
                        finalpath = paths[0][w] + revpath[1:]
    raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def johnson(G, weight="weight"):
    r"""Uses Johnson's Algorithm to compute shortest paths.
    Johnson's Algorithm finds a shortest path between each pair of
    nodes in a weighted graph even if negative weights are present.
    Parameters
    ----------
    G : NetworkX graph
    weight : string or function
        If this is a string, then edge weights will be accessed via the
        edge attribute with this key (that is, the weight of the edge
        joining `u` to `v` will be ``G.edges[u, v][weight]``). If no
        such edge attribute exists, the weight of the edge is assumed to
        be one.
        If this is a function, the weight of an edge is the value
        returned by the function. The function must accept exactly three
        positional arguments: the two endpoints of an edge and the
        dictionary of edge attributes for that edge. The function must
        return a number.
    Returns
    -------
    distance : dictionary
        Dictionary, keyed by source and target, of shortest paths.
    Raises
    ------
    NetworkXError
        If given graph is not weighted.
    Examples
    --------
    >>> graph = nx.DiGraph()
    >>> graph.add_weighted_edges_from(
    ...     [("0", "3", 3), ("0", "1", -5), ("0", "2", 2), ("1", "2", 4), ("2", "3", 1)]
    ... )
    >>> paths = nx.johnson(graph, weight="weight")
    >>> paths["0"]["2"]
    ['0', '1', '2']
    Notes
    -----
    Johnson's algorithm is suitable even for graphs with negative weights. It
    works by using the Bellman–Ford algorithm to compute a transformation of
    the input graph that removes all negative weights, allowing Dijkstra's
    algorithm to be used on the transformed graph.
    The time complexity of this algorithm is $O(n^2 \log n + n m)$,
    where $n$ is the number of nodes and $m$ the number of edges in the
    graph. For dense graphs, this may be faster than the Floyd–Warshall
    algorithm.
    See Also
    --------
    floyd_warshall_predecessor_and_distance
    floyd_warshall_numpy
    all_pairs_shortest_path
    all_pairs_shortest_path_length
    all_pairs_dijkstra_path
    bellman_ford_predecessor_and_distance
    all_pairs_bellman_ford_path
    all_pairs_bellman_ford_path_length
    """
    if not nx.is_weighted(G, weight=weight):
        raise nx.NetworkXError("Graph is not weighted.")
    # Seeding every node with dist 0 is equivalent to running Bellman-Ford
    # from a virtual super-source with zero-weight edges to all nodes.
    dist = {v: 0 for v in G}
    pred = {v: [] for v in G}
    weight = _weight_function(G, weight)
    # Calculate distance of shortest paths
    dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist)
    # Update the weight function to take into account the Bellman--Ford
    # relaxation distances.
    def new_weight(u, v, d):
        # Reweighted cost; nonnegative for every edge once dist_bellman is
        # optimal (per the docstring's transformation), so Dijkstra applies.
        return weight(u, v, d) + dist_bellman[u] - dist_bellman[v]
    def dist_path(v):
        # Single-source Dijkstra on the reweighted graph; only the paths
        # are kept (shortest paths are preserved by the reweighting).
        paths = {v: [v]}
        _dijkstra(G, v, new_weight, paths=paths)
        return paths
    return {v: dist_path(v) for v in G}
| 32.018116 | 88 | 0.620445 |
from collections import deque
from heapq import heappush, heappop
from itertools import count
import networkx as nx
from networkx.algorithms.shortest_paths.generic import _build_paths_from_predecessors
__all__ = [
"dijkstra_path",
"dijkstra_path_length",
"bidirectional_dijkstra",
"single_source_dijkstra",
"single_source_dijkstra_path",
"single_source_dijkstra_path_length",
"multi_source_dijkstra",
"multi_source_dijkstra_path",
"multi_source_dijkstra_path_length",
"all_pairs_dijkstra",
"all_pairs_dijkstra_path",
"all_pairs_dijkstra_path_length",
"dijkstra_predecessor_and_distance",
"bellman_ford_path",
"bellman_ford_path_length",
"single_source_bellman_ford",
"single_source_bellman_ford_path",
"single_source_bellman_ford_path_length",
"all_pairs_bellman_ford_path",
"all_pairs_bellman_ford_path_length",
"bellman_ford_predecessor_and_distance",
"negative_edge_cycle",
"goldberg_radzik",
"johnson",
]
def _weight_function(G, weight):
    """Return a callable ``f(u, v, d)`` giving the weight of edge (u, v)
    with data dict ``d``.

    If *weight* is already callable it is returned unchanged; otherwise it
    is treated as an edge-attribute key, with missing attributes
    defaulting to 1.
    """
    if callable(weight):
        return weight
    # Multigraph edge data maps edge-key -> attribute dict; use the
    # cheapest of the parallel edges.
    if G.is_multigraph():
        return lambda u, v, d: min(attr.get(weight, 1) for attr in d.values())
    return lambda u, v, data: data.get(weight, 1)
def dijkstra_path(G, source, target, weight="weight"):
(length, path) = single_source_dijkstra(G, source, target=target, weight=weight)
return path
def dijkstra_path_length(G, source, target, weight="weight"):
if source == target:
return 0
weight = _weight_function(G, weight)
length = _dijkstra(G, source, weight, target=target)
try:
return length[target]
except KeyError as e:
raise nx.NetworkXNoPath(f"Node {target} not reachable from {source}") from e
def single_source_dijkstra_path(G, source, cutoff=None, weight="weight"):
return multi_source_dijkstra_path(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra_path_length(G, source, cutoff=None, weight="weight"):
return multi_source_dijkstra_path_length(G, {source}, cutoff=cutoff, weight=weight)
def single_source_dijkstra(G, source, target=None, cutoff=None, weight="weight"):
return multi_source_dijkstra(
G, {source}, cutoff=cutoff, target=target, weight=weight
)
def multi_source_dijkstra_path(G, sources, cutoff=None, weight="weight"):
length, path = multi_source_dijkstra(G, sources, cutoff=cutoff, weight=weight)
return path
def multi_source_dijkstra_path_length(G, sources, cutoff=None, weight="weight"):
if not sources:
raise ValueError("sources must not be empty")
weight = _weight_function(G, weight)
return _dijkstra_multisource(G, sources, weight, cutoff=cutoff)
def multi_source_dijkstra(G, sources, target=None, cutoff=None, weight="weight"):
if not sources:
raise ValueError("sources must not be empty")
if target in sources:
return (0, [target])
weight = _weight_function(G, weight)
paths = {source: [source] for source in sources}
dist = _dijkstra_multisource(
G, sources, weight, paths=paths, cutoff=cutoff, target=target
)
if target is None:
return (dist, paths)
try:
return (dist[target], paths[target])
except KeyError as e:
raise nx.NetworkXNoPath(f"No path to {target}.") from e
def _dijkstra(G, source, weight, pred=None, paths=None, cutoff=None, target=None):
return _dijkstra_multisource(
G, [source], weight, pred=pred, paths=paths, cutoff=cutoff, target=target
)
def _dijkstra_multisource(
    G, sources, weight, pred=None, paths=None, cutoff=None, target=None
):
    """Dijkstra core shared by all public Dijkstra entry points.

    Returns the dict of final distances from the nearest source. When
    supplied, ``pred`` (predecessor lists) and ``paths`` (node -> path
    list) are filled in place. Stops early once ``target`` is finalized;
    ``cutoff`` prunes relaxations whose tentative distance exceeds it.
    Raises NodeNotFound for a missing source.
    """
    # Out-edges for digraphs; all incident edges otherwise.
    G_succ = G._succ if G.is_directed() else G._adj
    push = heappush
    pop = heappop
    dist = {}  # finalized distances
    seen = {}  # best tentative distance for nodes not yet finalized
    # Counter tie-breaker keeps heap tuples comparable on equal distances.
    c = count()
    fringe = []
    for source in sources:
        if source not in G:
            raise nx.NodeNotFound(f"Source {source} not in G")
        seen[source] = 0
        push(fringe, (0, next(c), source))
    while fringe:
        (d, _, v) = pop(fringe)
        if v in dist:
            continue  # stale heap entry; v already finalized
        dist[v] = d
        if v == target:
            break  # early exit: target's distance is now final
        for u, e in G_succ[v].items():
            cost = weight(v, u, e)
            if cost is None:
                # A None weight hides the edge entirely.
                continue
            vu_dist = dist[v] + cost
            if cutoff is not None:
                if vu_dist > cutoff:
                    continue
            if u in dist:
                u_dist = dist[u]
                if vu_dist < u_dist:
                    # Only possible with negative weights, unsupported here.
                    raise ValueError("Contradictory paths found:", "negative weights?")
                elif pred is not None and vu_dist == u_dist:
                    pred[u].append(v)  # record equal-cost predecessor
            elif u not in seen or vu_dist < seen[u]:
                # Strictly better tentative distance: (re)queue u.
                seen[u] = vu_dist
                push(fringe, (vu_dist, next(c), u))
                if paths is not None:
                    paths[u] = paths[v] + [u]
                if pred is not None:
                    pred[u] = [v]
            elif vu_dist == seen[u]:
                if pred is not None:
                    pred[u].append(v)
    return dist
def dijkstra_predecessor_and_distance(G, source, cutoff=None, weight="weight"):
weight = _weight_function(G, weight)
pred = {source: []}
return (pred, _dijkstra(G, source, weight, pred=pred, cutoff=cutoff))
def all_pairs_dijkstra(G, cutoff=None, weight="weight"):
for n in G:
dist, path = single_source_dijkstra(G, n, cutoff=cutoff, weight=weight)
yield (n, (dist, path))
def all_pairs_dijkstra_path_length(G, cutoff=None, weight="weight"):
length = single_source_dijkstra_path_length
for n in G:
yield (n, length(G, n, cutoff=cutoff, weight=weight))
def all_pairs_dijkstra_path(G, cutoff=None, weight="weight"):
path = single_source_dijkstra_path
for n in G:
yield (n, path(G, n, cutoff=cutoff, weight=weight))
def bellman_ford_predecessor_and_distance(
G, source, target=None, weight="weight", heuristic=False
):
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
dist = {source: 0}
pred = {source: []}
if len(G) == 1:
return pred, dist
weight = _weight_function(G, weight)
dist = _bellman_ford(
G, [source], weight, pred=pred, dist=dist, target=target, heuristic=heuristic
)
return (pred, dist)
def _bellman_ford(
    G, source, weight, pred=None, paths=None, dist=None, target=None, heuristic=True
):
    """Queue-based (SPFA-style) Bellman-Ford core shared by the public
    Bellman-Ford entry points.

    Relaxes edges from a FIFO queue of candidate nodes, mutating ``pred``
    (predecessor lists), ``dist`` and ``paths`` in place when supplied,
    and returns the distance dict. Raises NodeNotFound for a missing
    source and NetworkXUnbounded on a negative cycle. With ``heuristic``
    enabled, negative cycles are detected early by tracking the most
    recently relaxed edge per node.
    """
    for s in source:
        if s not in G:
            raise nx.NodeNotFound(f"Source {s} not in G")
    if pred is None:
        pred = {v: [] for v in source}
    if dist is None:
        dist = {v: 0 for v in source}
    # Heuristic state: last edge that improved each node's distance.
    nonexistent_edge = (None, None)
    pred_edge = {v: None for v in source}
    recent_update = {v: nonexistent_edge for v in source}
    G_succ = G.succ if G.is_directed() else G.adj
    inf = float("inf")
    n = len(G)
    # count[v]: number of times v was enqueued; n enqueues imply a cycle.
    count = {}
    q = deque(source)
    in_q = set(source)
    while q:
        u = q.popleft()
        in_q.remove(u)
        # Skip u if any predecessor is still queued: u's distance will be
        # improved again before it is worth relaxing u's out-edges.
        if all(pred_u not in in_q for pred_u in pred[u]):
            dist_u = dist[u]
            for v, e in G_succ[u].items():
                dist_v = dist_u + weight(u, v, e)
                if dist_v < dist.get(v, inf):
                    if heuristic:
                        # Negative cycle: v lies on the chain of updates
                        # that produced its own improvement.
                        if v in recent_update[u]:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
                        if v in pred_edge and pred_edge[v] == u:
                            recent_update[v] = recent_update[u]
                        else:
                            recent_update[v] = (u, v)
                    if v not in in_q:
                        q.append(v)
                        in_q.add(v)
                        count_v = count.get(v, 0) + 1
                        if count_v == n:
                            raise nx.NetworkXUnbounded("Negative cost cycle detected.")
                        count[v] = count_v
                    dist[v] = dist_v
                    pred[v] = [u]
                    pred_edge[v] = u
                elif dist.get(v) is not None and dist_v == dist.get(v):
                    # Equal-cost alternative predecessor.
                    pred[v].append(u)
    if paths is not None:
        # Materialize one concrete path per destination from the
        # predecessor lists.
        sources = set(source)
        dsts = [target] if target is not None else pred
        for dst in dsts:
            gen = _build_paths_from_predecessors(sources, dst, pred)
            paths[dst] = next(gen)
    return dist
def bellman_ford_path(G, source, target, weight="weight"):
length, path = single_source_bellman_ford(G, source, target=target, weight=weight)
return path
def bellman_ford_path_length(G, source, target, weight="weight"):
if source == target:
return 0
weight = _weight_function(G, weight)
length = _bellman_ford(G, [source], weight, target=target)
try:
return length[target]
except KeyError as e:
raise nx.NetworkXNoPath(f"node {target} not reachable from {source}") from e
def single_source_bellman_ford_path(G, source, weight="weight"):
(length, path) = single_source_bellman_ford(G, source, weight=weight)
return path
def single_source_bellman_ford_path_length(G, source, weight="weight"):
weight = _weight_function(G, weight)
return _bellman_ford(G, [source], weight)
def single_source_bellman_ford(G, source, target=None, weight="weight"):
if source == target:
return (0, [source])
weight = _weight_function(G, weight)
paths = {source: [source]}
dist = _bellman_ford(G, [source], weight, paths=paths, target=target)
if target is None:
return (dist, paths)
try:
return (dist[target], paths[target])
except KeyError as e:
msg = f"Node {target} not reachable from {source}"
raise nx.NetworkXNoPath(msg) from e
def all_pairs_bellman_ford_path_length(G, weight="weight"):
length = single_source_bellman_ford_path_length
for n in G:
yield (n, dict(length(G, n, weight=weight)))
def all_pairs_bellman_ford_path(G, weight="weight"):
path = single_source_bellman_ford_path
for n in G:
yield (n, path(G, n, weight=weight))
def goldberg_radzik(G, source, weight="weight"):
if source not in G:
raise nx.NodeNotFound(f"Node {source} is not found in the graph")
weight = _weight_function(G, weight)
if any(weight(u, v, d) < 0 for u, v, d in nx.selfloop_edges(G, data=True)):
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
if len(G) == 1:
return {source: None}, {source: 0}
if G.is_directed():
G_succ = G.succ
else:
G_succ = G.adj
inf = float("inf")
d = {u: inf for u in G}
d[source] = 0
pred = {source: None}
def topo_sort(relabeled):
to_scan = []
# In the DFS in the loop below, neg_count records for each node the
# number of edges of negative reduced costs on the path from a DFS root
# to the node in the DFS forest. The reduced cost of an edge (u, v) is
# defined as d[u] + weight[u][v] - d[v].
#
# neg_count also doubles as the DFS visit marker array.
neg_count = {}
for u in relabeled:
# Skip visited nodes.
if u in neg_count:
continue
d_u = d[u]
# Skip nodes without out-edges of negative reduced costs.
if all(d_u + weight(u, v, e) >= d[v] for v, e in G_succ[u].items()):
continue
# Nonrecursive DFS that inserts nodes reachable from u via edges of
# nonpositive reduced costs into to_scan in (reverse) topological
# order.
stack = [(u, iter(G_succ[u].items()))]
in_stack = {u}
neg_count[u] = 0
while stack:
u, it = stack[-1]
try:
v, e = next(it)
except StopIteration:
to_scan.append(u)
stack.pop()
in_stack.remove(u)
continue
t = d[u] + weight(u, v, e)
d_v = d[v]
if t <= d_v:
is_neg = t < d_v
d[v] = t
pred[v] = u
if v not in neg_count:
neg_count[v] = neg_count[u] + int(is_neg)
stack.append((v, iter(G_succ[v].items())))
in_stack.add(v)
elif v in in_stack and neg_count[u] + int(is_neg) > neg_count[v]:
# (u, v) is a back edge, and the cycle formed by the
# path v to u and (u, v) contains at least one edge of
# negative reduced cost. The cycle must be of negative
# cost.
raise nx.NetworkXUnbounded("Negative cost cycle detected.")
to_scan.reverse()
return to_scan
def relax(to_scan):
relabeled = set()
# Scan nodes in to_scan in topological order and relax incident
# out-edges. Add the relabled nodes to labeled.
for u in to_scan:
d_u = d[u]
for v, e in G_succ[u].items():
w_e = weight(u, v, e)
if d_u + w_e < d[v]:
d[v] = d_u + w_e
pred[v] = u
relabeled.add(v)
return relabeled
# Set of nodes relabled in the last round of scan operations. Denoted by B
# in Goldberg and Radzik's paper.
relabeled = {source}
while relabeled:
to_scan = topo_sort(relabeled)
relabeled = relax(to_scan)
d = {u: d[u] for u in pred}
return pred, d
def negative_edge_cycle(G, weight="weight", heuristic=True):
newnode = -1
while newnode in G:
newnode -= 1
G.add_edges_from([(newnode, n) for n in G])
try:
bellman_ford_predecessor_and_distance(
G, newnode, weight=weight, heuristic=heuristic
)
except nx.NetworkXUnbounded:
return True
finally:
G.remove_node(newnode)
return False
def bidirectional_dijkstra(G, source, target, weight="weight"):
if source not in G or target not in G:
msg = f"Either source {source} or target {target} is not in G"
raise nx.NodeNotFound(msg)
if source == target:
return (0, [source])
weight = _weight_function(G, weight)
push = heappush
pop = heappop
dists = [{}, {}]
paths = [{source: [source]}, {target: [target]}]
fringe = [[], []]
seen = [{source: 0}, {target: 0}]
c = count()
push(fringe[0], (0, next(c), source))
push(fringe[1], (0, next(c), target))
if G.is_directed():
neighs = [G._succ, G._pred]
else:
neighs = [G._adj, G._adj]
finalpath = []
dir = 1
while fringe[0] and fringe[1]:
dir = 1 - dir
(dist, _, v) = pop(fringe[dir])
if v in dists[dir]:
continue
dists[dir][v] = dist
if v in dists[1 - dir]:
return (finaldist, finalpath)
for w, d in neighs[dir][v].items():
if dir == 0:
vwLength = dists[dir][v] + weight(v, w, d)
else:
vwLength = dists[dir][v] + weight(w, v, d)
if w in dists[dir]:
if vwLength < dists[dir][w]:
raise ValueError("Contradictory paths found: negative weights?")
elif w not in seen[dir] or vwLength < seen[dir][w]:
seen[dir][w] = vwLength
push(fringe[dir], (vwLength, next(c), w))
paths[dir][w] = paths[dir][v] + [w]
if w in seen[0] and w in seen[1]:
totaldist = seen[0][w] + seen[1][w]
if finalpath == [] or finaldist > totaldist:
finaldist = totaldist
revpath = paths[1][w][:]
revpath.reverse()
finalpath = paths[0][w] + revpath[1:]
raise nx.NetworkXNoPath(f"No path between {source} and {target}.")
def johnson(G, weight="weight"):
if not nx.is_weighted(G, weight=weight):
raise nx.NetworkXError("Graph is not weighted.")
dist = {v: 0 for v in G}
pred = {v: [] for v in G}
weight = _weight_function(G, weight)
dist_bellman = _bellman_ford(G, list(G), weight, pred=pred, dist=dist)
def new_weight(u, v, d):
return weight(u, v, d) + dist_bellman[u] - dist_bellman[v]
def dist_path(v):
paths = {v: [v]}
_dijkstra(G, v, new_weight, paths=paths)
return paths
return {v: dist_path(v) for v in G}
| true | true |
f721a2657cff9163e52336d5c42f2f8b73f6cf7e | 383 | py | Python | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py | yypurpose/mmdetection | ec6bfd96eae0af047c623f3d1ec31b0b3f1f4a6c | [
"Apache-2.0"
] | null | null | null | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
# mmdetection config override: swap the base config's backbone for
# ResNeXt-101 (64x4d); all other settings come from the inherited base.
model = dict(
    # ImageNet-pretrained checkpoint from the open-mmlab model zoo.
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,  # cardinality (number of grouped-conv groups)
        base_width=4,  # channels per group -- the "4d" in 64x4d
        num_stages=4,
        # Expose all four stage outputs; presumably consumed by the FPN
        # neck defined in the base config -- verify there.
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + stage 1 during training
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))
| 27.357143 | 54 | 0.563969 | _base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://resnext101_64x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=64,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
style='pytorch'))
| true | true |
f721a43045ff008abbe0323da19119831a8f5c4e | 4,415 | py | Python | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | howtolens/simulators/chapter_4/mass_sie__source_sersic__2.py | rakaar/PyAutoLens | bc140c5d196c426092c1178b8abfa492c6fab859 | [
"MIT"
] | null | null | null | from os import path
import autolens as al
"""
This script simulates `Imaging` of a strong lens where:
- The lens `Galaxy`'s total mass distribution is a *SphericalIsothermal*.
- The source `Galaxy`'s `LightProfile` is a *SphericalExponential*.
This dataset is used in chapter 2, tutorials 1-3.
"""
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/lens_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/psf.fits`.
"""
dataset_type = "chapter_4"
dataset_name = "mass_sie__source_sersic__2"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/howtolens/dataset/chapter_2/mass_sis__source_exp/`
"""
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
"""
For simulating an image of a strong lens, we recommend using a GridIterate object. This represents a grid of $(y,x)$
coordinates like an ordinary Grid, but when the light-profile`s image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.GridIterate.uniform(
shape_2d=(150, 150),
pixel_scales=0.05,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
"""Simulate a simple Gaussian PSF for the image."""
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
"""
Setup the lens `Galaxy`'s mass (SIE+Shear) and source galaxy light (elliptical Sersic) for this simulated lens.
For lens modeling, defining ellipticity in terms of the `elliptical_comps` improves the model-fitting procedure.
However, for simulating a strong lens you may find it more intuitive to define the elliptical geometry using the
axis-ratio of the profile (axis_ratio = semi-major axis / semi-minor axis = b/a) and position angle phi, where phi is
in degrees and defined counter clockwise from the positive x-axis.
We can use the **PyAutoLens** `convert` module to determine the elliptical components from the axis-ratio and phi.
"""
lens_galaxy = al.Galaxy(
redshift=0.5,
mass=al.mp.EllipticalIsothermal(
centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
),
)
source_galaxy = al.Galaxy(
redshift=1.0,
bulge=al.lp.EllipticalSersic(
centre=(0.1, 0.1),
elliptical_comps=(0.1, 0.0),
intensity=0.2,
effective_radius=0.3,
sersic_index=1.0,
),
)
"""Use these galaxies to setup a tracer, which will generate the image for the simulated `Imaging` dataset."""
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
"""
We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""Output our simulated dataset to the dataset path as .fits files"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
| 39.070796 | 120 | 0.727973 | from os import path
import autolens as al
# Simulates `Imaging` of a strong lens: an EllipticalIsothermal lens mass and
# an EllipticalSersic source, used in chapter 4 of the HowToLens lectures.
# Output folder: dataset/howtolens/chapter_4/mass_sie__source_sersic__2/
dataset_type = "chapter_4"
dataset_name = "mass_sie__source_sersic__2"
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
# Iterative grid: sub-size is increased through sub_steps until the stated
# fractional accuracy is met, resolving the bright central source regions.
grid = al.GridIterate.uniform(
    shape_2d=(150, 150),
    pixel_scales=0.05,
    fractional_accuracy=0.9999,
    sub_steps=[2, 4, 8, 16, 24],
)
# Simple Gaussian PSF for the simulated image.
psf = al.Kernel.from_gaussian(
    shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
# Simulator defines exposure time, background sky and noise properties.
simulator = al.SimulatorImaging(
    exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
lens_galaxy = al.Galaxy(
    redshift=0.5,
    mass=al.mp.EllipticalIsothermal(
        centre=(0.0, 0.0), elliptical_comps=(0.1, 0.0), einstein_radius=1.6
    ),
)
source_galaxy = al.Galaxy(
    redshift=1.0,
    bulge=al.lp.EllipticalSersic(
        centre=(0.1, 0.1),
        elliptical_comps=(0.1, 0.0),
        intensity=0.2,
        effective_radius=0.3,
        sersic_index=1.0,
    ),
)
# Ray-trace the two-plane system and simulate it as an imaging dataset.
tracer = al.Tracer.from_galaxies(galaxies=[lens_galaxy, source_galaxy])
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
imaging.output_to_fits(
    image_path=path.join(dataset_path, "image.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    overwrite=True,
)
# Pickle the true tracer alongside the dataset for later inspection.
tracer.save(file_path=dataset_path, filename="true_tracer")
| true | true |
f721a452377d10ba2fe32cd315a6bdce392c234d | 594 | py | Python | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | hc/accounts/tests/test_team_access_middleware.py | andela/-healthchecks_spartans | 4dd6480fc178996c0e386548816ca8c74e4af50d | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.auth.models import User
from django.test import TestCase
from hc.accounts.models import Profile
class TeamAccessMiddlewareTestCase(TestCase):
    """Tests for middleware behavior around missing user profiles."""

    def test_it_handles_missing_profile(self):
        """A logged-in User with no Profile should still render pages."""
        user = User(username="ned", email="ned@example.org")
        user.set_password("password")
        user.save()
        self.client.login(username="ned@example.org", password="password")
        r = self.client.get("/about/")
        self.assertEqual(r.status_code, 200)
        # A Profile is expected to have been created for the user as a side
        # effect of the request (count goes from 0 to 1).
        self.assertEqual(Profile.objects.count(), 1)
| 31.263158 | 74 | 0.695286 | from django.contrib.auth.models import User
from django.test import TestCase
from hc.accounts.models import Profile
class TeamAccessMiddlewareTestCase(TestCase):
    """Tests for middleware behavior around missing user profiles."""

    def test_it_handles_missing_profile(self):
        """A logged-in User with no Profile should still render pages."""
        user = User(username="ned", email="ned@example.org")
        user.set_password("password")
        user.save()
        self.client.login(username="ned@example.org", password="password")
        r = self.client.get("/about/")
        self.assertEqual(r.status_code, 200)
| true | true |
f721a4751de1cbbd852750a103606b9e45275fbe | 2,081 | py | Python | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 35 | 2016-03-20T15:25:07.000Z | 2022-03-29T04:05:56.000Z | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 12 | 2016-03-24T13:38:52.000Z | 2021-04-06T07:11:19.000Z | pysptools/skl/docstring.py | ctherien/pysptools | fbcd3ecaa7ab27f0158b28b4327537c3e75db160 | [
"Apache-2.0"
] | 14 | 2016-03-21T17:26:46.000Z | 2022-01-18T08:39:27.000Z | #
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# docstring.py - This file is part of the PySptools package.
#
# Shared docstring templates, presumably attached to plot/display feature
# importance helpers elsewhere in the package — TODO confirm attachment site.
plot_fi_docstring = """
Plot the feature importances.
The output can be split in n graphs.
Parameters:
path: `string`
The path where to save the plot.
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
# Same contract as plot_fi_docstring, minus the `path` parameter.
display_fi_docstring = """
Display the feature importances.
The output can be split in n graphs.
Parameters:
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
| 32.515625 | 79 | 0.566555 |
# Docstring template for a feature-importance plotting helper.
plot_fi_docstring = """
Plot the feature importances.
The output can be split in n graphs.
Parameters:
path: `string`
The path where to save the plot.
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
# Docstring template for the display variant (no `path` parameter).
display_fi_docstring = """
Display the feature importances.
The output can be split in n graphs.
Parameters:
n_labels: `string or integer`
The number of labels to output by graph. If the value is 'all',
only one graph is generated.
height: `float [default 0.2]`
The bar height (in fact width).
sort: `boolean [default False]`
If true the feature importances are sorted.
suffix: `string [default None]`
Add a suffix to the file name.
"""
| true | true |
f721a4c6a5e4336c0f4cb7515b1636b493ef02d6 | 6,182 | py | Python | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | 1 | 2022-02-10T10:51:32.000Z | 2022-02-10T10:51:32.000Z | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | tools/pysa_integration_tests/utils.py | joehendrix/pyre-check | 23693455b1e0b4a7287efba9337be6bbfe23ada4 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import final, Sequence, Optional
LOG: logging.Logger = logging.getLogger(__name__)
@final
class PyreErrorException(Exception):
    """
    Raised when the JSON emitted by Pyre contains an `errors` entry,
    meaning the analysis itself failed rather than producing issues.
    """

    pass
def normalized_json_dump(
    results: str, salient_keys_only: bool, filter_issues: bool
) -> str:
    """
    Return a normalized, pretty-printed JSON string of Pysa issues.

    Raises PyreErrorException if the payload reports Pyre errors. With
    ``filter_issues`` set, only issues whose ``define`` embeds the tested
    rule code (``test_<code>_``) are kept. With ``salient_keys_only`` set,
    each issue is reduced to the position-invariant keys so results can be
    compared across line/column churn; a KeyError is raised if an issue is
    missing one of those keys.
    """
    issues = json.loads(results)
    if "errors" in issues:
        raise PyreErrorException(
            "Errors were found when processing results:\n"
            + json.dumps(issues, sort_keys=True, indent=2)
        )
    if filter_issues:
        # Keep only issues produced by the rule under test; new rules or
        # false positives must not break existing tests.
        issues = list(
            filter(
                lambda issue: f"test_{issue['code']}_" in issue["define"], issues
            )
        )
    issues.sort(
        key=lambda issue: (
            issue["code"],
            issue["path"],
            issue["line"],
            issue["column"],
        )
    )
    if salient_keys_only:
        salient_keys = {"code", "define", "description", "path"}
        reduced = []
        for issue in issues:
            subset = {
                key: issue[key] for key in issue if key in salient_keys
            }
            if set(subset.keys()) != salient_keys:
                raise KeyError(
                    f"Expected issue to contain {salient_keys} keys, "
                    + f"but instead found: {issue}"
                )
            reduced.append(subset)
        issues = reduced
    return json.dumps(issues, sort_keys=True, indent=2) + "\n"
def compare_results(
    actual_results: str,
    expected_results: str,
    current_directory: Path,
    filter_issues: bool,
) -> None:
    """
    Compare normalized actual vs expected Pysa results.

    On a match this only logs success. On a mismatch it writes
    `full_result.actual` and `position_invariant_result.actual` (plus a
    regenerated `position_invariant_result.json`) into `current_directory`
    for debugging, prints a unified diff, and terminates the process with
    exit status 1 via `friendly_exit`.
    """
    normalized_pysa_results = normalized_json_dump(
        actual_results, salient_keys_only=True, filter_issues=filter_issues
    )
    normalized_expected_results = normalized_json_dump(
        expected_results, salient_keys_only=True, filter_issues=filter_issues
    )
    if normalized_pysa_results != normalized_expected_results:
        # Full (position-sensitive) dump of what the run actually produced.
        actual_full_results_path = current_directory / "full_result.actual"
        actual_full_results_path.write_text(
            normalized_json_dump(
                actual_results, salient_keys_only=False, filter_issues=filter_issues
            )
        )
        actual_invariant_results_path = (
            current_directory / "position_invariant_result.actual"
        )
        actual_invariant_results_path.write_text(normalized_pysa_results)
        # NOTE(review): this overwrites position_invariant_result.json in the
        # test directory with the normalized expectation — confirm that file
        # is generated, not checked in.
        expected_invariant_results_path = (
            current_directory / "position_invariant_result.json"
        )
        expected_invariant_results_path.write_text(normalized_expected_results)
        result = subprocess.run(
            [
                "diff",
                "-u",
                expected_invariant_results_path,
                actual_invariant_results_path,
            ],
            text=True,
            stdout=subprocess.PIPE,
        )
        # friendly_exit logs the diff and calls sys.exit(1); nothing below runs.
        friendly_exit(
            "Output differs from expected:",
            result.stdout,
            "output-differs-from-expected",
        )
    else:
        LOG.info("Run produced expected results")
def friendly_exit(error_message: str, logs: str, suggested_hash: str) -> None:
    """
    Log a framed error report and abort the process with exit status 1.

    ``suggested_hash`` is accepted for interface compatibility but is not
    currently used.
    """
    report_lines = (
        "----BEGIN PYSA INTEGRATION TEST ERROR----",
        error_message,
        logs,
        "----END PYSA INTEGRATION TEST ERROR----",
    )
    for line in report_lines:
        LOG.error(line)
    sys.exit(1)
def run_pysa_integration_test(
    current_directory: Path,
    passthrough_args: Sequence[str],
    skip_model_verification: bool,
    filter_issues: bool,
    save_results_to: Optional[Path],
    run_from_source: bool = False,
) -> None:
    """
    Runs pysa and compares the output to that in full_results.json. Creates
    raw_results.json file that contains the output. Creates
    position_invariant_result.json that contains position information to
    compare using diff with position_invariant_result.actual before exiting if
    there is a mismatch between the specified and detected issues.
    """
    LOG.info("Running `pyre analyze`")
    if run_from_source:
        # NOTE(review): implicit string concatenation yields the single
        # argument "-mpyre-check.client.pyre"; module paths cannot contain
        # "-" — confirm this invocation actually works as intended.
        command = [
            "python",
            "-m" "pyre-check.client.pyre",
        ]
    else:
        command = ["pyre"]
    command.extend(["--noninteractive", "analyze"])
    if save_results_to is not None:
        command.extend(["--save-results-to", str(save_results_to)])
    if skip_model_verification:
        command.append("--no-verify")
    command.extend(passthrough_args)
    LOG.debug(f"Using command: {command}")
    pysa_results: str
    try:
        pysa_results = subprocess.check_output(
            command, text=True, cwd=current_directory
        )
        # When --save-results-to is used, stdout is not the result payload;
        # read the errors.json the analysis wrote instead.
        if save_results_to is not None:
            pysa_results = (save_results_to / "errors.json").read_text()
    except subprocess.CalledProcessError as exception:
        # friendly_exit terminates the process with status 1.
        friendly_exit(
            "Command failed with output:",
            exception.stdout,
            "found-x-model-verification-error",
        )
    (current_directory / "raw_results.json").write_text(pysa_results)
    expected_results = (current_directory / "full_result.json").read_text()
    compare_results(pysa_results, expected_results, current_directory, filter_issues)
| 32.197917 | 88 | 0.651084 |
from __future__ import annotations
import json
import logging
import subprocess
import sys
from pathlib import Path
from typing import final, Sequence, Optional
LOG: logging.Logger = logging.getLogger(__name__)
@final
class PyreErrorException(Exception):
    """Raised when the JSON produced by Pyre contains an `errors` entry."""

    pass
def normalized_json_dump(
    results: str, salient_keys_only: bool, filter_issues: bool
) -> str:
    """
    Return a normalized, pretty-printed JSON string of Pysa issues.

    Raises PyreErrorException when the payload reports Pyre errors.
    `filter_issues` keeps only issues whose `define` embeds the rule code
    under test; `salient_keys_only` strips issues down to position-invariant
    keys (raising KeyError if any is missing).
    """
    normalized = json.loads(results)
    if "errors" in normalized:
        pretty_error = json.dumps(normalized, sort_keys=True, indent=2)
        raise PyreErrorException(
            f"Errors were found when processing results:\n{pretty_error}"
        )
    if filter_issues:
        # Keep only issues produced by the rule being tested.
        normalized = [
            issue for issue in normalized if f"test_{issue['code']}_" in issue["define"]
        ]
    # Deterministic ordering for stable diffs.
    normalized = sorted(
        normalized,
        key=lambda issue: (
            issue["code"],
            issue["path"],
            issue["line"],
            issue["column"],
        ),
    )
    if salient_keys_only:
        salient_keys = {"code", "define", "description", "path"}
        stripped_issues = []
        for issue in normalized:
            stripped_issue = {
                key: value for key, value in issue.items() if key in salient_keys
            }
            if set(stripped_issue.keys()) != salient_keys:
                raise KeyError(
                    f"Expected issue to contain {salient_keys} keys, "
                    + f"but instead found: {issue}"
                )
            stripped_issues.append(stripped_issue)
        normalized = stripped_issues
    return json.dumps(normalized, sort_keys=True, indent=2) + "\n"
def compare_results(
    actual_results: str,
    expected_results: str,
    current_directory: Path,
    filter_issues: bool,
) -> None:
    """
    Compare normalized actual vs expected Pysa results; on mismatch write
    `.actual` debug artifacts, show a unified diff, and exit(1).
    """
    normalized_pysa_results = normalized_json_dump(
        actual_results, salient_keys_only=True, filter_issues=filter_issues
    )
    normalized_expected_results = normalized_json_dump(
        expected_results, salient_keys_only=True, filter_issues=filter_issues
    )
    if normalized_pysa_results != normalized_expected_results:
        # Position-sensitive dump of the actual run, for debugging.
        actual_full_results_path = current_directory / "full_result.actual"
        actual_full_results_path.write_text(
            normalized_json_dump(
                actual_results, salient_keys_only=False, filter_issues=filter_issues
            )
        )
        actual_invariant_results_path = (
            current_directory / "position_invariant_result.actual"
        )
        actual_invariant_results_path.write_text(normalized_pysa_results)
        expected_invariant_results_path = (
            current_directory / "position_invariant_result.json"
        )
        expected_invariant_results_path.write_text(normalized_expected_results)
        result = subprocess.run(
            [
                "diff",
                "-u",
                expected_invariant_results_path,
                actual_invariant_results_path,
            ],
            text=True,
            stdout=subprocess.PIPE,
        )
        # friendly_exit logs the diff and terminates with exit status 1.
        friendly_exit(
            "Output differs from expected:",
            result.stdout,
            "output-differs-from-expected",
        )
    else:
        LOG.info("Run produced expected results")
def friendly_exit(error_message: str, logs: str, suggested_hash: str) -> None:
    """
    Log a framed error report and abort with exit status 1.

    ``suggested_hash`` is currently unused.
    """
    LOG.error("----BEGIN PYSA INTEGRATION TEST ERROR----")
    LOG.error(error_message)
    LOG.error(logs)
    LOG.error("----END PYSA INTEGRATION TEST ERROR----")
    sys.exit(1)
def run_pysa_integration_test(
    current_directory: Path,
    passthrough_args: Sequence[str],
    skip_model_verification: bool,
    filter_issues: bool,
    save_results_to: Optional[Path],
    run_from_source: bool = False,
) -> None:
    """
    Run `pyre analyze`, persist raw_results.json, and compare the output
    against full_result.json via `compare_results` (which exits non-zero on
    a mismatch).
    """
    LOG.info("Running `pyre analyze`")
    if run_from_source:
        # NOTE(review): implicit concatenation produces the single argument
        # "-mpyre-check.client.pyre" — confirm this works, since module
        # paths cannot contain "-".
        command = [
            "python",
            "-m" "pyre-check.client.pyre",
        ]
    else:
        command = ["pyre"]
    command.extend(["--noninteractive", "analyze"])
    if save_results_to is not None:
        command.extend(["--save-results-to", str(save_results_to)])
    if skip_model_verification:
        command.append("--no-verify")
    command.extend(passthrough_args)
    LOG.debug(f"Using command: {command}")
    pysa_results: str
    try:
        pysa_results = subprocess.check_output(
            command, text=True, cwd=current_directory
        )
        # With --save-results-to, the payload is in errors.json, not stdout.
        if save_results_to is not None:
            pysa_results = (save_results_to / "errors.json").read_text()
    except subprocess.CalledProcessError as exception:
        # Terminates the process with exit status 1.
        friendly_exit(
            "Command failed with output:",
            exception.stdout,
            "found-x-model-verification-error",
        )
    (current_directory / "raw_results.json").write_text(pysa_results)
    expected_results = (current_directory / "full_result.json").read_text()
    compare_results(pysa_results, expected_results, current_directory, filter_issues)
| true | true |
f721a628ef8e42f4b26f07888d6e70148b933809 | 4,668 | py | Python | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/velux/cover.py | orcema/core | ce144bf63145813c76fbbe4f9423341764695057 | [
"Apache-2.0"
] | null | null | null | """Support for Velux covers."""
from __future__ import annotations
from typing import Any
from pyvlx import OpeningDevice, Position
from pyvlx.opening_device import Awning, Blind, GarageDoor, Gate, RollerShutter, Window
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_VELUX, VeluxEntity
PARALLEL_UPDATES = 1
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover(s) for Velux platform."""
    # Every pyvlx node that is an opening device becomes one cover entity.
    covers = [
        VeluxCover(node)
        for node in hass.data[DATA_VELUX].pyvlx.nodes
        if isinstance(node, OpeningDevice)
    ]
    async_add_entities(covers)
class VeluxCover(VeluxEntity, CoverEntity):
    """Representation of a Velux cover."""

    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        features = (
            CoverEntityFeature.OPEN
            | CoverEntityFeature.CLOSE
            | CoverEntityFeature.SET_POSITION
            | CoverEntityFeature.STOP
        )
        if self.current_cover_tilt_position is None:
            # Node has no orientation support: position-only control.
            return features
        return (
            features
            | CoverEntityFeature.OPEN_TILT
            | CoverEntityFeature.CLOSE_TILT
            | CoverEntityFeature.SET_TILT_POSITION
            | CoverEntityFeature.STOP_TILT
        )

    @property
    def current_cover_position(self) -> int:
        """Return the current position of the cover."""
        # Invert: pyvlx's percentage and HA's open-percentage run in
        # opposite directions.
        return 100 - self.node.position.position_percent

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return the current tilt position, or None if the node has none."""
        if not isinstance(self.node, Blind):
            return None
        return 100 - self.node.orientation.position_percent

    @property
    def device_class(self) -> CoverDeviceClass:
        """Define this cover as either awning, blind, garage, gate, shutter or window."""
        # First matching node type wins; Window doubles as the fallback.
        type_map = (
            (Awning, CoverDeviceClass.AWNING),
            (Blind, CoverDeviceClass.BLIND),
            (GarageDoor, CoverDeviceClass.GARAGE),
            (Gate, CoverDeviceClass.GATE),
            (RollerShutter, CoverDeviceClass.SHUTTER),
            (Window, CoverDeviceClass.WINDOW),
        )
        for node_type, cover_class in type_map:
            if isinstance(self.node, node_type):
                return cover_class
        return CoverDeviceClass.WINDOW

    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed."""
        return self.node.position.closed

    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        await self.node.close(wait_for_completion=False)

    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        await self.node.open(wait_for_completion=False)

    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        target = Position(position_percent=100 - kwargs[ATTR_POSITION])
        await self.node.set_position(target, wait_for_completion=False)

    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        await self.node.stop(wait_for_completion=False)

    async def async_close_cover_tilt(self, **kwargs: Any) -> None:
        """Close cover tilt."""
        await self.node.close_orientation(wait_for_completion=False)

    async def async_open_cover_tilt(self, **kwargs: Any) -> None:
        """Open cover tilt."""
        await self.node.open_orientation(wait_for_completion=False)

    async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
        """Stop cover tilt."""
        await self.node.stop_orientation(wait_for_completion=False)

    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
        """Move cover tilt to a specific position."""
        await self.node.set_orientation(
            orientation=Position(position_percent=100 - kwargs[ATTR_TILT_POSITION]),
            wait_for_completion=False,
        )
| 35.097744 | 89 | 0.673522 | from __future__ import annotations
from typing import Any
from pyvlx import OpeningDevice, Position
from pyvlx.opening_device import Awning, Blind, GarageDoor, Gate, RollerShutter, Window
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DATA_VELUX, VeluxEntity
PARALLEL_UPDATES = 1
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up cover(s) for Velux platform."""
    entities = []
    # One cover entity per pyvlx node that is an opening device.
    for node in hass.data[DATA_VELUX].pyvlx.nodes:
        if isinstance(node, OpeningDevice):
            entities.append(VeluxCover(node))
    async_add_entities(entities)
class VeluxCover(VeluxEntity, CoverEntity):
    """Representation of a Velux cover."""

    @property
    def supported_features(self) -> int:
        """Flag supported features; tilt features only when a tilt exists."""
        supported_features = (
            CoverEntityFeature.OPEN
            | CoverEntityFeature.CLOSE
            | CoverEntityFeature.SET_POSITION
            | CoverEntityFeature.STOP
        )
        if self.current_cover_tilt_position is not None:
            supported_features |= (
                CoverEntityFeature.OPEN_TILT
                | CoverEntityFeature.CLOSE_TILT
                | CoverEntityFeature.SET_TILT_POSITION
                | CoverEntityFeature.STOP_TILT
            )
        return supported_features

    @property
    def current_cover_position(self) -> int:
        """Return the current position of the cover (inverts pyvlx's scale)."""
        return 100 - self.node.position.position_percent

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return the current tilt position; None unless the node is a Blind."""
        if isinstance(self.node, Blind):
            return 100 - self.node.orientation.position_percent
        return None

    @property
    def device_class(self) -> CoverDeviceClass:
        """Map the pyvlx node type to a cover device class (Window fallback)."""
        if isinstance(self.node, Awning):
            return CoverDeviceClass.AWNING
        if isinstance(self.node, Blind):
            return CoverDeviceClass.BLIND
        if isinstance(self.node, GarageDoor):
            return CoverDeviceClass.GARAGE
        if isinstance(self.node, Gate):
            return CoverDeviceClass.GATE
        if isinstance(self.node, RollerShutter):
            return CoverDeviceClass.SHUTTER
        if isinstance(self.node, Window):
            return CoverDeviceClass.WINDOW
        return CoverDeviceClass.WINDOW

    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed."""
        return self.node.position.closed

    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        await self.node.close(wait_for_completion=False)

    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        await self.node.open(wait_for_completion=False)

    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        position_percent = 100 - kwargs[ATTR_POSITION]
        await self.node.set_position(
            Position(position_percent=position_percent), wait_for_completion=False
        )

    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        await self.node.stop(wait_for_completion=False)

    async def async_close_cover_tilt(self, **kwargs: Any) -> None:
        """Close cover tilt."""
        await self.node.close_orientation(wait_for_completion=False)

    async def async_open_cover_tilt(self, **kwargs: Any) -> None:
        """Open cover tilt."""
        await self.node.open_orientation(wait_for_completion=False)

    async def async_stop_cover_tilt(self, **kwargs: Any) -> None:
        """Stop cover tilt."""
        await self.node.stop_orientation(wait_for_completion=False)

    async def async_set_cover_tilt_position(self, **kwargs: Any) -> None:
        """Move cover tilt to a specific position."""
        position_percent = 100 - kwargs[ATTR_TILT_POSITION]
        orientation = Position(position_percent=position_percent)
        await self.node.set_orientation(
            orientation=orientation, wait_for_completion=False
        )
| true | true |
f721a6360f0511e109d096adce51015e18e66e23 | 5,135 | py | Python | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | Scripts/Slicer.py | rhong3/GBM | 088b1e99f4fe02395b62d324ec4f9e8402417651 | [
"MIT"
] | null | null | null | """
Tile real scn/svs files; used by Cutter.py
Created on 11/19/2018
*** Removed imlist storage to minimize memory usage 01/24/2019 ***
@author: RH
"""
from openslide import OpenSlide
import numpy as np
import pandas as pd
import multiprocessing as mp
import staintools
from PIL import Image
# check if a tile is background or not; return a blank pixel percentage score
def bgcheck(img, ts):
    """Score how background-like a tile is.

    Returns the fraction of the ts*ts tile whose pixels are near-white
    (all channels > 200) or near-black (all channels < 50), plus 1 if the
    tile looks low-contrast ("grey"). Larger scores mean more background.

    NOTE(review): the grey test reads ``arr[0]``/``arr[1]``/``arr[2]``,
    i.e. the first three *rows* of the image, not the three color
    channels — behavior preserved as-is; confirm whether ``arr[:, :, c]``
    was intended.
    """
    arr = np.nan_to_num(np.array(img)[:, :, :3])
    bright = (arr > 200).astype(np.uint8)
    dark = (arr < 50).astype(np.uint8)
    low_contrast = (
        ((np.ptp(arr[0])) < 100).astype(np.uint8)
        * ((np.ptp(arr[1])) < 100).astype(np.uint8)
        * ((np.ptp(arr[2])) < 100).astype(np.uint8)
    )
    bright_mask = bright[:, :, 0] * bright[:, :, 1] * bright[:, :, 2]
    dark_mask = dark[:, :, 0] * dark[:, :, 1] * dark[:, :, 2]
    return (np.sum(bright_mask) + np.sum(dark_mask)) / (ts * ts) + low_contrast
# Tile color normalization
def normalization(img, sttd):
    """Normalize a tile's stain colors against the reference image ``sttd``.

    Applies luminosity standardization followed by Vahadane stain
    normalization; returns a PIL RGB image.
    """
    source = staintools.LuminosityStandardizer.standardize(
        np.array(img)[:, :, :3]
    )
    normalizer = staintools.StainNormalizer(method='vahadane')
    normalizer.fit(sttd)
    normalized = normalizer.transform(source)
    return Image.fromarray(normalized.astype('uint8'), 'RGB')
# tile method; slp is the scn/svs image; n_y is the number of tiles can be cut on y column to be cut;
# x and y are the upper left position of each tile; tile_size is tile size; stepsize of each step; x0 is the row to cut.
# outdir is the output directory for images;
# imloc record each tile's relative and absolute coordinates; imlist is a list of cut tiles (Removed 01/24/2019).
def v_slide(slp, n_y, x, y, tile_size, stepsize, x0, outdir, level, dp, std):
    """Cut one column of tiles from the slide at path ``slp``.

    Walks n_y steps down column ``x0`` at pyramid ``level``; tiles whose
    background score from bgcheck falls in (0.01, 0.4) are resized to
    299x299, stain-normalized against ``std``, saved to ``outdir`` (with an
    optional ``dp`` filename suffix), and recorded. Returns a list of
    [x0, y0, image_x, image_y, path] entries for the saved tiles.
    """
    slide = OpenSlide(slp)
    imloc = []
    y0 = 0
    target_x = x0 * stepsize
    # Level-n coordinates map to level-0 via a factor of 4 per level.
    image_x = (target_x + x)*(4**level)
    while y0 < n_y:
        target_y = y0 * stepsize
        image_y = (target_y + y)*(4**level)
        img = slide.read_region((image_x, image_y), level, (tile_size, tile_size))
        wscore = bgcheck(img, tile_size)
        # Keep tiles that are mostly tissue but not pure tissue/pure blank.
        if 0.01 < wscore < 0.4:
            img = img.resize((299, 299))
            img = normalization(img, std)
            if dp:
                img.save(outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp)))
                strr = outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp))
            else:
                img.save(outdir + "/region_x-{}-y-{}.png".format(image_x, image_y))
                strr = outdir + "/region_x-{}-y-{}.png".format(image_x, image_y)
            imloc.append([x0, y0, image_x, image_y, strr])
        y0 += 1
    slide.close()
    return imloc
# image_file is the scn/svs name; outdir is the output directory; path_to_slide is where the scn/svs stored.
# First open the slide, determine how many tiles can be cut, record the residue edges width,
# and calculate the final output prediction heat map size should be. Then, using multithread to cut tiles, and stack up
# tiles and their position dictionaries.
def tile(image_file, outdir, level, std_img, path_to_slide="../images/", dp=None, ft=1):
    """Tile a whole scn/svs slide in parallel and write a tile dictionary.

    Opens the slide, computes how many overlapping tiles fit at ``level``,
    cuts each column with v_slide across a multiprocessing pool, saves a
    sorted CSV of tile coordinates to ``outdir``, and returns
    (n_x, n_y, lowres_preview, residue_x, residue_y, tile_count).
    """
    slide = OpenSlide(path_to_slide+image_file)
    slp = str(path_to_slide+image_file)
    print(slp)
    print(slide.level_dimensions)
    bounds_width = slide.level_dimensions[level][0]
    bounds_height = slide.level_dimensions[level][1]
    x = 0
    y = 0
    # Tiles of 299*ft px with a 49*ft px overlap between neighbors.
    half_width_region = 49*ft
    full_width_region = 299*ft
    stepsize = (full_width_region - half_width_region)
    n_x = int((bounds_width - 1) / stepsize)
    n_y = int((bounds_height - 1) / stepsize)
    # Leftover edge widths, in units of 50 px — presumably for sizing the
    # prediction heat map; TODO confirm.
    residue_x = int((bounds_width - n_x * stepsize)/50)
    residue_y = int((bounds_height - n_y * stepsize)/50)
    lowres = slide.read_region((x, y), 2, (int(n_x*stepsize/16), int(n_y*stepsize/16)))
    lowres = np.array(lowres)[:,:,:3]
    x0 = 0
    # create multiprocessing pool (one v_slide task per column)
    print(mp.cpu_count())
    pool = mp.Pool(processes=mp.cpu_count())
    tasks = []
    while x0 < n_x:
        task = tuple((slp, n_y, x, y, full_width_region, stepsize, x0, outdir, level, dp, std_img))
        tasks.append(task)
        x0 += 1
    # slice images with multiprocessing
    temp = pool.starmap(v_slide, tasks)
    tempdict = list(temp)
    temp = None
    pool.close()
    pool.join()
    tempdict = list(filter(None, tempdict))
    imloc = []
    list(map(imloc.extend, tempdict))
    # Persist tile coordinates sorted by column then row.
    imlocpd = pd.DataFrame(imloc, columns = ["X_pos", "Y_pos", "X", "Y", "Loc"])
    imlocpd = imlocpd.sort_values(["X_pos", "Y_pos"], ascending=[True, True])
    imlocpd = imlocpd.reset_index(drop=True)
    imlocpd = imlocpd.reset_index(drop=False)
    imlocpd.columns = ["Num", "X_pos", "Y_pos", "X", "Y", "Loc"]
    if dp:
        imlocpd.to_csv(outdir + "/{}_dict.csv".format(dp), index=False)
    else:
        imlocpd.to_csv(outdir + "/dict.csv", index=False)
    tempdict = None
    ct = len(imloc)
    print(ct)
    return n_x, n_y, lowres, residue_x, residue_y, ct
| 37.210145 | 120 | 0.63408 | from openslide import OpenSlide
import numpy as np
import pandas as pd
import multiprocessing as mp
import staintools
from PIL import Image
def bgcheck(img, ts):
    """Score how much of a ``ts`` x ``ts`` tile is background.

    Counts pixels that are uniformly bright (>200 on all channels) or
    uniformly dark (<50 on all channels), normalizes by the tile area, and
    adds 1 when the sampled bands each span a narrow (<100) value range.
    """
    arr = np.nan_to_num(np.array(img)[:, :, :3])
    bright = (arr[:, :, :3] > 200).astype(np.uint8)
    dark = (arr[:, :, :3] < 50).astype(np.uint8)
    # NOTE(review): these index the first three *rows* of the image, mirroring
    # the original code; confirm whether channels (arr[:, :, i]) were intended.
    is_grey = np.uint8(1)
    for band in (arr[0], arr[1], arr[2]):
        is_grey = is_grey * ((np.ptp(band)) < 100).astype(np.uint8)
    # A pixel counts only when all three channels agree.
    bright = bright[:, :, 0] * bright[:, :, 1] * bright[:, :, 2]
    dark = dark[:, :, 0] * dark[:, :, 1] * dark[:, :, 2]
    return (np.sum(bright) + np.sum(dark)) / (ts * ts) + is_grey
def normalization(img, sttd):
    """Stain-normalize a tile against the reference image ``sttd``.

    Applies luminosity standardization, then Vahadane stain normalization,
    and returns the result as an RGB PIL image.
    """
    rgb = np.array(img)[:, :, :3]
    rgb = staintools.LuminosityStandardizer.standardize(rgb)
    vahadane = staintools.StainNormalizer(method='vahadane')
    vahadane.fit(sttd)
    rgb = vahadane.transform(rgb)
    return Image.fromarray(rgb.astype('uint8'), 'RGB')
def v_slide(slp, n_y, x, y, tile_size, stepsize, x0, outdir, level, dp, std):
    """Worker: cut one vertical column of tiles from the slide.

    Reads every tile in column ``x0``, keeps those whose background score
    falls in (0.01, 0.4), stain-normalizes and saves them as PNGs, and
    returns their [x0, y0, image_x, image_y, path] records.
    """
    slide = OpenSlide(slp)
    imloc = []
    y0 = 0
    target_x = x0 * stepsize
    # Convert to level-0 coordinates.
    # NOTE(review): the 4**level scaling assumes a 4x downsample per pyramid
    # level -- confirm against the slide's level_downsamples.
    image_x = (target_x + x)*(4**level)
    while y0 < n_y:
        target_y = y0 * stepsize
        image_y = (target_y + y)*(4**level)
        img = slide.read_region((image_x, image_y), level, (tile_size, tile_size))
        wscore = bgcheck(img, tile_size)
        # Keep tiles whose white/black/grey score is in the accepted band.
        if 0.01 < wscore < 0.4:
            img = img.resize((299, 299))
            img = normalization(img, std)
            if dp:
                img.save(outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp)))
                strr = outdir + "/region_x-{}-y-{}_{}.png".format(image_x, image_y, str(dp))
            else:
                img.save(outdir + "/region_x-{}-y-{}.png".format(image_x, image_y))
                strr = outdir + "/region_x-{}-y-{}.png".format(image_x, image_y)
            imloc.append([x0, y0, image_x, image_y, strr])
        y0 += 1
    slide.close()
    return imloc
# image_file is the scn/svs name; outdir is the output directory; path_to_slide
# is where the scn/svs is stored. Opens the slide, works out how many tiles fit
# at the requested pyramid level, records the residue edge widths, then slices
# tile columns in parallel and assembles their position dictionary.
def tile(image_file, outdir, level, std_img, path_to_slide="../images/", dp=None, ft=1):
    """Tile a slide and write a per-tile position dictionary CSV.

    :param image_file: scn/svs file name, relative to ``path_to_slide``.
    :param outdir: output directory for tile PNGs and the dict CSV.
    :param level: OpenSlide pyramid level to read tiles from.
    :param std_img: stain-normalization reference image (forwarded to workers).
    :param path_to_slide: directory containing the slide file.
    :param dp: optional tag appended to tile/CSV file names.
    :param ft: tile-size factor; tiles are 299*ft pixels square.
    :return: tuple (n_x, n_y, lowres thumbnail array, residue_x, residue_y, tile count).
    """
    slide = OpenSlide(path_to_slide+image_file)
    slp = str(path_to_slide+image_file)
    print(slp)
    print(slide.level_dimensions)
    bounds_width = slide.level_dimensions[level][0]
    bounds_height = slide.level_dimensions[level][1]
    x = 0
    y = 0
    # Tiles overlap: the stride is the tile width minus a 49*ft margin.
    half_width_region = 49*ft
    full_width_region = 299*ft
    stepsize = (full_width_region - half_width_region)
    n_x = int((bounds_width - 1) / stepsize)
    n_y = int((bounds_height - 1) / stepsize)
    # Residue edge widths (in 50-pixel units) that the tiling cannot cover.
    residue_x = int((bounds_width - n_x * stepsize)/50)
    residue_y = int((bounds_height - n_y * stepsize)/50)
    # NOTE(review): the thumbnail is always read at pyramid level 2 regardless
    # of the ``level`` argument -- confirm this is intended.
    lowres = slide.read_region((x, y), 2, (int(n_x*stepsize/16), int(n_y*stepsize/16)))
    lowres = np.array(lowres)[:,:,:3]
    x0 = 0
    # Create the multiprocessing pool; one task per tile column.
    print(mp.cpu_count())
    pool = mp.Pool(processes=mp.cpu_count())
    tasks = []
    while x0 < n_x:
        task = tuple((slp, n_y, x, y, full_width_region, stepsize, x0, outdir, level, dp, std_img))
        tasks.append(task)
        x0 += 1
    # Slice tile columns in parallel with v_slide.
    temp = pool.starmap(v_slide, tasks)
    tempdict = list(temp)
    temp = None
    pool.close()
    pool.join()
    # Drop columns that produced no tiles, then flatten the per-column lists.
    tempdict = list(filter(None, tempdict))
    imloc = []
    list(map(imloc.extend, tempdict))
    imlocpd = pd.DataFrame(imloc, columns = ["X_pos", "Y_pos", "X", "Y", "Loc"])
    imlocpd = imlocpd.sort_values(["X_pos", "Y_pos"], ascending=[True, True])
    imlocpd = imlocpd.reset_index(drop=True)
    # Second reset keeps the row number as an explicit "Num" column.
    imlocpd = imlocpd.reset_index(drop=False)
    imlocpd.columns = ["Num", "X_pos", "Y_pos", "X", "Y", "Loc"]
    if dp:
        imlocpd.to_csv(outdir + "/{}_dict.csv".format(dp), index=False)
    else:
        imlocpd.to_csv(outdir + "/dict.csv", index=False)
    tempdict = None
    ct = len(imloc)
    print(ct)
    return n_x, n_y, lowres, residue_x, residue_y, ct
| true | true |
f721a64b1ed80dcb38fc20d3f17da57445b5b1a0 | 9,626 | py | Python | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 136 | 2015-01-03T04:03:23.000Z | 2022-02-07T11:08:57.000Z | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 11 | 2017-02-09T20:05:04.000Z | 2021-01-24T22:25:59.000Z | python/ht/ui/menus/parmmenu.py | Hengle/Houdini-Toolbox | a1fd7d3dd73d3fc4cea78e29aeff1d190c41bae3 | [
"MIT"
] | 26 | 2015-08-18T12:11:02.000Z | 2020-12-19T01:53:31.000Z | """This module contains functions supporting custom PARMmenu.xml entries."""
# =============================================================================
# IMPORTS
# =============================================================================
# Standard Library
from typing import Dict, List
# Houdini
import hou
# =============================================================================
# NON-PUBLIC FUNCTIONS
# =============================================================================
def _valid_to_convert_to_absolute_reference(parm: hou.Parm) -> bool:
    """Return True when ``parm`` can be converted to an absolute reference.

    The parameter qualifies when it is a node-reference string parameter
    whose raw value is a relative path (starting with "..") that resolves
    to an existing node.  Keyframed/expression-driven values are rejected.

    :param parm: The parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()
    # Only plain node-reference string parameters qualify.
    if not isinstance(template, hou.StringParmTemplate):
        return False
    if template.stringType() != hou.stringParmType.NodeReference:
        return False
    value = parm.eval()
    # Skip empty values and values that are not relative paths.
    if not value or not value.startswith(".."):
        return False
    # Parameters with keyframes/expressions cannot be converted.
    if parm.keyframes():
        return False
    # A mismatch with the unexpanded string means some sort of expression;
    # leave those alone.
    if value != parm.unexpandedString():
        return False
    return parm.evalAsNode() is not None
def _valid_to_convert_to_relative_reference(parm: hou.Parm) -> bool:
    """Return True when ``parm`` can be converted to a relative reference.

    The parameter qualifies when it is a node-reference string parameter
    whose raw value is an absolute path (starting with "/") that resolves
    to an existing node.  Keyframed/expression-driven values are rejected.

    :param parm: The parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()
    # Only plain node-reference string parameters qualify.
    if not isinstance(template, hou.StringParmTemplate):
        return False
    if template.stringType() != hou.stringParmType.NodeReference:
        return False
    value = parm.eval()
    # Skip empty values and values that are not absolute paths.
    if not value or not value.startswith("/"):
        return False
    # Parameters with keyframes/expressions cannot be converted.
    if parm.keyframes():
        return False
    # A mismatch with the unexpanded string means some sort of expression;
    # leave those alone.
    if value != parm.unexpandedString():
        return False
    return parm.evalAsNode() is not None
# =============================================================================
# FUNCTIONS
# =============================================================================
def convert_absolute_to_relative_path_context(scriptargs: dict) -> bool:
    """Context script for converting any absolute node paths to relative paths.

    The menu entry is shown if any selected node-reference string parameter
    holds an absolute path.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]
    # Generator form short-circuits on the first match and avoids building
    # an intermediate list (the original used any([...])).
    return any(_valid_to_convert_to_relative_reference(parm) for parm in parms)
def convert_absolute_to_relative_path(scriptargs: dict):
    """Convert eligible absolute node references to relative paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        if not _valid_to_convert_to_relative_reference(parm):
            continue
        referenced_node = parm.evalAsNode()
        parm.set(parm.node().relativePathTo(referenced_node))
def convert_relative_to_absolute_path_context(scriptargs: dict) -> bool:
    """Context script for converting any relative node paths to absolute paths.

    The menu entry is shown if any selected node-reference string parameter
    holds a relative path.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]
    # Generator form short-circuits on the first match and avoids building
    # an intermediate list (the original used any([...])).
    return any(_valid_to_convert_to_absolute_reference(parm) for parm in parms)
def convert_relative_to_absolute_path(scriptargs: dict):
    """Convert eligible relative node references to absolute paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        if not _valid_to_convert_to_absolute_reference(parm):
            continue
        referenced_node = parm.evalAsNode()
        parm.set(referenced_node.path())
def promote_parameter_to_node(scriptargs: dict):  # pylint: disable=too-many-locals
    """Promote the selected parameter(s) to a user-chosen target node.

    Prompts for a target node, creates (or reuses) a matching parameter on
    it, optionally copies the current value, then channel-references each
    source parameter to the promoted one.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    # Get the parms to act on.
    parms = scriptargs["parms"]
    # The start node for the node chooser prompt.
    start_node = None
    # NOTE(review): if scriptargs["parms"] is empty, parm_tuple stays None and
    # parm_tuple.name() below raises -- presumably the menu guarantees a
    # selection; confirm.
    parm_tuple: hou.ParmTuple = None
    parm_tuple_map: Dict[hou.ParmTuple, List[hou.Parm]] = {}
    parm_tuple_nodes = []
    # Process all the selected parms, partitioning by parm tuple.
    for parm in parms:
        parm_tuple = parm.tuple()
        # Get or create a list of parms for this tuple.
        parms_for_tuple = parm_tuple_map.setdefault(parm_tuple, [])
        parms_for_tuple.append(parm)
        node = parm_tuple.node()
        parm_tuple_nodes.append(node)
        # Update the start node to be the parent of this tuple's node.
        start_node = node.parent()
    # The number of parms in the (last seen) tuple.
    num_components = len(parm_tuple)
    # Determine how many components of the tuple we will set.
    num_components_to_set = max([len(value) for value in list(parm_tuple_map.values())])
    # Prompt for a target node, starting the chooser at the parent.
    result = hou.ui.selectNode(initial_node=start_node)
    # Try to find the selected node.
    target_node = hou.node(result)
    if target_node is not None:
        # Can't promote to a selected (source) node.
        if target_node in parm_tuple_nodes:
            raise hou.OperationFailed("Cannot promote to a source node.")
        # Whether the target parm will be set to the source value.
        set_value = True
        # The target node already has a parm tuple with the desired name so we
        # should prompt to use it.
        if target_node.parmTuple(parm_tuple.name()) is not None:
            choice = hou.ui.displayMessage(
                "Parameter already exists on {}. Link to existing parameter?".format(
                    target_node.path()
                ),
                buttons=(
                    "Yes and keep current value",
                    "Yes and update value",
                    "Cancel",
                ),
                severity=hou.severityType.ImportantMessage,
            )
            # Use parm but keep value, so don't set.
            if choice == 0:
                set_value = False
            # Use parm and update value.
            elif choice == 1:
                set_value = True
            # Bail out since we're cancelling.
            else:
                return
        # No existing parameter so we'll have to create one.
        else:
            # Get the target node's parm interface.
            target_ptg = target_node.parmTemplateGroup()
            # The parameter definition for the parm we are trying to link.
            parm_template = parm_tuple.parmTemplate()
            # If we are trying to link a single parm inside a tuple then modify
            # the parm definition to represent that single parm.
            if num_components_to_set != num_components:
                parm_template.setNumComponents(1)
                # Since we're just setting a single component the parms should all
                # have the same name so just grab the first.
                parm_template.setName(parms[0].name())
            # Add the parameter definition to the parm list.
            target_ptg.addParmTemplate(parm_template)
            # Update the interface with the new definition.
            target_node.setParmTemplateGroup(target_ptg)
        # Process each parm to set.
        for parm in parms:
            # Get the target parm.
            target_parm = target_node.parm(parm.name())
            # Set the target parm to the current value if required.
            if set_value:
                target_parm.set(parm.eval())
            # Create the channel reference.
            parm.set(target_parm)
| 34.134752 | 88 | 0.601911 |
from typing import Dict, List
import hou
def _valid_to_convert_to_absolute_reference(parm: hou.Parm) -> bool:
    """Return True when ``parm`` can be converted to an absolute reference.

    The parameter qualifies when it is a node-reference string parameter
    whose raw value is a relative path (starting with "..") that resolves
    to an existing node.  Keyframed/expression-driven values are rejected.

    :param parm: The parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()
    # Only plain node-reference string parameters qualify.
    if not isinstance(template, hou.StringParmTemplate):
        return False
    if template.stringType() != hou.stringParmType.NodeReference:
        return False
    value = parm.eval()
    # Skip empty values and values that are not relative paths.
    if not value or not value.startswith(".."):
        return False
    # Parameters with keyframes/expressions cannot be converted.
    if parm.keyframes():
        return False
    # A mismatch with the unexpanded string means some sort of expression;
    # leave those alone.
    if value != parm.unexpandedString():
        return False
    return parm.evalAsNode() is not None
def _valid_to_convert_to_relative_reference(parm: hou.Parm) -> bool:
    """Return True when ``parm`` can be converted to a relative reference.

    The parameter qualifies when it is a node-reference string parameter
    whose raw value is an absolute path (starting with "/") that resolves
    to an existing node.  Keyframed/expression-driven values are rejected.

    :param parm: The parameter to check.
    :return: Whether or not the parm can be converted.
    """
    template = parm.parmTemplate()
    # Only plain node-reference string parameters qualify.
    if not isinstance(template, hou.StringParmTemplate):
        return False
    if template.stringType() != hou.stringParmType.NodeReference:
        return False
    value = parm.eval()
    # Skip empty values and values that are not absolute paths.
    if not value or not value.startswith("/"):
        return False
    # Parameters with keyframes/expressions cannot be converted.
    if parm.keyframes():
        return False
    # A mismatch with the unexpanded string means some sort of expression;
    # leave those alone.
    if value != parm.unexpandedString():
        return False
    return parm.evalAsNode() is not None
def convert_absolute_to_relative_path_context(scriptargs: dict) -> bool:
    """Context script for converting any absolute node paths to relative paths.

    The menu entry is shown if any selected node-reference string parameter
    holds an absolute path.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]
    # Generator form short-circuits on the first match and avoids building
    # an intermediate list (the original used any([...])).
    return any(_valid_to_convert_to_relative_reference(parm) for parm in parms)
def convert_absolute_to_relative_path(scriptargs: dict):
    """Convert eligible absolute node references to relative paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        if not _valid_to_convert_to_relative_reference(parm):
            continue
        referenced_node = parm.evalAsNode()
        parm.set(parm.node().relativePathTo(referenced_node))
def convert_relative_to_absolute_path_context(scriptargs: dict) -> bool:
    """Context script for converting any relative node paths to absolute paths.

    The menu entry is shown if any selected node-reference string parameter
    holds a relative path.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return: Whether or not to show the menu entry.
    """
    parms = scriptargs["parms"]
    # Generator form short-circuits on the first match and avoids building
    # an intermediate list (the original used any([...])).
    return any(_valid_to_convert_to_absolute_reference(parm) for parm in parms)
def convert_relative_to_absolute_path(scriptargs: dict):
    """Convert eligible relative node references to absolute paths.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    for parm in scriptargs["parms"]:
        if not _valid_to_convert_to_absolute_reference(parm):
            continue
        referenced_node = parm.evalAsNode()
        parm.set(referenced_node.path())
def promote_parameter_to_node(scriptargs: dict):  # pylint: disable=too-many-locals
    """Promote the selected parameter(s) to a user-chosen target node.

    Prompts for a target node, creates (or reuses) a matching parameter on
    it, optionally copies the current value, then channel-references each
    source parameter to the promoted one.

    :param scriptargs: kwargs dict from PARMmenu entry.
    :return:
    """
    # Get the parms to act on.
    parms = scriptargs["parms"]
    # The start node for the node chooser prompt.
    start_node = None
    # NOTE(review): if scriptargs["parms"] is empty, parm_tuple stays None and
    # parm_tuple.name() below raises -- presumably the menu guarantees a
    # selection; confirm.
    parm_tuple: hou.ParmTuple = None
    parm_tuple_map: Dict[hou.ParmTuple, List[hou.Parm]] = {}
    parm_tuple_nodes = []
    # Process all the selected parms, partitioning by parm tuple.
    for parm in parms:
        parm_tuple = parm.tuple()
        # Get or create a list of parms for this tuple.
        parms_for_tuple = parm_tuple_map.setdefault(parm_tuple, [])
        parms_for_tuple.append(parm)
        node = parm_tuple.node()
        parm_tuple_nodes.append(node)
        # Update the start node to be the parent of this tuple's node.
        start_node = node.parent()
    # The number of parms in the (last seen) tuple.
    num_components = len(parm_tuple)
    # Determine how many components of the tuple we will set.
    num_components_to_set = max([len(value) for value in list(parm_tuple_map.values())])
    # Prompt for a target node, starting the chooser at the parent.
    result = hou.ui.selectNode(initial_node=start_node)
    # Try to find the selected node.
    target_node = hou.node(result)
    if target_node is not None:
        # Can't promote to a selected (source) node.
        if target_node in parm_tuple_nodes:
            raise hou.OperationFailed("Cannot promote to a source node.")
        # Whether the target parm will be set to the source value.
        set_value = True
        # The target node already has a parm tuple with the desired name so we
        # should prompt to use it.
        if target_node.parmTuple(parm_tuple.name()) is not None:
            choice = hou.ui.displayMessage(
                "Parameter already exists on {}. Link to existing parameter?".format(
                    target_node.path()
                ),
                buttons=(
                    "Yes and keep current value",
                    "Yes and update value",
                    "Cancel",
                ),
                severity=hou.severityType.ImportantMessage,
            )
            # Use parm but keep value, so don't set.
            if choice == 0:
                set_value = False
            # Use parm and update value.
            elif choice == 1:
                set_value = True
            # Bail out since we're cancelling.
            else:
                return
        # No existing parameter so we'll have to create one.
        else:
            # Get the target node's parm interface.
            target_ptg = target_node.parmTemplateGroup()
            # The parameter definition for the parm we are trying to link.
            parm_template = parm_tuple.parmTemplate()
            # If we are trying to link a single parm inside a tuple then modify
            # the parm definition to represent that single parm.
            if num_components_to_set != num_components:
                parm_template.setNumComponents(1)
                # Since we're just setting a single component the parms should all
                # have the same name so just grab the first.
                parm_template.setName(parms[0].name())
            # Add the parameter definition to the parm list.
            target_ptg.addParmTemplate(parm_template)
            # Update the interface with the new definition.
            target_node.setParmTemplateGroup(target_ptg)
        # Process each parm to set.
        for parm in parms:
            # Get the target parm.
            target_parm = target_node.parm(parm.name())
            # Set the target parm to the current value if required.
            if set_value:
                target_parm.set(parm.eval())
            # Create the channel reference.
            parm.set(target_parm)
| true | true |
f721a752d81135177ab54ecb6768ca98ba8ac9c6 | 6,793 | py | Python | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | controller/modules/Logger.py | avinashnatesan/Controllers | 85a005a87e61d50a3ada660e8d90739745e211af | [
"MIT"
] | null | null | null | # ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import logging.handlers as lh
import os
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
    """Controller module that routes log CBTs to Python's logging package.

    The sink (console, rotating file, or both) and the log level are read
    from this module's section of the ipop-config file.
    """

    def __init__(self, cfx_handle, module_config, module_name):
        super(Logger, self).__init__(cfx_handle, module_config, module_name)

    def initialize(self):
        """Configure the Python logging package from the module config."""
        # Controller log level from the ipop-config file; defaults to INFO.
        if "LogLevel" in self._cm_config:
            level = getattr(logging, self._cm_config["LogLevel"])
        else:
            # BUG FIX: was getattr(logging, "info"), which returns the
            # logging.info *function*, not the numeric INFO level constant.
            level = logging.INFO
        if self._cm_config["Device"] == "Console":
            # Console-only logging.
            logging.basicConfig(format="[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                                datefmt="%H:%M:%S",
                                level=level)
            self.logger = logging.getLogger("IPOP console logger")
        elif self._cm_config["Device"] == "File":
            # Rotating file logging; default to the current working directory.
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            self.logger = logging.getLogger("IPOP Rotating Log")
            self.logger.setLevel(level)
            handler = lh.RotatingFileHandler(filename=fqname,
                                             maxBytes=self._cm_config["MaxFileSize"],
                                             backupCount=self._cm_config["MaxArchives"])
            formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        else:
            # "All": log to both the console and a rotating file.
            self.logger = logging.getLogger("IPOP Console & File Logger")
            self.logger.setLevel(level)
            console_handler = logging.StreamHandler()
            console_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                datefmt="%H:%M:%S")
            console_handler.setFormatter(console_log_formatter)
            self.logger.addHandler(console_handler)
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            # CONSISTENCY: honor the same rotation settings as the "File"
            # device; the defaults of 0 preserve the previous no-rotation
            # behavior when the keys are absent from the config.
            file_handler = lh.RotatingFileHandler(
                filename=fqname,
                maxBytes=self._cm_config.get("MaxFileSize", 0),
                backupCount=self._cm_config.get("MaxArchives", 0))
            file_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            file_handler.setFormatter(file_log_formatter)
            self.logger.addHandler(file_handler)
        self.logger.info("Logger: Module loaded")
        # PKTDUMP is a custom level (5, below DEBUG) used for packet dumps.
        logging.addLevelName(5, "PKTDUMP")
        logging.PKTDUMP = 5

    def process_cbt(self, cbt):
        """Dispatch log requests issued by other controller modules via CBTs."""
        if cbt.op_type == "Request":
            log_entry = "{0}: {1}".format(cbt.request.initiator, cbt.request.params)
            # The CBT action tag encodes the requested logging level.
            if cbt.request.action == "LOG_DEBUG" or cbt.request.action == "debug":
                self.logger.debug(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_INFO" or cbt.request.action == "info":
                self.logger.info(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_WARNING" or cbt.request.action == "warning":
                self.logger.warning(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_ERROR" or cbt.request.action == "error":
                self.logger.error(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "pktdump":
                self.pktdump(message=cbt.request.params.get("message"),
                             dump=cbt.request.params.get("dump"))
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_QUERY_CONFIG":
                cbt.set_response(self._cm_config, True)
            else:
                log = "Unsupported CBT action {0}".format(cbt)
                self.logger.warning("{0}: {1}".format(self._module_name, log))
                cbt.set_response(log, False)
            self.complete_cbt(cbt)
        elif cbt.op_type == "Response":
            self.free_cbt(cbt)

    def timer_method(self):
        # No periodic work for this module.
        pass

    def pktdump(self, message, dump=None, *args, **argv):
        """Log packet contents at the custom PKTDUMP level (5).

        ``dump`` may be bytes or a latin-1-decodable str of raw octets.
        """
        if dump:
            hext = ""
            for i in range(0, len(dump), 2):
                seg = dump[i:i + 2]
                # BUG FIX: str.encode("hex") is a Python 2 idiom and raises
                # LookupError on Python 3; use bytes.hex() instead.
                if not isinstance(seg, bytes):
                    seg = seg.encode("latin-1")
                hext += seg.hex()
                hext += " "
                if i % 16 == 14:
                    hext += "\n"
            logging.log(5, message + "\n" + hext)
        else:
            logging.log(5, message, *args, **argv)

    def terminate(self):
        """Flush and shut down the logging system."""
        logging.shutdown()
| 45.590604 | 97 | 0.602532 |
import logging
import logging.handlers as lh
import os
from controller.framework.ControllerModule import ControllerModule
class Logger(ControllerModule):
    """Controller module that routes log CBTs to Python's logging package.

    The sink (console, rotating file, or both) and the log level are read
    from this module's section of the ipop-config file.
    """

    def __init__(self, cfx_handle, module_config, module_name):
        super(Logger, self).__init__(cfx_handle, module_config, module_name)

    def initialize(self):
        """Configure the Python logging package from the module config."""
        # Controller log level from the ipop-config file; defaults to INFO.
        if "LogLevel" in self._cm_config:
            level = getattr(logging, self._cm_config["LogLevel"])
        else:
            # BUG FIX: was getattr(logging, "info"), which returns the
            # logging.info *function*, not the numeric INFO level constant.
            level = logging.INFO
        if self._cm_config["Device"] == "Console":
            # Console-only logging.
            logging.basicConfig(format="[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                                datefmt="%H:%M:%S",
                                level=level)
            self.logger = logging.getLogger("IPOP console logger")
        elif self._cm_config["Device"] == "File":
            # Rotating file logging; default to the current working directory.
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            self.logger = logging.getLogger("IPOP Rotating Log")
            self.logger.setLevel(level)
            handler = lh.RotatingFileHandler(filename=fqname,
                                             maxBytes=self._cm_config["MaxFileSize"],
                                             backupCount=self._cm_config["MaxArchives"])
            formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
        else:
            # "All": log to both the console and a rotating file.
            self.logger = logging.getLogger("IPOP Console & File Logger")
            self.logger.setLevel(level)
            console_handler = logging.StreamHandler()
            console_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s: %(message)s",
                datefmt="%H:%M:%S")
            console_handler.setFormatter(console_log_formatter)
            self.logger.addHandler(console_handler)
            filepath = self._cm_config.get("Directory", "./")
            fqname = filepath + \
                self._cm_config.get("CtrlLogFileName", "ctrl.log")
            if not os.path.isdir(filepath):
                os.mkdir(filepath)
            # CONSISTENCY: honor the same rotation settings as the "File"
            # device; the defaults of 0 preserve the previous no-rotation
            # behavior when the keys are absent from the config.
            file_handler = lh.RotatingFileHandler(
                filename=fqname,
                maxBytes=self._cm_config.get("MaxFileSize", 0),
                backupCount=self._cm_config.get("MaxArchives", 0))
            file_log_formatter = logging.Formatter(
                "[%(asctime)s.%(msecs)03d] %(levelname)s:%(message)s", datefmt="%Y%m%d %H:%M:%S")
            file_handler.setFormatter(file_log_formatter)
            self.logger.addHandler(file_handler)
        self.logger.info("Logger: Module loaded")
        # PKTDUMP is a custom level (5, below DEBUG) used for packet dumps.
        logging.addLevelName(5, "PKTDUMP")
        logging.PKTDUMP = 5

    def process_cbt(self, cbt):
        """Dispatch log requests issued by other controller modules via CBTs."""
        if cbt.op_type == "Request":
            log_entry = "{0}: {1}".format(cbt.request.initiator, cbt.request.params)
            # The CBT action tag encodes the requested logging level.
            if cbt.request.action == "LOG_DEBUG" or cbt.request.action == "debug":
                self.logger.debug(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_INFO" or cbt.request.action == "info":
                self.logger.info(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_WARNING" or cbt.request.action == "warning":
                self.logger.warning(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_ERROR" or cbt.request.action == "error":
                self.logger.error(log_entry)
                cbt.set_response(None, True)
            elif cbt.request.action == "pktdump":
                self.pktdump(message=cbt.request.params.get("message"),
                             dump=cbt.request.params.get("dump"))
                cbt.set_response(None, True)
            elif cbt.request.action == "LOG_QUERY_CONFIG":
                cbt.set_response(self._cm_config, True)
            else:
                log = "Unsupported CBT action {0}".format(cbt)
                self.logger.warning("{0}: {1}".format(self._module_name, log))
                cbt.set_response(log, False)
            self.complete_cbt(cbt)
        elif cbt.op_type == "Response":
            self.free_cbt(cbt)

    def timer_method(self):
        # No periodic work for this module.
        pass

    def pktdump(self, message, dump=None, *args, **argv):
        """Log packet contents at the custom PKTDUMP level (5).

        ``dump`` may be bytes or a latin-1-decodable str of raw octets.
        """
        if dump:
            hext = ""
            for i in range(0, len(dump), 2):
                seg = dump[i:i + 2]
                # BUG FIX: str.encode("hex") is a Python 2 idiom and raises
                # LookupError on Python 3; use bytes.hex() instead.
                if not isinstance(seg, bytes):
                    seg = seg.encode("latin-1")
                hext += seg.hex()
                hext += " "
                if i % 16 == 14:
                    hext += "\n"
            logging.log(5, message + "\n" + hext)
        else:
            logging.log(5, message, *args, **argv)

    def terminate(self):
        """Flush and shut down the logging system."""
        logging.shutdown()
| true | true |
f721aa11249df76d852759230ba85c6a027c2c3e | 3,271 | py | Python | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 32 | 2016-07-22T06:17:00.000Z | 2021-09-24T16:19:11.000Z | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 50 | 2016-06-28T09:36:00.000Z | 2022-03-18T13:03:18.000Z | libs/parse_ansible.py | realglobe-Inc/atom-autocomplete-ansible | 3752b7d893be35ca93a8e424c960e328c0d75bb9 | [
"MIT"
] | 22 | 2016-09-20T16:56:04.000Z | 2022-03-25T23:24:35.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import __main__
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.utils.display import Display
try:
from ansible.plugins.loader import lookup_loader, module_loader
from ansible.utils import plugin_docs
use_old_loader = False
BLACKLIST_MODULES = plugin_docs.BLACKLIST['MODULE']
except ImportError:
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs as plugin_docs
use_old_loader = True
BLACKLIST_MODULES = plugin_docs.BLACKLIST_MODULES
try:
from ansible.plugins.loader import fragment_loader
USE_FRAGMENT_LOADER = True
except ImportError:
fragment_loader = None
USE_FRAGMENT_LOADER = False
__main__.display = Display()
doc_cli = DocCLI(['ansible atom'])
def get_module_list():
    """Collect the names of every Ansible module visible on the search path.

    Populates the shared DocCLI's module/plugin list across all module
    paths and returns the de-duplicated, sorted names.
    """
    for search_path in module_loader._get_paths():
        if use_old_loader:
            doc_cli.find_modules(search_path)
        else:
            # The find_plugins signature changed between Ansible releases.
            try:
                found = doc_cli.find_plugins(search_path, 'module')
            except TypeError:
                found = doc_cli.find_plugins(search_path, 'plugins', 'module')
            if found:
                doc_cli.plugin_list.update(found)
    modules = doc_cli.module_list if use_old_loader else doc_cli.plugin_list
    return sorted(set(modules))
def main():
    """Build a JSON document describing Ansible modules, directives and
    lookup plugins for the editor's autocomplete.

    :return: JSON string with "modules", "directives" and "lookup_plugins".
    """
    module_keys = ('module', 'short_description', 'options', 'deprecated')
    result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
    # Summarize the docstring of every discoverable, non-blacklisted module.
    for module in get_module_list():
        if module in BLACKLIST_MODULES:
            continue
        filename = module_loader.find_plugin(module, mod_type='.py')
        if filename is None:
            continue
        if filename.endswith(".ps1"):
            continue
        if os.path.isdir(filename):
            continue
        get_docstring_args = ((filename, fragment_loader)
                              if USE_FRAGMENT_LOADER else (filename,))
        try:
            doc = plugin_docs.get_docstring(*get_docstring_args)[0]
            filtered_doc = {key: doc.get(key, None) for key in module_keys}
            result['modules'].append(filtered_doc)
        except Exception:
            # Best effort: modules whose docs cannot be parsed are skipped.
            # (Fixed: the original bound the exception to an unused name.)
            continue
    # Map each playbook directive to the object types that accept it.
    for aclass in (Play, Role, Block, Task):
        aobj = aclass()
        name = type(aobj).__name__
        # NOTE(review): 'private' in attr / attr.private mixes substring and
        # attribute access; behavior depends on the Ansible version's
        # _attributes container -- confirm before changing.
        for attr in aobj.__dict__['_attributes']:
            if 'private' in attr and attr.private:
                continue
            direct_target = result['directives'].setdefault(attr, [])
            direct_target.append(name)
            if attr == 'action':
                local_action = result['directives'].setdefault(
                    'local_action', [])
                local_action.append(name)
    result['directives']['with_'] = ['Task']
    # Lookup plugin names come from the file names on the lookup path.
    for lookup in lookup_loader.all(path_only=True):
        name = os.path.splitext(os.path.basename(lookup))[0]
        result['lookup_plugins'].append(name)
    return json.dumps(result)
# Script entry point: print the JSON summary consumed by the Atom plugin.
if __name__ == '__main__':
    print(main())
| 32.068627 | 75 | 0.64812 |
from __future__ import print_function, unicode_literals
import __main__
import json
import os
from ansible.cli.doc import DocCLI
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from ansible.utils.display import Display
try:
from ansible.plugins.loader import lookup_loader, module_loader
from ansible.utils import plugin_docs
use_old_loader = False
BLACKLIST_MODULES = plugin_docs.BLACKLIST['MODULE']
except ImportError:
from ansible.plugins import lookup_loader, module_loader
from ansible.utils import module_docs as plugin_docs
use_old_loader = True
BLACKLIST_MODULES = plugin_docs.BLACKLIST_MODULES
try:
from ansible.plugins.loader import fragment_loader
USE_FRAGMENT_LOADER = True
except ImportError:
fragment_loader = None
USE_FRAGMENT_LOADER = False
__main__.display = Display()
doc_cli = DocCLI(['ansible atom'])
def get_module_list():
    """Collect the names of every Ansible module visible on the search path.

    Populates the shared DocCLI's module/plugin list across all module
    paths and returns the de-duplicated, sorted names.
    """
    for search_path in module_loader._get_paths():
        if use_old_loader:
            doc_cli.find_modules(search_path)
        else:
            # The find_plugins signature changed between Ansible releases.
            try:
                found = doc_cli.find_plugins(search_path, 'module')
            except TypeError:
                found = doc_cli.find_plugins(search_path, 'plugins', 'module')
            if found:
                doc_cli.plugin_list.update(found)
    modules = doc_cli.module_list if use_old_loader else doc_cli.plugin_list
    return sorted(set(modules))
def main():
module_keys = ('module', 'short_description', 'options', 'deprecated')
result = {'modules': [], 'directives': {}, 'lookup_plugins': []}
for module in get_module_list():
if module in BLACKLIST_MODULES:
continue
filename = module_loader.find_plugin(module, mod_type='.py')
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
get_docstring_args = ((filename, fragment_loader)
if USE_FRAGMENT_LOADER else (filename,))
try:
doc = plugin_docs.get_docstring(*get_docstring_args)[0]
filtered_doc = {key: doc.get(key, None) for key in module_keys}
result['modules'].append(filtered_doc)
except Exception as e:
pass
for aclass in (Play, Role, Block, Task):
aobj = aclass()
name = type(aobj).__name__
for attr in aobj.__dict__['_attributes']:
if 'private' in attr and attr.private:
continue
direct_target = result['directives'].setdefault(attr, [])
direct_target.append(name)
if attr == 'action':
local_action = result['directives'].setdefault(
'local_action', [])
local_action.append(name)
result['directives']['with_'] = ['Task']
for lookup in lookup_loader.all(path_only=True):
name = os.path.splitext(os.path.basename(lookup))[0]
result['lookup_plugins'].append(name)
return json.dumps(result)
if __name__ == '__main__':
print(main())
| true | true |
f721aa8af2cd7cf530a4b76cbb10ce9276f81044 | 5,616 | py | Python | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | espnet/asr/pytorch_backend/asr_recog.py | MarkWuNLP/StreamingTransformer | df9bfe348608b7e55ef1ff70464070c0055ea799 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import json
import logging
import os
import numpy as np
import torch
from espnet.asr.asr_utils import add_results_to_json, add_single_results
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
def recog(args):
"""Decode with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
model.recog_args = args
# read rnnlm
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None), # for backward compatibility
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
# gpu
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
# read json data
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0]
if args.prefix_decode:
best, ids, score = model.prefix_recognize(feat, args, train_args, train_args.char_list, rnnlm)
new_js[name] = add_single_results(js[name], best, ids, score)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
def viterbi_decode(args):
set_deterministic_pytorch(args)
idim, odim, train_args = get_model_conf(
args.model, os.path.join(os.path.dirname(args.model), 'model.json'))
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
if args.model is not None:
load_params = dict(torch.load(args.model))
if 'model' in load_params:
load_params = dict(load_params['model'])
if 'state_dict' in load_params:
load_params = dict(load_params['state_dict'])
model_params = dict(model.named_parameters())
for k, v in load_params.items():
k = k.replace('module.', '')
if k in model_params and v.size() == model_params[k].size():
model_params[k].data = v.data
logging.warning('load parameters {}'.format(k))
model.recog_args = args
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
y = np.fromiter(map(int, batch[0][1]['output'][0]['tokenid'].split()), dtype=np.int64)
align = model.viterbi_decode(feat[0][0], y)
assert len(align) == len(y)
new_js[name] = js[name]
new_js[name]['output'][0]['align'] = ' '.join([str(i) for i in list(align)])
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
| 34.036364 | 110 | 0.615028 |
import json
import logging
import os
import numpy as np
import torch
from espnet.asr.asr_utils import add_results_to_json, add_single_results
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import torch_load
from espnet.asr.pytorch_backend.asr_init import load_trained_model
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
def recog(args):
set_deterministic_pytorch(args)
model, train_args = load_trained_model(args.model)
model.recog_args = args
if args.rnnlm:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
if getattr(rnnlm_args, "model_module", "default") != "default":
raise ValueError(
"use '--api v2' option to decode with non-default language model"
)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(train_args.char_list),
rnnlm_args.layer,
rnnlm_args.unit,
getattr(rnnlm_args, "embed_unit", None),
)
)
torch_load(args.rnnlm, rnnlm)
rnnlm.eval()
else:
rnnlm = None
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info("gpu id: " + str(gpu_id))
model.cuda()
if rnnlm:
rnnlm.cuda()
with open(args.recog_json, "rb") as f:
js = json.load(f)["utts"]
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode="asr",
load_output=False,
sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None
else args.preprocess_conf,
preprocess_args={"train": False},
)
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info("(%d/%d) decoding " + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
feat = feat[0][0]
if args.prefix_decode:
best, ids, score = model.prefix_recognize(feat, args, train_args, train_args.char_list, rnnlm)
new_js[name] = add_single_results(js[name], best, ids, score)
else:
nbest_hyps = model.recognize(
feat, args, train_args.char_list, rnnlm
)
new_js[name] = add_results_to_json(
js[name], nbest_hyps, train_args.char_list
)
with open(args.result_label, "wb") as f:
f.write(
json.dumps(
{"utts": new_js}, indent=4, ensure_ascii=False, sort_keys=True
).encode("utf_8")
)
def viterbi_decode(args):
set_deterministic_pytorch(args)
idim, odim, train_args = get_model_conf(
args.model, os.path.join(os.path.dirname(args.model), 'model.json'))
model_class = dynamic_import(train_args.model_module)
model = model_class(idim, odim, train_args)
if args.model is not None:
load_params = dict(torch.load(args.model))
if 'model' in load_params:
load_params = dict(load_params['model'])
if 'state_dict' in load_params:
load_params = dict(load_params['state_dict'])
model_params = dict(model.named_parameters())
for k, v in load_params.items():
k = k.replace('module.', '')
if k in model_params and v.size() == model_params[k].size():
model_params[k].data = v.data
logging.warning('load parameters {}'.format(k))
model.recog_args = args
if args.ngpu == 1:
gpu_id = list(range(args.ngpu))
logging.info('gpu id: ' + str(gpu_id))
model.cuda()
with open(args.recog_json, 'rb') as f:
js = json.load(f)['utts']
new_js = {}
load_inputs_and_targets = LoadInputsAndTargets(
mode='asr', load_output=False, sort_in_input_length=False,
preprocess_conf=train_args.preprocess_conf
if args.preprocess_conf is None else args.preprocess_conf,
preprocess_args={'train': False})
with torch.no_grad():
for idx, name in enumerate(js.keys(), 1):
logging.info('(%d/%d) decoding ' + name, idx, len(js.keys()))
batch = [(name, js[name])]
feat = load_inputs_and_targets(batch)
y = np.fromiter(map(int, batch[0][1]['output'][0]['tokenid'].split()), dtype=np.int64)
align = model.viterbi_decode(feat[0][0], y)
assert len(align) == len(y)
new_js[name] = js[name]
new_js[name]['output'][0]['align'] = ' '.join([str(i) for i in list(align)])
with open(args.result_label, 'wb') as f:
f.write(json.dumps({'utts': new_js}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
| true | true |
f721ab5c5621aefa332a1c1c49b2c98c1ff4fa57 | 2,408 | py | Python | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 22 | 2018-01-18T21:03:26.000Z | 2021-06-29T00:19:44.000Z | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 22 | 2018-02-22T17:08:50.000Z | 2021-11-07T09:20:18.000Z | pythonup/operations/common.py | uranusjr/pythonup-windows | af25844af1c5fdc8a90ae95435c8ce322e5e41e5 | [
"0BSD"
] | 2 | 2018-01-18T21:03:30.000Z | 2021-01-18T05:14:18.000Z | import functools
import click
from .. import configs, metadata, versions
def check_installation(version, *, installed=True, on_exit=None):
try:
installation = version.get_installation()
except FileNotFoundError:
if not installed: # Expected to be absent. Return None.
return None
message = '{} is not installed.'
else:
if installed: # Expected to be installed. Return the installation.
return installation
message = '{} is already installed.'
click.echo(message.format(version), err=True)
if on_exit:
on_exit()
click.get_current_context().exit(1)
def get_active_names():
return configs.get_active_names()
def set_active_versions(versions):
configs.set_active_names([v.name for v in versions])
def get_versions(*, installed_only):
vers = versions.get_versions()
names = set(v.name for v in vers)
def should_include(version):
if installed_only and not version.is_installed():
return False
# On a 32-bit host, hide 64-bit names if there is a 32-bit counterpart.
if (not metadata.can_install_64bit() and
not version.name.endswith('-32') and
'{}-32'.format(version.name) in names):
return False
return True
return [v for v in vers if should_include(v)]
def get_version(name):
force_32 = not metadata.can_install_64bit()
try:
version = versions.get_version(name, force_32=force_32)
except versions.VersionNotFoundError:
click.echo('No such version: {}'.format(name), err=True)
click.get_current_context().exit(1)
if version.name != name:
click.echo('Note: Selecting {} instead of {}'.format(
version.name, name,
))
return version
def version_command(*, plural=False, wild_versions=()):
if wild_versions:
def _get_version(n):
if n in wild_versions:
return n
return get_version(n)
else:
_get_version = get_version
def decorator(f):
@functools.wraps(f)
def wrapped(*args, version, **kw):
if plural:
kw['versions'] = [_get_version(n) for n in version]
else:
kw['version'] = _get_version(version)
return f(*args, **kw)
return wrapped
return decorator
| 28 | 79 | 0.618355 | import functools
import click
from .. import configs, metadata, versions
def check_installation(version, *, installed=True, on_exit=None):
try:
installation = version.get_installation()
except FileNotFoundError:
if not installed:
return None
message = '{} is not installed.'
else:
if installed:
return installation
message = '{} is already installed.'
click.echo(message.format(version), err=True)
if on_exit:
on_exit()
click.get_current_context().exit(1)
def get_active_names():
return configs.get_active_names()
def set_active_versions(versions):
configs.set_active_names([v.name for v in versions])
def get_versions(*, installed_only):
vers = versions.get_versions()
names = set(v.name for v in vers)
def should_include(version):
if installed_only and not version.is_installed():
return False
if (not metadata.can_install_64bit() and
not version.name.endswith('-32') and
'{}-32'.format(version.name) in names):
return False
return True
return [v for v in vers if should_include(v)]
def get_version(name):
force_32 = not metadata.can_install_64bit()
try:
version = versions.get_version(name, force_32=force_32)
except versions.VersionNotFoundError:
click.echo('No such version: {}'.format(name), err=True)
click.get_current_context().exit(1)
if version.name != name:
click.echo('Note: Selecting {} instead of {}'.format(
version.name, name,
))
return version
def version_command(*, plural=False, wild_versions=()):
if wild_versions:
def _get_version(n):
if n in wild_versions:
return n
return get_version(n)
else:
_get_version = get_version
def decorator(f):
@functools.wraps(f)
def wrapped(*args, version, **kw):
if plural:
kw['versions'] = [_get_version(n) for n in version]
else:
kw['version'] = _get_version(version)
return f(*args, **kw)
return wrapped
return decorator
| true | true |
f721ab81cbd00ed051aca6942799ab865c6412c5 | 238 | py | Python | frappe/website/doctype/website_route_redirect/website_route_redirect.py | ssuda777/frappe | d3f3df2ce15154aecc1d9d6d07d947e72c2e8c6e | [
"MIT"
] | 1 | 2021-06-03T07:04:48.000Z | 2021-06-03T07:04:48.000Z | frappe/website/doctype/website_route_redirect/website_route_redirect.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 3 | 2021-02-27T11:50:14.000Z | 2021-05-03T06:48:49.000Z | frappe/website/doctype/website_route_redirect/website_route_redirect.py | JMBodz/frappe | eb218a06d1cbfc3a8f1cc00ba8dac2c927d2f71d | [
"MIT"
] | 2 | 2021-09-02T09:51:55.000Z | 2021-09-07T04:55:42.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class WebsiteRouteRedirect(Document):
pass
| 23.8 | 58 | 0.768908 |
from frappe.model.document import Document
class WebsiteRouteRedirect(Document):
pass
| true | true |
f721abc28aeee16569cf14634251ef073a83b8f1 | 2,289 | py | Python | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | core/models.py | mackay/ble_detector | 4d7c3e9edd7bbeeea0bd0bebce43c1bb9d02ee41 | [
"MIT"
] | null | null | null | from peewee import *
import json
from datetime import datetime
#set sane default log levels
import logging
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger("peewee.pool").setLevel(logging.DEBUG)
database = SqliteDatabase('detector.db')
class JSONField(TextField):
def db_value(self, value):
if value is not None:
return json.dumps(value)
return None
def python_value(self, value):
if value is not None:
return json.loads(value)
class BaseModel(Model):
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__( *args, **kwargs )
self._meta.base_uri = self._meta.db_table
class Meta:
database = database
base_uri = "unknown"
class SystemOption(BaseModel):
key = CharField(max_length=64, unique=True, index=True)
value = CharField(max_length=255)
class ActiveEntity(BaseModel):
uuid = CharField(max_length=64, unique=True, index=True)
last_active = DateTimeField(null=True)
total_packets = IntegerField(default=0)
metadata = JSONField(null=True)
class Meta:
order_by = ('uuid', )
class Detector(ActiveEntity):
pass
class Beacon(ActiveEntity):
is_accepted = IntegerField(default=0)
class Agent(ActiveEntity):
pass
class Signal(BaseModel):
date = DateTimeField(default=datetime.utcnow)
detector = ForeignKeyField(rel_model=Detector)
beacon = ForeignKeyField(rel_model=Beacon)
rssi = FloatField()
source_data = CharField(max_length=255, null=True)
class Training(BaseModel):
date = DateTimeField(default=datetime.utcnow)
beacon = ForeignKeyField(rel_model=Beacon)
expectation = JSONField()
is_used = IntegerField(default=1)
class Meta:
order_by = ('date', 'expectation', 'beacon')
class TrainingSignal(BaseModel):
training = ForeignKeyField(rel_model=Training, related_name='signals')
signal = ForeignKeyField(rel_model=Signal)
def initialize():
database.connect()
database.create_tables([ SystemOption ], safe=True)
database.create_tables([ Detector, Beacon, Agent ], safe=True)
database.create_tables([ Signal ], safe=True)
database.create_tables([ Training, TrainingSignal ], safe=True)
database.close()
| 24.094737 | 74 | 0.70118 | from peewee import *
import json
from datetime import datetime
import logging
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger("peewee.pool").setLevel(logging.DEBUG)
database = SqliteDatabase('detector.db')
class JSONField(TextField):
def db_value(self, value):
if value is not None:
return json.dumps(value)
return None
def python_value(self, value):
if value is not None:
return json.loads(value)
class BaseModel(Model):
def __init__(self, *args, **kwargs):
super(BaseModel, self).__init__( *args, **kwargs )
self._meta.base_uri = self._meta.db_table
class Meta:
database = database
base_uri = "unknown"
class SystemOption(BaseModel):
key = CharField(max_length=64, unique=True, index=True)
value = CharField(max_length=255)
class ActiveEntity(BaseModel):
uuid = CharField(max_length=64, unique=True, index=True)
last_active = DateTimeField(null=True)
total_packets = IntegerField(default=0)
metadata = JSONField(null=True)
class Meta:
order_by = ('uuid', )
class Detector(ActiveEntity):
pass
class Beacon(ActiveEntity):
is_accepted = IntegerField(default=0)
class Agent(ActiveEntity):
pass
class Signal(BaseModel):
date = DateTimeField(default=datetime.utcnow)
detector = ForeignKeyField(rel_model=Detector)
beacon = ForeignKeyField(rel_model=Beacon)
rssi = FloatField()
source_data = CharField(max_length=255, null=True)
class Training(BaseModel):
date = DateTimeField(default=datetime.utcnow)
beacon = ForeignKeyField(rel_model=Beacon)
expectation = JSONField()
is_used = IntegerField(default=1)
class Meta:
order_by = ('date', 'expectation', 'beacon')
class TrainingSignal(BaseModel):
training = ForeignKeyField(rel_model=Training, related_name='signals')
signal = ForeignKeyField(rel_model=Signal)
def initialize():
database.connect()
database.create_tables([ SystemOption ], safe=True)
database.create_tables([ Detector, Beacon, Agent ], safe=True)
database.create_tables([ Signal ], safe=True)
database.create_tables([ Training, TrainingSignal ], safe=True)
database.close()
| true | true |
f721ae2772712944094b9c2e009ee6bae9dce86c | 827 | py | Python | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | app/main/models/EMI.py | pOrgz-dev/financial-api | edf849cfbcedf74a8b81f70683a1edfbea172fb7 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from .. import db
class EMI_Information(db.Model):
__tablename__ = "EMI_Information"
EMI_Identifier = db.Column(db.String(45),primary_key = True, nullable = False)
ItemName = db.Column(db.String(45), nullable = False)
ProductPrice = db.Column(db.Float, nullable = False)
InterestRate = db.Column(db.Float, nullable = False)
Tenure = db.Column(db.Integer, nullable = False)
MonthlyEMI = db.Column(db.Float, nullable = False)
def __repr__(self):
# return { c.key : getattr(self, c.key) for c in self.__table__.columns }
return f"<{self.EMI_Identifier}(ItemName = {self.ItemName}, ProductPrice = {self.ProductPrice}, Tenure = {self.Tenure}>"
def toDict(self):
return { c.key : getattr(self, c.key) for c in self.__table__.columns } | 41.35 | 128 | 0.666264 |
from .. import db
class EMI_Information(db.Model):
__tablename__ = "EMI_Information"
EMI_Identifier = db.Column(db.String(45),primary_key = True, nullable = False)
ItemName = db.Column(db.String(45), nullable = False)
ProductPrice = db.Column(db.Float, nullable = False)
InterestRate = db.Column(db.Float, nullable = False)
Tenure = db.Column(db.Integer, nullable = False)
MonthlyEMI = db.Column(db.Float, nullable = False)
def __repr__(self):
return f"<{self.EMI_Identifier}(ItemName = {self.ItemName}, ProductPrice = {self.ProductPrice}, Tenure = {self.Tenure}>"
def toDict(self):
return { c.key : getattr(self, c.key) for c in self.__table__.columns } | true | true |
f721aeecd78fde51b1f23b627ac73ea974b16e4f | 5,118 | py | Python | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | 1 | 2021-05-13T07:54:08.000Z | 2021-05-13T07:54:08.000Z | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | null | null | null | draw_tracking_line.py | jiyauppal/face-mask-detector.github.io | 210ce81fa37c441a076fbb8db28376268e634412 | [
"Apache-2.0"
] | null | null | null | import cv2
import datetime
import imutils
import numpy as np
from centroidtracker import CentroidTracker
from collections import defaultdict
protopath = "MobileNetSSD_deploy.prototxt"
modelpath = "MobileNetSSD_deploy.caffemodel"
detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
# Only enable it if you are using OpenVino environment
# detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_INFERENCE_ENGINE)
# detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
def non_max_suppression_fast(boxes, overlapThresh):
try:
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
return boxes[pick].astype("int")
except Exception as e:
print("Exception occurred in non_max_suppression : {}".format(e))
def main():
cap = cv2.VideoCapture('test_video.mp4')
fps_start_time = datetime.datetime.now()
fps = 0
total_frames = 0
centroid_dict = defaultdict(list)
object_id_list = []
while True:
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
total_frames = total_frames + 1
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
detector.setInput(blob)
person_detections = detector.forward()
rects = []
for i in np.arange(0, person_detections.shape[2]):
confidence = person_detections[0, 0, i, 2]
if confidence > 0.5:
idx = int(person_detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = person_box.astype("int")
rects.append(person_box)
boundingboxes = np.array(rects)
boundingboxes = boundingboxes.astype(int)
rects = non_max_suppression_fast(boundingboxes, 0.3)
objects = tracker.update(rects)
for (objectId, bbox) in objects.items():
x1, y1, x2, y2 = bbox
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
cX = int((x1 + x2) / 2.0)
cY = int((y1 + y2) / 2.0)
cv2.circle(frame, (cX, cY), 4, (0, 255, 0), -1)
centroid_dict[objectId].append((cX, cY))
if objectId not in object_id_list:
object_id_list.append(objectId)
start_pt = (cX, cY)
end_pt = (cX, cY)
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
else:
l = len(centroid_dict[objectId])
for pt in range(len(centroid_dict[objectId])):
if not pt + 1 == l:
start_pt = (centroid_dict[objectId][pt][0], centroid_dict[objectId][pt][1])
end_pt = (centroid_dict[objectId][pt + 1][0], centroid_dict[objectId][pt + 1][1])
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
text = "ID: {}".format(objectId)
cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
fps_end_time = datetime.datetime.now()
time_diff = fps_end_time - fps_start_time
if time_diff.seconds == 0:
fps = 0.0
else:
fps = (total_frames / time_diff.seconds)
fps_text = "FPS: {:.2f}".format(fps)
cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
cv2.imshow("Application", frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
cv2.destroyAllWindows()
main()
| 33.45098 | 106 | 0.524424 | import cv2
import datetime
import imutils
import numpy as np
from centroidtracker import CentroidTracker
from collections import defaultdict
protopath = "MobileNetSSD_deploy.prototxt"
modelpath = "MobileNetSSD_deploy.caffemodel"
detector = cv2.dnn.readNetFromCaffe(prototxt=protopath, caffeModel=modelpath)
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
tracker = CentroidTracker(maxDisappeared=80, maxDistance=90)
def non_max_suppression_fast(boxes, overlapThresh):
try:
if len(boxes) == 0:
return []
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
pick = []
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap = (w * h) / area[idxs[:last]]
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
return boxes[pick].astype("int")
except Exception as e:
print("Exception occurred in non_max_suppression : {}".format(e))
def main():
cap = cv2.VideoCapture('test_video.mp4')
fps_start_time = datetime.datetime.now()
fps = 0
total_frames = 0
centroid_dict = defaultdict(list)
object_id_list = []
while True:
ret, frame = cap.read()
frame = imutils.resize(frame, width=600)
total_frames = total_frames + 1
(H, W) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)
detector.setInput(blob)
person_detections = detector.forward()
rects = []
for i in np.arange(0, person_detections.shape[2]):
confidence = person_detections[0, 0, i, 2]
if confidence > 0.5:
idx = int(person_detections[0, 0, i, 1])
if CLASSES[idx] != "person":
continue
person_box = person_detections[0, 0, i, 3:7] * np.array([W, H, W, H])
(startX, startY, endX, endY) = person_box.astype("int")
rects.append(person_box)
boundingboxes = np.array(rects)
boundingboxes = boundingboxes.astype(int)
rects = non_max_suppression_fast(boundingboxes, 0.3)
objects = tracker.update(rects)
for (objectId, bbox) in objects.items():
x1, y1, x2, y2 = bbox
x1 = int(x1)
y1 = int(y1)
x2 = int(x2)
y2 = int(y2)
cX = int((x1 + x2) / 2.0)
cY = int((y1 + y2) / 2.0)
cv2.circle(frame, (cX, cY), 4, (0, 255, 0), -1)
centroid_dict[objectId].append((cX, cY))
if objectId not in object_id_list:
object_id_list.append(objectId)
start_pt = (cX, cY)
end_pt = (cX, cY)
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
else:
l = len(centroid_dict[objectId])
for pt in range(len(centroid_dict[objectId])):
if not pt + 1 == l:
start_pt = (centroid_dict[objectId][pt][0], centroid_dict[objectId][pt][1])
end_pt = (centroid_dict[objectId][pt + 1][0], centroid_dict[objectId][pt + 1][1])
cv2.line(frame, start_pt, end_pt, (0, 255, 0), 2)
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255), 2)
text = "ID: {}".format(objectId)
cv2.putText(frame, text, (x1, y1 - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
fps_end_time = datetime.datetime.now()
time_diff = fps_end_time - fps_start_time
if time_diff.seconds == 0:
fps = 0.0
else:
fps = (total_frames / time_diff.seconds)
fps_text = "FPS: {:.2f}".format(fps)
cv2.putText(frame, fps_text, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 1)
cv2.imshow("Application", frame)
key = cv2.waitKey(1)
if key == ord('q'):
break
cv2.destroyAllWindows()
main()
| true | true |
f721aef7525b920408840cd454d2a33a4df2714c | 1,953 | py | Python | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 27 | 2018-06-15T15:28:18.000Z | 2022-03-10T12:23:50.000Z | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 22 | 2018-06-14T08:29:16.000Z | 2021-07-05T13:33:44.000Z | setup.py | PyXRD/pyxrd | 26bacdf64f3153fa74b8caa62e219b76d91a55c1 | [
"BSD-2-Clause"
] | 8 | 2019-04-13T13:03:51.000Z | 2021-06-19T09:29:11.000Z | #!/usr/bin/env python3
import os
from setuptools import setup, find_packages
def get_version():
from pyxrd.__version import __version__
if __version__.startswith("v"):
__version__ = __version__.replace("v", "")
return "%s" % __version__
def get_install_requires():
return [
'setuptools',
'numpy>=1.11',
'scipy>=1.1.0',
'matplotlib>=2.2.2',
'Pyro4>=4.41',
'deap>=1.0.1',
'cairocffi',
'pygobject>=3.20'
]
def read(fname):
    """Return the text content of *fname*, resolved relative to this file."""
    target = os.path.join(os.path.dirname(__file__), fname)
    with open(target) as handle:
        return handle.read()
# Distribution metadata and build configuration for the PyXRD package.
setup(
    name="PyXRD",
    version=get_version(),
    description="PyXRD is a python implementation of the matrix algorithm developed for the X-ray diffraction analysis of disordered lamellar structures",
    long_description=read('README.md'),
    keywords="XRD disorder mixed-layers",
    author="Mathijs Dumon",
    author_email="mathijs.dumon@gmail.com",
    # NOTE(review): URL uses "github.org" — presumably "github.com"; confirm.
    url="http://github.org/mathijs-dumon/PyXRD",
    license="BSD",
    setup_requires=[ "setuptools_git >= 1.2", ],
    # Ship everything except test packages.
    packages=find_packages(exclude=["test.*", "test", "tests_mvc", "tests_mvc.*"]),
    include_package_data=True,
    install_requires=get_install_requires(),
    zip_safe=False,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.4",
        "Environment :: Win32 (MS Windows)",
        "Environment :: X11 Applications :: Gnome",
        "Environment :: X11 Applications :: GTK",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Visualization",
        "Natural Language :: English",
        "License :: OSI Approved :: BSD License",
    ],
)
| 31.5 | 154 | 0.622632 |
import os
from setuptools import setup, find_packages
def get_version():
    """Return the PyXRD version string with the "v" tag prefix removed."""
    from pyxrd.__version import __version__
    if __version__.startswith("v"):
        # NOTE(review): replace() strips *every* "v" in the string, not just
        # the leading one — verify against versions containing "v" elsewhere.
        __version__ = __version__.replace("v", "")
    return "%s" % __version__
def get_install_requires():
    """Return the list of runtime dependencies required to install PyXRD."""
    return [
        'setuptools',
        'numpy>=1.11',
        'scipy>=1.1.0',
        'matplotlib>=2.2.2',
        'Pyro4>=4.41',
        'deap>=1.0.1',
        'cairocffi',
        'pygobject>=3.20'
    ]
def read(fname):
    """Return the text content of *fname*, resolved relative to this file."""
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Distribution metadata and build configuration for the PyXRD package.
setup(
    name="PyXRD",
    version=get_version(),
    description="PyXRD is a python implementation of the matrix algorithm developed for the X-ray diffraction analysis of disordered lamellar structures",
    long_description=read('README.md'),
    keywords="XRD disorder mixed-layers",
    author="Mathijs Dumon",
    author_email="mathijs.dumon@gmail.com",
    url="http://github.org/mathijs-dumon/PyXRD",
    license="BSD",
    setup_requires=[ "setuptools_git >= 1.2", ],
    packages=find_packages(exclude=["test.*", "test", "tests_mvc", "tests_mvc.*"]),
    include_package_data=True,
    install_requires=get_install_requires(),
    zip_safe=False,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 3.4",
        "Environment :: Win32 (MS Windows)",
        "Environment :: X11 Applications :: Gnome",
        "Environment :: X11 Applications :: GTK",
        "Intended Audience :: End Users/Desktop",
        "Intended Audience :: Science/Research",
        "Topic :: Utilities",
        "Topic :: Scientific/Engineering",
        "Topic :: Scientific/Engineering :: Visualization",
        "Natural Language :: English",
        "License :: OSI Approved :: BSD License",
    ],
)
| true | true |
f721afa5606a9e63a7128757986d8b2a4eb9a224 | 2,755 | py | Python | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | 3 | 2020-12-12T03:17:13.000Z | 2021-02-21T01:43:29.000Z | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | 5 | 2021-02-03T04:15:03.000Z | 2021-03-17T07:29:14.000Z | scripts/py_scripts/calculate_cluster_average.py | Elenadisa/PhenCo | f320fc286b90ec566afb5edfe3d6d1e3dcc28497 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
##############################################################################################################################################
# METHODS
##############################################################################################################################################
import functions as fn
##############################################################################################################################################
# OPTPARSE
##############################################################################################################################################
import optparse
# Command-line interface: input cluster file, column indices, and labels
# used to tag the printed result line.
parser = optparse.OptionParser()
parser.add_option("-c", "--cluster file", dest="dictionary",
help="Input file with the clusters of a network", metavar="FILE")
parser.add_option("-A", "--cluster_id", dest="cluster_id",
help="column which have clusters identificators", type='int')
parser.add_option("-B", "--item_id", dest="item_id",
help="column which have HPO o disease identificators", type='int')
parser.add_option("-m", "--model", dest="model_type",
help="network_type", metavar="str")
parser.add_option("-n", "--model_name", dest="model_name",
help="network_name", metavar="str")
parser.add_option("-e", "--enrichment_type", dest="enrichment",
help="type of enrichment", metavar="str")
parser.add_option("-p", "--p_value", dest="pvalue",
help="pvalue", metavar="float")
(options, args) = parser.parse_args()
###############################################################################################################################################
# MAIN
###############################################################################################################################################
import numpy as np
import os.path as path
# If the cluster file exists, build a cluster -> items dictionary and compute
# the average number of items per cluster; otherwise report a mean of 0.
if path.exists(options.dictionary): #if the dictionary is non-empty append the length of every cluster to the list, else append 0
	dictionary = fn.build_dictionary(options.dictionary, options.cluster_id, options.item_id)
	size = [] #per-cluster item counts
	if int(len(dictionary)) != 0:
		for cluster_id in dictionary:
			size.append(len(dictionary[cluster_id]))
	else:
		size.append(0)
	mean = np.mean(size) #Calculate the mean of the clusters length
else :
	mean = 0
# Emit one tab-separated summary row: model name, type, metric label, value.
print(options.model_name + "\t" + options.model_type + "\t" + "Average_Cluster_size_" + options.enrichment + "_" + options.pvalue + "\t" + str(mean))
| 50.090909 | 154 | 0.450091 | true | true | |
f721b00012139ce758efe463a3d3ca112283819e | 1,375 | py | Python | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 8 | 2015-01-29T19:16:40.000Z | 2021-01-08T05:55:03.000Z | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 12 | 2021-01-05T06:46:37.000Z | 2022-03-30T19:06:26.000Z | docs/development/custom-vectors/secp256k1/verify_secp256k1.py | dvaerum/cryptography | 63dfc57fca688d0f8d0515001f249c317d5e54dc | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 5 | 2015-11-06T01:47:01.000Z | 2021-12-01T00:22:52.000Z | from __future__ import absolute_import, print_function
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature,
)
from tests.utils import load_fips_ecdsa_signing_vectors, load_vectors_from_file
# Maps the digest names used in the NIST-style vector files to the
# corresponding cryptography hash classes.
CRYPTOGRAPHY_HASH_TYPES = {
    "SHA-1": hashes.SHA1,
    "SHA-224": hashes.SHA224,
    "SHA-256": hashes.SHA256,
    "SHA-384": hashes.SHA384,
    "SHA-512": hashes.SHA512,
}
def verify_one_vector(vector):
    """Verify a single ECDSA signing test vector against SECP256K1.

    Builds the public key from the vector's (x, y) coordinates, DER-encodes
    the (r, s) signature, and verifies it over the vector's message.
    Raises ``cryptography.exceptions.InvalidSignature`` on mismatch.
    """
    digest_algorithm = vector["digest_algorithm"]
    message = vector["message"]
    x = vector["x"]
    y = vector["y"]
    signature = encode_dss_signature(vector["r"], vector["s"])
    numbers = ec.EllipticCurvePublicNumbers(x, y, ec.SECP256K1())
    key = numbers.public_key(default_backend())
    # key.verifier() was deprecated in cryptography 1.5 and removed in 2.0;
    # the one-shot verify() call replaces verifier()/update()/verify().
    key.verify(
        signature,
        message,
        ec.ECDSA(CRYPTOGRAPHY_HASH_TYPES[digest_algorithm]()),
    )
def verify_vectors(vectors):
    """Verify every ECDSA signing test vector in *vectors*."""
    for single_vector in vectors:
        verify_one_vector(single_vector)
# Load the FIPS-style SECP256K1 signing vectors shipped with the test suite
# and verify each of them when this script is executed.
vector_path = os.path.join("asymmetric", "ECDSA", "SECP256K1", "SigGen.txt")
secp256k1_vectors = load_vectors_from_file(
    vector_path, load_fips_ecdsa_signing_vectors
)
verify_vectors(secp256k1_vectors)
| 25.943396 | 79 | 0.744 | from __future__ import absolute_import, print_function
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.asymmetric.utils import (
encode_dss_signature,
)
from tests.utils import load_fips_ecdsa_signing_vectors, load_vectors_from_file
# Maps digest names from the vector files to cryptography hash classes.
CRYPTOGRAPHY_HASH_TYPES = {
    "SHA-1": hashes.SHA1,
    "SHA-224": hashes.SHA224,
    "SHA-256": hashes.SHA256,
    "SHA-384": hashes.SHA384,
    "SHA-512": hashes.SHA512,
}
def verify_one_vector(vector):
    """Verify a single ECDSA signing test vector against SECP256K1."""
    digest_algorithm = vector["digest_algorithm"]
    message = vector["message"]
    x = vector["x"]
    y = vector["y"]
    signature = encode_dss_signature(vector["r"], vector["s"])
    numbers = ec.EllipticCurvePublicNumbers(x, y, ec.SECP256K1())
    key = numbers.public_key(default_backend())
    # NOTE(review): key.verifier() is the pre-2.0 cryptography API (removed
    # in 2.0 in favour of key.verify()) — confirm the pinned library version.
    verifier = key.verifier(
        signature, ec.ECDSA(CRYPTOGRAPHY_HASH_TYPES[digest_algorithm]())
    )
    verifier.update(message)
    verifier.verify()
def verify_vectors(vectors):
    """Verify every ECDSA signing test vector in *vectors*."""
    for vector in vectors:
        verify_one_vector(vector)
# Load the SECP256K1 signing vectors and verify each of them.
vector_path = os.path.join("asymmetric", "ECDSA", "SECP256K1", "SigGen.txt")
secp256k1_vectors = load_vectors_from_file(
    vector_path, load_fips_ecdsa_signing_vectors
)
verify_vectors(secp256k1_vectors)
| true | true |
f721b000aa08bdf2f6fa4ebe1f323827ac57b123 | 217 | py | Python | 3rdParty/V8/v5.7.0.0/tools/foozzie/testdata/test_d8_1.py | jjzhang166/avocadodb | 948d94592c10731857c8617b133bda840b8e833e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | null | null | null | 3rdParty/V8/v5.7.0.0/tools/foozzie/testdata/test_d8_1.py | jjzhang166/avocadodb | 948d94592c10731857c8617b133bda840b8e833e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | null | null | null | 3rdParty/V8/v5.7.0.0/tools/foozzie/testdata/test_d8_1.py | jjzhang166/avocadodb | 948d94592c10731857c8617b133bda840b8e833e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | null | null | null | # Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Python 2 print statement: emits the canned d8 output block consumed by the
# foozzie fuzzer-harness tests (the literal's exact whitespace is significant).
print """
1
2
weird error
^
3
unknown
"""
| 16.692308 | 72 | 0.695853 |
# Python 2 print statement: emits the canned d8 output block consumed by the
# foozzie fuzzer-harness tests (the literal's exact whitespace is significant).
print """
1
2
weird error
^
3
unknown
"""
| false | true |
f721b0aaa3a21ebd95d28ba898211ca8c479b10e | 4,747 | py | Python | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | mlprodict/onnx_tools/optim/onnx_optimisation_identity.py | henrywu2019/mlprodict | 4c09dc39d5ba7a7235fa321d80c81b5bf4f078ad | [
"MIT"
] | null | null | null | """
@file
@brief Optimisation of :epkg:`ONNX` graphs.
"""
from onnx.helper import make_graph
from ._onnx_optimisation_common import ( # pylint: disable=E0611
_rename_node_input,
_rename_node_output,
_apply_optimisation_on_graph,
_apply_remove_node_fct_node
)
def onnx_remove_node_identity(onnx_model, recursive=True, debug_info=None, **options):
    """
    Removes as many *Identity* nodes as possible.
    The function looks into every node and subgraphs if
    *recursive* is True for identity node. Unless such a
    node directly connects one input to one output, it will
    be removed and every other node gets its inputs or
    outputs accordingly renamed.
    @param onnx_model onnx model
    @param recursive looks into subgraphs
    @param debug_info debug information (private)
    @param options additional options (unused)
    @return new onnx _model
    """
    # Accumulate a breadcrumb of visited model/graph type names for debugging.
    if debug_info is None:
        debug_info = [str(type(onnx_model)).rsplit(
            '.', maxsplit=1)[-1].strip("'>")]
    else:
        debug_info = (debug_info +
                      [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip("'>")])
    # A full ModelProto wraps a GraphProto; delegate so the rest of the
    # function only ever deals with a graph.
    if hasattr(onnx_model, 'graph'):
        return _apply_optimisation_on_graph(
            onnx_remove_node_identity, onnx_model,
            recursive=recursive, debug_info=debug_info, **options)
    graph = onnx_model
    inputs = set(i.name for i in graph.input)
    outputs = set(o.name for o in graph.output)
    def retrieve_idnodes(graph, existing_nodes):
        # Collect (index, node, input-name, output-name) for every Identity
        # node still present (removed nodes are replaced by None in-place).
        idnodes = []
        for i, exnode in enumerate(existing_nodes):
            if exnode is None:
                continue
            if exnode.op_type == 'Identity':
                input = exnode.input[0]
                output = exnode.output[0]
                idnodes.append((i, exnode, input, output))
        return idnodes
    nodes = list(graph.node)
    rem = 1
    # Fixed-point loop: keep rewriting until a full pass removes nothing.
    while rem > 0:
        rem = 0
        idnodes = retrieve_idnodes(graph, nodes)
        restart = False
        for i, _, inp, out in idnodes:
            if restart:
                break  # pragma: no cover
            if nodes[i] is None:
                # Already removed.
                continue  # pragma: no cover
            if inp in inputs and out in outputs:
                # Cannot be removed: it directly bridges a graph input
                # to a graph output.
                continue
            if not restart and out not in outputs:
                # We cannot change an output name, so rewire consumers of
                # `out` to read `inp` instead, then drop the Identity node.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if out in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], out, inp)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                nodes[i] = None
                rem += 1
                continue
            if not restart and inp not in inputs and inp not in outputs:
                # We cannot change an input name or an output name, so rename
                # the producer of `inp` (and any other consumer) to use `out`.
                for j in range(len(nodes)):  # pylint: disable=C0200
                    if nodes[j] is None:
                        continue
                    if inp in nodes[j].output:
                        nodes[j] = _rename_node_output(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True  # pragma: no cover
                    if inp in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
    if recursive:
        # Handles subgraphs (e.g. If/Loop/Scan node attributes).
        for i in range(len(nodes)):  # pylint: disable=C0200
            node = nodes[i]
            if node is None or not (node.attribute):  # pylint: disable=C0325
                continue
            nodes[i] = _apply_remove_node_fct_node(
                onnx_remove_node_identity,
                node, recursive=True, debug_info=debug_info + [node.name])
    # Finally create the new graph, skipping the removed (None) entries.
    nodes = list(filter(lambda n: n is not None, nodes))
    graph = make_graph(nodes, onnx_model.name,
                       onnx_model.input, onnx_model.output,
                       onnx_model.initializer)
    graph.value_info.extend(onnx_model.value_info)  # pylint: disable=E1101
    return graph
| 39.231405 | 87 | 0.52391 | from onnx.helper import make_graph
from ._onnx_optimisation_common import (
_rename_node_input,
_rename_node_output,
_apply_optimisation_on_graph,
_apply_remove_node_fct_node
)
def onnx_remove_node_identity(onnx_model, recursive=True, debug_info=None, **options):
    """Remove as many *Identity* nodes as possible from an ONNX graph.

    Identity nodes that do not directly bridge a graph input to a graph
    output are dropped, and the surrounding nodes' input/output names are
    rewired accordingly. Subgraphs are processed too when *recursive* is
    True. Returns a new graph; *options* are unused.
    """
    # Accumulate a breadcrumb of visited type names for debugging.
    if debug_info is None:
        debug_info = [str(type(onnx_model)).rsplit(
            '.', maxsplit=1)[-1].strip("'>")]
    else:
        debug_info = (debug_info +
                      [str(type(onnx_model)).rsplit('.', maxsplit=1)[-1].strip("'>")])
    # A ModelProto wraps a GraphProto; delegate so only graphs are handled below.
    if hasattr(onnx_model, 'graph'):
        return _apply_optimisation_on_graph(
            onnx_remove_node_identity, onnx_model,
            recursive=recursive, debug_info=debug_info, **options)
    graph = onnx_model
    inputs = set(i.name for i in graph.input)
    outputs = set(o.name for o in graph.output)
    def retrieve_idnodes(graph, existing_nodes):
        # Collect (index, node, input-name, output-name) for every Identity
        # node still present (removed nodes are replaced by None in-place).
        idnodes = []
        for i, exnode in enumerate(existing_nodes):
            if exnode is None:
                continue
            if exnode.op_type == 'Identity':
                input = exnode.input[0]
                output = exnode.output[0]
                idnodes.append((i, exnode, input, output))
        return idnodes
    nodes = list(graph.node)
    rem = 1
    # Fixed-point loop: keep rewriting until a full pass removes nothing.
    while rem > 0:
        rem = 0
        idnodes = retrieve_idnodes(graph, nodes)
        restart = False
        for i, _, inp, out in idnodes:
            if restart:
                break
            if nodes[i] is None:
                # Already removed earlier in this pass.
                continue
            if inp in inputs and out in outputs:
                # Bridges a graph input to a graph output: must be kept.
                continue
            if not restart and out not in outputs:
                # Rewire consumers of `out` to read `inp`, then drop the node.
                for j in range(len(nodes)):
                    if nodes[j] is None:
                        continue
                    if out in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], out, inp)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
                continue
            if not restart and inp not in inputs and inp not in outputs:
                # Rename the producer of `inp` (and other users) to use `out`.
                for j in range(len(nodes)):
                    if nodes[j] is None:
                        continue
                    if inp in nodes[j].output:
                        nodes[j] = _rename_node_output(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                    if inp in nodes[j].input:
                        nodes[j] = _rename_node_input(nodes[j], inp, out)
                        rem += 1
                        if nodes[j].op_type == 'Identity':
                            restart = True
                nodes[i] = None
                rem += 1
    if recursive:
        # Recurse into subgraphs held in node attributes.
        for i in range(len(nodes)):
            node = nodes[i]
            if node is None or not (node.attribute):
                continue
            nodes[i] = _apply_remove_node_fct_node(
                onnx_remove_node_identity,
                node, recursive=True, debug_info=debug_info + [node.name])
    # Build the resulting graph, skipping removed (None) entries.
    nodes = list(filter(lambda n: n is not None, nodes))
    graph = make_graph(nodes, onnx_model.name,
                       onnx_model.input, onnx_model.output,
                       onnx_model.initializer)
    graph.value_info.extend(onnx_model.value_info)
    return graph
| true | true |
f721b154eb6f80cea86ed321cc3199bcce85024f | 300 | py | Python | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
] | null | null | null | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
] | null | null | null | 01-code-scripts/example.py | calekochenour/python-formatter-env | 9cc0b484e9b8b8d17a8abe5d2f9f49af953a7790 | [
"BSD-3-Clause"
] | null | null | null | def example_function(first_parameter, second_parameter, third_parameter, fourth_parameter, fifth_parameter):
"""Example function to test the code formatter."""
parameter_sum = first_parameter + second_parameter + third_parameter + fourth_parameter + fifth_parameter
return parameter_sum
| 50 | 109 | 0.806667 | def example_function(first_parameter, second_parameter, third_parameter, fourth_parameter, fifth_parameter):
parameter_sum = first_parameter + second_parameter + third_parameter + fourth_parameter + fifth_parameter
return parameter_sum
| true | true |
f721b168bc3ebd2c6a8be74cae0fb14973d58fc0 | 4,618 | py | Python | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 3 | 2015-08-19T12:50:05.000Z | 2017-10-25T00:58:05.000Z | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 169 | 2015-08-03T11:25:49.000Z | 2022-02-10T08:06:20.000Z | examples/orcid_app.py | jennur/invenio-oauthclient | 9b8bd7bc8bcbbe178aad3f0f8a2e620749c9980b | [
"MIT"
] | 73 | 2015-08-03T15:16:05.000Z | 2022-03-07T15:34:36.000Z | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development with orcid handler.
SPHINX-START
1. Register an orcid application with `Authorization callback URL` as
`http://localhost:5000/oauth/authorized/orcid/`
2. Install oauthclient:
.. code-block:: console
cdvirtualenv src/invenio-oauthclient
pip install -e .[orcid]
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration as `consumer_key` and
`consumer_secret`.
.. code-block:: console
$ export ORCID_APP_CREDENTIALS_KEY=my_orcid_client_id
$ export ORCID_APP_CREDENTIALS_SECRET=my_orcid_client_secret
4. Create database and tables:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ export FLASK_APP=orcid_app.py
$ ./app-setup.sh
You can find the database in `examples/orcid_app.db`.
5. Run the development server:
.. code-block:: console
$ flask -a orcid_app.py run -p 5000 -h '0.0.0.0'
6. Open in a browser the page `http://0.0.0.0:5000/orcid`.
You will be redirected to orcid to authorize the application.
Click on `Authorize application` and you will be redirected back to
`http://0.0.0.0:5000/oauth/authorized/orcid/`, where you will be able to
finalize the local user registration, inserting email address.
Insert e.g. `fuu@bar.it` as email address and send the form.
   Now you will be back on the homepage, but this time it says: `hello fuu@bar.it`.
You have completed the user registration.
7. To be able to uninstall the example app:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail as Mail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import \
blueprint_api_init as blueprint_userprofile_api_init
from invenio_userprofiles.views import \
blueprint_ui_init as blueprint_userprofile_ui_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import orcid
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug # noqa isort:skip
monkey_patch_werkzeug() # noqa isort:skip
from flask_oauthlib.client import OAuth as FlaskOAuth # noqa isort:skip
# [ Configure application credentials ]
# OAuth client credentials are taken from the environment (see module
# docstring, step 3).
ORCID_APP_CREDENTIALS = dict(
    consumer_key=os.environ.get('ORCID_APP_CREDENTIALS_KEY'),
    consumer_secret=os.environ.get('ORCID_APP_CREDENTIALS_SECRET'),
)
# Create Flask application
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_ECHO=False,
    SQLALCHEMY_DATABASE_URI=os.environ.get(
        'SQLALCHEMY_DATABASE_URI', 'sqlite:///orcid_app.db'
    ),
    # Use the ORCID *sandbox* endpoints — suitable for development only.
    OAUTHCLIENT_REMOTE_APPS=dict(
        orcid=orcid.REMOTE_SANDBOX_APP,
    ),
    ORCID_APP_CREDENTIALS=ORCID_APP_CREDENTIALS,
    DEBUG=True,
    SECRET_KEY='TEST',
    SECURITY_PASSWORD_SALT='security-password-salt',
    SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
    USERPROFILES_EXTEND_SECURITY_FORMS=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    APP_THEME=['semantic-ui'],
    THEME_ICONS={
        'semantic-ui': dict(
            link='linkify icon'
        )
    }
)
# Initialise the Invenio/Flask extensions on the application.
Babel(app)
FlaskMenu(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
# Register the account, OAuth client, settings and user-profile blueprints.
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_api_init)
app.register_blueprint(blueprint_userprofile_ui_init)
@app.route('/')
def index():
    """Serve the landing page, which is reachable without authentication."""
    message = 'Home page (without any restrictions)'
    return message
# NOTE(review): this view function rebinds the name `orcid`, shadowing the
# `orcid` module imported from invenio_oauthclient.contrib above. It works
# because the module is only referenced before this definition runs, but the
# shadowing is confusing — consider renaming the view.
@app.route('/orcid')
def orcid():
    """Try to print user email or redirect to login with orcid."""
    # Anonymous visitors are sent through the ORCID OAuth login flow.
    if not current_user.is_authenticated:
        return redirect(url_for('invenio_oauthclient.login',
                                remote_app='orcid'))
    return 'hello {}'.format(current_user.email)
| 28.8625 | 80 | 0.750325 |
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail as Mail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import \
blueprint_api_init as blueprint_userprofile_api_init
from invenio_userprofiles.views import \
blueprint_ui_init as blueprint_userprofile_ui_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import orcid
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug
monkey_patch_werkzeug()
from flask_oauthlib.client import OAuth as FlaskOAuth
# OAuth client credentials are taken from the environment.
ORCID_APP_CREDENTIALS = dict(
    consumer_key=os.environ.get('ORCID_APP_CREDENTIALS_KEY'),
    consumer_secret=os.environ.get('ORCID_APP_CREDENTIALS_SECRET'),
)
# Create and configure the Flask application (ORCID sandbox endpoints).
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_ECHO=False,
    SQLALCHEMY_DATABASE_URI=os.environ.get(
        'SQLALCHEMY_DATABASE_URI', 'sqlite:///orcid_app.db'
    ),
    OAUTHCLIENT_REMOTE_APPS=dict(
        orcid=orcid.REMOTE_SANDBOX_APP,
    ),
    ORCID_APP_CREDENTIALS=ORCID_APP_CREDENTIALS,
    DEBUG=True,
    SECRET_KEY='TEST',
    SECURITY_PASSWORD_SALT='security-password-salt',
    SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
    USERPROFILES_EXTEND_SECURITY_FORMS=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    APP_THEME=['semantic-ui'],
    THEME_ICONS={
        'semantic-ui': dict(
            link='linkify icon'
        )
    }
)
# Initialise extensions and register the blueprints.
Babel(app)
FlaskMenu(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_api_init)
app.register_blueprint(blueprint_userprofile_ui_init)
@app.route('/')
def index():
    """Serve the landing page, reachable without authentication."""
    return 'Home page (without any restrictions)'
# NOTE(review): `def orcid` rebinds the `orcid` module imported above;
# harmless at runtime here, but confusing shadowing.
@app.route('/orcid')
def orcid():
    """Greet the logged-in user or redirect through the ORCID login flow."""
    if not current_user.is_authenticated:
        return redirect(url_for('invenio_oauthclient.login',
                                remote_app='orcid'))
    return 'hello {}'.format(current_user.email)
| true | true |
f721b1ff207ea23d5cdd699f29d320911240c621 | 743 | py | Python | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 2 | 2021-12-02T07:15:24.000Z | 2021-12-15T06:27:53.000Z | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 1 | 2021-11-05T12:42:12.000Z | 2021-11-05T12:42:12.000Z | notes/migrations/0001_initial.py | chalikavanyaa/stu-do-list | b6af2f1072936240a59f1b63cc7fc32999132da4 | [
"Unlicense"
] | 6 | 2021-10-30T13:44:16.000Z | 2021-12-29T09:14:18.000Z | # Generated by Django 3.2.7 on 2021-11-04 20:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the NotesModel table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='NotesModel',
            # Field names are Indonesian: Penulis=author, Matkul=course,
            # Topik=topic, Keterangan=description, Link=URL (translation
            # inferred — confirm with the app's models).
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Penulis', models.CharField(max_length=150)),
                ('Matkul', models.CharField(max_length=150)),
                ('Topik', models.CharField(max_length=150)),
                ('Keterangan', models.TextField()),
                ('Link', models.URLField()),
            ],
        ),
    ]
| 27.518519 | 118 | 0.537012 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the NotesModel table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='NotesModel',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Penulis', models.CharField(max_length=150)),
                ('Matkul', models.CharField(max_length=150)),
                ('Topik', models.CharField(max_length=150)),
                ('Keterangan', models.TextField()),
                ('Link', models.URLField()),
            ],
        ),
    ]
| true | true |
f721b33e8da5aa1935c59645b23cb35201dfccdd | 340 | py | Python | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | 1 | 2019-10-13T13:43:18.000Z | 2019-10-13T13:43:18.000Z | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | null | null | null | Hackerrank/swap-case.py | sourav1122/Hacktoberfest | 3e3a6e1a537632b1f2b7af3b3b69c8696355047c | [
"MIT"
] | null | null | null | #!/bin/python3
# Swaps case of all chars in provided string
def swap_case(s):
    """Return *s* with the case of every character swapped."""
    return "".join(swapChar(character) for character in s)
def swapChar(char):
    """Return *char* with its case flipped (lowercase <-> uppercase).

    Characters without case (digits, punctuation) are returned via
    str.lower(), which leaves them unchanged.
    """
    if char.islower():
        return char.upper()
    return char.lower()
# Read one line and print it with all cases swapped.
n=input()
# NOTE(review): the single-character branch is redundant — swap_case
# handles length-1 strings identically.
if len(n)==1:
    print(swapChar(n))
else:
    print(swap_case(n))
| 17.894737 | 44 | 0.623529 |
def swap_case(s):
    """Return *s* with the case of every character swapped."""
    formattedStr = "".join(map(swapChar, s))
    return formattedStr
def swapChar(char):
    """Return *char* with its case flipped (lowercase <-> uppercase)."""
    if char.islower():
        return char.upper()
    else:
        return char.lower()
# Read one line and print it with all cases swapped (the length-1 branch
# is redundant; swap_case handles it identically).
n=input()
if len(n)==1:
    print(swapChar(n))
else:
    print(swap_case(n))
| true | true |
f721b3f846fa3924e1f8ff5e8b545d82d1f3e494 | 205 | py | Python | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | 3 | 2020-11-25T19:05:31.000Z | 2021-03-29T07:29:36.000Z | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | 1072.py | FahimFBA/URI-Problem-Solve | d718a95e5a873dffbce19d850998e8917ec87ebb | [
"Apache-2.0"
] | null | null | null | qte = int(input())
# Count how many of the `qte` input values fall inside [10, 20] (sim)
# versus outside (nao) — URI Online Judge problem 1072.
sim = 0
nao = 0
for i in range(qte):
    valor = int(input())
    if(valor >= 10 and valor <= 20):
        sim += 1
    else:
        nao += 1
print("%d in" %sim)
print("%d out" %nao) | 14.642857 | 36 | 0.487805 | qte = int(input())
# Count how many of the `qte` input values fall inside [10, 20] (sim)
# versus outside (nao).
sim = 0
nao = 0
for i in range(qte):
    valor = int(input())
    if(valor >= 10 and valor <= 20):
        sim += 1
    else:
        nao += 1
print("%d in" %sim)
print("%d out" %nao) | true | true |
f721b4abc95f52800b933cdfce1558f764e48a65 | 1,087 | py | Python | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | utils/fonts_scanner.py | sunnywalden/oss_management | 4d417801ba0c55493788b356921c4e3ea462a851 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: sunnywalden@gmail.com
import os
from utils.get_logger import Log
def get_fonts_from_local(fonts_dir='../fonts'):
    """Yield the paths of TrueType/OpenType font files found under *fonts_dir*.

    Walks the directory tree rooted at *fonts_dir* (default: the sibling
    ``../fonts`` directory, preserving the original behaviour) and yields the
    path of every ``.ttf``/``.otf`` file; any other file is logged and skipped.

    Parameters
    ----------
    fonts_dir : str
        Root directory to scan for font files.

    Yields
    ------
    str
        Path of each font file found.
    """
    log = Log()
    logger = log.logger_generate('font_scanner')
    # Membership test replaces the duplicated splitext comparisons; the check
    # stays case-sensitive, matching the original behaviour.
    font_suffixes = ('.ttf', '.otf')
    for root, dirs, files in os.walk(fonts_dir):
        logger.info('File found %s, dirs: %s' % (files, dirs))
        for file_name in files:
            logger.info('File found %s' % file_name)
            fonts_file_path = os.path.join(root, file_name)
            if os.path.splitext(file_name)[1] in font_suffixes:
                logger.info('Fonts file found: %s' % fonts_file_path)
                yield fonts_file_path
            else:
                logger.info('Files which is not a fonts be ignored: %s' % file_name)
if __name__ == '__main__':
    # Print every discovered font path, one per line.
    for fonts_file in get_fonts_from_local():
        print(fonts_file)
| 29.378378 | 90 | 0.601656 |
import os
from utils.get_logger import Log
def get_fonts_from_local():
    """Yield the paths of .ttf/.otf font files found under '../fonts'."""
    log = Log()
    logger = log.logger_generate('font_scanner')
    for root, dirs, files in os.walk('../fonts'):
        logger.info('File found %s, dirs: %s' % (files, dirs))
        for file in files:
            logger.info('File found %s' % file)
            fonts_file_path = os.path.join(root, file)
            # Case-sensitive suffix check: only lowercase .ttf/.otf match.
            if os.path.splitext(file)[1] == '.ttf' or os.path.splitext(file)[1] == '.otf':
                logger.info('Fonts file found: %s' % fonts_file_path)
                yield fonts_file_path
            else:
                logger.info('Files which is not a fonts be ignored: %s' % file)
if __name__ == '__main__':
    # Print every discovered font path, one per line.
    get_fonts_files = get_fonts_from_local()
    for fonts_file in iter(get_fonts_files):
        print(fonts_file)
| true | true |
f721b4f5eb357708bf5747da4008cd53e3881f89 | 1,359 | py | Python | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | 1 | 2020-10-10T23:23:06.000Z | 2020-10-10T23:23:06.000Z | pyblas/level1/scnrm2.py | timleslie/pyblas | 9109f2cc24e674cf59a3b39f95c2d7b8116ae884 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from ..util import slice_
def scnrm2(N, X, INCX):
    """Computes the Euclidean norm of the vector x

    Parameters
    ----------
    N : int
        Number of elements in input vector
    X : numpy.ndarray
        A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
    INCX : int
        Storage spacing between elements of `X`

    Returns
    -------
    numpy.single

    See Also
    --------
    snrm2 : Single-precision real euclidean norm
    dnrm2 : Double-precision real euclidean norm
    dznrm2 : Double-precision complex euclidean norm

    Notes
    -----
    Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/scnrm2.ipynb
    Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/scnrm2.f

    Examples
    --------
    >>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
    >>> N = len(x)
    >>> incx = 1
    >>> print(scnrm2(N, x, incx))
    6.5574384
    """
    # Non-positive N is a degenerate call; BLAS convention returns 0.
    if N <= 0:
        return 0
    # Note: This implementaiton suffers from potential overflow errors for large vector values.
    # More sophisticated implementations can avoid this with appropriate scaling applied before
    # taking the square of large values.
    # conj(x) * x gives |x_i|^2 element-wise; .real drops the zero imaginary part.
    return np.sqrt((X[slice_(N, INCX)].conj() * X[slice_(N, INCX)]).sum().real)
| 29.543478 | 113 | 0.636497 | import numpy as np
from ..util import slice_
def scnrm2(N, X, INCX):
    """Return the Euclidean norm of the single-precision complex vector `X`.

    Parameters
    ----------
    N : int
        Number of vector elements to include in the norm.
    X : numpy.ndarray
        Single-precision complex array, dimension (1 + (N - 1)*abs(INCX)).
    INCX : int
        Storage spacing (stride) between consecutive elements of `X`.

    Returns
    -------
    sqrt(sum(conj(x) * x).real), or 0 when `N` <= 0.
    """
    if N <= 0:
        return 0
    # x.conj() * x gives |x|**2 element-wise; the imaginary part of the sum is
    # zero by construction, so only the real part feeds the square root.
    # NOTE(review): squaring may overflow for very large magnitudes; the
    # reference BLAS implementation scales before squaring.
    return np.sqrt((X[slice_(N, INCX)].conj() * X[slice_(N, INCX)]).sum().real)
| true | true |
f721b541788468f6224ba9b4f3e9d2a8b01d2637 | 3,999 | py | Python | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | tests/manage/monitoring/prometheus/test_deployment_status.py | shivamdurgbuns/ocs-ci | 0fa3a19cab39dcc76843338e4af357c197c08843 | [
"MIT"
] | null | null | null | import logging
import pytest
from ocs_ci.framework.testlib import tier4, tier4a
from ocs_ci.ocs import constants
from ocs_ci.utility import prometheus
from ocs_ci.ocs.ocp import OCP
log = logging.getLogger(__name__)
@tier4
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-1052")
def test_ceph_manager_stopped(measure_stop_ceph_mgr):
    """
    Verify that the MgrIsAbsent alert is raised while the ceph manager
    deployment is scaled down, and that the alert clears once the
    manager is available again.
    """
    label = constants.ALERT_MGRISABSENT
    # alerts collected during the window when the mgr deployment was down
    prometheus.check_alert_list(
        label=label,
        msg="Storage metrics collector service not available anymore.",
        alerts=measure_stop_ceph_mgr.get("prometheus_alerts"),
        states=["pending", "firing"],
        severity="critical",
    )
    # after the measurement stop timestamp the alert must be gone
    prometheus.PrometheusAPI().check_alert_cleared(
        label=label, measure_end_time=measure_stop_ceph_mgr.get("stop")
    )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-904")
def test_ceph_monitor_stopped(measure_stop_ceph_mon):
    """
    With one monitor scaled down (leaving an even number of monitors),
    the quorum-at-risk and degraded-cluster alerts must be raised, and
    each must clear once the monitor is back online.
    """
    api = prometheus.PrometheusAPI()
    # alerts collected during the window when the mon deployment was down
    alerts = measure_stop_ceph_mon.get("prometheus_alerts")
    expected_alerts = [
        (constants.ALERT_MONQUORUMATRISK, "Storage quorum at risk",
         ["pending"], "error"),
        (constants.ALERT_CLUSTERWARNINGSTATE,
         "Storage cluster is in degraded state", ["pending"], "warning"),
    ]
    for label, msg, states, severity in expected_alerts:
        prometheus.check_alert_list(
            label=label,
            msg=msg,
            alerts=alerts,
            states=states,
            severity=severity,
        )
        api.check_alert_cleared(
            label=label, measure_end_time=measure_stop_ceph_mon.get("stop")
        )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-900")
def test_ceph_osd_stopped(measure_stop_ceph_osd):
    """
    While a ceph OSD is down, the disk-not-responding, slow-recovery and
    degraded-cluster alerts must be raised; each alert must clear after
    the OSD disk comes back online.
    """
    api = prometheus.PrometheusAPI()
    # alerts collected during the window when the osd deployment was down
    alerts = measure_stop_ceph_osd.get("prometheus_alerts")
    # osd pods take longer than other pods to become ready again, hence the
    # extended wait used when checking that each alert has cleared
    osd_up_wait = 360
    expected_alerts = [
        (constants.ALERT_OSDDISKNOTRESPONDING, "Disk not responding",
         ["pending", "firing"], "error", False),
        (constants.ALERT_DATARECOVERYTAKINGTOOLONG, "Data recovery is slow",
         ["pending"], "warning", True),
        (constants.ALERT_CLUSTERWARNINGSTATE,
         "Storage cluster is in degraded state",
         ["pending", "firing"], "warning", False),
    ]
    for label, msg, states, severity, ignore in expected_alerts:
        prometheus.check_alert_list(
            label=label,
            msg=msg,
            alerts=alerts,
            states=states,
            severity=severity,
            ignore_more_occurences=ignore,
        )
        api.check_alert_cleared(
            label=label,
            measure_end_time=measure_stop_ceph_osd.get("stop"),
            time_min=osd_up_wait,
        )
def teardown_module():
    # pytest module-level teardown hook: log back in as the default
    # service account so later test modules start from a known identity.
    ocs_obj = OCP()
    ocs_obj.login_as_sa()
| 29.189781 | 82 | 0.632908 | import logging
import pytest
from ocs_ci.framework.testlib import tier4, tier4a
from ocs_ci.ocs import constants
from ocs_ci.utility import prometheus
from ocs_ci.ocs.ocp import OCP
log = logging.getLogger(__name__)
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-1052")
def test_ceph_manager_stopped(measure_stop_ceph_mgr):
    """Check that the MgrIsAbsent alert fires while the ceph manager is
    unavailable and that the alert clears once the manager is back online.
    """
    api = prometheus.PrometheusAPI()
    # alerts gathered while the mgr deployment was scaled down
    alerts = measure_stop_ceph_mgr.get("prometheus_alerts")
    target_label = constants.ALERT_MGRISABSENT
    target_msg = "Storage metrics collector service not available anymore."
    states = ["pending", "firing"]
    prometheus.check_alert_list(
        label=target_label,
        msg=target_msg,
        alerts=alerts,
        states=states,
        severity="critical",
    )
    # the alert must be gone after the measurement stop timestamp
    api.check_alert_cleared(
        label=target_label, measure_end_time=measure_stop_ceph_mgr.get("stop")
    )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-904")
def test_ceph_monitor_stopped(measure_stop_ceph_mon):
    """Check the quorum-at-risk and degraded-cluster alerts raised while a
    ceph monitor is down, and that each alert clears afterwards.
    """
    api = prometheus.PrometheusAPI()
    # alerts gathered while the mon deployment was scaled down
    alerts = measure_stop_ceph_mon.get("prometheus_alerts")
    for target_label, target_msg, target_states, target_severity in [
        (
            constants.ALERT_MONQUORUMATRISK,
            "Storage quorum at risk",
            ["pending"],
            "error",
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending"],
            "warning",
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
        )
        # each alert must be gone after the measurement stop timestamp
        api.check_alert_cleared(
            label=target_label, measure_end_time=measure_stop_ceph_mon.get("stop")
        )
@tier4
@tier4a
@pytest.mark.polarion_id("OCS-900")
def test_ceph_osd_stopped(measure_stop_ceph_osd):
    """Check the alerts raised while a ceph OSD is down and that each
    alert clears once the OSD disk is back online.
    """
    api = prometheus.PrometheusAPI()
    # alerts gathered while the osd deployment was scaled down
    alerts = measure_stop_ceph_osd.get("prometheus_alerts")
    # tuples: (label, message, expected states, severity,
    #          ignore_more_occurences flag for check_alert_list)
    for target_label, target_msg, target_states, target_severity, ignore in [
        (
            constants.ALERT_OSDDISKNOTRESPONDING,
            "Disk not responding",
            ["pending", "firing"],
            "error",
            False,
        ),
        (
            constants.ALERT_DATARECOVERYTAKINGTOOLONG,
            "Data recovery is slow",
            ["pending"],
            "warning",
            True,
        ),
        (
            constants.ALERT_CLUSTERWARNINGSTATE,
            "Storage cluster is in degraded state",
            ["pending", "firing"],
            "warning",
            False,
        ),
    ]:
        prometheus.check_alert_list(
            label=target_label,
            msg=target_msg,
            alerts=alerts,
            states=target_states,
            severity=target_severity,
            ignore_more_occurences=ignore,
        )
        # osd pods take longer than other pods to become ready again, so a
        # longer minimum wait is used when checking the alert has cleared
        osd_up_wait = 360
        api.check_alert_cleared(
            label=target_label,
            measure_end_time=measure_stop_ceph_osd.get("stop"),
            time_min=osd_up_wait,
        )
def teardown_module():
    # pytest module-level teardown hook: log back in as the default
    # service account so later test modules start from a known identity.
    ocs_obj = OCP()
    ocs_obj.login_as_sa()
| true | true |
f721b63438fa70ee2bdce28ac774d92f0929d8a6 | 5,826 | py | Python | openapi_to_fastapi/tests/test_ihan_standards.py | tbikeev/openapi-to-fastapi | 46cacb41fde2d178afd58466fb6080d79fef1b22 | [
"BSD-3-Clause"
] | null | null | null | openapi_to_fastapi/tests/test_ihan_standards.py | tbikeev/openapi-to-fastapi | 46cacb41fde2d178afd58466fb6080d79fef1b22 | [
"BSD-3-Clause"
] | null | null | null | openapi_to_fastapi/tests/test_ihan_standards.py | tbikeev/openapi-to-fastapi | 46cacb41fde2d178afd58466fb6080d79fef1b22 | [
"BSD-3-Clause"
] | null | null | null | import json
from copy import deepcopy
from pathlib import Path
import pytest
from ..routes import SpecRouter
from ..validator import InvalidJSON, UnsupportedVersion
from ..validator import ihan_standards as ihan
# Note: It's easier to get some 100% valid spec and corrupt it
# instead of having multiple incorrect specs in the repo
SPECS_ROOT_DIR = Path(__file__).absolute().parent / "data"
COMPANY_BASIC_INFO: dict = json.loads(
(SPECS_ROOT_DIR / "ihan" / "CompanyBasicInfo.json").read_text()
)
def check_validation_error(tmp_path, spec: dict, exception):
    """Write ``spec`` to a temp file and assert the validator rejects it
    with ``exception`` when a SpecRouter is built from it."""
    target = tmp_path / "spec.json"
    target.write_text(json.dumps(spec))
    with pytest.raises(exception):
        SpecRouter(target, [ihan.IhanStandardsValidator])
@pytest.mark.parametrize("method", ["get", "put", "delete"])
def test_standards_has_non_post_method(method, tmp_path):
    """Any HTTP method other than POST must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    endpoint = broken["paths"]["/Company/BasicInfo"]
    endpoint[method] = {"description": "Method which should not exist"}
    check_validation_error(tmp_path, broken, ihan.OnlyPostMethodAllowed)
def test_post_method_is_missing(tmp_path):
    """A spec whose endpoint defines no POST method must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["paths"]["/Company/BasicInfo"].pop("post")
    check_validation_error(tmp_path, broken, ihan.PostMethodIsMissing)
def test_many_endpoints(tmp_path):
    """Defining a second endpoint must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["paths"]["/pets"] = {"post": {"description": "Pet store, why not?"}}
    check_validation_error(tmp_path, broken, ihan.OnlyOneEndpointAllowed)
def test_no_endpoints(tmp_path):
    """A spec without any paths must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken.pop("paths")
    check_validation_error(tmp_path, broken, ihan.NoEndpointsDefined)
def test_missing_field_body_is_fine(tmp_path):
    """Omitting the request body entirely is allowed by the standard."""
    spec = deepcopy(COMPANY_BASIC_INFO)
    spec["paths"]["/Company/BasicInfo"]["post"].pop("requestBody")
    target = tmp_path / "spec.json"
    target.write_text(json.dumps(spec))
    # building the router must not raise
    SpecRouter(target, [ihan.IhanStandardsValidator])
def test_missing_200_response(tmp_path):
    """A POST endpoint without a 200 response must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["paths"]["/Company/BasicInfo"]["post"]["responses"].pop("200")
    check_validation_error(tmp_path, broken, ihan.ResponseBodyMissing)
def test_wrong_content_type_of_request_body(tmp_path):
    """A request body not served as application/json must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    content = broken["paths"]["/Company/BasicInfo"]["post"]["requestBody"]["content"]
    # move the JSON schema under a non-JSON media type
    content["text/plan"] = deepcopy(content["application/json"])
    content.pop("application/json")
    check_validation_error(tmp_path, broken, ihan.WrongContentType)
def test_wrong_content_type_of_response(tmp_path):
    """A 200 response not served as application/json must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    content = broken["paths"]["/Company/BasicInfo"]["post"]["responses"]["200"]["content"]
    # move the JSON schema under a non-JSON media type
    content["text/plan"] = deepcopy(content["application/json"])
    content.pop("application/json")
    check_validation_error(tmp_path, broken, ihan.WrongContentType)
def test_component_schema_is_missing(tmp_path):
    """Removing the whole components.schemas section must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["components"].pop("schemas")
    check_validation_error(tmp_path, broken, ihan.SchemaMissing)
@pytest.mark.parametrize(
    "model_name", ["BasicCompanyInfoRequest", "BasicCompanyInfoResponse"]
)
def test_component_is_missing(model_name, tmp_path):
    """Removing either referenced component schema must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["components"]["schemas"].pop(model_name)
    check_validation_error(tmp_path, broken, ihan.SchemaMissing)
def test_non_existing_component_defined_in_body(tmp_path):
    """A request-body $ref to an unknown component must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    schema = broken["paths"]["/Company/BasicInfo"]["post"]["requestBody"][
        "content"]["application/json"]["schema"]
    schema["$ref"] = schema["$ref"] + "blah"
    check_validation_error(tmp_path, broken, ihan.SchemaMissing)
def test_non_existing_component_defined_in_response(tmp_path):
    """A 200-response $ref to an unknown component must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    schema = broken["paths"]["/Company/BasicInfo"]["post"]["responses"]["200"][
        "content"]["application/json"]["schema"]
    schema["$ref"] = schema["$ref"] + "blah"
    check_validation_error(tmp_path, broken, ihan.SchemaMissing)
def test_auth_header_is_missing(tmp_path):
    """A spec whose only header is X-Authorization-Provider must be rejected
    because the Authorization header itself is then absent."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    provider_header_only = [
        {
            "schema": {"type": "string"},
            "in": "header",
            "name": "X-Authorization-Provider",
            "description": "Provider domain",
        }
    ]
    broken["paths"]["/Company/BasicInfo"]["post"]["parameters"] = provider_header_only
    check_validation_error(tmp_path, broken, ihan.AuthorizationHeaderMissing)
def test_auth_provider_header_is_missing(tmp_path):
    """A spec whose only header is Authorization must be rejected because
    the X-Authorization-Provider header is then absent."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    auth_header_only = [
        {
            "schema": {"type": "string"},
            "in": "header",
            "name": "Authorization",
            "description": "User bearer token",
        }
    ]
    broken["paths"]["/Company/BasicInfo"]["post"]["parameters"] = auth_header_only
    check_validation_error(tmp_path, broken, ihan.AuthProviderHeaderMissing)
def test_servers_are_defined(tmp_path):
    """A spec that declares a servers section must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["servers"] = [{"url": "http://example.com"}]
    check_validation_error(tmp_path, broken, ihan.ServersShouldNotBeDefined)
def test_security_is_defined(tmp_path):
    """A spec that declares endpoint security must be rejected."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["paths"]["/Company/BasicInfo"]["post"]["security"] = {}
    check_validation_error(tmp_path, broken, ihan.SecurityShouldNotBeDefined)
def test_loading_non_json_file(tmp_path):
    """A file that is not valid JSON must raise InvalidJSON."""
    target = tmp_path / "spec.json"
    target.write_text("weirdo content")
    with pytest.raises(InvalidJSON):
        SpecRouter(target, [ihan.IhanStandardsValidator])
def test_loading_unsupported_version(tmp_path):
    """An OpenAPI version we do not support must raise UnsupportedVersion."""
    broken = deepcopy(COMPANY_BASIC_INFO)
    broken["openapi"] = "999.999.999"
    check_validation_error(tmp_path, broken, UnsupportedVersion)
| 35.962963 | 87 | 0.730862 | import json
from copy import deepcopy
from pathlib import Path
import pytest
from ..routes import SpecRouter
from ..validator import InvalidJSON, UnsupportedVersion
from ..validator import ihan_standards as ihan
# instead of having multiple incorrect specs in the repo
SPECS_ROOT_DIR = Path(__file__).absolute().parent / "data"
COMPANY_BASIC_INFO: dict = json.loads(
(SPECS_ROOT_DIR / "ihan" / "CompanyBasicInfo.json").read_text()
)
def check_validation_error(tmp_path, spec: dict, exception):
spec_path = tmp_path / "spec.json"
spec_path.write_text(json.dumps(spec))
with pytest.raises(exception):
SpecRouter(spec_path, [ihan.IhanStandardsValidator])
@pytest.mark.parametrize("method", ["get", "put", "delete"])
def test_standards_has_non_post_method(method, tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
spec["paths"]["/Company/BasicInfo"][method] = {
"description": "Method which should not exist"
}
check_validation_error(tmp_path, spec, ihan.OnlyPostMethodAllowed)
def test_post_method_is_missing(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["paths"]["/Company/BasicInfo"]["post"]
check_validation_error(tmp_path, spec, ihan.PostMethodIsMissing)
def test_many_endpoints(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
spec["paths"]["/pets"] = {"post": {"description": "Pet store, why not?"}}
check_validation_error(tmp_path, spec, ihan.OnlyOneEndpointAllowed)
def test_no_endpoints(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["paths"]
check_validation_error(tmp_path, spec, ihan.NoEndpointsDefined)
def test_missing_field_body_is_fine(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["paths"]["/Company/BasicInfo"]["post"]["requestBody"]
spec_path = tmp_path / "spec.json"
spec_path.write_text(json.dumps(spec))
SpecRouter(spec_path, [ihan.IhanStandardsValidator])
def test_missing_200_response(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["paths"]["/Company/BasicInfo"]["post"]["responses"]["200"]
check_validation_error(tmp_path, spec, ihan.ResponseBodyMissing)
def test_wrong_content_type_of_request_body(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
request_body = spec["paths"]["/Company/BasicInfo"]["post"]["requestBody"]
schema = deepcopy(request_body["content"]["application/json"])
request_body["content"]["text/plan"] = schema
del request_body["content"]["application/json"]
check_validation_error(tmp_path, spec, ihan.WrongContentType)
def test_wrong_content_type_of_response(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
response = spec["paths"]["/Company/BasicInfo"]["post"]["responses"]["200"]
schema = deepcopy(response["content"]["application/json"])
response["content"]["text/plan"] = schema
del response["content"]["application/json"]
check_validation_error(tmp_path, spec, ihan.WrongContentType)
def test_component_schema_is_missing(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["components"]["schemas"]
check_validation_error(tmp_path, spec, ihan.SchemaMissing)
@pytest.mark.parametrize(
"model_name", ["BasicCompanyInfoRequest", "BasicCompanyInfoResponse"]
)
def test_component_is_missing(model_name, tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
del spec["components"]["schemas"][model_name]
check_validation_error(tmp_path, spec, ihan.SchemaMissing)
def test_non_existing_component_defined_in_body(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
body = spec["paths"]["/Company/BasicInfo"]["post"]["requestBody"]
body["content"]["application/json"]["schema"]["$ref"] += "blah"
check_validation_error(tmp_path, spec, ihan.SchemaMissing)
def test_non_existing_component_defined_in_response(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
resp_200 = spec["paths"]["/Company/BasicInfo"]["post"]["responses"]["200"]
resp_200["content"]["application/json"]["schema"]["$ref"] += "blah"
check_validation_error(tmp_path, spec, ihan.SchemaMissing)
def test_auth_header_is_missing(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
x_app_provider_header = {
"schema": {"type": "string"},
"in": "header",
"name": "X-Authorization-Provider",
"description": "Provider domain",
}
spec["paths"]["/Company/BasicInfo"]["post"]["parameters"] = [x_app_provider_header]
check_validation_error(tmp_path, spec, ihan.AuthorizationHeaderMissing)
def test_auth_provider_header_is_missing(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
auth_header = {
"schema": {"type": "string"},
"in": "header",
"name": "Authorization",
"description": "User bearer token",
}
spec["paths"]["/Company/BasicInfo"]["post"]["parameters"] = [auth_header]
check_validation_error(tmp_path, spec, ihan.AuthProviderHeaderMissing)
def test_servers_are_defined(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
spec["servers"] = [{"url": "http://example.com"}]
check_validation_error(tmp_path, spec, ihan.ServersShouldNotBeDefined)
def test_security_is_defined(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
spec["paths"]["/Company/BasicInfo"]["post"]["security"] = {}
check_validation_error(tmp_path, spec, ihan.SecurityShouldNotBeDefined)
def test_loading_non_json_file(tmp_path):
spec_path = tmp_path / "spec.json"
spec_path.write_text("weirdo content")
with pytest.raises(InvalidJSON):
SpecRouter(spec_path, [ihan.IhanStandardsValidator])
def test_loading_unsupported_version(tmp_path):
spec = deepcopy(COMPANY_BASIC_INFO)
spec["openapi"] = "999.999.999"
check_validation_error(tmp_path, spec, UnsupportedVersion)
| true | true |
f721b681480bbcf350254eceb8ac0f83efa3bb75 | 147,382 | py | Python | scipy/stats/_distn_infrastructure.py | sntgl/scipy | 6660830eb7d7590d56f1377d27bf7ee97bb3adec | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/_distn_infrastructure.py | sntgl/scipy | 6660830eb7d7590d56f1377d27bf7ee97bb3adec | [
"BSD-3-Clause"
] | null | null | null | scipy/stats/_distn_infrastructure.py | sntgl/scipy | 6660830eb7d7590d56f1377d27bf7ee97bb3adec | [
"BSD-3-Clause"
] | null | null | null | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from collections import namedtuple
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have cause import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(order, %(shapes)s, loc=0, scale=1)
Non-central moment of the specified order.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(confidence, %(shapes)s, loc=0, scale=1)
Confidence interval with equal areas around the median.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
    """Distribution with the shape, location and scale parameters fixed.

    Wraps a distribution instance together with the ``*args``/``**kwds``
    it was called with; every method delegates to the underlying
    distribution, re-supplying those frozen parameters.
    """

    def __init__(self, dist, *args, **kwds):
        # frozen shape/loc/scale parameters, re-applied on every call
        self.args = args
        self.kwds = kwds
        # create a new instance: rebuild the distribution from its current
        # constructor parameters so the frozen object holds a private copy
        self.dist = dist.__class__(**dist._updated_ctor_param())
        # cache the support endpoints for the frozen shape parameters
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.a, self.b = self.dist._get_support(*shapes)

    @property
    def random_state(self):
        # random state lives on the wrapped distribution instance
        return self.dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)

    def cdf(self, x):
        """Cumulative distribution function at x."""
        return self.dist.cdf(x, *self.args, **self.kwds)

    def logcdf(self, x):
        """Log of the cumulative distribution function at x."""
        return self.dist.logcdf(x, *self.args, **self.kwds)

    def ppf(self, q):
        """Percent point function (inverse of cdf) at q."""
        return self.dist.ppf(q, *self.args, **self.kwds)

    def isf(self, q):
        """Inverse survival function at q."""
        return self.dist.isf(q, *self.args, **self.kwds)

    def rvs(self, size=None, random_state=None):
        """Draw random variates of the frozen distribution."""
        # merge the call-time size/random_state into the frozen kwds
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)

    def sf(self, x):
        """Survival function (1 - cdf) at x."""
        return self.dist.sf(x, *self.args, **self.kwds)

    def logsf(self, x):
        """Log of the survival function at x."""
        return self.dist.logsf(x, *self.args, **self.kwds)

    def stats(self, moments='mv'):
        """Mean/variance/skew/kurtosis selected by the `moments` string."""
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)

    def median(self):
        """Median of the frozen distribution."""
        return self.dist.median(*self.args, **self.kwds)

    def mean(self):
        """Mean of the frozen distribution."""
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        """Variance of the frozen distribution."""
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        """Standard deviation of the frozen distribution."""
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, order=None, **kwds):
        """Non-central moment of the specified order."""
        return self.dist.moment(order, *self.args, **self.kwds, **kwds)

    def entropy(self):
        """(Differential) entropy of the frozen distribution."""
        return self.dist.entropy(*self.args, **self.kwds)

    def interval(self, confidence=None, **kwds):
        """Confidence interval with equal areas around the median."""
        return self.dist.interval(confidence, *self.args, **self.kwds, **kwds)

    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        """Expected value of func(X) with respect to the distribution."""
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            # discrete expect has no scale argument
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)

    def support(self):
        """Support (endpoints) of the frozen distribution."""
        return self.dist.support(*self.args, **self.kwds)
class rv_discrete_frozen(rv_frozen):
    """Frozen discrete distribution; adds the pmf/logpmf delegations."""

    def pmf(self, k):
        """Probability mass function at k."""
        dist = self.dist
        return dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):
        """Log of the probability mass function at k."""
        dist = self.dist
        return dist.logpmf(k, *self.args, **self.kwds)
class rv_continuous_frozen(rv_frozen):
    """Frozen continuous distribution; adds the pdf/logpdf delegations."""

    def pdf(self, x):
        """Probability density function at x."""
        dist = self.dist
        return dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        """Log of the probability density function at x."""
        dist = self.dist
        return dist.logpdf(x, *self.args, **self.kwds)
def argsreduce(cond, *args):
    """Clean arguments to:

    1. Ensure all arguments are iterable (arrays of dimension at least one).
    2. If ``cond`` is *not* all-True, reduce each argument either to its
       single value (if its size is 1) or to the 1-D array of its values
       at the positions where ``cond`` is True.

    If ``cond`` *is* all-True, every argument is broadcast against ``cond``
    and returned raveled, so all outputs are 1-D with the size of ``cond``.

    Return list of processed arguments.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> A = rng.random((4, 5))
    >>> B = 2
    >>> C = rng.random((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (20,)
    >>> B1.shape
    (20,)
    >>> C1.shape
    (20,)
    >>> cond[2,:] = 0
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (15,)
    >>> B1.shape
    (1,)
    >>> C1.shape
    (15,)
    """
    # some distributions assume arguments are iterable.
    newargs = np.atleast_1d(*args)
    # np.atleast_1d returns an array if only one argument, or a list of arrays
    # if more than one argument.
    if not isinstance(newargs, list):
        newargs = [newargs, ]
    if np.all(cond):
        # broadcast arrays with cond
        *newargs, cond = np.broadcast_arrays(*newargs, cond)
        return [arg.ravel() for arg in newargs]
    s = cond.shape
    # np.extract returns flattened arrays, which are not broadcastable together
    # unless they are either the same size or size == 1.
    return [(arg if np.size(arg) == 1
             else np.extract(cond, np.broadcast_to(arg, s)))
            for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
    """Log-pdf of the noncentral chi-squared distribution, evaluated stably.

    We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
    factor of exp(-xs*ns) into the ive function to improve numerical
    stability at large values of xs. See also `rice.pdf`.
    """
    df2 = df/2.0 - 1.0
    xs, ns = np.sqrt(x), np.sqrt(nc)
    # log of the pdf without the Bessel factor; xlogy handles x == 0
    res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
    # ive = exponentially scaled Bessel I, absorbing the exp(-xs*ns) factor
    corr = ive(df2, xs*ns) / 2.0
    # Return res + np.log(corr) avoiding np.log(0)
    return _lazywhere(
        corr > 0,
        (res, corr),
        f=lambda r, c: r + np.log(c),
        fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
    """Noncentral chi-squared cdf via `scipy.special.chndtr`."""
    return chndtr(x, df, nc)
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
    def __init__(self, seed=None):
        """Initialize; `seed` seeds the per-instance random generator."""
        super().__init__()

        # figure out if _stats signature has 'moments' keyword
        sig = _getfullargspec(self._stats)
        self._stats_has_moments = ((sig.varkw is not None) or
                                   ('moments' in sig.args) or
                                   ('moments' in sig.kwonlyargs))
        self._random_state = check_random_state(seed)

        # For historical reasons, `size` was made an attribute that was read
        # inside _rvs(). The code is being changed so that 'size'
        # is an argument
        # to self._rvs(). However some external (non-SciPy) distributions
        # have not
        # been updated. Maintain backwards compatibility by checking if
        # the self._rvs() signature has the 'size' keyword, or a **kwarg,
        # and if not set self._size inside self.rvs()
        # before calling self._rvs().
        argspec = inspect.getfullargspec(self._rvs)
        self._rvs_uses_size_attribute = (argspec.varkw is None and
                                         'size' not in argspec.args and
                                         'size' not in argspec.kwonlyargs)
        # Warn on first use only
        self._rvs_size_warned = False
    @property
    def random_state(self):
        """Get or set the generator object for generating random variates.

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        # check_random_state normalizes seed to a Generator/RandomState
        self._random_state = check_random_state(seed)
    def __setstate__(self, state):
        """Restore from pickle; handles both current and scipy<1.6 formats."""
        try:
            self.__dict__.update(state)
            # attaches the dynamically created methods on each instance.
            # if a subclass overrides rv_generic.__setstate__, or implements
            # its own _attach_methods, then it must make sure that
            # _attach_argparser_methods is called.
            self._attach_methods()
        except ValueError:
            # reconstitute an old pickle scipy<1.6, that contains
            # (_ctor_param, random_state) as state
            self._ctor_param = state[0]
            self._random_state = state[1]
            self.__init__()
    def _attach_methods(self):
        """Attaches dynamically created methods to the rv_* instance.

        This method must be overridden by subclasses, and must itself call
        _attach_argparser_methods. This method is called in __init__ in
        subclasses, and in __setstate__.

        Raises
        ------
        NotImplementedError
            Always, on this base class.
        """
        raise NotImplementedError
    def _attach_argparser_methods(self):
        """
        Generates the argument-parsing functions dynamically and attaches
        them to the instance.

        Should be called from `_attach_methods`, typically in __init__ and
        during unpickling (__setstate__)
        """
        ns = {}
        # self._parse_arg_template was filled in by _construct_argparser
        exec(self._parse_arg_template, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name, types.MethodType(ns[name], self))
    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser string for the shape arguments.

        This method should be called in __init__ of a class for each
        distribution. It creates the `_parse_arg_template` attribute that is
        then used by `_attach_argparser_methods` to dynamically create and
        attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
        methods to the instance.

        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.

        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, str):
                raise TypeError('shapes must be a string.')

            shapes = self.shapes.replace(',', ' ').split()

            # each shape name must be usable as a Python parameter name,
            # since the names are exec'd into _parse_args* signatures
            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getfullargspec(meth)  # NB does not contain self
                args = shapes_args.args[1:]  # peel off 'x', too

                if args:
                    shapes_list.append(args)

                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.varkw is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.kwonlyargs:
                        raise TypeError(
                            'kwonly args are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')

            if shapes_list:
                shapes = shapes_list[0]
                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []

        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )
        # this string is used by _attach_argparser_methods
        self._parse_arg_template = parse_arg_template % dct

        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)
    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''

        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals

        tempdict['shapes_'] = self.shapes or ''
        if self.shapes and self.numargs == 1:
            tempdict['shapes_'] += ','

        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''

        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        # NOTE(review): formatting is applied twice — presumably because the
        # templates can themselves contain %(...)s placeholders; confirm.
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            try:
                self.__doc__ = doccer.docformat(self.__doc__, tempdict)
            except TypeError as e:
                raise Exception("Unable to construct docstring for "
                                "distribution \"%s\": %s" %
                                (self.name, repr(e))) from e

        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
if isinstance(self, rv_continuous):
return rv_continuous_frozen(self, *args, **kwds)
else:
return rv_discrete_frozen(self, *args, **kwds)
    def __call__(self, *args, **kwds):
        # Calling a distribution instance is shorthand for freeze().
        return self.freeze(*args, **kwds)
    # Expose freeze's documentation on the alias.
    __call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
    def _argcheck_rvs(self, *args, **kwargs):
        """Broadcast the rvs() parameters and validate `size` against them."""
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)

        def squeeze_left(a):
            # strip leading length-1 axes, mirroring numpy's rvs convention
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a

        # Eliminate trivial leading dimensions.  In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions are effectively ignored.  In other words, when `size`
        # is given, trivial leading dimensions of the broadcast parameters
        # in excess of the number of dimensions in size are ignored, e.g.
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
        #   array([ 1.00104267,  3.00422496,  4.99799278])
        # If `size` is not given, the exact broadcast shape is preserved:
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
        #   array([[[[ 1.00862899,  3.00061431,  4.99867122]]]])
        #
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim

        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))

        # Check compatibility of size_ with the broadcast shape of all
        # the parameters.  This check is intended to be consistent with
        # how the numpy random variate generators (e.g. np.random.normal,
        # np.random.beta) handle their arguments.   The rule is that, if size
        # is given, it determines the shape of the output.  Broadcasting
        # can't change the output size.

        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_

        # This compatibility test is not standard.  In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1.  Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             "the parameters. %s, %s, %s" % (size, size_,
                                                             bcast_shape))

        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]

        return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
    def _get_support(self, *args, **kwargs):
        """Return the support of the (unscaled, unshifted) distribution.

        *Must* be overridden by distributions which have support dependent
        upon the shape parameters of the distribution.  Any such override
        *must not* set or change any of the class members, as these members
        are shared amongst all instances of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support for the specified
            shape parameters.
        """
        # Default: fixed, shape-independent support stored on the instance.
        return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
    def _ppf(self, q, *args):
        # Default percent-point function: vectorized numeric inversion of
        # the cdf (self._ppfvec wraps the scalar solver).
        return self._ppfvec(q, *args)
    def _isf(self, q, *args):
        # Default inverse survival function: isf(q) == ppf(1 - q).
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
    def rvs(self, *args, **kwds):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        # 'discrete' is passed internally by rv_discrete.rvs to request an
        # integer-typed result.
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            message = ("Domain error in arguments. The `scale` parameter must "
                       "be positive for all distributions; see the "
                       "distribution documentation for other restrictions.")
            raise ValueError(message)

        if np.all(scale == 0):
            # degenerate distribution: all mass at loc
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            random_state = check_random_state(rndm)
        else:
            random_state = self._random_state

        # Maintain backwards compatibility by setting self._size
        # for distributions that still need it.
        if self._rvs_uses_size_attribute:
            if not self._rvs_size_warned:
                warnings.warn(
                    f'The signature of {self._rvs} does not contain '
                    f'a "size" keyword. Such signatures are deprecated.',
                    np.VisibleDeprecationWarning)
                self._rvs_size_warned = True
            self._size = size
            self._random_state = random_state
            vals = self._rvs(*args)
        else:
            vals = self._rvs(*args, size=size, random_state=random_state)

        # apply the location/scale transform X = loc + scale*Y
        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete:
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(np.int64)

        return vals
    def stats(self, *args, **kwds):
        """Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.
        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        # entries with invalid parameters are filled with badvalue
        default = np.full(shape(cond), fill_value=self.badvalue)

        # Use only entries that are valid in calculation
        if np.any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)

            # Each requested letter falls back to numeric noncentral moments
            # (_munp) when the corresponding _stats entry is None.
            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    # if mean is inf then var is also inf
                    with np.errstate(invalid='ignore'):
                        mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    with np.errstate(invalid='ignore'):
                        mu3 = (-mu*mu - 3*mu2)*mu + mu3p
                        g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if g1 is None:
                        mu3 = None
                    else:
                        # (mu2**1.5) breaks down for nan and inf
                        mu3 = g1 * np.power(mu2, 1.5)
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        with np.errstate(invalid='ignore'):
                            mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
                    with np.errstate(invalid='ignore'):
                        mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
                        g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = [default.copy() for _ in moments]

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, scale, *args)
        goodscale = goodargs[0]
        goodargs = goodargs[1:]
        # entropy of the scaled variable: H(loc + scale*Y) = H(Y) + log(scale)
        place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
        return output
    def moment(self, order=None, *args, **kwds):
        """Non-central moment of distribution of specified order.

        .. deprecated:: 1.9.0
           Parameter `n` is replaced by parameter `order` to avoid name
           collisions with the shape parameter `n` of several distributions.
           Parameter `n` will be removed in SciPy 1.11.0.

        Parameters
        ----------
        order : int, order >= 1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)
        """
        # This function was originally written with parameter `n`, but `n`
        # is also the name of many distribution shape parameters.
        # This block allows the function to accept both `n` and its
        # replacement `order` during a deprecation period; it can be removed
        # in the second release after 1.9.0.
        # The logic to provide a DeprecationWarning only when `n` is passed
        # as a keyword, accept the new keyword `order`, and otherwise be
        # backward-compatible deserves explanation. We need to look out for
        # the following:
        # * Does the distribution have a shape named `n`?
        # * Is `order` provided? It doesn't matter whether it is provided as a
        #   positional or keyword argument; it will be used as the order of the
        #   moment rather than a distribution shape parameter because:
        #   - The first positional argument of `moment` has always been the
        #     order of the moment.
        #   - The keyword `order` is new, so it's unambiguous that it refers to
        #     the order of the moment.
        # * Is `n` provided as a keyword argument? It _does_ matter whether it
        #   is provided as a positional or keyword argument.
        #   - The first positional argument of `moment` has always been the
        #     order of moment, but
        #   - if `n` is provided as a keyword argument, its meaning depends
        #     on whether the distribution accepts `n` as a shape parameter.
        has_shape_n = (self.shapes is not None
                       and "n" in (self.shapes.split(", ")))
        got_order = order is not None
        got_keyword_n = kwds.get("n", None) is not None

        # These lead to the following cases.
        # Case A: If the distribution _does_ accept `n` as a shape
        # 1. If both `order` and `n` are provided, this is now OK:
        #    it is unambiguous that `order` is the order of the moment and `n`
        #    is the shape parameter. Previously, this would have caused an
        #    error because `n` was provided both as a keyword argument and
        #    as the first positional argument. I don't think it is credible for
        #    users to rely on this error in their code, though, so I don't see
        #    this as a backward compatibility break.
        # 2. If only `n` is provided (as a keyword argument), this would have
        #    been an error in the past because `n` would have been treated as
        #    the order of the moment while the shape parameter would be
        #    missing. It is still the same type of error, but for a different
        #    reason: now, `n` is treated as the shape parameter while the
        #    order of the moment is missing.
        # 3. If only `order` is provided, no special treatment is needed.
        #    Clearly this value is intended to be the order of the moment,
        #    and the rest of the function will determine whether `n` is
        #    available as a shape parameter in `args`.
        # 4. If neither `n` nor `order` is provided, this would have been an
        #    error (order of the moment is not provided) and it is still an
        #    error for the same reason.
        # Case B: the distribution does _not_ accept `n` as a shape
        # 1. If both `order` and `n` are provided, this was an error, and it
        #    still is an error: two values for same parameter.
        # 2. If only `n` is provided (as a keyword argument), this was OK and
        #    is still OK, but there should now be a `DeprecationWarning`. The
        #    value of `n` should be removed from `kwds` and stored in `order`.
        # 3. If only `order` is provided, there was no problem before providing
        #    only the first argument of `moment`, and there is no problem with
        #    that now.
        # 4. If neither `n` nor `order` is provided, this would have been an
        #    error (order of the moment is not provided), and it is still an
        #    error for the same reason.
        if not got_order and ((not got_keyword_n)  # A4 and B4
                              or (got_keyword_n and has_shape_n)):  # A2
            message = ("moment() missing 1 required "
                       "positional argument: `order`")
            raise TypeError(message)

        if got_keyword_n and not has_shape_n:
            if got_order:  # B1
                # this will change to "moment got unexpected argument n"
                message = "moment() got multiple values for first argument"
                raise TypeError(message)
            else:  # B2
                message = ("Use of keyword argument `n` for method "
                           "`moment` is deprecated. Use first positional "
                           "argument or keyword argument `order` instead.")
                order = kwds.pop("n")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        n = order
        # No special treatment of A1, A3, or B3 is needed because the order
        # of the moment is now in variable `n` and the shape parameter, if
        # needed, will be fished out of `args` or `kwds` by _parse_args
        # A3 might still cause an error if the shape parameter called `n`
        # is not found in `args`.
        shapes, loc, scale = self._parse_args(*args, **kwds)
        args = np.broadcast_arrays(*(*shapes, loc, scale))
        *shapes, loc, scale = args

        # i0: valid parameters; i1/i2: split by loc == 0 vs loc != 0
        i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
        i1 = np.logical_and(i0, loc == 0)
        i2 = np.logical_and(i0, loc != 0)

        args = argsreduce(i0, *shapes, loc, scale)
        *shapes, loc, scale = args

        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
        val = np.empty(loc.shape)  # val needs to be indexed by loc
        val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)

        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        result = zeros(i0.shape)
        place(result, ~i0, self.badvalue)

        if i1.any():
            # loc == 0: only the scale factor enters
            res1 = scale[loc == 0]**n * val[loc == 0]
            place(result, i1, res1)

        if i2.any():
            # loc != 0: expand the binomial sum above
            mom = [mu, mu2, g1, g2]
            arrs = [i for i in mom if i is not None]
            idx = [i for i in range(4) if mom[i] is not None]
            if any(idx):
                arrs = argsreduce(loc != 0, *arrs)
                j = 0
                for i in idx:
                    mom[i] = arrs[j]
                    j += 1
            mu, mu2, g1, g2 = mom
            args = argsreduce(loc != 0, *shapes, loc, scale, val)
            *shapes, loc, scale, val = args

            res2 = zeros(loc.shape, dtype='d')
            fac = scale / loc
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
                                          shapes)
                res2 += comb(n, k, exact=True)*fac**k * valk
            res2 += fac**n * val
            res2 *= loc**n
            place(result, i2, res2)

        if result.ndim == 0:
            return result.item()
        return result
    def median(self, *args, **kwds):
        """Median of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter, Default is 0.
        scale : array_like, optional
            Scale parameter, Default is 1.

        Returns
        -------
        median : float
            The median of the distribution.

        See Also
        --------
        rv_discrete.ppf
            Inverse of the CDF
        """
        # The median is the 0.5 quantile by definition.
        return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
    def interval(self, confidence=None, *args, **kwds):
        """Confidence interval with equal areas around the median.

        .. deprecated:: 1.9.0
           Parameter `alpha` is replaced by parameter `confidence` to avoid
           name collisions with the shape parameter `alpha` of some
           distributions. Parameter `alpha` will be removed in SciPy 1.11.0.

        Parameters
        ----------
        confidence : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.

        """
        # This function was originally written with parameter `alpha`, but
        # `alpha` is also the name of a shape parameter of two distributions.
        # This block allows the function to accept both `alpha` and its
        # replacement `confidence` during a deprecation period; it can be
        # removed in the second release after 1.9.0.
        # See description of logic in `moment` method.
        has_shape_alpha = (self.shapes is not None
                           and "alpha" in (self.shapes.split(", ")))
        got_confidence = confidence is not None
        got_keyword_alpha = kwds.get("alpha", None) is not None
        # No usable confidence level was supplied: neither `confidence` nor a
        # keyword `alpha` (a keyword `alpha` that names a shape parameter of
        # this distribution does not count as the confidence level).
        if not got_confidence and ((not got_keyword_alpha)
                                   or (got_keyword_alpha and has_shape_alpha)):
            message = ("interval() missing 1 required positional argument: "
                       "`confidence`")
            raise TypeError(message)
        if got_keyword_alpha and not has_shape_alpha:
            if got_confidence:
                # this will change to "interval got unexpected argument alpha"
                message = "interval() got multiple values for first argument"
                raise TypeError(message)
            else:
                message = ("Use of keyword argument `alpha` for method "
                           "`interval` is deprecated. Use first positional "
                           "argument or keyword argument `confidence` "
                           "instead.")
                confidence = kwds.pop("alpha")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        alpha = confidence

        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        # Equal tail probabilities on each side of the median.
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf(self, x, *args):
return -np.sum(self._logpxf(x, *args), axis=0)
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpxf = self._logpxf(x, *args)
finite_logpxf = np.isfinite(logpxf)
n_bad += np.sum(~finite_logpxf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpxf[finite_logpxf], axis=0) + penalty
return -np.sum(logpxf, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
class _ShapeInfo:
def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
inclusive=(True, True)):
self.name = name
self.integrality = integrality
domain = list(domain)
if np.isfinite(domain[0]) and not inclusive[0]:
domain[0] = np.nextafter(domain[0], np.inf)
if np.isfinite(domain[1]) and not inclusive[1]:
domain[1] = np.nextafter(domain[1], -np.inf)
self.domain = domain
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
at most one non-None value in `kwds` associaed with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
if extradoc is not None:
warnings.warn("extradoc is deprecated and will be removed in "
"SciPy 1.11.0", DeprecationWarning)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
    def _attach_methods(self):
        """
        Attaches dynamically created methods to the rv_continuous instance.
        """
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()

        # nin correction: np.vectorize cannot infer the argument count of
        # methods taking *args, so it is set explicitly to
        # numargs shape parameters + 1 positional argument.
        self._ppfvec = vectorize(self._ppf_single, otypes='d')
        self._ppfvec.nin = self.numargs + 1
        self.vecentropy = vectorize(self._entropy, otypes='d')
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self._cdfvec.nin = self.numargs + 1

        # Select the generic moment implementation: momtype 0 integrates the
        # pdf directly, momtype 1 (the default) integrates via the ppf.
        if self.moment_type == 0:
            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
        else:
            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
        # Because of the *args argument of _mom0_sc, vectorize cannot count the
        # number of arguments correctly.
        self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
    def _ppf_single(self, q, *args):
        """Solve cdf(x) == q for a single scalar q by bracketed
        root-finding (Brent's method)."""
        factor = 10.
        left, right = self._get_support(*args)

        # If the support is unbounded below, step left by factors of 10
        # until the bracket's left edge has cdf(left) <= q.
        if np.isinf(left):
            left = min(-factor, right)
            while self._ppf_to_solve(left, q, *args) > 0.:
                left, right = left * factor, left
            # left is now such that cdf(left) <= q
            # if right has changed, then cdf(right) > q

        # Symmetrically, step right if the support is unbounded above,
        # until cdf(right) >= q.
        if np.isinf(right):
            right = max(factor, left)
            while self._ppf_to_solve(right, q, *args) < 0.:
                left, right = right, right * factor
            # right is now such that cdf(right) >= q

        return optimize.brentq(self._ppf_to_solve,
                               left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
    def _pdf(self, x, *args):
        # Default pdf: numerical differentiation of the cdf with a
        # 5-point central stencil.
        return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
p = self._pdf(x, *args)
with np.errstate(divide='ignore'):
return log(p)
    def _logpxf(self, x, *args):
        """Log of the density/mass function; for a continuous distribution
        this is simply an alias for ``_logpdf``."""
        # continuous distributions have PDF, discrete have PMF, but sometimes
        # the distinction doesn't matter. This lets us use `_logpxf` for both
        # discrete and continuous distributions.
        return self._logpdf(x, *args)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
    def _cdf(self, x, *args):
        # Vectorized wrapper around the scalar _cdf_single
        # (self._cdfvec is built in _attach_methods).
        return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
    def pdf(self, x, *args, **kwds):
        """Probability density function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._support_mask(x, *args) & (scale > 0)  # x inside support
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        # Invalid parameters or NaN input -> badvalue.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            # Evaluate only the valid entries; rescale the standardized
            # density by 1/scale (change of variables).
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            return output[()]
        return output
    def logpdf(self, x, *args, **kwds):
        """Log of the probability density function at x of the given RV.

        This uses a more numerically accurate calculation if available.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logpdf : array_like
            Log of the probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._support_mask(x, *args) & (scale > 0)  # x inside support
        cond = cond0 & cond1
        output = empty(shape(cond), dtyp)
        output.fill(NINF)  # log(0) = -inf outside the support
        # Invalid parameters or NaN input -> badvalue.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # log of the change-of-variables factor is subtracted.
            place(output, cond, self._logpdf(*goodargs) - log(scale))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, x, *args, **kwds):
        """
        Cumulative distribution function of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `x`

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
        cond2 = (x >= np.asarray(_b)) & cond0   # at/above upper end: cdf = 1
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        # Invalid parameters or NaN input -> badvalue.
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._cdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, x, *args, **kwds):
        """Log of the cumulative distribution function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
        cond2 = (x >= _b) & cond0   # at/above upper end: log(cdf) = log(1) = 0
        cond = cond0 & cond1
        output = empty(shape(cond), dtyp)
        output.fill(NINF)  # log(0) = -inf below the support
        # Invalid parameters or NaN input -> badvalue.
        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, x, *args, **kwds):
        """Survival function (1 - `cdf`) at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        sf : array_like
            Survival function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
        cond2 = cond0 & (x <= _a)   # at/below lower end: sf = 1
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        # Invalid parameters or NaN input -> badvalue.
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._sf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, x, *args, **kwds):
        """Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as (1 - `cdf`),
        evaluated at `x`.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `x`.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Promote to at least float64 before standardizing x.
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)   # valid parameters
        cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
        cond2 = cond0 & (x <= _a)   # at/below lower end: log(sf) = log(1) = 0
        cond = cond0 & cond1
        output = empty(shape(cond), dtyp)
        output.fill(NINF)  # log(0) = -inf above the support
        # Invalid parameters or NaN input -> badvalue.
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            lower tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # (loc == loc) filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        cond2 = cond0 & (q == 0)   # q == 0 maps to the lower support endpoint
        cond3 = cond0 & (q == 1)   # q == 1 maps to the upper support endpoint
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue)

        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if np.any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # Un-standardize the standardized quantiles.
            place(output, cond, self._ppf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            upper tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : ndarray or scalar
            Quantile corresponding to the upper tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # (loc == loc) filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        cond2 = cond0 & (q == 1)   # q == 1 maps to the lower support endpoint
        cond3 = cond0 & (q == 0)   # q == 0 maps to the upper support endpoint
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue)

        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # Un-standardize the standardized quantiles.
            place(output, cond, self._isf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
    def _reduce_func(self, args, kwds, data=None):
        """
        Return the (possibly reduced) function to optimize in order to find MLE
        estimates for the .fit method.

        Returns ``(x0, func, restore, args)``: starting values for the free
        parameters, the objective over those free parameters, a callable that
        re-inserts fixed values into the full parameter vector (or None when
        nothing is fixed), and the full parameter list.
        """
        # Convert fixed shape parameters to the standard numeric form: e.g. for
        # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
        # for `f0`, `fa` or 'fix_a'. The following converts the latter two
        # into the first (numeric) form.
        shapes = []
        if self.shapes:
            shapes = self.shapes.replace(',', ' ').split()
            for j, s in enumerate(shapes):
                key = 'f' + str(j)
                names = [key, 'f' + s, 'fix_' + s]
                val = _get_fixed_fit_value(kwds, names)
                if val is not None:
                    kwds[key] = val

        args = list(args)
        Nargs = len(args)
        fixedn = []
        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
        x0 = []
        # Partition parameters: fixed ones (given via f.../floc/fscale) get
        # their value written into `args`; free ones seed the start vector x0.
        for n, key in enumerate(names):
            if key in kwds:
                fixedn.append(n)
                args[n] = kwds.pop(key)
            else:
                x0.append(args[n])

        methods = {"mle", "mm"}
        method = kwds.pop('method', "mle").lower()
        if method == "mm":
            # Method of moments: match as many raw data moments as there are
            # free parameters.
            n_params = len(shapes) + 2 - len(fixedn)
            exponents = (np.arange(1, n_params+1))[:, np.newaxis]
            data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)

            def objective(theta, x):
                return self._moment_error(theta, x, data_moments)

        elif method == "mle":
            objective = self._penalized_nnlf
        else:
            raise ValueError("Method '{0}' not available; must be one of {1}"
                             .format(method, methods))

        if len(fixedn) == 0:
            func = objective
            restore = None
        else:
            if len(fixedn) == Nargs:
                raise ValueError(
                    "All parameters fixed. There is nothing to optimize.")

            def restore(args, theta):
                # Replace with theta for all numbers not in fixedn
                # This allows the non-fixed values to vary, but
                # we still call self.nnlf with all parameters.
                i = 0
                for n in range(Nargs):
                    if n not in fixedn:
                        args[n] = theta[i]
                        i += 1
                return args

            def func(theta, x):
                # Evaluate the objective on the full vector rebuilt from the
                # free parameters in theta plus the fixed values in args.
                newtheta = restore(args[:], theta)
                return objective(newtheta, x)

        return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
    """
    Return estimates of shape (if applicable), location, and scale
    parameters from data. The default estimation method is Maximum
    Likelihood Estimation (MLE), but Method of Moments (MM)
    is also available.

    Starting estimates for the fit are given by input arguments; for any
    arguments not provided with starting estimates,
    ``self._fitstart(data)`` is called to generate such.

    One can hold some parameters fixed to specific values by passing in
    keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
    and ``floc`` and ``fscale`` (for location and scale parameters,
    respectively).

    Parameters
    ----------
    data : array_like
        Data to use in estimating the distribution parameters.
    arg1, arg2, arg3,... : floats, optional
        Starting value(s) for any shape-characterizing arguments (those not
        provided will be determined by a call to ``_fitstart(data)``).
        No default value.
    **kwds : floats, optional
        - `loc`: initial guess of the distribution's location parameter.
        - `scale`: initial guess of the distribution's scale parameter.

        Special keyword arguments are recognized as holding certain
        parameters fixed:

        - f0...fn : hold respective shape parameters fixed.
          Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
          are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
          equivalent to ``f1``.
        - floc : hold location parameter fixed to specified value.
        - fscale : hold scale parameter fixed to specified value.
        - optimizer : The optimizer to use.  The optimizer must take
          ``func``, and starting position as the first two arguments,
          plus ``args`` (for extra arguments to pass to the
          function to be optimized) and ``disp=0`` to suppress
          output as keyword arguments.
        - method : The method to use. The default is "MLE" (Maximum
          Likelihood Estimate); "MM" (Method of Moments)
          is also available.

    Returns
    -------
    parameter_tuple : tuple of floats
        Estimates for any shape parameters (if applicable),
        followed by those for location and scale.
        For most random variables, shape statistics
        will be returned, but there are exceptions (e.g. ``norm``).

    Notes
    -----
    With ``method="MLE"`` (default), the fit is computed by minimizing
    the negative log-likelihood function. A large, finite penalty
    (rather than infinite negative log-likelihood) is applied for
    observations beyond the support of the distribution.

    With ``method="MM"``, the fit is computed by minimizing the L2 norm
    of the relative errors between the first *k* raw (about zero) data
    moments and the corresponding distribution moments, where *k* is the
    number of non-fixed parameters.
    More precisely, the objective function is::

        (((data_moments - dist_moments)
          / np.maximum(np.abs(data_moments), 1e-8))**2).sum()

    where the constant ``1e-8`` avoids division by zero in case of
    vanishing data moments. Typically, this error norm can be reduced to
    zero.
    Note that the standard method of moments can produce parameters for
    which some data are outside the support of the fitted distribution;
    this implementation does nothing to prevent this.

    For either method,
    the returned answer is not guaranteed to be globally optimal; it
    may only be locally optimal, or the optimization may fail altogether.
    If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
    the `fit` method will raise a ``RuntimeError``.

    Examples
    --------
    Generate some data to fit: draw random variates from the `beta`
    distribution

    >>> from scipy.stats import beta
    >>> a, b = 1., 2.
    >>> x = beta.rvs(a, b, size=1000)

    Now we can fit all four parameters (``a``, ``b``, ``loc``
    and ``scale``):

    >>> a1, b1, loc1, scale1 = beta.fit(x)

    We can also use some prior knowledge about the dataset: let's keep
    ``loc`` and ``scale`` fixed:

    >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
    >>> loc1, scale1
    (0, 1)

    We can also keep shape parameters fixed by using ``f``-keywords. To
    keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
    equivalently, ``fa=1``:

    >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
    >>> a1
    1

    Not all distributions return estimates for the shape parameters.
    ``norm`` for example just returns estimates for location and scale:

    >>> from scipy.stats import norm
    >>> x = norm.rvs(a, b, size=1000, random_state=123)
    >>> loc1, scale1 = norm.fit(x)
    >>> loc1, scale1
    (0.92087172783841631, 2.0015750750324668)

    """
    data = np.asarray(data)
    # Estimation method name is case-insensitive: "mle" (default) or "mm".
    method = kwds.get('method', "mle").lower()

    Narg = len(args)
    if Narg > self.numargs:
        raise TypeError("Too many input arguments.")
    if not np.isfinite(data).all():
        raise RuntimeError("The data contains non-finite values.")

    # Fill in starting values for any shape/loc/scale parameters the
    # caller did not supply, via the distribution-specific heuristic.
    start = [None]*2
    if (Narg < self.numargs) or not ('loc' in kwds and
                                     'scale' in kwds):
        # get distribution specific starting locations
        start = self._fitstart(data)
        args += start[Narg:-2]
    loc = kwds.pop('loc', start[-2])
    scale = kwds.pop('scale', start[-1])
    args += (loc, scale)
    # _reduce_func folds the fixed (f0/floc/fscale) parameters out of the
    # objective; x0 holds only the remaining free parameters.
    x0, func, restore, args = self._reduce_func(args, kwds, data=data)
    optimizer = kwds.pop('optimizer', optimize.fmin)
    # convert string to function in scipy.optimize
    optimizer = _fit_determine_optimizer(optimizer)
    # by now kwds must be empty, since everybody took what they needed
    if kwds:
        raise TypeError("Unknown arguments: %s." % kwds)

    # In some cases, method of moments can be done with fsolve/root
    # instead of an optimizer, but sometimes no solution exists,
    # especially when the user fixes parameters. Minimizing the sum
    # of squares of the error generalizes to these cases.
    vals = optimizer(func, x0, args=(ravel(data),), disp=0)
    # Objective value at the optimum; used below to detect an MM failure.
    obj = func(vals, data)

    if restore is not None:
        # Re-insert the parameters that were held fixed.
        vals = restore(args, vals)
    vals = tuple(vals)
    loc, scale, shapes = self._unpack_loc_scale(vals)
    if not (np.all(self._argcheck(*shapes)) and scale > 0):
        raise Exception("Optimization converged to parameters that are "
                        "outside the range allowed by the distribution.")
    if method == 'mm':
        if not np.isfinite(obj):
            raise Exception("Optimization failed: either a data moment "
                            "or fitted distribution moment is "
                            "non-finite.")
    return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
    """
    Estimate loc and scale parameters from data using 1st and 2nd moments.

    Parameters
    ----------
    data : array_like
        Data to fit.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).

    Returns
    -------
    Lhat : float
        Estimated location parameter for the data.
    Shat : float
        Estimated scale parameter for the data.
    """
    # Theoretical mean and variance of the standardized distribution.
    mu, mu2 = self.stats(*args, moments='mv')
    sample = np.asarray(data)
    mean_hat = sample.mean()
    var_hat = sample.var()
    # Match sample moments: scale from the variance ratio, then location.
    Shat = np.sqrt(var_hat / mu2)
    Lhat = mean_hat - Shat * mu
    # Fall back to neutral values when the estimates are unusable.
    if not np.isfinite(Lhat):
        Lhat = 0
    if not (np.isfinite(Shat) and (0 < Shat)):
        Shat = 1
    return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
           conditional=False, **kwds):
    """Calculate expected value of a function with respect to the
    distribution by numerical integration.

    The expected value of a function ``f(x)`` with respect to a
    distribution ``dist`` is defined as::

                ub
        E[f(x)] = Integral(f(x) * dist.pdf(x)),
                lb

    where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
    distribution. If the bounds ``lb`` and ``ub`` correspond to the
    support of the distribution, e.g. ``[-inf, inf]`` in the default
    case, then the integral is the unrestricted expectation of ``f(x)``.
    Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
    outside a finite interval in which case the expectation is
    calculated within the finite range ``[lb, ub]``.

    Parameters
    ----------
    func : callable, optional
        Function for which integral is calculated. Takes only one argument.
        The default is the identity mapping f(x) = x.
    args : tuple, optional
        Shape parameters of the distribution.
    loc : float, optional
        Location parameter (default=0).
    scale : float, optional
        Scale parameter (default=1).
    lb, ub : scalar, optional
        Lower and upper bound for integration. Default is set to the
        support of the distribution.
    conditional : bool, optional
        If True, the integral is corrected by the conditional probability
        of the integration interval. The return value is the expectation
        of the function, conditional on being in the given interval.
        Default is False.

    Additional keyword arguments are passed to the integration routine.

    Returns
    -------
    expect : float
        The calculated expected value.

    Notes
    -----
    The integration behavior of this function is inherited from
    `scipy.integrate.quad`. Neither this function nor
    `scipy.integrate.quad` can verify whether the integral exists or is
    finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
    ``cauchy(0).expect()`` returns ``0.0``.

    The function is not vectorized.

    Examples
    --------
    To understand the effect of the bounds of integration consider

    >>> from scipy.stats import expon
    >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
    0.6321205588285578

    This is close to

    >>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
    0.6321205588285577

    If ``conditional=True``

    >>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
    1.0000000000000002

    The slight deviation from 1 is due to numerical integration.
    """
    # loc/scale are forwarded to self.pdf/self.sf via this kwds dict.
    lockwds = {'loc': loc,
               'scale': scale}
    self._argcheck(*args)
    _a, _b = self._get_support(*args)
    # The integrand is f(x) * pdf(x); f defaults to the identity.
    if func is None:
        def fun(x, *args):
            return x * self.pdf(x, *args, **lockwds)
    else:
        def fun(x, *args):
            return func(x) * self.pdf(x, *args, **lockwds)
    # Default integration bounds: the support, shifted/scaled by loc/scale.
    if lb is None:
        lb = loc + _a * scale
    if ub is None:
        ub = loc + _b * scale

    if conditional:
        # Probability mass of [lb, ub]; divides the integral below.
        invfac = (self.sf(lb, *args, **lockwds)
                  - self.sf(ub, *args, **lockwds))
    else:
        invfac = 1.0

    kwds['args'] = args
    # Silence floating point warnings from integration.
    with np.errstate(all='ignore'):
        vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
    return vals
def _param_info(self):
    """Parameter metadata: shape infos followed by loc and scale entries."""
    unbounded = (-np.inf, np.inf)
    loc_info = _ShapeInfo("loc", False, unbounded, (False, False))
    scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
    return self._shape_info() + [loc_info, scale_info]
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
    """Non-central moment of a discrete distribution via summation."""
    _a, _b = self._get_support(*args)

    def summand(x):
        return np.power(x, n) * self._pmf(x, *args)

    # Sum over the support, starting the iteration at the median.
    return _expect(summand, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    """Scalar percent-point function computed by bisection on the CDF.

    Returns the smallest integer k in the support with cdf(k) >= q,
    bracketing [a, b] first when the support is unbounded.
    """
    _a, _b = self._get_support(*args)
    b = _b
    a = _a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= _b:
                # NOTE(review): _b is inf here, so this branch looks
                # unreachable for finite b — kept for safety.
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):    # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= _a:
                # NOTE(review): _a is -inf here, so this branch looks
                # unreachable for finite a — kept for safety.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    # Bisection proper: maintain cdf(a) = qa < q <= qb = cdf(b).
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # Bracket narrowed to adjacent integers; pick the answer.
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
    """A generic discrete random variable class meant for subclassing.

    `rv_discrete` is a base class to construct specific distribution classes
    and instances for discrete random variables. It can also be used
    to construct an arbitrary distribution defined by a list of support
    points and corresponding probabilities.

    Parameters
    ----------
    a : float, optional
        Lower bound of the support of the distribution, default: 0
    b : float, optional
        Upper bound of the support of the distribution, default: plus infinity
    moment_tol : float, optional
        The tolerance for the generic calculation of moments.
    values : tuple of two array_like, optional
        ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
        probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
        and ``pk`` must have the same shape.
    inc : integer, optional
        Increment for the support of the distribution.
        Default is 1. (other values have not been tested)
    badvalue : float, optional
        The value in a result arrays that indicates a value that for which
        some argument restriction is violated, default is np.nan.
    name : str, optional
        The name of the instance. This string is used to construct the default
        example for distributions.
    longname : str, optional
        This string is used as part of the first line of the docstring returned
        when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility, do not use for new subclasses.
    shapes : str, optional
        The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods
        If not provided, shape parameters will be inferred from
        the signatures of the private methods, ``_pmf`` and ``_cdf`` of
        the instance.
    extradoc : str, optional, deprecated
        This string is used as the last part of the docstring returned when a
        subclass has no docstring of its own. Note: `extradoc` exists for
        backwards compatibility, do not use for new subclasses.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Methods
    -------
    rvs
    pmf
    logpmf
    cdf
    logcdf
    sf
    logsf
    ppf
    isf
    moment
    stats
    entropy
    expect
    median
    mean
    std
    var
    interval
    __call__
    support

    Notes
    -----
    This class is similar to `rv_continuous`. Whether a shape parameter is
    valid is decided by an ``_argcheck`` method (which defaults to checking
    that its arguments are strictly positive.)
    The main differences are:

    - the support of the distribution is a set of integers
    - instead of the probability density function, ``pdf`` (and the
      corresponding private ``_pdf``), this class defines the
      *probability mass function*, `pmf` (and the corresponding
      private ``_pmf``.)
    - scale parameter is not defined.

    To create a new discrete distribution, we would do the following:

    >>> from scipy.stats import rv_discrete
    >>> class poisson_gen(rv_discrete):
    ...     "Poisson distribution"
    ...     def _pmf(self, k, mu):
    ...         return exp(-mu) * mu**k / factorial(k)

    and create an instance::

    >>> poisson = poisson_gen(name="poisson")

    Note that above we defined the Poisson distribution in the standard form.
    Shifting the distribution can be done by providing the ``loc`` parameter
    to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
    delegates the work to ``poisson._pmf(x-loc, mu)``.

    **Discrete distributions from a list of probabilities**

    Alternatively, you can construct an arbitrary discrete rv defined
    on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
    ``values`` keyword argument to the `rv_discrete` constructor.

    Examples
    --------
    Custom made discrete distribution:

    >>> from scipy import stats
    >>> xk = np.arange(7)
    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
    >>>
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
    >>> plt.show()

    Random number generation:

    >>> R = custm.rvs(size=100)

    """
    def __new__(cls, a=0, b=inf, name=None, badvalue=None,
                moment_tol=1e-8, values=None, inc=1, longname=None,
                shapes=None, extradoc=None, seed=None):
        # Explicit (xk, pk) values mean a finite-support distribution,
        # which is implemented by the rv_sample subclass.
        if values is not None:
            # dispatch to a subclass
            return super(rv_discrete, cls).__new__(rv_sample)
        else:
            # business as usual
            return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
             moment_tol=1e-8, values=None, inc=1, longname=None,
             shapes=None, extradoc=None, seed=None):
    """Initialize the generic discrete distribution.

    See the class docstring for parameter descriptions.
    """
    super().__init__(seed)

    if extradoc is not None:
        # `extradoc` is kept only for backwards compatibility.
        warnings.warn("extradoc is deprecated and will be removed in "
                      "SciPy 1.11.0", DeprecationWarning)

    # cf generic freeze: remember the constructor arguments so frozen
    # distributions can re-create the instance (see _updated_ctor_param).
    self._ctor_param = dict(
        a=a, b=b, name=name, badvalue=badvalue,
        moment_tol=moment_tol, values=values, inc=inc,
        longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)

    if badvalue is None:
        badvalue = nan
    self.badvalue = badvalue
    self.a = a
    self.b = b
    self.moment_tol = moment_tol
    self.inc = inc
    self.shapes = shapes

    # `values` instances are dispatched to rv_sample in __new__; reaching
    # here with `values` set means that dispatch was bypassed.
    if values is not None:
        raise ValueError("rv_discrete.__init__(..., values != None, ...)")

    self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                              locscale_in='loc=0',
                              # scale=1 for discrete RVs
                              locscale_out='loc, 1')
    self._attach_methods()
    self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
    """Attaches dynamically created methods to the rv_discrete instance."""
    # Vectorized single-point CDF and entropy.
    self._cdfvec = vectorize(self._cdf_single, otypes='d')
    self.vecentropy = vectorize(self._entropy)

    # _attach_methods is responsible for calling _attach_argparser_methods
    self._attach_argparser_methods()

    # nin correction needs to be after we know numargs
    # correct nin for generic moment vectorization
    _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
    _vec_generic_moment.nin = self.numargs + 2
    # Bind the module-level helpers as instance methods.
    self.generic_moment = types.MethodType(_vec_generic_moment, self)

    # correct nin for ppf vectorization
    _vppf = vectorize(_drv2_ppfsingle, otypes='d')
    _vppf.nin = self.numargs + 2
    self._ppfvec = types.MethodType(_vppf, self)

    # now that self.numargs is defined, we can adjust nin
    self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
    """Build the instance docstring (skipped when running under ``-OO``).

    Parameters mirror those of ``__init__``; ``name`` falls back to
    'Distribution' when not given.
    """
    if name is None:
        name = 'Distribution'
    self.name = name
    self.extradoc = extradoc

    # generate docstring for subclass instances
    if longname is None:
        # Choose the article by the first letter of the name.  The previous
        # code tested ``name[0] in ['aeiouAEIOU']`` — membership in a
        # one-element list of a ten-character string — which a single
        # character can never match, so "An " was unreachable.
        if name[0] in 'aeiouAEIOU':
            hstr = "An "
        else:
            hstr = "A "
        longname = hstr + name

    if sys.flags.optimize < 2:
        # Skip adding docstrings if interpreter is run with -OO
        if self.__doc__ is None:
            self._construct_default_doc(longname=longname,
                                        extradoc=extradoc,
                                        docdict=docdict_discrete,
                                        discrete='discrete')
        else:
            dct = dict(distdiscrete)
            self._construct_doc(docdict_discrete, dct.get(self.name))

        # discrete RV do not have the scale parameter, remove it
        self.__doc__ = self.__doc__.replace(
            '\n    scale : array_like, '
            'optional\n        scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
    # Mass sits only at integer points: True where k is a whole number.
    return floor(k) == k
def _pmf(self, k, *args):
    # Default PMF: difference of the CDF at consecutive integers.
    # Subclasses are expected to override either _pmf or _cdf.
    return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
    # Default log-PMF: log of the (possibly generic) PMF.
    return log(self._pmf(k, *args))
def _logpxf(self, k, *args):
    # continuous distributions have PDF, discrete have PMF, but sometimes
    # the distinction doesn't matter. This lets us use `_logpxf` for both
    # discrete and continuous distributions.
    return self._logpmf(k, *args)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-1]
scale = 1
args = tuple(theta[:-1])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _cdf_single(self, k, *args):
    # Brute-force CDF at one point: sum the PMF over the support from
    # the lower bound up to and including k.
    _a, _b = self._get_support(*args)
    m = arange(int(_a), k+1)
    return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
    # Discrete CDFs are step functions: evaluate at floor(x).
    k = floor(x)
    return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
    """Random variates of given type.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).
    size : int or tuple of ints, optional
        Defining number of random variates (Default is 1). Note that `size`
        has to be given as keyword, not as positional argument.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance
        then that instance is used.

    Returns
    -------
    rvs : ndarray or scalar
        Random variates of given `size`.

    """
    # Tell the shared rv_generic implementation to round to integers.
    kwargs['discrete'] = True
    return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
    """Probability mass function at k of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    pmf : array_like
        Probability mass function evaluated at k

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray((k-loc))
    # cond0: valid shape parameters; cond1: k in the support on a mass point.
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    # Invalid shapes or nan inputs get badvalue.
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
    if output.ndim == 0:
        return output[()]
    return output
def logpmf(self, k, *args, **kwds):
    """Log of the probability mass function at k of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter. Default is 0.

    Returns
    -------
    logpmf : array_like
        Log of the probability mass function evaluated at k.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
    cond = cond0 & cond1
    # Default to -inf (log of zero mass) outside the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logpmf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def cdf(self, k, *args, **kwds):
    """Cumulative distribution function of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at `k`.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k < _b)
    cond2 = (k >= _b)          # at/above the upper bound the CDF is 1
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, cond2*(cond0 == cond0), 1.0)
    # badvalue overwrites the 1.0 above where shapes are invalid.
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
    if output.ndim == 0:
        return output[()]
    return output
def logcdf(self, k, *args, **kwds):
    """Log of the cumulative distribution function at k of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at k.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray((k-loc))
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k < _b)
    cond2 = (k >= _b)          # at/above the upper bound log(CDF) is 0
    cond = cond0 & cond1
    # Default to -inf (log of zero) below the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    place(output, cond2*(cond0 == cond0), 0.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logcdf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def sf(self, k, *args, **kwds):
    """Survival function (1 - `cdf`) at k of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    sf : array_like
        Survival function evaluated at k.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray(k-loc)
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k < _b)
    cond2 = (k < _a) & cond0   # below the support the SF is 1
    cond = cond0 & cond1
    output = zeros(shape(cond), 'd')
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    place(output, cond2, 1.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
    if output.ndim == 0:
        return output[()]
    return output
def logsf(self, k, *args, **kwds):
    """Log of the survival function of the given RV.

    Returns the log of the "survival function," defined as 1 - `cdf`,
    evaluated at `k`.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at `k`.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # Standardize: shift by loc.
    k = asarray(k-loc)
    cond0 = self._argcheck(*args)
    cond1 = (k >= _a) & (k < _b)
    cond2 = (k < _a) & cond0   # below the support log(SF) is 0
    cond = cond0 & cond1
    # Default to -inf (log of zero) above the support.
    output = empty(shape(cond), 'd')
    output.fill(NINF)
    place(output, (1-cond0) + np.isnan(k), self.badvalue)
    place(output, cond2, 0.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((k,)+args))
        place(output, cond, self._logsf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def ppf(self, q, *args, **kwds):
    """Percent point function (inverse of `cdf`) at q of the given RV.

    Parameters
    ----------
    q : array_like
        Lower tail probability.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    k : array_like
        Quantile corresponding to the lower tail probability, q.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (loc == loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q == 1) & cond0
    cond = cond0 & cond1

    # Start from badvalue everywhere; valid entries are overwritten below.
    output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
    # output type 'd' to handle nan and inf
    # q == 0 maps to just below the support; q == 1 to the upper bound.
    place(output, (q == 0)*(cond == cond), _a-1 + loc)
    place(output, cond2, _b + loc)
    if np.any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(loc,)))
        loc, goodargs = goodargs[-1], goodargs[:-1]
        place(output, cond, self._ppf(*goodargs) + loc)

    if output.ndim == 0:
        return output[()]
    return output
def isf(self, q, *args, **kwds):
    """Inverse survival function (inverse of `sf`) at q of the given RV.

    Parameters
    ----------
    q : array_like
        Upper tail probability.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    k : ndarray or scalar
        Quantile corresponding to the upper tail probability, q.

    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # (loc == loc) filters out nan locations.
    cond0 = self._argcheck(*args) & (loc == loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q == 1) & cond0
    cond3 = (q == 0) & cond0
    cond = cond0 & cond1

    # same problem as with ppf; copied from ppf and changed
    output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
    # output type 'd' to handle nan and inf
    # q == 1 maps to just below the support; q == 0 to the upper bound
    # (the tails are mirrored relative to ppf).
    lower_bound = _a - 1 + loc
    upper_bound = _b + loc
    place(output, cond2*(cond == cond), lower_bound)
    place(output, cond3*(cond == cond), upper_bound)

    # call place only if at least 1 valid argument
    if np.any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(loc,)))
        loc, goodargs = goodargs[-1], goodargs[:-1]
        # PB same as ticket 766
        place(output, cond, self._isf(*goodargs) + loc)

    if output.ndim == 0:
        return output[()]
    return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
           conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
    """
    Calculate expected value of a function with respect to the distribution
    for discrete distribution by numerical summation.

    Parameters
    ----------
    func : callable, optional
        Function for which the expectation value is calculated.
        Takes only one argument.
        The default is the identity mapping f(k) = k.
    args : tuple, optional
        Shape parameters of the distribution.
    loc : float, optional
        Location parameter.
        Default is 0.
    lb, ub : int, optional
        Lower and upper bound for the summation, default is set to the
        support of the distribution, inclusive (``lb <= k <= ub``).
    conditional : bool, optional
        If true then the expectation is corrected by the conditional
        probability of the summation interval. The return value is the
        expectation of the function, `func`, conditional on being in
        the given interval (k such that ``lb <= k <= ub``).
        Default is False.
    maxcount : int, optional
        Maximal number of terms to evaluate (to avoid an endless loop for
        an infinite sum). Default is 1000.
    tolerance : float, optional
        Absolute tolerance for the summation. Default is 1e-10.
    chunksize : int, optional
        Iterate over the support of a distributions in chunks of this size.
        Default is 32.

    Returns
    -------
    expect : float
        Expected value.

    Notes
    -----
    For heavy-tailed distributions, the expected value may or
    may not exist,
    depending on the function, `func`. If it does exist, but the
    sum converges
    slowly, the accuracy of the result may be rather low. For instance, for
    ``zipf(4)``, accuracy for mean, variance in example is only 1e-5.
    increasing `maxcount` and/or `chunksize` may improve the result,
    but may also make zipf very slow.

    The function is not vectorized.

    """
    # Summand: f(k) * pmf(k) in standardized (loc-shifted) coordinates.
    if func is None:
        def fun(x):
            # loc and args from outer scope
            return (x+loc)*self._pmf(x, *args)
    else:
        def fun(x):
            # loc and args from outer scope
            return func(x+loc)*self._pmf(x, *args)

    # used pmf because _pmf does not check support in randint and there
    # might be problems(?) with correct self.a, self.b at this stage maybe
    # not anymore, seems to work now with _pmf
    _a, _b = self._get_support(*args)
    if lb is None:
        lb = _a
    else:
        lb = lb - loc   # convert bound for standardized distribution
    if ub is None:
        ub = _b
    else:
        ub = ub - loc   # convert bound for standardized distribution
    if conditional:
        # Probability mass of [lb, ub]; divides the sum below.
        invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
    else:
        invfac = 1.0

    if isinstance(self, rv_sample):
        # Finite-support distributions can sum their values directly.
        res = self._expect(fun, lb, ub)
        return res / invfac

    # iterate over the support, starting from the median
    x0 = self.ppf(0.5, *args)
    res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
    return res / invfac
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
param_info = shape_info + [loc_info]
return param_info
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
            chunksize=32):
    """Helper for computing the expectation value of `fun`."""
    # short-circuit if the support size is small enough
    if (ub - lb) <= chunksize:
        supp = np.arange(lb, ub+1, inc)
        vals = fun(supp)
        return np.sum(vals)
    # otherwise, iterate starting from x0
    if x0 < lb:
        x0 = lb
    if x0 > ub:
        x0 = ub
    count, tot = 0, 0.
    # iterate over [x0, ub] inclusive
    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            # chunk contribution negligible: treat the tail as converged
            break
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            return tot
    # iterate over [lb, x0)
    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            break
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            break
    return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
    """A 'sample' discrete distribution defined by the support and values.
    The ctor ignores most of the arguments, only needs the `values` argument.
    """
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        # Skip over rv_discrete.__init__: this class builds its own
        # machinery directly from the (xk, pk) table.
        super(rv_discrete, self).__init__(seed)
        if extradoc is not None:
            warnings.warn("extradoc is deprecated and will be removed in "
                          "SciPy 1.11.0", DeprecationWarning)
        if values is None:
            raise ValueError("rv_sample.__init__(..., values=None,...)")
        # cf generic freeze
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        self.vecentropy = self._entropy
        xk, pk = values
        # Validate the (support, probability) table: matching shapes,
        # non-negative masses summing to one.
        if np.shape(xk) != np.shape(pk):
            raise ValueError("xk and pk must have the same shape.")
        if np.less(pk, 0.0).any():
            raise ValueError("All elements of pk must be non-negative.")
        if not np.allclose(np.sum(pk), 1):
            raise ValueError("The sum of provided pk is not 1.")
        # Store the support sorted ascending; qvals is the running cdf table.
        indx = np.argsort(np.ravel(xk))
        self.xk = np.take(np.ravel(xk), indx, 0)
        self.pk = np.take(np.ravel(pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        self.qvals = np.cumsum(self.pk, axis=0)
        self.shapes = ' '   # bypass inspection
        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)
    def __getstate__(self):
        # Drop the dynamically attached bound methods so the instance pickles.
        dct = self.__dict__.copy()
        # these methods will be remade in rv_generic.__setstate__,
        # which calls rv_generic._attach_methods
        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
        [dct.pop(attr, None) for attr in attrs]
        return dct
    def _attach_methods(self):
        """Attaches dynamically created argparser methods."""
        self._attach_argparser_methods()
    def _get_support(self, *args):
        """Return the support of the (unscaled, unshifted) distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support.
        """
        return self.a, self.b
    def _pmf(self, x):
        # Elementwise: pk where x equals the matching xk, 0 otherwise.
        return np.select([x == k for k in self.xk],
                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
    def _cdf(self, x):
        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
        # index of the largest xk <= x (argmax locates the first xk > x)
        indx = np.argmax(xxk > xx, axis=-1) - 1
        return self.qvals[indx]
    def _ppf(self, q):
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        # first support point whose cumulative probability reaches q
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]
    def _rvs(self, size=None, random_state=None):
        # Need to define it explicitly, otherwise .rvs() with size=None
        # fails due to explicit broadcasting in _ppf
        U = random_state.uniform(size=size)
        if size is None:
            U = np.array(U, ndmin=1)
            Y = self._ppf(U)[0]
        else:
            Y = self._ppf(U)
        return Y
    def _entropy(self):
        # Shannon entropy of the tabulated masses.
        return stats.entropy(self.pk)
    def generic_moment(self, n):
        # n-th non-central moment: sum(xk**n * pk); broadcasts over n.
        n = asarray(n)
        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
    def _expect(self, fun, lb, ub, *args, **kwds):
        # ignore all args, just do a brute force summation
        supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
        vals = fun(supp)
        return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is an tuple the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
    """Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.
        Note that these are not simply the names of the statistical
        distributions, with a _gen suffix added.
    """
    # Private names are never part of the public distribution listing.
    public = [(nm, val) for nm, val in namespace_pairs
              if not nm.startswith('_')]
    # Generator classes follow the `<name>_gen` convention and subclass the
    # given base; instances of the base are the distributions themselves.
    distn_gen_names = [nm for nm, val in public
                       if nm.endswith('_gen') and issubclass(val, rv_base_class)]
    distn_names = [nm for nm, val in public
                   if isinstance(val, rv_base_class)]
    return distn_names, distn_gen_names
| 36.516848 | 195 | 0.573347 |
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from collections import namedtuple
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
from scipy import optimize
from scipy import integrate
from scipy.misc import derivative
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(order, %(shapes)s, loc=0, scale=1)
Non-central moment of the specified order.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(confidence, %(shapes)s, loc=0, scale=1)
Confidence interval with equal areas around the median.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
# Master template dictionary for continuous distributions; keys are the
# %(key)s placeholders available to distribution docstrings.
docdict = {
    'rvs': _doc_rvs,
    'pdf': _doc_pdf,
    'logpdf': _doc_logpdf,
    'cdf': _doc_cdf,
    'logcdf': _doc_logcdf,
    'sf': _doc_sf,
    'logsf': _doc_logsf,
    'ppf': _doc_ppf,
    'isf': _doc_isf,
    'stats': _doc_stats,
    'entropy': _doc_entropy,
    'fit': _doc_fit,
    'moment': _doc_moment,
    'expect': _doc_expect,
    'interval': _doc_interval,
    'mean': _doc_mean,
    'std': _doc_std,
    'var': _doc_var,
    'median': _doc_median,
    'allmethods': _doc_allmethods,
    'longsummary': _doc_default_longsummary,
    'frozennote': _doc_default_frozen_note,
    'example': _doc_default_example,
    'default': _doc_default,
    'before_notes': _doc_default_before_notes,
    'after_notes': _doc_default_locscale
}
# Derive the discrete-distribution dictionary from the continuous one:
# swap pdf->pmf, drop `scale`, rename the cdf/sf argument x -> k.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
    docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
    - Frozen RV object with the same methods but holding the given shape and
      location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
...               %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
...           label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# Clean up the separate template fragments; only the two dicts are needed.
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5)
val = mu3+3*mu*mu2+mu*mu*mu
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0)
mu3 = g1*np.power(mu2, 1.5)
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
class rv_frozen:
    """A distribution with its shape, location, and scale fixed at creation.

    Thin delegating wrapper: every method forwards to the underlying
    distribution in ``self.dist`` with the stored ``args``/``kwds`` appended.
    """
    def __init__(self, dist, *args, **kwds):
        self.args = args
        self.kwds = kwds
        # create a fresh instance of the distribution class
        self.dist = dist.__class__(**dist._updated_ctor_param())
        # evaluate and cache the support for the frozen parameters
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.a, self.b = self.dist._get_support(*shapes)
    @property
    def random_state(self):
        # delegate RNG state to the wrapped distribution
        return self.dist._random_state
    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)
    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)
    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)
    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)
    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)
    def rvs(self, size=None, random_state=None):
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)
    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)
    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)
    def stats(self, moments='mv'):
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)
    def median(self):
        return self.dist.median(*self.args, **self.kwds)
    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)
    def var(self):
        return self.dist.var(*self.args, **self.kwds)
    def std(self):
        return self.dist.std(*self.args, **self.kwds)
    def moment(self, order=None, **kwds):
        return self.dist.moment(order, *self.args, **self.kwds, **kwds)
    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)
    def interval(self, confidence=None, **kwds):
        return self.dist.interval(confidence, *self.args, **self.kwds, **kwds)
    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        # expect() takes positional loc/scale, so re-parse the stored args;
        # the discrete signature has no `scale`.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)
    def support(self):
        return self.dist.support(*self.args, **self.kwds)
class rv_discrete_frozen(rv_frozen):
    """Frozen discrete distribution: adds pmf/logpmf delegation."""
    def pmf(self, k):
        return self.dist.pmf(k, *self.args, **self.kwds)
    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)
class rv_continuous_frozen(rv_frozen):
    """Frozen continuous distribution: adds pdf/logpdf delegation."""
    def pdf(self, x):
        return self.dist.pdf(x, *self.args, **self.kwds)
    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)
def argsreduce(cond, *args):
    """Return the elements of each arg where `cond` is True, as 1-d arrays.

    Each argument is broadcast against `cond` first; size-1 arguments are
    passed through unexpanded so they stay broadcastable with the result.
    """
    newargs = np.atleast_1d(*args)
    # np.atleast_1d returns a bare array for a single argument and a list
    # for several -- normalize to a list.
    if not isinstance(newargs, list):
        newargs = [newargs, ]
    if np.all(cond):
        # fast path: nothing filtered, just broadcast and flatten
        *newargs, cond = np.broadcast_arrays(*newargs, cond)
        return [arg.ravel() for arg in newargs]
    s = cond.shape
    # np.extract returns flattened arrays, which are not broadcastable
    # together unless they are either the same size or size == 1.
    return [(arg if np.size(arg) == 1
            else np.extract(cond, np.broadcast_to(arg, s)))
            for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
def _ncx2_log_pdf(x, df, nc):
    """Log-pdf of the noncentral chi-square distribution.

    Worked in log space; the exponentially scaled Bessel function `ive`
    absorbs the exp(-xs*ns) factor so large arguments do not overflow.
    """
    df2 = df/2.0 - 1.0
    xs, ns = np.sqrt(x), np.sqrt(nc)
    res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
    corr = ive(df2, xs*ns) / 2.0
    # Return res + log(corr), avoiding log(0) where the Bessel term underflows
    return _lazywhere(
        corr > 0,
        (res, corr),
        f=lambda r, c: r + np.log(c),
        fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
    """CDF of the noncentral chi-square: thin wrapper over `chndtr`."""
    return chndtr(x, df, nc)
class rv_generic:
    def __init__(self, seed=None):
        """Initialize RNG state and introspect subclass method signatures."""
        super().__init__()
        # figure out if _stats signature has 'moments' keyword
        sig = _getfullargspec(self._stats)
        self._stats_has_moments = ((sig.varkw is not None) or
                                   ('moments' in sig.args) or
                                   ('moments' in sig.kwonlyargs))
        self._random_state = check_random_state(seed)
        # Detect legacy _rvs implementations whose signature has no `size`
        # keyword; rvs() will then fall back to setting self._size and warn
        # that such signatures are deprecated.
        argspec = inspect.getfullargspec(self._rvs)
        self._rvs_uses_size_attribute = (argspec.varkw is None and
                                         'size' not in argspec.args and
                                         'size' not in argspec.kwonlyargs)
        self._rvs_size_warned = False
    @property
    def random_state(self):
        """Get or set the generator object for random variates."""
        return self._random_state
    @random_state.setter
    def random_state(self, seed):
        # check_random_state accepts None, an int seed, or a generator object
        self._random_state = check_random_state(seed)
    def __setstate__(self, state):
        """Restore from pickle, rebuilding the dynamically created methods."""
        try:
            self.__dict__.update(state)
            # subclasses' _attach_methods must make sure that
            # _attach_argparser_methods is called.
            self._attach_methods()
        except ValueError:
            # reconstitute an old pickle scipy<1.6, that contains
            # (_ctor_param, random_state) as state
            self._ctor_param = state[0]
            self._random_state = state[1]
            self.__init__()
    def _attach_methods(self):
        """Attach dynamically created methods to the instance.

        Subclass responsibility; per __setstate__, the override must ensure
        `_attach_argparser_methods` gets called.
        """
        raise NotImplementedError
    def _attach_argparser_methods(self):
        """Generate and attach the _parse_args* methods.

        `exec`s `self._parse_arg_template` (built by `_construct_argparser`)
        and binds the resulting functions to this instance.
        """
        ns = {}
        exec(self._parse_arg_template, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name, types.MethodType(ns[name], self))
    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser string for the shape arguments.

        Creates the `_parse_arg_template` attribute that is later used by
        `_attach_argparser_methods` to dynamically create and attach the
        `_parse_args`, `_parse_args_stats`, `_parse_args_rvs` methods.

        If ``self.shapes`` is a non-empty string, it is interpreted as a
        comma-separated list of shape parameters; otherwise the call
        signatures of `meths_to_inspect` are inspected to deduce the shapes.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, str):
                raise TypeError('shapes must be a string.')
            shapes = self.shapes.replace(',', ' ').split()
            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getfullargspec(meth)   # NB does not contain self
                args = shapes_args.args[1:]           # peel off 'x', too
                if args:
                    shapes_list.append(args)
                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.varkw is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.kwonlyargs:
                        raise TypeError(
                            'kwonly args are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')
            if shapes_list:
                shapes = shapes_list[0]
                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []
        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )
        # this string is used by _attach_argparser_methods
        self._parse_arg_template = parse_arg_template % dct
        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)
    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''
        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals
        tempdict['shapes_'] = self.shapes or ''
        if self.shapes and self.numargs == 1:
            # single shape: add a trailing comma so the tuple syntax is valid
            tempdict['shapes_'] += ','
        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''
        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            try:
                self.__doc__ = doccer.docformat(self.__doc__, tempdict)
            except TypeError as e:
                raise Exception("Unable to construct docstring for "
                                "distribution \"%s\": %s" %
                                (self.name, repr(e))) from e
        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
    def _construct_default_doc(self, longname=None, extradoc=None,
                               docdict=None, discrete='continuous'):
        """Construct instance docstring from the default template."""
        if longname is None:
            longname = 'A'
        if extradoc is None:
            extradoc = ''
        if extradoc.startswith('\n\n'):
            extradoc = extradoc[2:]
        self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
                                '\n\n%(before_notes)s\n', docheaders['notes'],
                                extradoc, '\n%(example)s'])
        self._construct_doc(docdict)
    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.

        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.
        """
        if isinstance(self, rv_continuous):
            return rv_continuous_frozen(self, *args, **kwds)
        else:
            return rv_discrete_frozen(self, *args, **kwds)
    def __call__(self, *args, **kwds):
        return self.freeze(*args, **kwds)
    __call__.__doc__ = freeze.__doc__
    # The actual calculation functions (no basic checking need be done)
    # If these are defined, the others won't be looked at.
    def _stats(self, *args, **kwds):
        # Default: no analytic stats known; all four are computed numerically.
        return None, None, None, None
    # The primed mu is a widely used notation for the noncentral moment.
    def _munp(self, n, *args):
        """Compute the n-th non-central moment via `generic_moment`."""
        # Silence floating point warnings from integration.
        with np.errstate(all='ignore'):
            vals = self.generic_moment(n, *args)
        return vals
    def _argcheck_rvs(self, *args, **kwargs):
        """Broadcast parameters and validate `size` for the rvs method."""
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)
        def squeeze_left(a):
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a
        # Eliminate trivial leading dimensions.  In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions of the broadcast parameters are ignored when `size`
        # is given.
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim
        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))
        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_
        # This compatibility test is not standard. In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1. Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             "the parameters. %s, %s, %s" % (size, size_,
                                                             bcast_shape))
        # split the flat broadcast list back into (shapes..., loc, scale)
        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]
        return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
    def _get_support(self, *args, **kwargs):
        """Return the support of the (unscaled, unshifted) distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support.
        """
        return self.a, self.b
    def _support_mask(self, x, *args):
        # Boolean mask: True where x lies in the closed support [a, b].
        a, b = self._get_support(*args)
        with np.errstate(invalid='ignore'):
            return (a <= x) & (x <= b)
    def _open_support_mask(self, x, *args):
        # Boolean mask: True where x lies strictly inside (a, b).
        a, b = self._get_support(*args)
        with np.errstate(invalid='ignore'):
            return (a < x) & (x < b)
    def _rvs(self, *args, size=None, random_state=None):
        # This method must handle size being a tuple, and it must
        # properly broadcast *args and size.  size might be
        # an empty tuple, which means a scalar random variate is to be
        # generated.
        # Use basic inverse cdf algorithm for RV generation as default.
        U = random_state.uniform(size=size)
        Y = self._ppf(U, *args)
        return Y
    def _logcdf(self, x, *args):
        # default: log of the cdf; -inf where cdf == 0
        with np.errstate(divide='ignore'):
            return log(self._cdf(x, *args))
    def _sf(self, x, *args):
        # default survival function: complement of the cdf
        return 1.0-self._cdf(x, *args)
    def _logsf(self, x, *args):
        # default: log of the survival function; -inf where sf == 0
        with np.errstate(divide='ignore'):
            return log(self._sf(x, *args))
    def _ppf(self, q, *args):
        # default percent point function: vectorized numerical inversion
        return self._ppfvec(q, *args)
    def _isf(self, q, *args):
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
    def rvs(self, *args, **kwds):
        """Draw random variates of the distribution.

        Standardized samples from ``self._rvs`` are transformed by
        ``loc``/``scale``.  A ``random_state`` keyword overrides the stored
        generator for this call only; ``discrete=True`` (used internally)
        casts results to integers.

        Raises
        ------
        ValueError
            If the shape parameters or ``scale`` are invalid.
        """
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            message = ("Domain error in arguments. The `scale` parameter must "
                       "be positive for all distributions; see the "
                       "distribution documentation for other restrictions.")
            raise ValueError(message)
        if np.all(scale == 0):
            # Degenerate case: all of the mass sits at loc.
            return loc*ones(size, 'd')
        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            random_state = check_random_state(rndm)
        else:
            random_state = self._random_state
        # Maintain backwards compatibility by setting self._size
        # for distributions that still need it.
        if self._rvs_uses_size_attribute:
            if not self._rvs_size_warned:
                warnings.warn(
                    f'The signature of {self._rvs} does not contain '
                    f'a "size" keyword. Such signatures are deprecated.',
                    np.VisibleDeprecationWarning)
                self._rvs_size_warned = True
            self._size = size
            self._random_state = random_state
            vals = self._rvs(*args)
        else:
            vals = self._rvs(*args, size=size, random_state=random_state)
        vals = vals * scale + loc
        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved
        # Cast to int if discrete
        if discrete:
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(np.int64)
        return vals
    def stats(self, *args, **kwds):
        """Mean ('m'), variance ('v'), skew ('s') and/or kurtosis ('k').

        Which statistics are returned is selected by the ``moments`` keyword
        (extracted by ``_parse_args_stats``).  Pieces the subclass's
        ``_stats`` leaves as None are reconstructed from non-central moments
        via ``_munp``.  Invalid parameter entries produce ``badvalue``.
        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # loc == loc filters out NaN locations.
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = np.full(shape(cond), fill_value=self.badvalue)
        # Use only entries that are valid in calculation
        if np.any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)
            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)
            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    # if mean is inf then var is also inf
                    with np.errstate(invalid='ignore'):
                        mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)
            if 's' in moments:
                if g1 is None:
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    with np.errstate(invalid='ignore'):
                        mu3 = (-mu*mu - 3*mu2)*mu + mu3p
                        g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)
            if 'k' in moments:
                if g2 is None:
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if g1 is None:
                        mu3 = None
                    else:
                        # (mu2**1.5) breaks down for nan and inf
                        mu3 = g1 * np.power(mu2, 1.5)
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        with np.errstate(invalid='ignore'):
                            mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
                    with np.errstate(invalid='ignore'):
                        mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
                        g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = [default.copy() for _ in moments]
        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """Differential entropy of the distribution with loc/scale applied.

        Entropy of the standardized distribution (``vecentropy``) shifted by
        ``log(scale)``; invalid parameter combinations yield ``badvalue``.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # loc == loc filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, scale, *args)
        goodscale = goodargs[0]
        goodargs = goodargs[1:]
        # H(loc + scale*Y) = H(Y) + log(scale).
        place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
        return output
def moment(self, order=None, *args, **kwds):
# This function was originally written with parameter `n`, but `n`
# is also the name of many distribution shape parameters.
# This block allows the function to accept both `n` and its
# replacement `order` during a deprecation period; it can be removed
# in the second release after 1.9.0.
# The logic to provide a DeprecationWarning only when `n` is passed
# as a keyword, accept the new keyword `order`, and otherwise be
# backward-compatible deserves explanation. We need to look out for
# the following:
# * Does the distribution have a shape named `n`?
# * Is `order` provided? It doesn't matter whether it is provided as a
# the order of the moment.
# * Is `n` provided as a keyword argument? It _does_ matter whether it
# is provided as a positional or keyword argument.
# - The first positional argument of `moment` has always been the
# order of moment, but
# - if `n` is provided as a keyword argument, its meaning depends
# on whether the distribution accepts `n` as a shape parameter.
has_shape_n = (self.shapes is not None
and "n" in (self.shapes.split(", ")))
got_order = order is not None
got_keyword_n = kwds.get("n", None) is not None
# These lead to the following cases.
# Case A: If the distribution _does_ accept `n` as a shape
# 1. If both `order` and `n` are provided, this is now OK:
# it is unambiguous that `order` is the order of the moment and `n`
# is the shape parameter. Previously, this would have caused an
# error because `n` was provided both as a keyword argument and
# as the first positional argument. I don't think it is credible for
# this as a backward compatibility break.
# 2. If only `n` is provided (as a keyword argument), this would have
# been an error in the past because `n` would have been treated as
# the order of the moment while the shape parameter would be
# missing. It is still the same type of error, but for a different
# reason: now, `n` is treated as the shape parameter while the
# order of the moment is missing.
# 3. If only `order` is provided, no special treament is needed.
# Clearly this value is intended to be the order of the moment,
# and the rest of the function will determine whether `n` is
# available as a shape parameter in `args`.
# 4. If neither `n` nor `order` is provided, this would have been an
# error (order of the moment is not provided) and it is still an
# error for the same reason.
# Case B: the distribution does _not_ accept `n` as a shape
# 1. If both `order` and `n` are provided, this was an error, and it
# still is an error: two values for same parameter.
# 2. If only `n` is provided (as a keyword argument), this was OK and
# is still OK, but there shold now be a `DeprecationWarning`. The
# value of `n` should be removed from `kwds` and stored in `order`.
# 3. If only `order` is provided, there was no problem before providing
# only the first argument of `moment`, and there is no problem with
# that now.
# 4. If neither `n` nor `order` is provided, this would have been an
# error (order of the moment is not provided), and it is still an
# error for the same reason.
if not got_order and ((not got_keyword_n) # A4 and B4
or (got_keyword_n and has_shape_n)): # A2
message = ("moment() missing 1 required "
"positional argument: `order`")
raise TypeError(message)
if got_keyword_n and not has_shape_n:
if got_order: # B1
# this will change to "moment got unexpected argument n"
message = "moment() got multiple values for first argument"
raise TypeError(message)
else: # B2
message = ("Use of keyword argument `n` for method "
"`moment` is deprecated. Use first positional "
"argument or keyword argument `order` instead.")
order = kwds.pop("n")
warnings.warn(message, DeprecationWarning, stacklevel=2)
n = order
# No special treatment of A1, A3, or B3 is needed because the order
# of the moment is now in variable `n` and the shape parameter, if
# needed, will be fished out of `args` or `kwds` by _parse_args
# A3 might still cause an error if the shape parameter called `n`
# is not found in `args`.
shapes, loc, scale = self._parse_args(*args, **kwds)
args = np.broadcast_arrays(*(*shapes, loc, scale))
*shapes, loc, scale = args
i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
i1 = np.logical_and(i0, loc == 0)
i2 = np.logical_and(i0, loc != 0)
args = argsreduce(i0, *shapes, loc, scale)
*shapes, loc, scale = args
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
val = np.empty(loc.shape) # val needs to be indexed by loc
val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
result = zeros(i0.shape)
place(result, ~i0, self.badvalue)
if i1.any():
res1 = scale[loc == 0]**n * val[loc == 0]
place(result, i1, res1)
if i2.any():
mom = [mu, mu2, g1, g2]
arrs = [i for i in mom if i is not None]
idx = [i for i in range(4) if mom[i] is not None]
if any(idx):
arrs = argsreduce(loc != 0, *arrs)
j = 0
for i in idx:
mom[i] = arrs[j]
j += 1
mu, mu2, g1, g2 = mom
args = argsreduce(loc != 0, *shapes, loc, scale, val)
*shapes, loc, scale, val = args
res2 = zeros(loc.shape, dtype='d')
fac = scale / loc
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
shapes)
res2 += comb(n, k, exact=True)*fac**k * valk
res2 += fac**n * val
res2 *= loc**n
place(result, i2, res2)
if result.ndim == 0:
return result.item()
return result
def median(self, *args, **kwds):
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
    def interval(self, confidence=None, *args, **kwds):
        """Equal-tailed interval containing fraction ``confidence`` of the
        distribution: ``(ppf((1-c)/2), ppf((1+c)/2))``.

        The old keyword name ``alpha`` is still accepted with a
        DeprecationWarning, unless this distribution has a shape parameter
        named ``alpha``.

        Raises
        ------
        TypeError
            If the confidence level is missing or supplied twice.
        ValueError
            If it lies outside [0, 1].
        """
        # This function was originally written with parameter `alpha`, but
        # `alpha` is also the name of a shape parameter of two distributions.
        # This block allows the function to accept both `alpha` and its
        # replacement `confidence` during a deprecation period; it can be
        # removed in the second release after 1.9.0.
        # See description of logic in `moment` method.
        has_shape_alpha = (self.shapes is not None
                           and "alpha" in (self.shapes.split(", ")))
        got_confidence = confidence is not None
        got_keyword_alpha = kwds.get("alpha", None) is not None
        if not got_confidence and ((not got_keyword_alpha)
                                   or (got_keyword_alpha and has_shape_alpha)):
            message = ("interval() missing 1 required positional argument: "
                       "`confidence`")
            raise TypeError(message)
        if got_keyword_alpha and not has_shape_alpha:
            if got_confidence:
                # this will change to "interval got unexpected argument alpha"
                message = "interval() got multiple values for first argument"
                raise TypeError(message)
            else:
                message = ("Use of keyword argument `alpha` for method "
                           "`interval` is deprecated. Use first positional "
                           "argument or keyword argument `confidence` "
                           "instead.")
                confidence = kwds.pop("alpha")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        alpha = confidence
        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
    def support(self, *args, **kwargs):
        """Support of the distribution as ``(lower, upper)`` with loc/scale
        applied; invalid parameter entries become ``badvalue``."""
        args, loc, scale = self._parse_args(*args, **kwargs)
        arrs = np.broadcast_arrays(*args, loc, scale)
        args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
        cond = self._argcheck(*args) & (scale > 0)
        _a, _b = self._get_support(*args)
        if cond.all():
            return _a * scale + loc, _b * scale + loc
        elif cond.ndim == 0:
            # Scalar case with invalid parameters.
            return self.badvalue, self.badvalue
        # promote bounds to at least float to fill in the badvalue
        _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
        out_a, out_b = _a * scale + loc, _b * scale + loc
        place(out_a, 1-cond, self.badvalue)
        place(out_b, 1-cond, self.badvalue)
        return out_a, out_b
    def nnlf(self, theta, x):
        """Negative log-likelihood of the data ``x`` for parameters ``theta``.

        ``theta`` is the vector (shape(s), loc, scale).  Returns ``inf`` for
        invalid parameters or when any observation lies outside the support.
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            return inf
        x = asarray((x-loc) / scale)
        # Jacobian of the loc/scale transform: one log(scale) per sample.
        n_log_scale = len(x) * log(scale)
        if np.any(~self._support_mask(x, *args)):
            return inf
        return self._nnlf(x, *args) + n_log_scale
def _nnlf(self, x, *args):
return -np.sum(self._logpxf(x, *args), axis=0)
    def _nnlf_and_penalty(self, x, args):
        """Negative log-likelihood where out-of-support or non-finite
        log-densities are replaced by a large finite penalty
        (``n_bad * 100 * log(_XMAX)``), keeping the objective finite for
        optimizers."""
        # Count and drop samples outside the support.
        cond0 = ~self._support_mask(x, *args)
        n_bad = np.count_nonzero(cond0, axis=0)
        if n_bad > 0:
            x = argsreduce(~cond0, x)[0]
        logpxf = self._logpxf(x, *args)
        finite_logpxf = np.isfinite(logpxf)
        n_bad += np.sum(~finite_logpxf, axis=0)
        if n_bad > 0:
            penalty = n_bad * log(_XMAX) * 100
            return -np.sum(logpxf[finite_logpxf], axis=0) + penalty
        return -np.sum(logpxf, axis=0)
    def _penalized_nnlf(self, theta, x):
        """Penalized negative log-likelihood used as the MLE fit objective.

        Unlike ``nnlf`` it stays finite for samples outside the support
        (see ``_nnlf_and_penalty``); still ``inf`` for invalid parameters.
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            return inf
        x = asarray((x-loc) / scale)
        # Jacobian of the loc/scale transform: one log(scale) per sample.
        n_log_scale = len(x) * log(scale)
        return self._nnlf_and_penalty(x, args) + n_log_scale
class _ShapeInfo:
def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
inclusive=(True, True)):
self.name = name
self.integrality = integrality
domain = list(domain)
if np.isfinite(domain[0]) and not inclusive[0]:
domain[0] = np.nextafter(domain[0], np.inf)
if np.isfinite(domain[1]) and not inclusive[1]:
domain[1] = np.nextafter(domain[1], -np.inf)
self.domain = domain
def _get_fixed_fit_value(kwds, names):
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
if extradoc is not None:
warnings.warn("extradoc is deprecated and will be removed in "
"SciPy 1.11.0", DeprecationWarning)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
    def _attach_methods(self):
        """Attach the dynamically created (vectorized) methods to the
        instance; these are the ones dropped by __getstate__."""
        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()
        # nin correction
        self._ppfvec = vectorize(self._ppf_single, otypes='d')
        self._ppfvec.nin = self.numargs + 1
        self.vecentropy = vectorize(self._entropy, otypes='d')
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self._cdfvec.nin = self.numargs + 1
        # moment_type 0 integrates over x, 1 over the quantile q.
        if self.moment_type == 0:
            self.generic_moment = vectorize(self._mom0_sc, otypes='d')
        else:
            self.generic_moment = vectorize(self._mom1_sc, otypes='d')
        # Because of the *args argument of _mom0_sc, vectorize cannot count the
        # number of arguments correctly.
        self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
    def _ppf_single(self, q, *args):
        """Invert the CDF at probability ``q`` via bracketing plus brentq.

        Infinite support endpoints are replaced by a finite bracket that is
        grown geometrically (factor 10) until it straddles the quantile;
        brentq then refines the root to ``self.xtol``.
        """
        factor = 10.
        left, right = self._get_support(*args)
        if np.isinf(left):
            left = min(-factor, right)
            while self._ppf_to_solve(left, q, *args) > 0.:
                left, right = left * factor, left
            # left is now such that cdf(left) <= q
            # if right has changed, then cdf(right) > q
        if np.isinf(right):
            right = max(factor, left)
            while self._ppf_to_solve(right, q, *args) < 0.:
                left, right = right, right * factor
            # right is now such that cdf(right) >= q
        return optimize.brentq(self._ppf_to_solve,
                               left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
    def _pdf(self, x, *args):
        """Default PDF: 5-point numerical derivative of the CDF (dx=1e-5)."""
        return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
p = self._pdf(x, *args)
with np.errstate(divide='ignore'):
return log(p)
def _logpxf(self, x, *args):
# continuous distributions have PDF, discrete have PMF, but sometimes
# the distinction doesn't matter. This lets us use `_logpxf` for both
return self._logpdf(x, *args)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
    def pdf(self, x, *args, **kwds):
        """Probability density function at ``x`` with loc/scale applied.

        Out-of-support points give 0; invalid parameters or NaN ``x`` give
        ``badvalue``.  Scalar inputs return a scalar.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        # Standardize: y = (x - loc) / scale.
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._support_mask(x, *args) & (scale > 0)
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # Change of variables: density of the standardized value / scale.
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            return output[()]
        return output
    def logpdf(self, x, *args, **kwds):
        """log of the PDF at ``x`` with loc/scale applied.

        Out-of-support points give -inf; invalid parameters or NaN ``x``
        give ``badvalue``.  Scalar inputs return a scalar.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._support_mask(x, *args) & (scale > 0)
        cond = cond0 & cond1
        # Default value is log(0) = -inf outside the support.
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._logpdf(*goodargs) - log(scale))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, x, *args, **kwds):
        """Cumulative distribution function at ``x`` with loc/scale applied.

        Points at/above the upper support bound give 1; invalid parameters
        or NaN ``x`` give ``badvalue``.  Scalar inputs return a scalar.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        cond2 = (x >= np.asarray(_b)) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._cdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, x, *args, **kwds):
        """log of the CDF at ``x`` with loc/scale applied.

        Points below the support give -inf, points at/above the upper bound
        give 0; invalid parameters or NaN ``x`` give ``badvalue``.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        cond2 = (x >= _b) & cond0
        cond = cond0 & cond1
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        # NOTE(review): (cond1 == cond1) is identically True, so this reduces
        # to (1-cond0) + isnan(x) as in the sibling methods — confirm before
        # simplifying.
        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, x, *args, **kwds):
        """Survival function (1 - CDF) at ``x`` with loc/scale applied.

        Points at/below the lower support bound give 1; invalid parameters
        or NaN ``x`` give ``badvalue``.  Scalar inputs return a scalar.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        cond2 = cond0 & (x <= _a)
        cond = cond0 & cond1
        output = zeros(shape(cond), dtyp)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._sf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, x, *args, **kwds):
        """log of the survival function at ``x`` with loc/scale applied.

        Points at/below the lower support bound give 0, points above the
        support give -inf; invalid parameters or NaN ``x`` give ``badvalue``.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        dtyp = np.find_common_type([x.dtype, np.float64], [])
        x = np.asarray((x - loc)/scale, dtype=dtyp)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = self._open_support_mask(x, *args) & (scale > 0)
        cond2 = cond0 & (x <= _a)
        cond = cond0 & cond1
        output = empty(shape(cond), dtyp)
        output.fill(NINF)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """Percent-point function (inverse CDF) at probability ``q``.

        q == 0 / q == 1 map to the (scaled) support endpoints; q outside
        [0, 1] or invalid parameters give ``badvalue``.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # loc == loc filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        cond2 = cond0 & (q == 0)
        cond3 = cond0 & (q == 1)
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue)
        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            place(output, cond, self._ppf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """Inverse survival function at probability ``q``.

        q == 1 maps to the lower, q == 0 to the upper (scaled) support
        endpoint; q outside [0, 1] or invalid parameters give ``badvalue``.
        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # loc == loc filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        cond2 = cond0 & (q == 1)
        cond3 = cond0 & (q == 0)
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue)
        lower_bound = _a * scale + loc
        upper_bound = _b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            place(output, cond, self._isf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
    def _reduce_func(self, args, kwds, data=None):
        """Build the fit() objective while honoring fixed parameters.

        Returns ``(x0, func, restore, args)``: the start vector over the
        free parameters, the objective over free parameters, a function that
        splices free values back among the fixed ones (None when nothing is
        fixed), and the full argument list.

        Fixed shape parameters may be given as f0/f1/..., f<name> or
        fix_<name>; the aliases are normalized to the numeric f<j> form.
        """
        shapes = []
        if self.shapes:
            shapes = self.shapes.replace(',', ' ').split()
            for j, s in enumerate(shapes):
                key = 'f' + str(j)
                names = [key, 'f' + s, 'fix_' + s]
                val = _get_fixed_fit_value(kwds, names)
                if val is not None:
                    kwds[key] = val
        args = list(args)
        Nargs = len(args)
        fixedn = []
        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
        x0 = []
        for n, key in enumerate(names):
            if key in kwds:
                fixedn.append(n)
                args[n] = kwds.pop(key)
            else:
                x0.append(args[n])
        methods = {"mle", "mm"}
        method = kwds.pop('method', "mle").lower()
        if method == "mm":
            # Method of moments matches the first moments of the data.
            n_params = len(shapes) + 2 - len(fixedn)
            exponents = (np.arange(1, n_params+1))[:, np.newaxis]
            data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)

            def objective(theta, x):
                return self._moment_error(theta, x, data_moments)
        elif method == "mle":
            objective = self._penalized_nnlf
        else:
            raise ValueError("Method '{0}' not available; must be one of {1}"
                             .format(method, methods))
        if len(fixedn) == 0:
            func = objective
            restore = None
        else:
            if len(fixedn) == Nargs:
                raise ValueError(
                    "All parameters fixed. There is nothing to optimize.")

            def restore(args, theta):
                # Replace entries not in fixedn with values from theta, so
                # only the free parameters vary while the objective always
                # sees the full parameter vector.
                i = 0
                for n in range(Nargs):
                    if n not in fixedn:
                        args[n] = theta[i]
                        i += 1
                return args

            def func(theta, x):
                newtheta = restore(args[:], theta)
                return objective(newtheta, x)
        return x0, func, restore, args
    def _moment_error(self, theta, x, data_moments):
        """Method-of-moments objective: sum of squared relative errors
        between the data moments and the distribution's moments.

        Returns ``inf`` for invalid parameters; raises ValueError when a
        distribution moment is NaN.
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            return inf
        dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
                                 for i in range(len(data_moments))])
        if np.any(np.isnan(dist_moments)):
            raise ValueError("Method of moments encountered a non-finite "
                             "distribution moment and cannot continue. "
                             "Consider trying method='MLE'.")
        # Relative errors, with a floor of 1e-8 on the denominator.
        return (((data_moments - dist_moments) /
                 np.maximum(np.abs(data_moments), 1e-8))**2).sum()
    def fit(self, data, *args, **kwds):
        """Fit the distribution to ``data`` by MLE (default) or, with
        ``method='mm'``, by the method of moments.

        Positional args and loc/scale keywords give starting values;
        f0/f<name>/fix_<name>, floc and fscale freeze parameters (see
        _reduce_func).  Returns the fitted (shapes..., loc, scale) tuple.

        Raises
        ------
        TypeError
            For too many positional or unknown keyword arguments.
        RuntimeError
            When the data contain non-finite values.
        Exception
            When the optimizer converges to invalid parameters, or (MM) to a
            non-finite objective.
        """
        data = np.asarray(data)
        method = kwds.get('method', "mle").lower()
        Narg = len(args)
        if Narg > self.numargs:
            raise TypeError("Too many input arguments.")
        if not np.isfinite(data).all():
            raise RuntimeError("The data contains non-finite values.")
        start = [None]*2
        if (Narg < self.numargs) or not ('loc' in kwds and
                                         'scale' in kwds):
            # get distribution-specific starting locations
            start = self._fitstart(data)
            args += start[Narg:-2]
        loc = kwds.pop('loc', start[-2])
        scale = kwds.pop('scale', start[-1])
        args += (loc, scale)
        x0, func, restore, args = self._reduce_func(args, kwds, data=data)
        optimizer = kwds.pop('optimizer', optimize.fmin)
        # convert string to function in scipy.optimize
        optimizer = _fit_determine_optimizer(optimizer)
        # by now kwds must be empty, since everybody took what they needed
        if kwds:
            raise TypeError("Unknown arguments: %s." % kwds)
        vals = optimizer(func, x0, args=(ravel(data),), disp=0)
        obj = func(vals, data)
        if restore is not None:
            vals = restore(args, vals)
        vals = tuple(vals)
        loc, scale, shapes = self._unpack_loc_scale(vals)
        if not (np.all(self._argcheck(*shapes)) and scale > 0):
            raise Exception("Optimization converged to parameters that are "
                            "outside the range allowed by the distribution.")
        if method == 'mm':
            if not np.isfinite(obj):
                raise Exception("Optimization failed: either a data moment "
                                "or fitted distribution moment is "
                                "non-finite.")
        return vals
    def _fit_loc_scale_support(self, data, *args):
        """Moment-based loc/scale estimates adjusted so the data fall inside
        the (scaled) support; heuristics use a 10% margin of the data width."""
        data = np.asarray(data)
        # Estimate loc and scale by the method of moments first.
        loc_hat, scale_hat = self.fit_loc_scale(data, *args)
        # Compute the support according to the shape parameters.
        self._argcheck(*args)
        _a, _b = self._get_support(*args)
        a, b = _a, _b
        support_width = b - a
        # If the support is empty, return the moment-based estimates.
        if support_width <= 0:
            return loc_hat, scale_hat
        # Proposed support implied by the moment-based estimates.
        a_hat = loc_hat + a * scale_hat
        b_hat = loc_hat + b * scale_hat
        data_a = np.min(data)
        data_b = np.max(data)
        # Keep the moment-based estimates if all data fit inside.
        if a_hat < data_a and data_b < b_hat:
            return loc_hat, scale_hat
        # Otherwise construct compatible estimates with a 10% margin.
        data_width = data_b - data_a
        rel_margin = 0.1
        margin = data_width * rel_margin
        # Finite support: both loc and scale carry information.
        if support_width < np.inf:
            loc_hat = (data_a - a) - margin
            scale_hat = (data_width + 2 * margin) / support_width
            return loc_hat, scale_hat
        # Half-infinite support: only loc is adjusted; scale stays 1.
        if a > -np.inf:
            return (data_a - a) - margin, 1
        elif b < np.inf:
            return (data_b - b) + margin, 1
        else:
            # Doubly infinite support: the moment-based estimates were
            # already returned above, so this point should not be reached.
            raise RuntimeError
def fit_loc_scale(self, data, *args):
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
    def _entropy(self, *args):
        """Differential entropy by quadrature of entr(pdf) over the support.

        If the full-support integral comes back NaN, retry over the central
        [ppf(1e-10), ppf(1 - 1e-10)] range, substituting only the infinite
        endpoints.
        """
        def integ(x):
            val = self._pdf(x, *args)
            return entr(val)

        _a, _b = self._get_support(*args)
        # Overflow can occur for unbounded supports; ignore those warnings.
        with np.errstate(over='ignore'):
            h = integrate.quad(integ, _a, _b)[0]
        if not np.isnan(h):
            return h
        else:
            # Integration failed; try again on a quantile-trimmed interval.
            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
            if np.isinf(_b):
                upper = upp
            else:
                upper = _b
            if np.isinf(_a):
                lower = low
            else:
                lower = _a
            return integrate.quad(integ, lower, upper)[0]
    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
               conditional=False, **kwds):
        """Expected value of ``func(X)`` (identity by default) by quadrature.

        Integration runs from ``lb`` to ``ub`` (defaults: the loc/scale-
        shifted support).  With ``conditional=True`` the result is divided
        by the probability mass of the interval.  Extra ``**kwds`` are
        forwarded to ``integrate.quad``.
        """
        lockwds = {'loc': loc,
                   'scale': scale}
        self._argcheck(*args)
        _a, _b = self._get_support(*args)
        if func is None:
            def fun(x, *args):
                return x * self.pdf(x, *args, **lockwds)
        else:
            def fun(x, *args):
                return func(x) * self.pdf(x, *args, **lockwds)
        if lb is None:
            lb = loc + _a * scale
        if ub is None:
            ub = loc + _b * scale
        if conditional:
            # P(lb <= X <= ub) via a survival-function difference.
            invfac = (self.sf(lb, *args, **lockwds)
                      - self.sf(ub, *args, **lockwds))
        else:
            invfac = 1.0
        kwds['args'] = args
        # Silence floating-point warnings from the quadrature.
        with np.errstate(all='ignore'):
            vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
        return vals
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
param_info = shape_info + [loc_info, scale_info]
return param_info
def _drv2_moment(self, n, *args):
    """Non-central moment of a discrete distribution by summation.

    Module-level helper: it is vectorized and bound as a method in
    rv_discrete._attach_methods, hence the explicit ``self``.
    """
    def fun(x):
        return np.power(x, n) * self._pmf(x, *args)
    _a, _b = self._get_support(*args)
    # Sum fun over the lattice, centered near the median, with step self.inc.
    return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args):
    """Discrete percent-point function at a single ``q`` by bisection on the
    CDF.

    Module-level helper: it is vectorized and bound as rv_discrete._ppfvec
    in _attach_methods, hence the explicit ``self``.
    """
    _a, _b = self._get_support(*args)
    b = _b
    a = _a
    if isinf(b):
        # Grow an upper bracket b with cdf(b) >= q.
        b = int(max(100*q, 10))
        while 1:
            if b >= _b:
                # NOTE(review): with _b == inf (the only way into this
                # isinf branch) this condition can never hold for finite b,
                # so this guard looks unreachable — confirm.
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):
        # Grow a lower bracket a with cdf(a) <= q.
        a = int(min(-100*q, -10))
        while 1:
            if a <= _a:
                # NOTE(review): this assigns qb, not qa, and like the branch
                # above it looks unreachable (a is finite while _a == -inf
                # here); if it ever executed, qa would be unbound below —
                # confirm before relying on it.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)
    # Bisection between a (cdf <= q) and b (cdf >= q).
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
class rv_discrete(rv_generic):
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
return super(rv_discrete, cls).__new__(rv_sample)
else:
return super(rv_discrete, cls).__new__(cls)
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        """Initialise a generic discrete distribution.

        ``a``/``b`` bound the support, ``badvalue`` is returned for invalid
        arguments (defaults to nan), ``moment_tol`` is the convergence
        tolerance for generic moment summation, ``inc`` the support
        increment.  ``values`` must be None here: a non-None value dispatches
        construction to ``rv_sample`` in ``__new__``.  ``extradoc`` is
        deprecated.
        """
        super().__init__(seed)
        if extradoc is not None:
            warnings.warn("extradoc is deprecated and will be removed in "
                          "SciPy 1.11.0", DeprecationWarning)
        # Keep the constructor arguments so instances can be re-created
        # (e.g. by freezing / pickling support).
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.a = a
        self.b = b
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        # __new__ routes values != None to rv_sample, so reaching this body
        # with a values table indicates misuse.
        if values is not None:
            raise ValueError("rv_discrete.__init__(..., values != None, ...)")
        # Build the loc-argument parser from the _pmf/_cdf signatures
        # (discrete RVs have no scale, hence the fixed 'loc, 1' output).
        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                  locscale_in='loc=0',
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
    def _attach_methods(self):
        """Attach the dynamically-created methods (vectorized cdf / ppf /
        generic-moment helpers) to this rv_discrete instance."""
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self.vecentropy = vectorize(self._entropy)
        self._attach_argparser_methods()
        # nin (number of vectorized inputs) can only be set once numargs is
        # known, i.e. after the argparser methods are attached above.  Each
        # helper takes (self, x, *shape_args) -> numargs + 2 inputs.
        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
        _vec_generic_moment.nin = self.numargs + 2
        self.generic_moment = types.MethodType(_vec_generic_moment, self)
        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
        _vppf.nin = self.numargs + 2
        self._ppfvec = types.MethodType(_vppf, self)
        # _cdf_single takes (k, *shape_args) -> numargs + 1 inputs.
        self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
    def _logpxf(self, k, *args):
        # Alias for the log-pmf; the neutral 'pxf' name apparently exists so
        # shared code can use a single method name for both
        # discrete and continuous distributions.
        return self._logpmf(k, *args)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-1]
scale = 1
args = tuple(theta[:-1])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
    def pmf(self, k, *args, **kwds):
        """Probability mass function at ``k`` of the given RV.

        Accepts shape parameters and an optional ``loc``; invalid shape
        parameters or NaN quantiles yield ``self.badvalue``.
        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # Shift to the standardized variable.
        k = asarray((k-loc))
        # cond0: valid shape params; cond1: k in-support and on the lattice.
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            # Clip guards against tiny numerical excursions outside [0, 1].
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            # Scalar input -> scalar output, not a 0-d array.
            return output[()]
        return output
    def logpmf(self, k, *args, **kwds):
        """Log of the probability mass function at ``k`` of the given RV."""
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        # Outside the support the log-pmf is -inf (log of zero mass).
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, k, *args, **kwds):
        """Cumulative distribution function of the given RV at ``k``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # (cond0 == cond0) is an all-True mask used to broadcast cond2 to
        # the output shape; at/beyond the upper support the CDF is exactly 1.
        place(output, cond2*(cond0 == cond0), 1.0)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, k, *args, **kwds):
        """Log of the cumulative distribution function at ``k``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        # Default to -inf (log of zero probability) below the support.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # log(1) = 0 at/beyond the upper end of the support.
        place(output, cond2*(cond0 == cond0), 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, k, *args, **kwds):
        """Survival function (1 - cdf) of the given RV at ``k``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        # Below the support the survival function is exactly 1.
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, k, *args, **kwds):
        """Log of the survival function of the given RV at ``k``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        # Default to -inf (log of zero survival) beyond the support.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        # log(1) = 0 below the lower end of the support.
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of cdf) of the given RV at ``q``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        # (loc == loc) folds NaN locations into the invalid mask.
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output dtype 'd' so it can hold nan and inf
        # q == 0 maps to one below the lower support end (the CDF there is
        # zero); q == 1 maps to the upper end of the shifted support.
        place(output, (q == 0)*(cond == cond), _a-1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of ``sf``) at ``q``."""
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond3 = (q == 0) & cond0
        cond = cond0 & cond1
        # same masking scheme as ppf, with the endpoint mapping mirrored
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output dtype 'd' to handle nan and inf
        # isf is decreasing in q: q == 1 maps to the lower end of the
        # shifted support, q == 0 to the upper end.
        lower_bound = _a - 1 + loc
        upper_bound = _b + loc
        place(output, cond2*(cond == cond), lower_bound)
        place(output, cond3*(cond == cond), upper_bound)
        # call place only if there is at least one valid argument
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # add loc back after inverting on the standardized variable
            place(output, cond, self._isf(*goodargs) + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
               conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
        """Expected value of ``func(X)`` for a discrete RV, computed by
        chunked summation of ``func(k) * pmf(k)`` over the support.

        ``maxcount`` bounds the number of summed terms, ``tolerance`` is the
        per-chunk early-stopping threshold and ``chunksize`` the evaluation
        batch size.  With ``conditional=True`` the result is normalised by
        the probability mass of ``[lb, ub]``.
        """
        if func is None:
            def fun(x):
                # loc and args from outer scope
                return (x+loc)*self._pmf(x, *args)
        else:
            def fun(x):
                # loc and args from outer scope
                return func(x+loc)*self._pmf(x, *args)
        # used pmf because _pmf does not check support in randint and there
        # might be problems(?) with correct self.a, self.b at this stage maybe
        # not anymore, seems to work now with _pmf
        _a, _b = self._get_support(*args)
        if lb is None:
            lb = _a
        else:
            lb = lb - loc  # convert bound for standardized distribution
        if ub is None:
            ub = _b
        else:
            ub = ub - loc  # convert bound for standardized distribution
        if conditional:
            # sf(lb-1) - sf(ub) is the probability mass of [lb, ub].
            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
        else:
            invfac = 1.0
        if isinstance(self, rv_sample):
            # Finite table: sum exactly rather than iterating in chunks.
            res = self._expect(fun, lb, ub)
            return res / invfac
        # iterate over the support, starting from the median
        x0 = self.ppf(0.5, *args)
        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
        return res / invfac
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
param_info = shape_info + [loc_info]
return param_info
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
            chunksize=32):
    """Sum ``fun`` over the integer lattice [lb, ub] with step ``inc``.

    For a small support the sum is direct; otherwise the summation starts
    at ``x0`` (typically the median) and sweeps both tails outward in
    chunks, stopping a tail once a chunk contributes less than
    ``tolerance`` per element, or warning once ``maxcount`` terms have been
    summed without convergence.
    """
    # short-circuit if the support size is small enough
    if (ub - lb) <= chunksize:
        supp = np.arange(lb, ub+1, inc)
        vals = fun(supp)
        return np.sum(vals)
    # otherwise, iterate starting from x0; clamp it into [lb, ub] first
    if x0 < lb:
        x0 = lb
    if x0 > ub:
        x0 = ub
    count, tot = 0, 0.
    # iterate over [x0, ub] inclusive
    for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            break
        if count > maxcount:
            # Upper tail did not converge: bail out entirely (the lower
            # tail is deliberately not attempted in this case).
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            return tot
    # iterate over [lb, x0), moving downward
    for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
        count += x.size
        delta = np.sum(fun(x))
        tot += delta
        if abs(delta) < tolerance * x.size:
            break
        if count > maxcount:
            warnings.warn('expect(): sum did not converge', RuntimeWarning)
            break
    return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        """Discrete distribution defined explicitly by a table of support
        points and probabilities, supplied as ``values=(xk, pk)``.
        """
        # Skip rv_discrete.__init__ (which rejects values != None) and go
        # straight to the rv_generic initialiser.
        super(rv_discrete, self).__init__(seed)
        if extradoc is not None:
            warnings.warn("extradoc is deprecated and will be removed in "
                          "SciPy 1.11.0", DeprecationWarning)
        if values is None:
            raise ValueError("rv_sample.__init__(..., values=None,...)")
        # cf generic freeze
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        self.vecentropy = self._entropy
        xk, pk = values
        # Validate the table: matching shapes, non-negative, sums to 1.
        if np.shape(xk) != np.shape(pk):
            raise ValueError("xk and pk must have the same shape.")
        if np.less(pk, 0.0).any():
            raise ValueError("All elements of pk must be non-negative.")
        if not np.allclose(np.sum(pk), 1):
            raise ValueError("The sum of provided pk is not 1.")
        # Store the table sorted by support point; qvals is the running
        # (cumulative) probability used by _cdf/_ppf.
        indx = np.argsort(np.ravel(xk))
        self.xk = np.take(np.ravel(xk), indx, 0)
        self.pk = np.take(np.ravel(pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        self.qvals = np.cumsum(self.pk, axis=0)
        self.shapes = ' '  # bypass inspection
        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
    def _attach_methods(self):
        """Attach dynamically-created methods; a table-backed distribution
        only needs the argument-parser methods."""
        self._attach_argparser_methods()
    def _get_support(self, *args):
        """Return the support endpoints, i.e. the smallest and largest
        tabulated point (``args`` is ignored: no shape parameters)."""
        return self.a, self.b
    def _pmf(self, x):
        # One boolean condition per support point; np.select picks the
        # matching tabulated probability (broadcast to x's shape) and 0 for
        # values not in the table.
        return np.select([x == k for k in self.xk],
                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
    def _cdf(self, x):
        # NOTE(review): x[:, None] assumes x arrives 1-D here -- confirm
        # against the callers.
        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
        # Index of the last support point <= x: argmax finds the first
        # strictly-greater table entry, hence the -1 (all-False argmax gives
        # 0, so x beyond the table wraps to qvals[-1] == 1).
        indx = np.argmax(xxk > xx, axis=-1) - 1
        return self.qvals[indx]
    def _ppf(self, q):
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        # First index at which the cumulative probability reaches q.
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
    def _entropy(self):
        # Exact entropy from the stored probability table.
        return stats.entropy(self.pk)
    def generic_moment(self, n):
        """n-th non-central moment, computed exactly from the table as
        ``sum(xk**n * pk)``; vectorized over ``n``."""
        n = asarray(n)
        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
def _check_shape(argshape, size):
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
    """Collect the names of distribution instances and generator classes.

    Parameters
    ----------
    namespace_pairs : sequence of (str, object)
        Typically ``vars(module).items()``.
    rv_base_class : type
        Base class of the random-variable generator classes.

    Returns
    -------
    distn_names : list of str
        Names bound to instances of ``rv_base_class``.
    distn_gen_names : list of str
        Names ending in ``"_gen"`` bound to subclasses of ``rv_base_class``.
    """
    # Private names (leading underscore) never participate.
    public = [(name, value) for name, value in namespace_pairs
              if not name.startswith('_')]
    distn_gen_names = [name for name, value in public
                       if name.endswith('_gen')
                       and issubclass(value, rv_base_class)]
    distn_names = [name for name, value in public
                   if isinstance(value, rv_base_class)]
    return distn_names, distn_gen_names
| true | true |
f721b6c0725948384b13004e8039bea5c71d7ff9 | 14,338 | py | Python | example/ui/example_pyqt5_ui.py | blurstudio/QDarkStyleSheet | 68c9e3177742c47b158594260b57f591b5238e7a | [
"MIT"
] | 8 | 2016-08-28T18:28:05.000Z | 2020-09-09T15:41:52.000Z | example/ui/example_pyqt5_ui.py | blurstudio/QDarkStyleSheet | 68c9e3177742c47b158594260b57f591b5238e7a | [
"MIT"
] | null | null | null | example/ui/example_pyqt5_ui.py | blurstudio/QDarkStyleSheet | 68c9e3177742c47b158594260b57f591b5238e7a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'example.ui'
#
# Created: Sat May 17 20:31:42 2014
# by: PyQt5 UI code generator 5.2.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the widget tree for the example window.

        uic-generated code (see header warning: hand edits are lost on
        regeneration); statement order matters because parents must exist
        before their children.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(880, 600)
        # Central widget: an east-side, closable tab widget.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setTabPosition(QtWidgets.QTabWidget.East)
        self.tabWidget.setTabsClosable(True)
        self.tabWidget.setObjectName("tabWidget")
        # Tab 1: a group box containing a two-page tool box.
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.gridLayout = QtWidgets.QGridLayout(self.tab)
        self.gridLayout.setObjectName("gridLayout")
        self.groupBox = QtWidgets.QGroupBox(self.tab)
        self.groupBox.setObjectName("groupBox")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.toolBox = QtWidgets.QToolBox(self.groupBox)
        self.toolBox.setObjectName("toolBox")
        # Tool box page 1: a single line edit.
        self.page = QtWidgets.QWidget()
        self.page.setGeometry(QtCore.QRect(0, 0, 388, 390))
        self.page.setObjectName("page")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.page)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.lineEdit = QtWidgets.QLineEdit(self.page)
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout_4.addWidget(self.lineEdit, 0, 0, 1, 1)
        self.toolBox.addItem(self.page, "")
        # Tool box page 2: a list widget with 8 placeholder items (texts are
        # assigned later in retranslateUi).
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setGeometry(QtCore.QRect(0, 0, 388, 390))
        self.page_2.setObjectName("page_2")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.page_2)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.listWidget = QtWidgets.QListWidget(self.page_2)
        self.listWidget.setObjectName("listWidget")
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        self.gridLayout_5.addWidget(self.listWidget, 0, 0, 1, 1)
        self.toolBox.addItem(self.page_2, "")
        self.verticalLayout_3.addWidget(self.toolBox)
        self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        # Tab 2: group box with a label, radio button, check box and a tree
        # widget holding two top-level items.
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_2)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_2.setObjectName("groupBox_2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label = QtWidgets.QLabel(self.groupBox_2)
        self.label.setObjectName("label")
        self.verticalLayout_4.addWidget(self.label)
        self.radioButton = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton.setObjectName("radioButton")
        self.verticalLayout_4.addWidget(self.radioButton)
        self.checkBox = QtWidgets.QCheckBox(self.groupBox_2)
        self.checkBox.setObjectName("checkBox")
        self.verticalLayout_4.addWidget(self.checkBox)
        self.treeWidget = QtWidgets.QTreeWidget(self.groupBox_2)
        self.treeWidget.setObjectName("treeWidget")
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        self.verticalLayout_4.addWidget(self.treeWidget)
        self.gridLayout_2.addWidget(self.groupBox_2, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_2, "")
        self.horizontalLayout_2.addWidget(self.tabWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with a nested submenu, plus a status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 880, 25))
        self.menubar.setObjectName("menubar")
        self.menuMenu = QtWidgets.QMenu(self.menubar)
        self.menuMenu.setObjectName("menuMenu")
        self.menuSubmenu_2 = QtWidgets.QMenu(self.menuMenu)
        self.menuSubmenu_2.setObjectName("menuSubmenu_2")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Dock widget 1: buttons (incl. a disabled one), double spin box,
        # tool button, combo box, text edit and a progress bar.
        self.dockWidget1 = QtWidgets.QDockWidget(MainWindow)
        self.dockWidget1.setObjectName("dockWidget1")
        self.dockWidgetContents = QtWidgets.QWidget()
        self.dockWidgetContents.setObjectName("dockWidgetContents")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.pushButton_3 = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton_3.setEnabled(False)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.dockWidgetContents)
        self.doubleSpinBox.setObjectName("doubleSpinBox")
        self.horizontalLayout.addWidget(self.doubleSpinBox)
        self.toolButton = QtWidgets.QToolButton(self.dockWidgetContents)
        self.toolButton.setObjectName("toolButton")
        self.horizontalLayout.addWidget(self.toolButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.comboBox = QtWidgets.QComboBox(self.dockWidgetContents)
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.verticalLayout.addWidget(self.comboBox)
        self.textEdit = QtWidgets.QTextEdit(self.dockWidgetContents)
        self.textEdit.setObjectName("textEdit")
        self.verticalLayout.addWidget(self.textEdit)
        self.progressBar = QtWidgets.QProgressBar(self.dockWidgetContents)
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout.addWidget(self.progressBar)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.dockWidget1.setWidget(self.dockWidgetContents)
        MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget1)
        # Dock widget 2: a 4x2 table with placeholder header items.
        self.dockWidget2 = QtWidgets.QDockWidget(MainWindow)
        self.dockWidget2.setObjectName("dockWidget2")
        self.dockWidgetContents_2 = QtWidgets.QWidget()
        self.dockWidgetContents_2.setObjectName("dockWidgetContents_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.dockWidgetContents_2)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.tableWidget = QtWidgets.QTableWidget(self.dockWidgetContents_2)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(4)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        self.gridLayout_3.addWidget(self.tableWidget, 0, 0, 1, 1)
        self.dockWidget2.setWidget(self.dockWidgetContents_2)
        MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget2)
        # Tool bar and actions shared with the menu.
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.actionAction = QtWidgets.QAction(MainWindow)
        self.actionAction.setObjectName("actionAction")
        self.actionSub_menu = QtWidgets.QAction(MainWindow)
        self.actionSub_menu.setObjectName("actionSub_menu")
        self.actionAction_C = QtWidgets.QAction(MainWindow)
        self.actionAction_C.setObjectName("actionAction_C")
        self.menuSubmenu_2.addAction(self.actionSub_menu)
        self.menuSubmenu_2.addAction(self.actionAction_C)
        self.menuMenu.addAction(self.actionAction)
        self.menuMenu.addAction(self.menuSubmenu_2.menuAction())
        self.menubar.addAction(self.menuMenu.menuAction())
        self.toolBar.addAction(self.actionAction)
        self.toolBar.addAction(self.actionSub_menu)
        # Final wiring: assign texts, pick the initial tab/page and
        # auto-connect slots by object name.
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        self.toolBox.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign all user-visible (translatable) texts.

        uic-generated; called from setupUi and again whenever the
        application language changes.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.groupBox.setTitle(_translate("MainWindow", "GroupBox"))
        self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "Page 1"))
        # Sorting is suspended while item texts are (re)assigned so the row
        # order stays stable, then restored.
        __sortingEnabled = self.listWidget.isSortingEnabled()
        self.listWidget.setSortingEnabled(False)
        item = self.listWidget.item(0)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(1)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(2)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(3)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(4)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(5)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(6)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(7)
        item.setText(_translate("MainWindow", "New Item"))
        self.listWidget.setSortingEnabled(__sortingEnabled)
        self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Page 2"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
        self.groupBox_2.setTitle(_translate("MainWindow", "GroupBox"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.radioButton.setText(_translate("MainWindow", "RadioButton"))
        self.checkBox.setText(_translate("MainWindow", "CheckBox"))
        self.treeWidget.headerItem().setText(0, _translate("MainWindow", "qdz"))
        # Same suspend/restore dance for the tree widget.
        __sortingEnabled = self.treeWidget.isSortingEnabled()
        self.treeWidget.setSortingEnabled(False)
        self.treeWidget.topLevelItem(0).setText(0, _translate("MainWindow", "qzd"))
        self.treeWidget.topLevelItem(1).setText(0, _translate("MainWindow", "effefe"))
        self.treeWidget.setSortingEnabled(__sortingEnabled)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
        self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
        self.menuSubmenu_2.setTitle(_translate("MainWindow", "Submenu 2"))
        self.dockWidget1.setWindowTitle(_translate("MainWindow", "Dock widget 1"))
        self.pushButton_2.setText(_translate("MainWindow", "PushButton"))
        self.pushButton.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_3.setText(_translate("MainWindow", "Disabled"))
        self.toolButton.setText(_translate("MainWindow", "..."))
        self.comboBox.setItemText(0, _translate("MainWindow", "Item 0"))
        self.comboBox.setItemText(1, _translate("MainWindow", "Item 2"))
        self.dockWidget2.setWindowTitle(_translate("MainWindow", "Dock widget 2"))
        item = self.tableWidget.verticalHeaderItem(0)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(1)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(2)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(3)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "New Column"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "New Column 2"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionAction.setText(_translate("MainWindow", "Action"))
        self.actionSub_menu.setText(_translate("MainWindow", "Action B"))
        self.actionSub_menu.setToolTip(_translate("MainWindow", "submenu"))
        self.actionAction_C.setText(_translate("MainWindow", "Action C"))
| 54.725191 | 104 | 0.707491 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Builds and translates the widget tree of the application main window.

    NOTE(review): the structure (setupUi/retranslateUi pair, setObjectName on
    every widget) matches what pyuic5 emits from a Qt Designer ``.ui`` file —
    if so, regenerate from the ``.ui`` file instead of editing by hand
    (TODO: confirm).
    """
    def setupUi(self, MainWindow):
        """Create all widgets, layouts, menus, docks, toolbar and actions on
        ``MainWindow``, then connect slots by object name."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(880, 600)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        # Central tab widget: tabs on the east side, closable.
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setTabPosition(QtWidgets.QTabWidget.East)
        self.tabWidget.setTabsClosable(True)
        self.tabWidget.setObjectName("tabWidget")
        # Tab 1: group box containing a two-page tool box.
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.gridLayout = QtWidgets.QGridLayout(self.tab)
        self.gridLayout.setObjectName("gridLayout")
        self.groupBox = QtWidgets.QGroupBox(self.tab)
        self.groupBox.setObjectName("groupBox")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.toolBox = QtWidgets.QToolBox(self.groupBox)
        self.toolBox.setObjectName("toolBox")
        # Tool box page 1: a single line edit.
        self.page = QtWidgets.QWidget()
        self.page.setGeometry(QtCore.QRect(0, 0, 388, 390))
        self.page.setObjectName("page")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.page)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.lineEdit = QtWidgets.QLineEdit(self.page)
        self.lineEdit.setObjectName("lineEdit")
        self.gridLayout_4.addWidget(self.lineEdit, 0, 0, 1, 1)
        self.toolBox.addItem(self.page, "")
        # Tool box page 2: a list widget pre-populated with 8 items
        # (their texts are assigned in retranslateUi).
        self.page_2 = QtWidgets.QWidget()
        self.page_2.setGeometry(QtCore.QRect(0, 0, 388, 390))
        self.page_2.setObjectName("page_2")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.page_2)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.listWidget = QtWidgets.QListWidget(self.page_2)
        self.listWidget.setObjectName("listWidget")
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        item = QtWidgets.QListWidgetItem()
        self.listWidget.addItem(item)
        self.gridLayout_5.addWidget(self.listWidget, 0, 0, 1, 1)
        self.toolBox.addItem(self.page_2, "")
        self.verticalLayout_3.addWidget(self.toolBox)
        self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab, "")
        # Tab 2: label, radio button, check box and a tree widget.
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_2)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2)
        self.groupBox_2.setObjectName("groupBox_2")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_2)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label = QtWidgets.QLabel(self.groupBox_2)
        self.label.setObjectName("label")
        self.verticalLayout_4.addWidget(self.label)
        self.radioButton = QtWidgets.QRadioButton(self.groupBox_2)
        self.radioButton.setObjectName("radioButton")
        self.verticalLayout_4.addWidget(self.radioButton)
        self.checkBox = QtWidgets.QCheckBox(self.groupBox_2)
        self.checkBox.setObjectName("checkBox")
        self.verticalLayout_4.addWidget(self.checkBox)
        self.treeWidget = QtWidgets.QTreeWidget(self.groupBox_2)
        self.treeWidget.setObjectName("treeWidget")
        # Two top-level tree items; the local binding is reused because only
        # the parentage passed to the constructor matters here.
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget)
        self.verticalLayout_4.addWidget(self.treeWidget)
        self.gridLayout_2.addWidget(self.groupBox_2, 0, 0, 1, 1)
        self.tabWidget.addTab(self.tab_2, "")
        self.horizontalLayout_2.addWidget(self.tabWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar with one menu that contains a submenu; status bar.
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 880, 25))
        self.menubar.setObjectName("menubar")
        self.menuMenu = QtWidgets.QMenu(self.menubar)
        self.menuMenu.setObjectName("menuMenu")
        self.menuSubmenu_2 = QtWidgets.QMenu(self.menuMenu)
        self.menuSubmenu_2.setObjectName("menuSubmenu_2")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Dock widget 1: a row of buttons/spin box plus combo, text edit
        # and progress bar.
        self.dockWidget1 = QtWidgets.QDockWidget(MainWindow)
        self.dockWidget1.setObjectName("dockWidget1")
        self.dockWidgetContents = QtWidgets.QWidget()
        self.dockWidgetContents.setObjectName("dockWidgetContents")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        self.pushButton = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        # Deliberately disabled button (shows the disabled style).
        self.pushButton_3 = QtWidgets.QPushButton(self.dockWidgetContents)
        self.pushButton_3.setEnabled(False)
        self.pushButton_3.setObjectName("pushButton_3")
        self.horizontalLayout.addWidget(self.pushButton_3)
        self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.dockWidgetContents)
        self.doubleSpinBox.setObjectName("doubleSpinBox")
        self.horizontalLayout.addWidget(self.doubleSpinBox)
        self.toolButton = QtWidgets.QToolButton(self.dockWidgetContents)
        self.toolButton.setObjectName("toolButton")
        self.horizontalLayout.addWidget(self.toolButton)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.comboBox = QtWidgets.QComboBox(self.dockWidgetContents)
        self.comboBox.setObjectName("comboBox")
        self.comboBox.addItem("")
        self.comboBox.addItem("")
        self.verticalLayout.addWidget(self.comboBox)
        self.textEdit = QtWidgets.QTextEdit(self.dockWidgetContents)
        self.textEdit.setObjectName("textEdit")
        self.verticalLayout.addWidget(self.textEdit)
        self.progressBar = QtWidgets.QProgressBar(self.dockWidgetContents)
        self.progressBar.setProperty("value", 24)
        self.progressBar.setObjectName("progressBar")
        self.verticalLayout.addWidget(self.progressBar)
        self.verticalLayout_2.addLayout(self.verticalLayout)
        self.dockWidget1.setWidget(self.dockWidgetContents)
        MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget1)
        # Dock widget 2: a 4x2 table with named header items.
        self.dockWidget2 = QtWidgets.QDockWidget(MainWindow)
        self.dockWidget2.setObjectName("dockWidget2")
        self.dockWidgetContents_2 = QtWidgets.QWidget()
        self.dockWidgetContents_2.setObjectName("dockWidgetContents_2")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.dockWidgetContents_2)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.tableWidget = QtWidgets.QTableWidget(self.dockWidgetContents_2)
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(2)
        self.tableWidget.setRowCount(4)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setVerticalHeaderItem(3, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        self.gridLayout_3.addWidget(self.tableWidget, 0, 0, 1, 1)
        self.dockWidget2.setWidget(self.dockWidgetContents_2)
        MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget2)
        # Toolbar and the three actions shared between menu and toolbar.
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        self.actionAction = QtWidgets.QAction(MainWindow)
        self.actionAction.setObjectName("actionAction")
        self.actionSub_menu = QtWidgets.QAction(MainWindow)
        self.actionSub_menu.setObjectName("actionSub_menu")
        self.actionAction_C = QtWidgets.QAction(MainWindow)
        self.actionAction_C.setObjectName("actionAction_C")
        self.menuSubmenu_2.addAction(self.actionSub_menu)
        self.menuSubmenu_2.addAction(self.actionAction_C)
        self.menuMenu.addAction(self.actionAction)
        self.menuMenu.addAction(self.menuSubmenu_2.menuAction())
        self.menubar.addAction(self.menuMenu.menuAction())
        self.toolBar.addAction(self.actionAction)
        self.toolBar.addAction(self.actionSub_menu)
        # Apply all user-visible strings, select initial tab/tool-box page,
        # then auto-connect slots named on_<object>_<signal>.
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        self.toolBox.setCurrentIndex(1)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Assign every user-visible string through Qt's translation system."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.groupBox.setTitle(_translate("MainWindow", "GroupBox"))
        self.toolBox.setItemText(self.toolBox.indexOf(self.page), _translate("MainWindow", "Page 1"))
        # Sorting is suspended while item texts are assigned so the items
        # keep their creation order, then restored to its previous state.
        __sortingEnabled = self.listWidget.isSortingEnabled()
        self.listWidget.setSortingEnabled(False)
        item = self.listWidget.item(0)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(1)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(2)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(3)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(4)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(5)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(6)
        item.setText(_translate("MainWindow", "New Item"))
        item = self.listWidget.item(7)
        item.setText(_translate("MainWindow", "New Item"))
        self.listWidget.setSortingEnabled(__sortingEnabled)
        self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Page 2"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1"))
        self.groupBox_2.setTitle(_translate("MainWindow", "GroupBox"))
        self.label.setText(_translate("MainWindow", "TextLabel"))
        self.radioButton.setText(_translate("MainWindow", "RadioButton"))
        self.checkBox.setText(_translate("MainWindow", "CheckBox"))
        self.treeWidget.headerItem().setText(0, _translate("MainWindow", "qdz"))
        # Same suspend/restore dance for the tree widget.
        __sortingEnabled = self.treeWidget.isSortingEnabled()
        self.treeWidget.setSortingEnabled(False)
        self.treeWidget.topLevelItem(0).setText(0, _translate("MainWindow", "qzd"))
        self.treeWidget.topLevelItem(1).setText(0, _translate("MainWindow", "effefe"))
        self.treeWidget.setSortingEnabled(__sortingEnabled)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2"))
        self.menuMenu.setTitle(_translate("MainWindow", "Menu"))
        self.menuSubmenu_2.setTitle(_translate("MainWindow", "Submenu 2"))
        self.dockWidget1.setWindowTitle(_translate("MainWindow", "Dock widget 1"))
        self.pushButton_2.setText(_translate("MainWindow", "PushButton"))
        self.pushButton.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_3.setText(_translate("MainWindow", "Disabled"))
        self.toolButton.setText(_translate("MainWindow", "..."))
        self.comboBox.setItemText(0, _translate("MainWindow", "Item 0"))
        self.comboBox.setItemText(1, _translate("MainWindow", "Item 2"))
        self.dockWidget2.setWindowTitle(_translate("MainWindow", "Dock widget 2"))
        item = self.tableWidget.verticalHeaderItem(0)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(1)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(2)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.verticalHeaderItem(3)
        item.setText(_translate("MainWindow", "New Row"))
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "New Column"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "New Column 2"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionAction.setText(_translate("MainWindow", "Action"))
        self.actionSub_menu.setText(_translate("MainWindow", "Action B"))
        self.actionSub_menu.setToolTip(_translate("MainWindow", "submenu"))
        self.actionAction_C.setText(_translate("MainWindow", "Action C"))
| true | true |
f721b6ccaaf918b21885f9b671dce1f756b4c4ca | 2,337 | py | Python | pipeline_utils.py | j-petit/mplus_model_pipeline | 4b837ad3cdcd337a842f9eac1b75f58ef477da7a | [
"MIT"
] | null | null | null | pipeline_utils.py | j-petit/mplus_model_pipeline | 4b837ad3cdcd337a842f9eac1b75f58ef477da7a | [
"MIT"
] | null | null | null | pipeline_utils.py | j-petit/mplus_model_pipeline | 4b837ad3cdcd337a842f9eac1b75f58ef477da7a | [
"MIT"
] | null | null | null | """
File: pipeline_utils.py
Author: Jens Petit
Email: petit.jens@gmail.com
Github: https://github.com/j-petit
Description: Utility functions for filtering models
"""
import re
def createDiffs(model1, model2, filename):
"""Takes two models and creates constraint variables out of their paths.
Parameters
----------
model1 : mplus model object
model2 : mplus model object
filename : string where to write diffs
"""
if len(model1.model) != len(model2.model):
raise Exception("Models not the same")
no_lines = len(model1.model)
not_diffs = 0
for i in range(no_lines):
if model1.labels[i] == model2.labels[i]:
not_diffs = not_diffs + 1
lines = []
start_line = "new(diff0-diff{});".format(no_lines - not_diffs - 1)
counter = 0
for i in range(no_lines):
if model1.labels[i] == model2.labels[i]:
line = "diffX = {1} - {2};".format(i, model1.labels[i],
model2.labels[i])
line = "! " + line
else:
line = "diff{0} = {1} - {2};".format(counter, model1.labels[i],
model2.labels[i])
counter = counter + 1
lines.append(line)
with open(filename, 'a') as f:
f.write("MODEL CONSTRAINT:\n")
f.write(start_line + "\n")
for line in lines:
f.write(line + "\n")
def filterDiffs(threshold, filename, var_name):
    """Collect indices of matched diff lines whose value column is below
    ``threshold``.

    Scans ``filename`` for the "New/Additional Parameters" section (an Mplus
    output section) and, within it, matches lines starting with ``var_name``
    (upper-cased). The 5th whitespace-separated token of each matching line
    (presumably a significance value — confirm against the Mplus output
    format) is compared against ``threshold``.

    Parameters
    ----------
    threshold : float indicating which lines to consider
    filename : string specifying file
    var_name : string to search for in file

    Returns
    -------
    list of int
        Zero-based positions, among the matched lines, whose value is
        below ``threshold``.
    """
    to_match = '^' + var_name.upper()
    match_counter = 0
    same_paths = []
    # Bug fix: 'found' was previously assigned only inside the loop, so a
    # matching line appearing before the section header raised
    # UnboundLocalError. Matches outside the section must simply be ignored.
    found = False
    with open(filename) as fp:
        for line in fp:
            line = line.strip(None)
            if (line == "New/Additional Parameters"):
                found = True
            if (line == ""):
                # A blank line terminates the section.
                found = False
            if (re.match(to_match, line) and found):
                value = float(line.split()[4])
                if value < threshold:
                    same_paths.append(match_counter)
                match_counter = match_counter + 1
    return same_paths
| 23.846939 | 76 | 0.551562 |
import re
def createDiffs(model1, model2, filename):
    """Append an Mplus MODEL CONSTRAINT section with per-path difference
    variables between two models.

    Paths whose labels are identical in both models are written as
    commented-out placeholder lines; every other path gets a numbered
    ``diffN`` constraint variable declared via ``new(...)``.

    Parameters
    ----------
    model1 : mplus model object
    model2 : mplus model object
    filename : string, file the constraint section is appended to
    """
    if len(model1.model) != len(model2.model):
        raise Exception("Models not the same")
    no_lines = len(model1.model)
    # Count paths whose labels match in both models (no diff needed).
    not_diffs = 0
    for i in range(no_lines):
        if model1.labels[i] == model2.labels[i]:
            not_diffs = not_diffs + 1
    lines = []
    # Declare diff0..diffN where N is the number of differing paths - 1.
    start_line = "new(diff0-diff{});".format(no_lines - not_diffs - 1)
    counter = 0
    for i in range(no_lines):
        if model1.labels[i] == model2.labels[i]:
            # Shared path: emit a commented-out placeholder line.
            line = "diffX = {1} - {2};".format(i, model1.labels[i],
                                               model2.labels[i])
            line = "! " + line
        else:
            line = "diff{0} = {1} - {2};".format(counter, model1.labels[i],
                                                 model2.labels[i])
            counter = counter + 1
        lines.append(line)
    # Append (mode 'a') so the section is added after any existing content.
    with open(filename, 'a') as f:
        f.write("MODEL CONSTRAINT:\n")
        f.write(start_line + "\n")
        for line in lines:
            f.write(line + "\n")
def filterDiffs(threshold, filename, var_name):
    """Collect indices of matched diff lines whose value column is below
    ``threshold``.

    Scans ``filename`` for the "New/Additional Parameters" section and,
    within it, matches lines starting with ``var_name`` (upper-cased). The
    5th whitespace-separated token of each matching line (presumably a
    significance value — confirm against the Mplus output format) is
    compared against ``threshold``.

    Parameters
    ----------
    threshold : float indicating which lines to consider
    filename : string specifying file
    var_name : string to search for in file

    Returns
    -------
    list of int
        Zero-based positions, among the matched lines, whose value is
        below ``threshold``.
    """
    to_match = '^' + var_name.upper()
    match_counter = 0
    same_paths = []
    # Bug fix: 'found' was previously assigned only inside the loop, so a
    # matching line appearing before the section header raised
    # UnboundLocalError. Matches outside the section must simply be ignored.
    found = False
    with open(filename) as fp:
        for line in fp:
            line = line.strip(None)
            if (line == "New/Additional Parameters"):
                found = True
            if (line == ""):
                # A blank line terminates the section.
                found = False
            if (re.match(to_match, line) and found):
                value = float(line.split()[4])
                if value < threshold:
                    same_paths.append(match_counter)
                match_counter = match_counter + 1
    return same_paths
| true | true |
f721b7a28e5ba3d40876a0ab5769b27adb80ab95 | 19,049 | py | Python | python/cudf/cudf/tests/test_duplicates.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | 1 | 2021-12-17T19:28:00.000Z | 2021-12-17T19:28:00.000Z | python/cudf/cudf/tests/test_duplicates.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | 1 | 2021-03-10T20:28:23.000Z | 2021-03-25T15:58:47.000Z | python/cudf/cudf/tests/test_duplicates.py | sperlingxx/cudf | c681211df6253e1ceee9203658108980e7e93e3c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020-2021, NVIDIA CORPORATION.
import itertools as it
import random
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series, date_range
import cudf
from cudf import concat
from cudf.tests.utils import assert_eq, assert_exceptions_equal
# TODO: PANDAS 1.0 support
# Revisit drop_duplicates() tests to update parameters like ignore_index.
def assert_df(g, p):
    """Compare two frames with assert_eq() after sorting both by index,
    so differing row order does not fail the comparison."""
    return assert_eq(g.sort_index(), p.sort_index())
def assert_df2(g, p):
    """Column-wise equality check between a cudf frame ``g`` and a pandas
    frame ``p``: index dtype/values, column names, then per-column dtype
    and values."""
    assert g.index.dtype == p.index.dtype
    np.testing.assert_equal(g.index.to_array(), p.index)
    assert tuple(g.columns) == tuple(p.columns)
    for name in g.columns:
        gcol, pcol = g[name], p[name]
        assert gcol.dtype == pcol.dtype
        np.testing.assert_equal(gcol.to_array(), pcol)
# most tests are similar to pandas drop_duplicates
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
def test_duplicated_with_misspelled_column_name(subset):
    """drop_duplicates must raise the same exception type as pandas when
    the subset names columns that do not exist."""
    df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
    gdf = cudf.DataFrame.from_pandas(df)
    assert_exceptions_equal(
        lfunc=df.drop_duplicates,
        rfunc=gdf.drop_duplicates,
        lfunc_args_and_kwargs=([subset],),
        rfunc_args_and_kwargs=([subset],),
        # Error wording differs between the libraries; only the exception
        # type must match.
        compare_error_message=False,
    )
@pytest.mark.parametrize("keep", ["first", "last", False])
@pytest.mark.parametrize(
    "data",
    [
        [1, 2, 4, 5, 6, 6],
        [],
        ["a", "b", "s", "sd", "a", "b"],
        Series(["aaa"] * 10, dtype="object"),
    ],
)
def test_drop_duplicates_series(data, keep):
    """Series.drop_duplicates parity with pandas for each keep mode,
    both as a returning call and in place."""
    pds = cudf.utils.utils._create_pandas_series(data)
    gds = cudf.from_pandas(pds)
    assert_df(pds.drop_duplicates(keep=keep), gds.drop_duplicates(keep=keep))
    pds.drop_duplicates(keep=keep, inplace=True)
    gds.drop_duplicates(keep=keep, inplace=True)
    assert_df(pds, gds)
def test_drop_duplicates():
    """DataFrame.drop_duplicates parity with pandas: single/multi column
    subsets, all keep modes, in-place mode, int8 columns, and several
    numeric corner cases (negatives, near-int64-max floats, wide frames)."""
    pdf = DataFrame(
        {
            "AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    # single column
    result = gdf.copy()
    result.drop_duplicates("AAA", inplace=True)
    expected = pdf.copy()
    expected.drop_duplicates("AAA", inplace=True)
    assert_df(result, expected)
    result = gdf.drop_duplicates("AAA", keep="last")
    expected = pdf.drop_duplicates("AAA", keep="last")
    assert_df(result, expected)
    result = gdf.drop_duplicates("AAA", keep=False)
    expected = pdf.drop_duplicates("AAA", keep=False)
    assert_df(result, expected)
    # Every AAA value repeats, so keep=False drops everything.
    assert len(result) == 0
    # multi column
    expected = pdf.loc[[0, 1, 2, 3]]
    result = gdf.drop_duplicates(np.array(["AAA", "B"]))
    assert_df(result, expected)
    result = pdf.drop_duplicates(np.array(["AAA", "B"]))
    assert_df(result, expected)
    result = gdf.drop_duplicates(("AAA", "B"), keep="last")
    expected = pdf.drop_duplicates(("AAA", "B"), keep="last")
    assert_df(result, expected)
    result = gdf.drop_duplicates(("AAA", "B"), keep=False)
    expected = pdf.drop_duplicates(("AAA", "B"), keep=False)
    assert_df(result, expected)
    # consider everything
    df2 = gdf.loc[:, ["AAA", "B", "C"]]
    result = df2.drop_duplicates()
    # in this case only
    expected = df2.drop_duplicates(["AAA", "B"])
    assert_df(result, expected)
    result = df2.drop_duplicates(keep="last")
    expected = df2.drop_duplicates(["AAA", "B"], keep="last")
    assert_df(result, expected)
    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(["AAA", "B"], keep=False)
    assert_df(result, expected)
    # integers
    result = gdf.drop_duplicates("C")
    expected = pdf.drop_duplicates("C")
    assert_df(result, expected)
    result = gdf.drop_duplicates("C", keep="last")
    expected = pdf.drop_duplicates("C", keep="last")
    assert_df(result, expected)
    gdf["E"] = gdf["C"].astype("int8")
    result = gdf.drop_duplicates("E")
    pdf["E"] = pdf["C"].astype("int8")
    expected = pdf.drop_duplicates("E")
    assert_df(result, expected)
    result = gdf.drop_duplicates("E", keep="last")
    expected = pdf.drop_duplicates("E", keep="last")
    assert_df(result, expected)
    pdf = DataFrame({"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]})
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    pdf = DataFrame([[1, 0], [0, 2]])
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    pdf = DataFrame([[-2, 0], [0, -4]])
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    # Large float values near int64 max, exercising float comparison.
    x = np.iinfo(np.int64).max / 3 * 2
    pdf = DataFrame([[-x, x], [0, x + 4]])
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    pdf = DataFrame([[-x, x], [x, x + 4]])
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    # Wide frame (9 columns) plus one extra nearly-duplicate row.
    pdf = DataFrame([i] * 9 for i in range(16))
    pdf = pdf.append([[1] + [0] * 8], ignore_index=True)
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
@pytest.mark.skip(reason="cudf does not support duplicate column names yet")
def test_drop_duplicates_with_duplicate_column_names():
    """drop_duplicates on a frame with two columns both named 'a'
    (currently skipped — unsupported in cudf)."""
    df = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"])
    df = cudf.DataFrame.from_pandas(df)
    result0 = df.drop_duplicates()
    assert_df(result0, df)
    result1 = df.drop_duplicates("a")
    expected1 = df[:2]
    assert_df(result1, expected1)
def test_drop_duplicates_for_take_all():
    """All keep modes on data mixing unique keys ('baz', 'qux') with
    repeating ones, so keep=False retains some rows."""
    pdf = DataFrame(
        {
            "AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    # single column
    result = gdf.drop_duplicates("AAA")
    expected = pdf.drop_duplicates("AAA")
    assert_df(result, expected)
    result = gdf.drop_duplicates("AAA", keep="last")
    expected = pdf.drop_duplicates("AAA", keep="last")
    assert_df(result, expected)
    result = gdf.drop_duplicates("AAA", keep=False)
    expected = pdf.drop_duplicates("AAA", keep=False)
    assert_df(result, expected)
    # multiple columns
    result = gdf.drop_duplicates(["AAA", "B"])
    expected = pdf.drop_duplicates(["AAA", "B"])
    assert_df(result, expected)
    result = gdf.drop_duplicates(["AAA", "B"], keep="last")
    expected = pdf.drop_duplicates(["AAA", "B"], keep="last")
    assert_df(result, expected)
    result = gdf.drop_duplicates(["AAA", "B"], keep=False)
    expected = pdf.drop_duplicates(["AAA", "B"], keep=False)
    assert_df(result, expected)
def test_drop_duplicates_tuple():
    """drop_duplicates with a tuple-valued column label ('AA', 'AB'),
    alone and combined with a plain label."""
    pdf = DataFrame(
        {
            ("AA", "AB"): [
                "foo",
                "bar",
                "foo",
                "bar",
                "foo",
                "bar",
                "bar",
                "foo",
            ],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    # single column
    result = gdf.drop_duplicates(("AA", "AB"))
    expected = pdf.drop_duplicates(("AA", "AB"))
    assert_df(result, expected)
    result = gdf.drop_duplicates(("AA", "AB"), keep="last")
    expected = pdf.drop_duplicates(("AA", "AB"), keep="last")
    assert_df(result, expected)
    result = gdf.drop_duplicates(("AA", "AB"), keep=False)
    expected = pdf.drop_duplicates(("AA", "AB"), keep=False)  # empty df
    assert len(result) == 0
    assert_df(result, expected)
    # multi column
    expected = pdf.drop_duplicates((("AA", "AB"), "B"))
    result = gdf.drop_duplicates((("AA", "AB"), "B"))
    assert_df(result, expected)
@pytest.mark.parametrize(
    "df",
    [
        DataFrame(),
        DataFrame(columns=[]),
        DataFrame(columns=["A", "B", "C"]),
        DataFrame(index=[]),
        DataFrame(index=["A", "B", "C"]),
    ],
)
def test_drop_duplicates_empty(df):
    """Empty frames pass through drop_duplicates unchanged, for both the
    returning and the in-place form."""
    gdf = cudf.DataFrame.from_pandas(df)
    assert_df(gdf.drop_duplicates(), gdf)
    inplace_copy = gdf.copy()
    inplace_copy.drop_duplicates(inplace=True)
    assert_df(inplace_copy, gdf)
@pytest.mark.parametrize("num_columns", [3, 4, 5])
def test_dataframe_drop_duplicates_numeric_method(num_columns):
    """Seeded, shuffled permutation frames with 0-4 injected duplicate rows;
    checks full-frame and subset-column drop_duplicates parity with pandas."""
    comb = list(it.permutations(range(num_columns), num_columns))
    shuf = list(comb)
    # Seeded RNGs keep the shuffles reproducible across runs.
    random.Random(num_columns).shuffle(shuf)
    def get_pdf(n_dup):
        # create dataframe with n_dup duplicate rows
        rows = comb + shuf[:n_dup]
        random.Random(n_dup).shuffle(rows)
        return DataFrame(rows)
    for i in range(5):
        pdf = get_pdf(i)
        gdf = cudf.DataFrame.from_pandas(pdf)
        assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    # subset columns, single columns
    assert_df(
        gdf.drop_duplicates(pdf.columns[:-1]),
        pdf.drop_duplicates(pdf.columns[:-1]),
    )
    assert_df(
        gdf.drop_duplicates(pdf.columns[-1]),
        pdf.drop_duplicates(pdf.columns[-1]),
    )
    assert_df(
        gdf.drop_duplicates(pdf.columns[0]),
        pdf.drop_duplicates(pdf.columns[0]),
    )
    # subset columns shuffled
    cols = list(pdf.columns)
    random.Random(3).shuffle(cols)
    assert_df(gdf.drop_duplicates(cols), pdf.drop_duplicates(cols))
    random.Random(3).shuffle(cols)
    assert_df(gdf.drop_duplicates(cols[:-1]), pdf.drop_duplicates(cols[:-1]))
    random.Random(3).shuffle(cols)
    assert_df(gdf.drop_duplicates(cols[-1]), pdf.drop_duplicates(cols[-1]))
    assert_df(
        gdf.drop_duplicates(cols, keep="last"),
        pdf.drop_duplicates(cols, keep="last"),
    )
def test_dataframe_drop_duplicates_method():
    """drop_duplicates on a mixed int/string frame: per-column subsets,
    keep='last', inplace returning None, and the error raised for
    nonexistent subset columns."""
    pdf = DataFrame(
        [(1, 2, "a"), (2, 3, "b"), (3, 4, "c"), (2, 3, "d"), (3, 5, "c")],
        columns=["n1", "n2", "s1"],
    )
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
    assert_eq(
        gdf.drop_duplicates("n1")["n1"].reset_index(drop=True),
        pdf.drop_duplicates("n1")["n1"].reset_index(drop=True),
    )
    assert_eq(
        gdf.drop_duplicates("n2")["n2"].reset_index(drop=True),
        pdf.drop_duplicates("n2")["n2"].reset_index(drop=True),
    )
    assert_eq(
        gdf.drop_duplicates("s1")["s1"].reset_index(drop=True),
        pdf.drop_duplicates("s1")["s1"].reset_index(drop=True),
    )
    assert_eq(
        gdf.drop_duplicates("s1", keep="last")["s1"]
        .sort_index()
        .reset_index(drop=True),
        pdf.drop_duplicates("s1", keep="last")["s1"].reset_index(drop=True),
    )
    # inplace=True mutates gdf and returns None (pandas convention).
    assert gdf.drop_duplicates("s1", inplace=True) is None
    gdf = cudf.DataFrame.from_pandas(pdf)
    assert_df(gdf.drop_duplicates("n1"), pdf.drop_duplicates("n1"))
    assert_df(gdf.drop_duplicates("n2"), pdf.drop_duplicates("n2"))
    assert_df(gdf.drop_duplicates("s1"), pdf.drop_duplicates("s1"))
    assert_df(
        gdf.drop_duplicates(["n1", "n2"]), pdf.drop_duplicates(["n1", "n2"])
    )
    assert_df(
        gdf.drop_duplicates(["n1", "s1"]), pdf.drop_duplicates(["n1", "s1"])
    )
    # Test drop error
    assert_exceptions_equal(
        lfunc=pdf.drop_duplicates,
        rfunc=gdf.drop_duplicates,
        lfunc_args_and_kwargs=(["n3"],),
        rfunc_args_and_kwargs=(["n3"],),
        expected_error_message="columns {'n3'} do not exist",
    )
    assert_exceptions_equal(
        lfunc=pdf.drop_duplicates,
        rfunc=gdf.drop_duplicates,
        lfunc_args_and_kwargs=([["n1", "n4", "n3"]],),
        rfunc_args_and_kwargs=([["n1", "n4", "n3"]],),
        # Regex because set ordering of the missing names is unspecified.
        expected_error_message="columns {'n[34]', 'n[34]'} do not exist",
    )
def test_datetime_drop_duplicates():
    """drop_duplicates on datetime data with the default index, a reset
    index, and the date column as index."""
    base = cudf.DataFrame()
    base["date"] = date_range("11/20/2018", periods=6, freq="D")
    base["value"] = np.random.sample(len(base))
    dup = concat([base, base[:4]])
    # The appended rows duplicate the first four, so dropping duplicates
    # recovers everything except that appended tail.
    assert_df(dup[:-4], dup.drop_duplicates())
    with_default_index = dup.reset_index()
    assert_df(with_default_index[:-4], with_default_index.drop_duplicates())
    with_date_index = dup.set_index("date")
    assert_df(with_date_index[:-4], with_date_index.drop_duplicates())
def test_drop_duplicates_NA():
    """drop_duplicates treats None and NaN values as equal keys, for
    single- and multi-column subsets and every keep mode."""
    # none
    df = DataFrame(
        {
            "A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
            "D": range(8),
        }
    )
    df = cudf.DataFrame.from_pandas(df)
    # single column
    result = df.drop_duplicates("A")
    expected = df.to_pandas().loc[[0, 2, 3]]
    assert_df(result, expected)
    result = df.drop_duplicates("A", keep="last")
    expected = df.to_pandas().loc[[1, 6, 7]]
    assert_df(result, expected)
    result = df.drop_duplicates("A", keep=False)
    expected = df.to_pandas().loc[[]]  # empty df
    assert_df(result, expected)
    assert len(result) == 0
    # multi column
    result = df.drop_duplicates(["A", "B"])
    expected = df.to_pandas().loc[[0, 2, 3, 6]]
    assert_df(result, expected)
    result = df.drop_duplicates(["A", "B"], keep="last")
    expected = df.to_pandas().loc[[1, 5, 6, 7]]
    assert_df(result, expected)
    result = df.drop_duplicates(["A", "B"], keep=False)
    expected = df.to_pandas().loc[[6]]
    assert_df(result, expected)
    # nan
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
            "D": range(8),
        }
    )
    df = cudf.DataFrame.from_pandas(df)
    # single column
    result = df.drop_duplicates("C")
    expected = df[:2]
    assert_df(result, expected)
    result = df.drop_duplicates("C", keep="last")
    expected = df.to_pandas().loc[[3, 7]]
    assert_df(result, expected)
    result = df.drop_duplicates("C", keep=False)
    expected = df.to_pandas().loc[[]]  # empty df
    assert_df(result, expected)
    assert len(result) == 0
    # multi column
    result = df.drop_duplicates(["C", "B"])
    expected = df.to_pandas().loc[[0, 1, 2, 4]]
    assert_df(result, expected)
    result = df.drop_duplicates(["C", "B"], keep="last")
    expected = df.to_pandas().loc[[1, 3, 6, 7]]
    assert_df(result, expected)
    result = df.drop_duplicates(["C", "B"], keep=False)
    expected = df.to_pandas().loc[[1]]
    assert_df(result, expected)
def test_drop_duplicates_NA_for_take_all():
    """None/NaN handling combined with keep=False ('take all') and the
    ignore_index option on the cudf side."""
    # TODO: PANDAS 1.0 support - add ignore_index for
    # pandas drop_duplicates calls in this function.
    # none
    pdf = DataFrame(
        {
            "A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"],
            "C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0],
        }
    )
    df = cudf.DataFrame.from_pandas(pdf)
    # single column
    result = df.drop_duplicates("A")
    expected = pdf.iloc[[0, 2, 3, 5, 7]]
    assert_df(result, expected)
    # ignore_index=True should equal the non-ignoring result reindexed.
    assert_df(
        df.drop_duplicates("A", ignore_index=True),
        result.reset_index(drop=True),
    )
    result = df.drop_duplicates("A", keep="last")
    expected = pdf.iloc[[1, 4, 5, 6, 7]]
    assert_df(result, expected)
    assert_df(
        df.drop_duplicates("A", ignore_index=True, keep="last"),
        result.reset_index(drop=True),
    )
    result = df.drop_duplicates("A", keep=False)
    expected = pdf.iloc[[5, 7]]
    assert_df(result, expected)
    assert_df(
        df.drop_duplicates("A", ignore_index=True, keep=False),
        result.reset_index(drop=True),
    )
    # nan
    # single column
    result = df.drop_duplicates("C")
    expected = pdf.iloc[[0, 1, 5, 6]]
    assert_df(result, expected)
    result = df.drop_duplicates("C", keep="last")
    expected = pdf.iloc[[3, 5, 6, 7]]
    assert_df(result, expected)
    result = df.drop_duplicates("C", keep=False)
    expected = pdf.iloc[[5, 6]]
    assert_df(result, expected)
def test_drop_duplicates_inplace():
    """inplace=True mutates the frame identically to the returning form,
    for single-column, multi-column and whole-frame deduplication."""
    orig = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": [1, 1, 2, 2, 2, 2, 1, 2],
            "D": range(8),
        }
    )
    orig = cudf.DataFrame.from_pandas(orig)
    # single column
    df = orig.copy()
    df.drop_duplicates("A", inplace=True)
    expected = orig[:2]
    result = df
    assert_df(result, expected)
    df = orig.copy()
    df.drop_duplicates("A", keep="last", inplace=True)
    expected = orig.loc[[6, 7]]
    result = df
    assert_df(result, expected)
    df = orig.copy()
    df.drop_duplicates("A", keep=False, inplace=True)
    expected = orig.loc[[]]
    result = df
    assert_df(result, expected)
    assert len(df) == 0
    # multi column
    df = orig.copy()
    df.drop_duplicates(["A", "B"], inplace=True)
    expected = orig.loc[[0, 1, 2, 3]]
    result = df
    assert_df(result, expected)
    df = orig.copy()
    df.drop_duplicates(["A", "B"], keep="last", inplace=True)
    expected = orig.loc[[0, 5, 6, 7]]
    result = df
    assert_df(result, expected)
    df = orig.copy()
    df.drop_duplicates(["A", "B"], keep=False, inplace=True)
    expected = orig.loc[[0]]
    result = df
    assert_df(result, expected)
    # consider everything
    orig2 = orig.loc[:, ["A", "B", "C"]].copy()
    df2 = orig2.copy()
    df2.drop_duplicates(inplace=True)
    # in this case only
    expected = orig2.drop_duplicates(["A", "B"])
    result = df2
    assert_df(result, expected)
    df2 = orig2.copy()
    df2.drop_duplicates(keep="last", inplace=True)
    expected = orig2.drop_duplicates(["A", "B"], keep="last")
    result = df2
    assert_df(result, expected)
    df2 = orig2.copy()
    df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(["A", "B"], keep=False)
    result = df2
    assert_df(result, expected)
def test_drop_duplicates_multi_index():
    """drop_duplicates on a MultiIndex-ed frame must match pandas.

    The data is random but unseeded; that is fine because cudf is compared
    against pandas running on the very same values.
    """
    arrays = [
        ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
        ["one", "two", "one", "two", "one", "two", "one", "two"],
    ]
    idx = MultiIndex.from_tuples(list(zip(*arrays)), names=["a", "b"])
    pdf = DataFrame(np.random.randint(0, 2, (8, 4)), index=idx)
    gdf = cudf.DataFrame.from_pandas(pdf)
    expected = pdf.drop_duplicates()
    result = gdf.drop_duplicates()
    assert_df(result.to_pandas(), expected)
    # FIXME: to_pandas needed until sort_index support for MultiIndex
    for col in gdf.columns:
        assert_df(
            gdf[col].drop_duplicates().to_pandas(), pdf[col].drop_duplicates(),
        )
| 30.724194 | 79 | 0.596462 |
import itertools as it
import random
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series, date_range
import cudf
from cudf import concat
from cudf.tests.utils import assert_eq, assert_exceptions_equal
def assert_df(g, p):
    """Assert two frames are equal after aligning both by sorted index."""
    return assert_eq(g.sort_index(), p.sort_index())
def assert_df2(g, p):
    """Strict comparison of a cudf frame *g* against a pandas frame *p*.

    Unlike :func:`assert_df` this does not sort: it checks index dtype and
    values, exact column order, and per-column dtype and values.
    """
    assert g.index.dtype == p.index.dtype
    np.testing.assert_equal(g.index.to_array(), p.index)
    assert tuple(g.columns) == tuple(p.columns)
    for k in g.columns:
        assert g[k].dtype == p[k].dtype
        np.testing.assert_equal(g[k].to_array(), p[k])
@pytest.mark.parametrize("subset", ["a", ["a"], ["a", "B"]])
def test_duplicated_with_misspelled_column_name(subset):
df = DataFrame({"A": [0, 0, 1], "B": [0, 0, 1], "C": [0, 0, 1]})
gdf = cudf.DataFrame.from_pandas(df)
assert_exceptions_equal(
lfunc=df.drop_duplicates,
rfunc=gdf.drop_duplicates,
lfunc_args_and_kwargs=([subset],),
rfunc_args_and_kwargs=([subset],),
compare_error_message=False,
)
@pytest.mark.parametrize("keep", ["first", "last", False])
@pytest.mark.parametrize(
"data",
[
[1, 2, 4, 5, 6, 6],
[],
["a", "b", "s", "sd", "a", "b"],
Series(["aaa"] * 10, dtype="object"),
],
)
def test_drop_duplicates_series(data, keep):
pds = cudf.utils.utils._create_pandas_series(data)
gds = cudf.from_pandas(pds)
assert_df(pds.drop_duplicates(keep=keep), gds.drop_duplicates(keep=keep))
pds.drop_duplicates(keep=keep, inplace=True)
gds.drop_duplicates(keep=keep, inplace=True)
assert_df(pds, gds)
def test_drop_duplicates():
pdf = DataFrame(
{
"AAA": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1, 1, 2, 2, 2, 2, 1, 2],
"D": range(8),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
result = gdf.copy()
result.drop_duplicates("AAA", inplace=True)
expected = pdf.copy()
expected.drop_duplicates("AAA", inplace=True)
assert_df(result, expected)
result = gdf.drop_duplicates("AAA", keep="last")
expected = pdf.drop_duplicates("AAA", keep="last")
assert_df(result, expected)
result = gdf.drop_duplicates("AAA", keep=False)
expected = pdf.drop_duplicates("AAA", keep=False)
assert_df(result, expected)
assert len(result) == 0
expected = pdf.loc[[0, 1, 2, 3]]
result = gdf.drop_duplicates(np.array(["AAA", "B"]))
assert_df(result, expected)
result = pdf.drop_duplicates(np.array(["AAA", "B"]))
assert_df(result, expected)
result = gdf.drop_duplicates(("AAA", "B"), keep="last")
expected = pdf.drop_duplicates(("AAA", "B"), keep="last")
assert_df(result, expected)
result = gdf.drop_duplicates(("AAA", "B"), keep=False)
expected = pdf.drop_duplicates(("AAA", "B"), keep=False)
assert_df(result, expected)
df2 = gdf.loc[:, ["AAA", "B", "C"]]
result = df2.drop_duplicates()
expected = df2.drop_duplicates(["AAA", "B"])
assert_df(result, expected)
result = df2.drop_duplicates(keep="last")
expected = df2.drop_duplicates(["AAA", "B"], keep="last")
assert_df(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(["AAA", "B"], keep=False)
assert_df(result, expected)
result = gdf.drop_duplicates("C")
expected = pdf.drop_duplicates("C")
assert_df(result, expected)
result = gdf.drop_duplicates("C", keep="last")
expected = pdf.drop_duplicates("C", keep="last")
assert_df(result, expected)
gdf["E"] = gdf["C"].astype("int8")
result = gdf.drop_duplicates("E")
pdf["E"] = pdf["C"].astype("int8")
expected = pdf.drop_duplicates("E")
assert_df(result, expected)
result = gdf.drop_duplicates("E", keep="last")
expected = pdf.drop_duplicates("E", keep="last")
assert_df(result, expected)
pdf = DataFrame({"x": [7, 6, 3, 3, 4, 8, 0], "y": [0, 6, 5, 5, 9, 1, 2]})
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
pdf = DataFrame([[1, 0], [0, 2]])
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
pdf = DataFrame([[-2, 0], [0, -4]])
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
x = np.iinfo(np.int64).max / 3 * 2
pdf = DataFrame([[-x, x], [0, x + 4]])
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
pdf = DataFrame([[-x, x], [x, x + 4]])
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
pdf = DataFrame([i] * 9 for i in range(16))
pdf = pdf.append([[1] + [0] * 8], ignore_index=True)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
@pytest.mark.skip(reason="cudf does not support duplicate column names yet")
def test_drop_duplicates_with_duplicate_column_names():
df = DataFrame([[1, 2, 5], [3, 4, 6], [3, 4, 7]], columns=["a", "a", "b"])
df = cudf.DataFrame.from_pandas(df)
result0 = df.drop_duplicates()
assert_df(result0, df)
result1 = df.drop_duplicates("a")
expected1 = df[:2]
assert_df(result1, expected1)
def test_drop_duplicates_for_take_all():
pdf = DataFrame(
{
"AAA": ["foo", "bar", "baz", "bar", "foo", "bar", "qux", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1, 1, 2, 2, 2, 2, 1, 2],
"D": range(8),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
result = gdf.drop_duplicates("AAA")
expected = pdf.drop_duplicates("AAA")
assert_df(result, expected)
result = gdf.drop_duplicates("AAA", keep="last")
expected = pdf.drop_duplicates("AAA", keep="last")
assert_df(result, expected)
result = gdf.drop_duplicates("AAA", keep=False)
expected = pdf.drop_duplicates("AAA", keep=False)
assert_df(result, expected)
result = gdf.drop_duplicates(["AAA", "B"])
expected = pdf.drop_duplicates(["AAA", "B"])
assert_df(result, expected)
result = gdf.drop_duplicates(["AAA", "B"], keep="last")
expected = pdf.drop_duplicates(["AAA", "B"], keep="last")
assert_df(result, expected)
result = gdf.drop_duplicates(["AAA", "B"], keep=False)
expected = pdf.drop_duplicates(["AAA", "B"], keep=False)
assert_df(result, expected)
def test_drop_duplicates_tuple():
pdf = DataFrame(
{
("AA", "AB"): [
"foo",
"bar",
"foo",
"bar",
"foo",
"bar",
"bar",
"foo",
],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1, 1, 2, 2, 2, 2, 1, 2],
"D": range(8),
}
)
gdf = cudf.DataFrame.from_pandas(pdf)
result = gdf.drop_duplicates(("AA", "AB"))
expected = pdf.drop_duplicates(("AA", "AB"))
assert_df(result, expected)
result = gdf.drop_duplicates(("AA", "AB"), keep="last")
expected = pdf.drop_duplicates(("AA", "AB"), keep="last")
assert_df(result, expected)
result = gdf.drop_duplicates(("AA", "AB"), keep=False)
expected = pdf.drop_duplicates(("AA", "AB"), keep=False)
assert len(result) == 0
assert_df(result, expected)
expected = pdf.drop_duplicates((("AA", "AB"), "B"))
result = gdf.drop_duplicates((("AA", "AB"), "B"))
assert_df(result, expected)
@pytest.mark.parametrize(
    "df",
    [
        DataFrame(),
        DataFrame(columns=[]),
        DataFrame(columns=["A", "B", "C"]),
        DataFrame(index=[]),
        DataFrame(index=["A", "B", "C"]),
    ],
)
def test_drop_duplicates_empty(df):
    """drop_duplicates on empty frames (no rows and/or no columns) must be a
    no-op, both out-of-place and with inplace=True."""
    df = cudf.DataFrame.from_pandas(df)
    result = df.drop_duplicates()
    assert_df(result, df)
    result = df.copy()
    result.drop_duplicates(inplace=True)
    assert_df(result, df)
@pytest.mark.parametrize("num_columns", [3, 4, 5])
def test_dataframe_drop_duplicates_numeric_method(num_columns):
comb = list(it.permutations(range(num_columns), num_columns))
shuf = list(comb)
random.Random(num_columns).shuffle(shuf)
def get_pdf(n_dup):
rows = comb + shuf[:n_dup]
random.Random(n_dup).shuffle(rows)
return DataFrame(rows)
for i in range(5):
pdf = get_pdf(i)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
assert_df(
gdf.drop_duplicates(pdf.columns[:-1]),
pdf.drop_duplicates(pdf.columns[:-1]),
)
assert_df(
gdf.drop_duplicates(pdf.columns[-1]),
pdf.drop_duplicates(pdf.columns[-1]),
)
assert_df(
gdf.drop_duplicates(pdf.columns[0]),
pdf.drop_duplicates(pdf.columns[0]),
)
cols = list(pdf.columns)
random.Random(3).shuffle(cols)
assert_df(gdf.drop_duplicates(cols), pdf.drop_duplicates(cols))
random.Random(3).shuffle(cols)
assert_df(gdf.drop_duplicates(cols[:-1]), pdf.drop_duplicates(cols[:-1]))
random.Random(3).shuffle(cols)
assert_df(gdf.drop_duplicates(cols[-1]), pdf.drop_duplicates(cols[-1]))
assert_df(
gdf.drop_duplicates(cols, keep="last"),
pdf.drop_duplicates(cols, keep="last"),
)
def test_dataframe_drop_duplicates_method():
pdf = DataFrame(
[(1, 2, "a"), (2, 3, "b"), (3, 4, "c"), (2, 3, "d"), (3, 5, "c")],
columns=["n1", "n2", "s1"],
)
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates(), pdf.drop_duplicates())
assert_eq(
gdf.drop_duplicates("n1")["n1"].reset_index(drop=True),
pdf.drop_duplicates("n1")["n1"].reset_index(drop=True),
)
assert_eq(
gdf.drop_duplicates("n2")["n2"].reset_index(drop=True),
pdf.drop_duplicates("n2")["n2"].reset_index(drop=True),
)
assert_eq(
gdf.drop_duplicates("s1")["s1"].reset_index(drop=True),
pdf.drop_duplicates("s1")["s1"].reset_index(drop=True),
)
assert_eq(
gdf.drop_duplicates("s1", keep="last")["s1"]
.sort_index()
.reset_index(drop=True),
pdf.drop_duplicates("s1", keep="last")["s1"].reset_index(drop=True),
)
assert gdf.drop_duplicates("s1", inplace=True) is None
gdf = cudf.DataFrame.from_pandas(pdf)
assert_df(gdf.drop_duplicates("n1"), pdf.drop_duplicates("n1"))
assert_df(gdf.drop_duplicates("n2"), pdf.drop_duplicates("n2"))
assert_df(gdf.drop_duplicates("s1"), pdf.drop_duplicates("s1"))
assert_df(
gdf.drop_duplicates(["n1", "n2"]), pdf.drop_duplicates(["n1", "n2"])
)
assert_df(
gdf.drop_duplicates(["n1", "s1"]), pdf.drop_duplicates(["n1", "s1"])
)
assert_exceptions_equal(
lfunc=pdf.drop_duplicates,
rfunc=gdf.drop_duplicates,
lfunc_args_and_kwargs=(["n3"],),
rfunc_args_and_kwargs=(["n3"],),
expected_error_message="columns {'n3'} do not exist",
)
assert_exceptions_equal(
lfunc=pdf.drop_duplicates,
rfunc=gdf.drop_duplicates,
lfunc_args_and_kwargs=([["n1", "n4", "n3"]],),
rfunc_args_and_kwargs=([["n1", "n4", "n3"]],),
expected_error_message="columns {'n[34]', 'n[34]'} do not exist",
)
def test_datetime_drop_duplicates():
    """Dedup with a datetime column, a RangeIndex, and a DatetimeIndex.

    The first 4 rows are re-appended, so the duplicates are exactly the last
    4 rows and ``df[:-4]`` is the expected deduplicated result.
    """
    date_df = cudf.DataFrame()
    date_df["date"] = date_range("11/20/2018", periods=6, freq="D")
    date_df["value"] = np.random.sample(len(date_df))
    df = concat([date_df, date_df[:4]])
    assert_df(df[:-4], df.drop_duplicates())
    df2 = df.reset_index()
    assert_df(df2[:-4], df2.drop_duplicates())
    df3 = df.set_index("date")
    assert_df(df3[:-4], df3.drop_duplicates())
def test_drop_duplicates_NA():
df = DataFrame(
{
"A": [None, None, "foo", "bar", "foo", "bar", "bar", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
"D": range(8),
}
)
df = cudf.DataFrame.from_pandas(df)
result = df.drop_duplicates("A")
expected = df.to_pandas().loc[[0, 2, 3]]
assert_df(result, expected)
result = df.drop_duplicates("A", keep="last")
expected = df.to_pandas().loc[[1, 6, 7]]
assert_df(result, expected)
result = df.drop_duplicates("A", keep=False)
expected = df.to_pandas().loc[[]]
assert_df(result, expected)
assert len(result) == 0
result = df.drop_duplicates(["A", "B"])
expected = df.to_pandas().loc[[0, 2, 3, 6]]
assert_df(result, expected)
result = df.drop_duplicates(["A", "B"], keep="last")
expected = df.to_pandas().loc[[1, 5, 6, 7]]
assert_df(result, expected)
result = df.drop_duplicates(["A", "B"], keep=False)
expected = df.to_pandas().loc[[6]]
assert_df(result, expected)
df = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 1.0, 1, 1.0],
"D": range(8),
}
)
df = cudf.DataFrame.from_pandas(df)
result = df.drop_duplicates("C")
expected = df[:2]
assert_df(result, expected)
result = df.drop_duplicates("C", keep="last")
expected = df.to_pandas().loc[[3, 7]]
assert_df(result, expected)
result = df.drop_duplicates("C", keep=False)
expected = df.to_pandas().loc[[]]
assert_df(result, expected)
assert len(result) == 0
result = df.drop_duplicates(["C", "B"])
expected = df.to_pandas().loc[[0, 1, 2, 4]]
assert_df(result, expected)
result = df.drop_duplicates(["C", "B"], keep="last")
expected = df.to_pandas().loc[[1, 3, 6, 7]]
assert_df(result, expected)
result = df.drop_duplicates(["C", "B"], keep=False)
expected = df.to_pandas().loc[[1]]
assert_df(result, expected)
def test_drop_duplicates_NA_for_take_all():
pdf = DataFrame(
{
"A": [None, None, "foo", "bar", "foo", "baz", "bar", "qux"],
"C": [1.0, np.nan, np.nan, np.nan, 1.0, 2.0, 3, 1.0],
}
)
df = cudf.DataFrame.from_pandas(pdf)
result = df.drop_duplicates("A")
expected = pdf.iloc[[0, 2, 3, 5, 7]]
assert_df(result, expected)
assert_df(
df.drop_duplicates("A", ignore_index=True),
result.reset_index(drop=True),
)
result = df.drop_duplicates("A", keep="last")
expected = pdf.iloc[[1, 4, 5, 6, 7]]
assert_df(result, expected)
assert_df(
df.drop_duplicates("A", ignore_index=True, keep="last"),
result.reset_index(drop=True),
)
result = df.drop_duplicates("A", keep=False)
expected = pdf.iloc[[5, 7]]
assert_df(result, expected)
assert_df(
df.drop_duplicates("A", ignore_index=True, keep=False),
result.reset_index(drop=True),
)
result = df.drop_duplicates("C")
expected = pdf.iloc[[0, 1, 5, 6]]
assert_df(result, expected)
result = df.drop_duplicates("C", keep="last")
expected = pdf.iloc[[3, 5, 6, 7]]
assert_df(result, expected)
result = df.drop_duplicates("C", keep=False)
expected = pdf.iloc[[5, 6]]
assert_df(result, expected)
def test_drop_duplicates_inplace():
orig = DataFrame(
{
"A": ["foo", "bar", "foo", "bar", "foo", "bar", "bar", "foo"],
"B": ["one", "one", "two", "two", "two", "two", "one", "two"],
"C": [1, 1, 2, 2, 2, 2, 1, 2],
"D": range(8),
}
)
orig = cudf.DataFrame.from_pandas(orig)
df = orig.copy()
df.drop_duplicates("A", inplace=True)
expected = orig[:2]
result = df
assert_df(result, expected)
df = orig.copy()
df.drop_duplicates("A", keep="last", inplace=True)
expected = orig.loc[[6, 7]]
result = df
assert_df(result, expected)
df = orig.copy()
df.drop_duplicates("A", keep=False, inplace=True)
expected = orig.loc[[]]
result = df
assert_df(result, expected)
assert len(df) == 0
df = orig.copy()
df.drop_duplicates(["A", "B"], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
assert_df(result, expected)
df = orig.copy()
df.drop_duplicates(["A", "B"], keep="last", inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
assert_df(result, expected)
df = orig.copy()
df.drop_duplicates(["A", "B"], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
assert_df(result, expected)
orig2 = orig.loc[:, ["A", "B", "C"]].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
expected = orig2.drop_duplicates(["A", "B"])
result = df2
assert_df(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep="last", inplace=True)
expected = orig2.drop_duplicates(["A", "B"], keep="last")
result = df2
assert_df(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(["A", "B"], keep=False)
result = df2
assert_df(result, expected)
def test_drop_duplicates_multi_index():
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
idx = MultiIndex.from_tuples(list(zip(*arrays)), names=["a", "b"])
pdf = DataFrame(np.random.randint(0, 2, (8, 4)), index=idx)
gdf = cudf.DataFrame.from_pandas(pdf)
expected = pdf.drop_duplicates()
result = gdf.drop_duplicates()
assert_df(result.to_pandas(), expected)
for col in gdf.columns:
assert_df(
gdf[col].drop_duplicates().to_pandas(), pdf[col].drop_duplicates(),
)
| true | true |
f721b7e1de7edb58c96d9d578d13892a84f8ff0c | 803 | py | Python | experiments/download.py | stasbel/Meme-Machinery-VKHack2018 | 5e15198d6bc8d350f2dc0158a34467f3415da0bc | [
"MIT"
] | 1 | 2018-11-15T08:30:34.000Z | 2018-11-15T08:30:34.000Z | experiments/download.py | stasbel/Meme-Machinery-VKHack2018 | 5e15198d6bc8d350f2dc0158a34467f3415da0bc | [
"MIT"
] | null | null | null | experiments/download.py | stasbel/Meme-Machinery-VKHack2018 | 5e15198d6bc8d350f2dc0158a34467f3415da0bc | [
"MIT"
] | null | null | null | """Downloads prescribed data from the Internet, embed and store it."""
import logging
import numpy as np
import torch
from experiments.scrap import META_PATH
from mem.gen.stages import Extractor
logger = logging.getLogger(__name__)
MATRIX_PATH = 'matrix.npy'
NEW_META_PATH = 'processed_reddit_data.pth'
def main(_):
    """Load scraped metadata, embed it with Extractor, and persist results.

    Reads the metadata saved by the scrap step (META_PATH), writes the
    processed metadata to NEW_META_PATH and the embedding matrix to
    MATRIX_PATH. The argument is ignored (config placeholder).
    """
    meta = torch.load(META_PATH)
    extractor = Extractor()
    meta, matrix = extractor.extract(meta)
    torch.save(meta, NEW_META_PATH)
    logger.info(f'Obtain matrix of shape {matrix.shape}.')
    np.save(MATRIX_PATH, matrix)
def _parse_config():
logging.basicConfig(
format='%(asctime)s | %(message)s',
handlers=[
logging.StreamHandler()
],
level=logging.INFO
)
if __name__ == '__main__':
main(_parse_config())
| 20.075 | 70 | 0.682441 |
import logging
import numpy as np
import torch
from experiments.scrap import META_PATH
from mem.gen.stages import Extractor
logger = logging.getLogger(__name__)
MATRIX_PATH = 'matrix.npy'
NEW_META_PATH = 'processed_reddit_data.pth'
def main(_):
meta = torch.load(META_PATH)
extractor = Extractor()
meta, matrix = extractor.extract(meta)
torch.save(meta, NEW_META_PATH)
logger.info(f'Obtain matrix of shape {matrix.shape}.')
np.save(MATRIX_PATH, matrix)
def _parse_config():
logging.basicConfig(
format='%(asctime)s | %(message)s',
handlers=[
logging.StreamHandler()
],
level=logging.INFO
)
if __name__ == '__main__':
main(_parse_config())
| true | true |
f721ba9b451bb59633ec0dcadf214c8d02e86015 | 1,447 | py | Python | Data Structures/Queues/QueueList.py | ayushkr459/Data-Structures-And-Algorithms | 050689a5e89a5afb0c907f16601d11706c04b614 | [
"MIT"
] | 4 | 2020-12-01T08:52:57.000Z | 2021-11-08T11:44:42.000Z | Data Structures/Queues/QueueList.py | ayushkr459/Data-Structures-And-Algorithms | 050689a5e89a5afb0c907f16601d11706c04b614 | [
"MIT"
] | 4 | 2020-09-30T19:54:10.000Z | 2020-10-17T05:04:04.000Z | Data Structures/Queues/QueueList.py | ayushkr459/Data-Structures-And-Algorithms | 050689a5e89a5afb0c907f16601d11706c04b614 | [
"MIT"
] | 12 | 2020-09-30T18:30:59.000Z | 2020-10-31T15:38:54.000Z |
# Linked-list node holding one queue element.
class Node :
    def __init__(self, val) :
        self.val = val
        self.next = None

class Queue :
    """FIFO queue backed by a singly linked list.

    ``front`` is the next element to dequeue, ``rear`` the most recently
    enqueued one; both are None when the queue is empty.
    """
    def __init__(self) :
        self.front = None
        self.rear = None

    # append an element at the rear of the queue
    def insert(self, val) :
        new_node = Node(val)
        if self.front is None :
            # empty queue: the new node is both front and rear
            self.rear = new_node
            self.front = new_node
        else :
            self.rear.next = new_node
            self.rear = new_node

    # print all elements front-to-rear on one line
    def traverse(self) :
        ptr = self.front
        if ptr is None :
            print('Queue is empty')
            return
        while ptr is not None :
            print(ptr.val, end = " ")
            ptr = ptr.next

    # dequeue and print the front element (message if empty)
    def delete(self) :
        ptr = self.front
        if ptr is None :
            print("queue is empty")
            return
        print(ptr.val)
        self.front = ptr.next
        if self.front is None :
            # BUG FIX: when the queue becomes empty, rear must be reset too.
            # Previously rear kept pointing at the removed node, so a later
            # insert() appended to that stale node and front stayed None,
            # leaving the queue permanently broken.
            self.rear = None
if __name__ == '__main__' :
    # Demo: enqueue 1..3, dequeue four times (the fourth prints
    # "queue is empty"), then show what remains with traverse().
    queue = Queue()
    queue.insert(1)
    queue.insert(2)
    queue.insert(3)
    queue.delete()
    queue.delete()
    queue.delete()
    queue.delete()
    queue.traverse()
class Node :
    """Singly-linked-list node carrying one queue value."""
    def __init__(self, val) :
        self.val = val
        self.next = None

class Queue :
    """Linked-list FIFO queue; front/rear are None when empty."""
    def __init__(self) :
        self.front = None
        self.rear = None

    def insert(self, val) :
        """Append *val* at the rear."""
        node = Node(val)
        if self.front is None :
            self.front = node
            self.rear = node
        else :
            self.rear.next = node
            self.rear = node

    def traverse(self) :
        """Print all elements front-to-rear on one line."""
        cur = self.front
        if cur is None :
            print('Queue is empty')
            return
        while cur is not None :
            print(cur.val, end = " ")
            cur = cur.next

    def delete(self) :
        """Dequeue and print the front element (message if empty)."""
        cur = self.front
        if cur is None :
            print("queue is empty")
            return
        print(cur.val)
        self.front = cur.next
        if self.front is None :
            # BUG FIX: reset rear when the queue empties; otherwise a later
            # insert() would chain onto the stale removed node and front
            # would stay None forever.
            self.rear = None
if __name__ == '__main__' :
queue = Queue()
queue.insert(1)
queue.insert(2)
queue.insert(3)
queue.delete()
queue.delete()
queue.delete()
queue.delete()
queue.traverse() | true | true |
f721bb48b8c008b5b87c1df753579d37ad8ec606 | 9,887 | py | Python | models.py | jiangyangby/DRDSC | 4b53e18626b9839578bea6c84bba47d15bc8d3d6 | [
"MIT"
] | 3 | 2020-10-12T02:30:11.000Z | 2021-07-09T07:04:12.000Z | models.py | jiangyangby/DRDSC | 4b53e18626b9839578bea6c84bba47d15bc8d3d6 | [
"MIT"
] | 1 | 2020-10-06T15:19:09.000Z | 2020-10-06T15:19:09.000Z | models.py | jiangyangby/DRDSC | 4b53e18626b9839578bea6c84bba47d15bc8d3d6 | [
"MIT"
] | 2 | 2020-04-09T15:46:59.000Z | 2021-08-13T16:39:31.000Z | from __future__ import print_function, absolute_import, division
import tensorflow as tf
from tensorflow.contrib import layers
mu = 1.0e-6
@tf.custom_gradient
def f_norm(x):
    """Smoothed Frobenius norm: sqrt(||x||_F^2 + mu^2) - mu.

    The mu offset keeps the custom gradient x / sqrt(||x||_F^2 + mu^2)
    finite at x == 0, where the plain Frobenius norm is non-differentiable.
    """
    f2 = tf.square(tf.norm(x, ord='fro', axis=[-2, -1]))
    f = tf.sqrt(f2 + mu ** 2) - mu
    def grad(dy):
        return dy * (x / tf.sqrt(f2 + mu ** 2))
    return f, grad
@tf.custom_gradient
def l2_norm(x):
    """Smoothed l2 norm: sqrt(||x||_2^2 + mu^2) - mu.

    Same smoothing trick as f_norm, for vectors: the custom gradient
    x / sqrt(||x||_2^2 + mu^2) stays finite at x == 0.
    """
    f2 = tf.square(tf.norm(x, ord=2))
    f = tf.sqrt(f2 + mu ** 2) - mu
    def grad(dy):
        return dy * (x / tf.sqrt(f2 + mu ** 2))
    return f, grad
class RSCConvAE:
    '''
    Duet Robust Deep Subspace Clustering.

    Convolutional auto-encoder with a self-expressive coefficient matrix
    (latent codes reconstructed as z_c = Coef @ z) plus two trainable noise
    variables absorbing gross corruption: ``x_noise`` in input space and
    ``z_noise`` in latent space, each penalized via a smoothed norm
    (``f_norm`` / ``l2_norm``). Built on TF1 graph mode with an interactive
    session owned by the instance.
    '''
    def __init__(self, n_input, kernel_size, n_hidden, z_dim, lamda1=1.0,
                 lamda2=1.0, eta1=1.0, eta2=1.0, batch_size=200, reg=None,
                 denoise=False, save_path=None, restore_path=None,
                 normalize_input=False, logs_path='./logs'):
        """Build graph, loss, optimizer, session and summary writer.

        n_input: (height, width) of the single-channel input images.
        kernel_size / n_hidden: per-layer conv kernel sizes and channel counts.
        z_dim: flattened latent dimension (must match encoder output size).
        lamda1/lamda2: weights of self-expression and Coef-regularization terms.
        eta1/eta2: weights of the x_noise / z_noise regularizers.
        '''
        """
        self.n_input = n_input
        self.kernel_size = kernel_size
        self.n_hidden = n_hidden
        self.batch_size = batch_size
        self.z_dim = z_dim
        self.reg = reg
        self.save_path = save_path
        self.restore_path = restore_path
        self.iter = 0
        # placeholders fed at run time
        self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
        self.learning_rate = tf.placeholder(tf.float32, [])
        weights = self._initialize_weights()
        self.x_noise = weights['x_noise']
        self.z_noise = weights['z_noise']
        self.z, self.Coef, self.x_r, self.x_diff, self.z_diff = \
            self._forward(denoise, normalize_input, weights)
        # reconstruction term: 0.5*||x_diff - x_noise||^2 + eta1 * group reg
        self.reconst_cost = self._get_reconstruction_loss(eta1)
        tf.summary.scalar("recons_loss", self.reconst_cost)
        self.reg_loss = self._get_coef_reg_loss(reg_type='l2')  # l2 reg on Coef
        tf.summary.scalar("reg_loss", lamda2 * self.reg_loss)
        # self-expression term: 0.5*||z_diff - z_noise||^2 + eta2 * row-wise reg
        selfexpress_cost = tf.square(self.z_diff - self.z_noise)
        z_noise_reg = tf.map_fn(lambda frame: l2_norm(frame), self.z_noise)
        self.selfexpress_loss = 0.5 * \
            tf.reduce_sum(selfexpress_cost) + eta2 * tf.reduce_sum(z_noise_reg)
        tf.summary.scalar("selfexpress_loss", lamda1 *
                          self.selfexpress_loss)
        self.loss = self.reconst_cost + lamda1 * \
            self.selfexpress_loss + lamda2 * self.reg_loss
        self.merged_summary_op = tf.summary.merge_all()
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate).minimize(self.loss)
        self.init = tf.global_variables_initializer()
        self.sess = tf.InteractiveSession()
        self.sess.run(self.init)
        # checkpoints exclude every variable named 'Coef*' -- that is the
        # coefficient matrix AND both noise variables (deliberately created
        # with name='Coef' in _initialize_weights); only the conv
        # encoder/decoder weights are saved/restored.
        self.saver = tf.train.Saver(
            [v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])
        self.summary_writer = tf.summary.FileWriter(
            logs_path, graph=tf.get_default_graph())
    def _build_input(self, denoise, normalize_input):
        """Return the network input: optionally per-image standardized and/or
        with Gaussian noise (stddev 0.2) added for denoising training."""
        if not normalize_input:
            x_input = self.x
        else:
            x_input = tf.map_fn(
                lambda frame: tf.image.per_image_standardization(frame), self.x)
        if denoise:
            x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
                                                      mean=0,
                                                      stddev=0.2,
                                                      dtype=tf.float32))
        return x_input
    def _forward(self, denoise, normalize_input, weights):
        """Encode, self-express (z_c = Coef @ z with zeroed diagonal, which
        bans the trivial identity solution), decode, and return
        (z, Coef, x_r, x_diff, z_diff) where *_diff are residuals."""
        x_input = self._build_input(denoise, normalize_input)
        latent, shape = self.encoder(x_input, weights)
        z = tf.reshape(latent, [self.batch_size, -1])
        Coef = weights['Coef']
        Coef = Coef - tf.diag(tf.diag_part(Coef))
        z_c = tf.matmul(Coef, z)
        latent_c = tf.reshape(z_c, tf.shape(latent))
        x_r = self.decoder(latent_c, weights, shape)
        z_diff = z - z_c
        x_diff = x_input - x_r
        return z, Coef, x_r, x_diff, z_diff
    def _get_reconstruction_loss(self, eta1):
        """0.5*sum((x_diff - x_noise)^2) + eta1 * sum of per-sample smoothed
        Frobenius norms of x_noise (group-sparse noise penalty)."""
        reconst_cost = tf.square(self.x_diff - self.x_noise)  # elementwise l2
        x_noise_3dim = tf.squeeze(self.x_noise)
        x_noise_group_reg = tf.map_fn(
            lambda frame: f_norm(frame), x_noise_3dim)
        reconst_cost = 0.5 * tf.reduce_sum(reconst_cost) + \
            eta1 * tf.reduce_sum(x_noise_group_reg)
        return reconst_cost
    def _get_coef_reg_loss(self, reg_type='l2'):
        """Regularizer on Coef: sum of squares ('l2') or absolute values ('l1').

        NOTE(review): ``is`` on string literals relies on CPython interning;
        this should use ``==`` (works here only by accident).
        """
        if reg_type is 'l2':
            loss = tf.reduce_sum(tf.square(self.Coef))
        elif reg_type is 'l1':
            loss = tf.reduce_sum(tf.abs(self.Coef))
        return loss
    def _initialize_weights(self):
        """Create all variables: Coef (zero-initialized), x_noise/z_noise
        (zero-initialized; intentionally named 'Coef' so the Saver skips
        them), and Xavier-initialized conv encoder/decoder kernels+biases.
        """
        all_weights = dict()
        n_layers = len(self.n_hidden)
        all_weights['Coef'] = tf.Variable(
            0 * tf.ones([self.batch_size, self.batch_size], tf.float32), name='Coef')
        all_weights['x_noise'] = tf.Variable(
            tf.zeros([self.batch_size, self.n_input[0],
                      self.n_input[1], 1], tf.float32), name='Coef')
        all_weights['z_noise'] = tf.Variable(
            tf.zeros([self.batch_size, self.z_dim], tf.float32), name='Coef')
        all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
                                                initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))
        for iter_i in range(1, n_layers):
            enc_name_wi = 'enc_w' + str(iter_i)
            all_weights[enc_name_wi] = tf.get_variable(enc_name_wi, shape=[self.kernel_size[iter_i], self.kernel_size[iter_i], self.n_hidden[iter_i - 1],
                                                                           self.n_hidden[iter_i]], initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
            enc_name_bi = 'enc_b' + str(iter_i)
            all_weights[enc_name_bi] = tf.Variable(
                tf.zeros([self.n_hidden[iter_i]], dtype=tf.float32))
        # decoder kernels mirror the encoder in reverse order
        for iter_i in range(1, n_layers):
            dec_name_wi = 'dec_w' + str(iter_i - 1)
            all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[n_layers - iter_i], self.kernel_size[n_layers - iter_i],
                                                                           self.n_hidden[n_layers - iter_i - 1], self.n_hidden[n_layers - iter_i]],
                                                       initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
            dec_name_bi = 'dec_b' + str(iter_i - 1)
            all_weights[dec_name_bi] = tf.Variable(tf.zeros(
                [self.n_hidden[n_layers - iter_i - 1]], dtype=tf.float32))
        dec_name_wi = 'dec_w' + str(n_layers - 1)
        all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
                                                   initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        dec_name_bi = 'dec_b' + str(n_layers - 1)
        all_weights[dec_name_bi] = tf.Variable(
            tf.zeros([1], dtype=tf.float32))
        return all_weights
    # Encoder: stride-2 conv + ReLU per layer; also records each layer's
    # shape so the decoder can invert the spatial dimensions exactly.
    def encoder(self, x, weights):
        shapes = []
        shapes.append(x.get_shape().as_list())
        layeri = tf.nn.bias_add(tf.nn.conv2d(x, weights['enc_w0'], strides=[
                                1, 2, 2, 1], padding='SAME'), weights['enc_b0'])
        layeri = tf.nn.relu(layeri)
        shapes.append(layeri.get_shape().as_list())
        for iter_i in range(1, len(self.n_hidden)):
            layeri = tf.nn.bias_add(tf.nn.conv2d(layeri, weights['enc_w' + str(iter_i)], strides=[
                                    1, 2, 2, 1], padding='SAME'), weights['enc_b' + str(iter_i)])
            layeri = tf.nn.relu(layeri)
            shapes.append(layeri.get_shape().as_list())
        layer3 = layeri
        return layer3, shapes
    # Decoder: stride-2 transposed conv + ReLU per layer, using the shapes
    # recorded by the encoder (in reverse) as output shapes.
    def decoder(self, z, weights, shapes):
        n_layers = len(self.n_hidden)
        layer3 = z
        for iter_i in range(n_layers):
            shape_de = shapes[n_layers - iter_i - 1]
            layer3 = tf.add(tf.nn.conv2d_transpose(layer3, weights['dec_w' + str(iter_i)], tf.stack([tf.shape(self.x)[0], shape_de[1], shape_de[2], shape_de[3]]),
                                                   strides=[1, 2, 2, 1], padding='SAME'), weights['dec_b' + str(iter_i)])
            layer3 = tf.nn.relu(layer3)
        return layer3
    def partial_fit(self, X, lr):
        """Run one optimization step on batch X with learning rate lr;
        also logs summaries. Returns (loss, Coef, z_diff, x_diff)."""
        cost, summary, _, Coef, z_diff, x_diff = self.sess.run(
            (self.loss, self.merged_summary_op, self.optimizer, self.Coef,
             self.z_diff, self.x_diff),
            feed_dict={self.x: X, self.learning_rate: lr})
        self.summary_writer.add_summary(summary, self.iter)
        self.iter = self.iter + 1
        return cost, Coef, z_diff, x_diff
    def initlization(self):
        # (sic: typo kept -- callers use this name) re-run variable init
        self.sess.run(self.init)
    def reconstruct(self, X):
        """Return the reconstruction x_r for batch X."""
        return self.sess.run(self.x_r, feed_dict={self.x: X})
    def transform(self, X):
        """Return the flattened latent codes z for batch X."""
        return self.sess.run(self.z, feed_dict={self.x: X})
    def save_model(self):
        # saves only encoder/decoder weights (see Saver construction above)
        save_path = self.saver.save(self.sess, self.save_path)
        print("model saved in file: %s" % save_path)
    def restore(self):
        self.saver.restore(self.sess, self.restore_path)
        print("model restored")
| 42.986957 | 168 | 0.583392 | from __future__ import print_function, absolute_import, division
import tensorflow as tf
from tensorflow.contrib import layers
mu = 1.0e-6
@tf.custom_gradient
def f_norm(x):
f2 = tf.square(tf.norm(x, ord='fro', axis=[-2, -1]))
f = tf.sqrt(f2 + mu ** 2) - mu
def grad(dy):
return dy * (x / tf.sqrt(f2 + mu ** 2))
return f, grad
@tf.custom_gradient
def l2_norm(x):
f2 = tf.square(tf.norm(x, ord=2))
f = tf.sqrt(f2 + mu ** 2) - mu
def grad(dy):
return dy * (x / tf.sqrt(f2 + mu ** 2))
return f, grad
class RSCConvAE:
def __init__(self, n_input, kernel_size, n_hidden, z_dim, lamda1=1.0,
lamda2=1.0, eta1=1.0, eta2=1.0, batch_size=200, reg=None,
denoise=False, save_path=None, restore_path=None,
normalize_input=False, logs_path='./logs'):
self.n_input = n_input
self.kernel_size = kernel_size
self.n_hidden = n_hidden
self.batch_size = batch_size
self.z_dim = z_dim
self.reg = reg
self.save_path = save_path
self.restore_path = restore_path
self.iter = 0
self.x = tf.placeholder(tf.float32, [None, n_input[0], n_input[1], 1])
self.learning_rate = tf.placeholder(tf.float32, [])
weights = self._initialize_weights()
self.x_noise = weights['x_noise']
self.z_noise = weights['z_noise']
self.z, self.Coef, self.x_r, self.x_diff, self.z_diff = \
self._forward(denoise, normalize_input, weights)
self.reconst_cost = self._get_reconstruction_loss(eta1)
tf.summary.scalar("recons_loss", self.reconst_cost)
self.reg_loss = self._get_coef_reg_loss(reg_type='l2')
tf.summary.scalar("reg_loss", lamda2 * self.reg_loss)
selfexpress_cost = tf.square(self.z_diff - self.z_noise)
z_noise_reg = tf.map_fn(lambda frame: l2_norm(frame), self.z_noise)
self.selfexpress_loss = 0.5 * \
tf.reduce_sum(selfexpress_cost) + eta2 * tf.reduce_sum(z_noise_reg)
tf.summary.scalar("selfexpress_loss", lamda1 *
self.selfexpress_loss)
self.loss = self.reconst_cost + lamda1 * \
self.selfexpress_loss + lamda2 * self.reg_loss
self.merged_summary_op = tf.summary.merge_all()
self.optimizer = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(self.loss)
self.init = tf.global_variables_initializer()
self.sess = tf.InteractiveSession()
self.sess.run(self.init)
self.saver = tf.train.Saver(
[v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))])
self.summary_writer = tf.summary.FileWriter(
logs_path, graph=tf.get_default_graph())
def _build_input(self, denoise, normalize_input):
if not normalize_input:
x_input = self.x
else:
x_input = tf.map_fn(
lambda frame: tf.image.per_image_standardization(frame), self.x)
if denoise:
x_input = tf.add(self.x, tf.random_normal(shape=tf.shape(self.x),
mean=0,
stddev=0.2,
dtype=tf.float32))
return x_input
def _forward(self, denoise, normalize_input, weights):
x_input = self._build_input(denoise, normalize_input)
latent, shape = self.encoder(x_input, weights)
z = tf.reshape(latent, [self.batch_size, -1])
Coef = weights['Coef']
Coef = Coef - tf.diag(tf.diag_part(Coef))
z_c = tf.matmul(Coef, z)
latent_c = tf.reshape(z_c, tf.shape(latent))
x_r = self.decoder(latent_c, weights, shape)
z_diff = z - z_c
x_diff = x_input - x_r
return z, Coef, x_r, x_diff, z_diff
def _get_reconstruction_loss(self, eta1):
reconst_cost = tf.square(self.x_diff - self.x_noise)
x_noise_3dim = tf.squeeze(self.x_noise)
x_noise_group_reg = tf.map_fn(
lambda frame: f_norm(frame), x_noise_3dim)
reconst_cost = 0.5 * tf.reduce_sum(reconst_cost) + \
eta1 * tf.reduce_sum(x_noise_group_reg)
return reconst_cost
def _get_coef_reg_loss(self, reg_type='l2'):
if reg_type is 'l2':
loss = tf.reduce_sum(tf.square(self.Coef))
elif reg_type is 'l1':
loss = tf.reduce_sum(tf.abs(self.Coef))
return loss
    def _initialize_weights(self):
        """Creates all model variables and returns them in a dict.

        Keys:
            'Coef': [batch_size, batch_size] self-expression coefficients,
                initialized to zero.
            'x_noise': [batch_size, H, W, 1] per-sample input-space noise.
            'z_noise': [batch_size, z_dim] per-sample latent-space noise.
            'enc_w{i}' / 'enc_b{i}': encoder conv kernels and biases.
            'dec_w{i}' / 'dec_b{i}': decoder deconv kernels and biases,
                mirroring the encoder layers in reverse order.

        NOTE(review): 'x_noise' and 'z_noise' deliberately reuse the variable
        name 'Coef' — the Saver built in __init__ skips trainable variables
        whose name starts with "Coef", so these noise variables are excluded
        from checkpoints. Confirm before renaming.
        """
        all_weights = dict()
        n_layers = len(self.n_hidden)
        all_weights['Coef'] = tf.Variable(
            0 * tf.ones([self.batch_size, self.batch_size], tf.float32), name='Coef')
        all_weights['x_noise'] = tf.Variable(
            tf.zeros([self.batch_size, self.n_input[0],
                      self.n_input[1], 1], tf.float32), name='Coef')
        all_weights['z_noise'] = tf.Variable(
            tf.zeros([self.batch_size, self.z_dim], tf.float32), name='Coef')
        # First encoder layer maps the single input channel to n_hidden[0].
        all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
                                                initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        all_weights['enc_b0'] = tf.Variable(
            tf.zeros([self.n_hidden[0]], dtype=tf.float32))
        for iter_i in range(1, n_layers):
            enc_name_wi = 'enc_w' + str(iter_i)
            all_weights[enc_name_wi] = tf.get_variable(enc_name_wi, shape=[self.kernel_size[iter_i], self.kernel_size[iter_i], self.n_hidden[iter_i - 1],
                                                                           self.n_hidden[iter_i]], initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
            enc_name_bi = 'enc_b' + str(iter_i)
            all_weights[enc_name_bi] = tf.Variable(
                tf.zeros([self.n_hidden[iter_i]], dtype=tf.float32))
        # Decoder kernels mirror the encoder in reverse layer order.
        for iter_i in range(1, n_layers):
            dec_name_wi = 'dec_w' + str(iter_i - 1)
            all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[n_layers - iter_i], self.kernel_size[n_layers - iter_i],
                                                                           self.n_hidden[n_layers - iter_i - 1], self.n_hidden[n_layers - iter_i]],
                                                       initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
            dec_name_bi = 'dec_b' + str(iter_i - 1)
            all_weights[dec_name_bi] = tf.Variable(tf.zeros(
                [self.n_hidden[n_layers - iter_i - 1]], dtype=tf.float32))
        # Final decoder layer maps back to a single output channel.
        dec_name_wi = 'dec_w' + str(n_layers - 1)
        all_weights[dec_name_wi] = tf.get_variable(dec_name_wi, shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]],
                                                   initializer=layers.xavier_initializer_conv2d(), regularizer=self.reg)
        dec_name_bi = 'dec_b' + str(n_layers - 1)
        all_weights[dec_name_bi] = tf.Variable(
            tf.zeros([1], dtype=tf.float32))
        return all_weights
def encoder(self, x, weights):
shapes = []
shapes.append(x.get_shape().as_list())
layeri = tf.nn.bias_add(tf.nn.conv2d(x, weights['enc_w0'], strides=[
1, 2, 2, 1], padding='SAME'), weights['enc_b0'])
layeri = tf.nn.relu(layeri)
shapes.append(layeri.get_shape().as_list())
for iter_i in range(1, len(self.n_hidden)):
layeri = tf.nn.bias_add(tf.nn.conv2d(layeri, weights['enc_w' + str(iter_i)], strides=[
1, 2, 2, 1], padding='SAME'), weights['enc_b' + str(iter_i)])
layeri = tf.nn.relu(layeri)
shapes.append(layeri.get_shape().as_list())
layer3 = layeri
return layer3, shapes
def decoder(self, z, weights, shapes):
n_layers = len(self.n_hidden)
layer3 = z
for iter_i in range(n_layers):
shape_de = shapes[n_layers - iter_i - 1]
layer3 = tf.add(tf.nn.conv2d_transpose(layer3, weights['dec_w' + str(iter_i)], tf.stack([tf.shape(self.x)[0], shape_de[1], shape_de[2], shape_de[3]]),
strides=[1, 2, 2, 1], padding='SAME'), weights['dec_b' + str(iter_i)])
layer3 = tf.nn.relu(layer3)
return layer3
def partial_fit(self, X, lr):
cost, summary, _, Coef, z_diff, x_diff = self.sess.run(
(self.loss, self.merged_summary_op, self.optimizer, self.Coef,
self.z_diff, self.x_diff),
feed_dict={self.x: X, self.learning_rate: lr})
self.summary_writer.add_summary(summary, self.iter)
self.iter = self.iter + 1
return cost, Coef, z_diff, x_diff
    def initlization(self):
        """Re-runs the global variable initializer, resetting all weights.

        Note: the misspelled method name is kept as-is for API compatibility
        with existing callers.
        """
        self.sess.run(self.init)
    def reconstruct(self, X):
        """Returns the network reconstruction x_r for input batch X."""
        return self.sess.run(self.x_r, feed_dict={self.x: X})
    def transform(self, X):
        """Returns the flattened latent codes z for input batch X."""
        return self.sess.run(self.z, feed_dict={self.x: X})
    def save_model(self):
        """Checkpoints the model (via self.saver) to self.save_path."""
        save_path = self.saver.save(self.sess, self.save_path)
        print("model saved in file: %s" % save_path)
    def restore(self):
        """Restores model variables from the checkpoint at self.restore_path."""
        self.saver.restore(self.sess, self.restore_path)
        print("model restored")
| true | true |
f721bbe3a503666d938fe4233b4619c044301e09 | 83,584 | py | Python | research/object_detection/metrics/coco_evaluation.py | raijin0704/models | 6906bfbdbf2ad628bb6aeca9989dc04f605b6a60 | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | research/object_detection/metrics/coco_evaluation.py | raijin0704/models | 6906bfbdbf2ad628bb6aeca9989dc04f605b6a60 | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | research/object_detection/metrics/coco_evaluation.py | raijin0704/models | 6906bfbdbf2ad628bb6aeca9989dc04f605b6a60 | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class for evaluating object detections with COCO metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_tools
from object_detection.utils import json_utils
from object_detection.utils import np_mask_ops
from object_detection.utils import object_detection_evaluation
class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Class to evaluate COCO detection metrics.

  Accumulates groundtruth and detections keyed by image id, then defers the
  metric computation to pycocotools via coco_tools wrappers.
  """

  def __init__(self,
               categories,
               include_metrics_per_category=False,
               all_metrics_per_category=False):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat',
          'dog'.
      include_metrics_per_category: If True, include metrics for each category.
      all_metrics_per_category: Whether to include all the summary metrics for
        each category in per_category_ap. Be careful with setting it to true if
        you have more than handful of categories, because it will pollute
        your mldash.
    """
    super(CocoDetectionEvaluator, self).__init__(categories)
    # _image_ids is a dictionary that maps unique image ids to Booleans which
    # indicate whether a corresponding detection has been added.
    self._image_ids = {}
    self._groundtruth_list = []
    self._detection_boxes_list = []
    self._category_id_set = set([cat['id'] for cat in self._categories])
    # COCO annotation ids must be globally unique; advanced per gt box.
    self._annotation_id = 1
    self._metrics = None
    self._include_metrics_per_category = include_metrics_per_category
    self._all_metrics_per_category = all_metrics_per_category

  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    self._image_ids.clear()
    self._groundtruth_list = []
    self._detection_boxes_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    If the image has already been added, a warning is logged, and groundtruth
    is ignored.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        InputDataFields.groundtruth_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` groundtruth boxes of the
          format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        InputDataFields.groundtruth_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed groundtruth classes for the boxes.
        InputDataFields.groundtruth_is_crowd (optional): integer numpy array
          of shape [num_boxes] containing iscrowd flag for groundtruth boxes.
        InputDataFields.groundtruth_area (optional): float numpy array of
          shape [num_boxes] containing the area (in the original absolute
          coordinates) of the annotated object.
        InputDataFields.groundtruth_keypoints (optional): float numpy array of
          keypoints with shape [num_boxes, num_keypoints, 2].
        InputDataFields.groundtruth_keypoint_visibilities (optional): integer
          numpy array of keypoint visibilities with shape [num_gt_boxes,
          num_keypoints]. Integer is treated as an enum with 0=not labeled,
          1=labeled but not visible and 2=labeled and visible.
    """
    if image_id in self._image_ids:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return

    # Drop optional fields if empty tensor.
    groundtruth_is_crowd = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_is_crowd)
    groundtruth_area = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_area)
    groundtruth_keypoints = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_keypoints)
    groundtruth_keypoint_visibilities = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_keypoint_visibilities)
    if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
      groundtruth_is_crowd = None
    if groundtruth_area is not None and not groundtruth_area.shape[0]:
      groundtruth_area = None
    if groundtruth_keypoints is not None and not groundtruth_keypoints.shape[0]:
      groundtruth_keypoints = None
    if groundtruth_keypoint_visibilities is not None and not groundtruth_keypoint_visibilities.shape[
        0]:
      groundtruth_keypoint_visibilities = None

    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_is_crowd=groundtruth_is_crowd,
            groundtruth_area=groundtruth_area,
            groundtruth_keypoints=groundtruth_keypoints,
            groundtruth_keypoint_visibilities=groundtruth_keypoint_visibilities)
    )
    # Reserve one annotation id per groundtruth box just added.
    self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
                                            groundtruth_boxes].shape[0]
    # Boolean to indicate whether a detection has been added for this image.
    self._image_ids[image_id] = False

  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` detection boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.
        DetectionResultFields.detection_keypoints (optional): float numpy
          array of keypoints with shape [num_boxes, num_keypoints, 2].

    Raises:
      ValueError: If groundtruth for the image_id is not available.
    """
    if image_id not in self._image_ids:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if self._image_ids[image_id]:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    # Drop optional fields if empty tensor.
    detection_keypoints = detections_dict.get(
        standard_fields.DetectionResultFields.detection_keypoints)
    if detection_keypoints is not None and not detection_keypoints.shape[0]:
      detection_keypoints = None
    self._detection_boxes_list.extend(
        coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_boxes=detections_dict[
                standard_fields.DetectionResultFields.detection_boxes],
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes],
            detection_keypoints=detection_keypoints))
    # Mark that this image now has a detection recorded.
    self._image_ids[image_id] = True

  def dump_detections_to_json_file(self, json_output_path):
    """Saves the detections into json_output_path in the format used by MS COCO.

    Args:
      json_output_path: String containing the output file's path. It can be
        also None. In that case nothing will be written to the output file.
    """
    # NOTE: `json_output_path is not None` is redundant given the truthiness
    # check, but kept as-is.
    if json_output_path and json_output_path is not None:
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        tf.logging.info('Dumping detections to output json file.')
        json_utils.Dump(
            obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the detection boxes and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionBoxes_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05
        increments.
      'DetectionBoxes_Precision/mAP@.50IOU': mean average precision at 50% IOU
      'DetectionBoxes_Precision/mAP@.75IOU': mean average precision at 75% IOU
      'DetectionBoxes_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionBoxes_Precision/mAP (medium)': mean average precision for
        medium sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionBoxes_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionBoxes_Recall/AR@1': average recall with 1 detection.
      'DetectionBoxes_Recall/AR@10': average recall with 10 detections.
      'DetectionBoxes_Recall/AR@100': average recall with 100 detections.
      'DetectionBoxes_Recall/AR@100 (small)': average recall for small objects
        with 100.
      'DetectionBoxes_Recall/AR@100 (medium)': average recall for medium
        objects with 100.
      'DetectionBoxes_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
    tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        'images': [{'id': image_id} for image_id in self._image_ids],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_boxes_list)
    box_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
    box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category,
        all_metrics_per_category=self._all_metrics_per_category)
    box_metrics.update(box_per_category_ap)
    # Prefix every metric name so box metrics are distinguishable from other
    # evaluators' outputs.
    box_metrics = {'DetectionBoxes_' + key: value
                   for key, value in iter(box_metrics.items())}
    return box_metrics

  def add_eval_dict(self, eval_dict):
    """Observes an evaluation result dict for a single example.

    When executing eagerly, once all observations have been observed by this
    method you can use `.evaluate()` to get the final metrics.

    When using `tf.estimator.Estimator` for evaluation this function is used
    by `get_estimator_eval_metric_ops()` to construct the metric update op.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating an object
        detection model, returned from
        eval_util.result_dict_for_single_example().

    Returns:
      None when executing eagerly, or an update_op that can be used to update
      the eval metrics in `tf.estimator.EstimatorSpec`.
    """
    def update_op(
        image_id_batched,
        groundtruth_boxes_batched,
        groundtruth_classes_batched,
        groundtruth_is_crowd_batched,
        num_gt_boxes_per_image,
        detection_boxes_batched,
        detection_scores_batched,
        detection_classes_batched,
        num_det_boxes_per_image,
        is_annotated_batched):
      """Update operation for adding batch of images to Coco evaluator."""
      for (image_id, gt_box, gt_class, gt_is_crowd, num_gt_box, det_box,
           det_score, det_class, num_det_box, is_annotated) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_is_crowd_batched,
               num_gt_boxes_per_image,
               detection_boxes_batched, detection_scores_batched,
               detection_classes_batched, num_det_boxes_per_image,
               is_annotated_batched):
        if is_annotated:
          # Unpad the per-image tensors before handing them to the evaluator.
          self.add_single_ground_truth_image_info(
              image_id, {
                  'groundtruth_boxes': gt_box[:num_gt_box],
                  'groundtruth_classes': gt_class[:num_gt_box],
                  'groundtruth_is_crowd': gt_is_crowd[:num_gt_box]
              })
          self.add_single_detected_image_info(
              image_id,
              {'detection_boxes': det_box[:num_det_box],
               'detection_scores': det_score[:num_det_box],
               'detection_classes': det_class[:num_det_box]})

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    detection_boxes = eval_dict[detection_fields.detection_boxes]
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    num_gt_boxes_per_image = eval_dict.get(
        'num_groundtruth_boxes_per_image', None)
    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
    is_annotated = eval_dict.get('is_annotated', None)

    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_boxes = tf.expand_dims(detection_boxes, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)

      if is_annotated is None:
        is_annotated = tf.constant([True])
      else:
        is_annotated = tf.expand_dims(is_annotated, 0)
    else:
      # Batched input: if unpad info is missing, assume every row is full.
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_boxes)[1:2],
            multiples=tf.shape(detection_boxes)[0:1])
      if is_annotated is None:
        is_annotated = tf.ones_like(image_id, dtype=tf.bool)

    return tf.py_func(update_op, [image_id,
                                  groundtruth_boxes,
                                  groundtruth_classes,
                                  groundtruth_is_crowd,
                                  num_gt_boxes_per_image,
                                  detection_boxes,
                                  detection_scores,
                                  detection_classes,
                                  num_det_boxes_per_image,
                                  is_annotated], [])

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops.

    Note that once value_op is called, the detections and groundtruth added
    via update_op are cleared.

    This function can take in groundtruth and detections for a batch of
    images, or for a single image. For the latter case, the batch dimension
    for input tensors need not be present.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating object
        detection performance. For single-image evaluation, this dictionary
        may be produced from eval_util.result_dict_for_single_example(). If
        multi-image evaluation, `eval_dict` should contain the fields
        'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
        properly unpad the tensors from the batch.

    Returns:
      a dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
      update ops must be run together and similarly all value ops must be run
      together to guarantee correct behaviour.
    """
    update_op = self.add_eval_dict(eval_dict)
    metric_names = ['DetectionBoxes_Precision/mAP',
                    'DetectionBoxes_Precision/mAP@.50IOU',
                    'DetectionBoxes_Precision/mAP@.75IOU',
                    'DetectionBoxes_Precision/mAP (large)',
                    'DetectionBoxes_Precision/mAP (medium)',
                    'DetectionBoxes_Precision/mAP (small)',
                    'DetectionBoxes_Recall/AR@1',
                    'DetectionBoxes_Recall/AR@10',
                    'DetectionBoxes_Recall/AR@100',
                    'DetectionBoxes_Recall/AR@100 (large)',
                    'DetectionBoxes_Recall/AR@100 (medium)',
                    'DetectionBoxes_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
                            category_dict['name'])

    def first_value_func():
      # Computes all metrics once, caches them, and resets accumulated state.
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
def convert_masks_to_binary(masks):
  """Converts masks to 0 or 1 and uint8 type."""
  binarized = np.greater(masks, 0)
  return binarized.astype(np.uint8)
class CocoKeypointEvaluator(CocoDetectionEvaluator):
"""Class to evaluate COCO keypoint metrics."""
  def __init__(self,
               category_id,
               category_keypoints,
               class_text,
               oks_sigmas=None):
    """Constructor.

    Args:
      category_id: An integer id uniquely identifying this category.
      category_keypoints: A list specifying keypoint mappings, with items:
        'id': (required) an integer id identifying the keypoint.
        'name': (required) a string representing the keypoint name.
      class_text: A string representing the category name for which keypoint
        metrics are to be computed.
      oks_sigmas: A dict of keypoint name to standard deviation values for
        OKS metrics. If not provided, default value of 0.05 will be used.
    """
    self._category_id = category_id
    self._category_name = class_text
    # Sort keypoint ids so the sigma array ordering matches keypoint order.
    self._keypoint_ids = sorted(
        [keypoint['id'] for keypoint in category_keypoints])
    kpt_id_to_name = {kpt['id']: kpt['name'] for kpt in category_keypoints}
    if oks_sigmas:
      self._oks_sigmas = np.array([
          oks_sigmas[kpt_id_to_name[idx]] for idx in self._keypoint_ids
      ])
    else:
      # Default all per-keypoint sigmas to 0.05 (the comment in the original
      # said 0, which did not match the code).
      self._oks_sigmas = np.full((len(self._keypoint_ids)), 0.05)
      tf.logging.warning('No default keypoint OKS sigmas provided. Will use '
                         '0.05')

    tf.logging.info('Using the following keypoint OKS sigmas: {}'.format(
        self._oks_sigmas))

    self._metrics = None
    # The parent detection evaluator tracks only this single category.
    super(CocoKeypointEvaluator, self).__init__([{
        'id': self._category_id,
        'name': class_text
    }])
  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image with keypoints.

    If the image has already been added, a warning is logged, and groundtruth
    is ignored.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        InputDataFields.groundtruth_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` groundtruth boxes of the
          format [ymin, xmin, ymax, xmax] in absolute image coordinates.
        InputDataFields.groundtruth_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed groundtruth classes for the boxes.
        InputDataFields.groundtruth_is_crowd (optional): integer numpy array
          of shape [num_boxes] containing iscrowd flag for groundtruth boxes.
        InputDataFields.groundtruth_area (optional): float numpy array of
          shape [num_boxes] containing the area (in the original absolute
          coordinates) of the annotated object.
        InputDataFields.groundtruth_keypoints: float numpy array of
          keypoints with shape [num_boxes, num_keypoints, 2].
        InputDataFields.groundtruth_keypoint_visibilities (optional): integer
          numpy array of keypoint visibilities with shape [num_gt_boxes,
          num_keypoints]. Integer is treated as an enum with 0=not labels,
          1=labeled but not visible and 2=labeled and visible.
    """
    # Keep only the groundtruth for our category and its keypoints.
    groundtruth_classes = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_classes]
    groundtruth_boxes = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes]
    groundtruth_keypoints = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_keypoints]
    class_indices = [
        idx for idx, gt_class_id in enumerate(groundtruth_classes)
        if gt_class_id == self._category_id
    ]
    filtered_groundtruth_classes = np.take(
        groundtruth_classes, class_indices, axis=0)
    filtered_groundtruth_boxes = np.take(
        groundtruth_boxes, class_indices, axis=0)
    # Filter boxes by class (axis 0), then keypoints by id subset (axis 1).
    filtered_groundtruth_keypoints = np.take(
        groundtruth_keypoints, class_indices, axis=0)
    filtered_groundtruth_keypoints = np.take(
        filtered_groundtruth_keypoints, self._keypoint_ids, axis=1)

    filtered_groundtruth_dict = {}
    filtered_groundtruth_dict[
        standard_fields.InputDataFields
        .groundtruth_classes] = filtered_groundtruth_classes
    filtered_groundtruth_dict[standard_fields.InputDataFields
                              .groundtruth_boxes] = filtered_groundtruth_boxes
    filtered_groundtruth_dict[
        standard_fields.InputDataFields
        .groundtruth_keypoints] = filtered_groundtruth_keypoints

    # Optional fields are forwarded only when present, filtered the same way.
    if (standard_fields.InputDataFields.groundtruth_is_crowd in
        groundtruth_dict.keys()):
      groundtruth_is_crowd = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_is_crowd]
      filtered_groundtruth_is_crowd = np.take(groundtruth_is_crowd,
                                              class_indices, 0)
      filtered_groundtruth_dict[
          standard_fields.InputDataFields
          .groundtruth_is_crowd] = filtered_groundtruth_is_crowd
    if (standard_fields.InputDataFields.groundtruth_area in
        groundtruth_dict.keys()):
      groundtruth_area = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_area]
      filtered_groundtruth_area = np.take(groundtruth_area, class_indices, 0)
      filtered_groundtruth_dict[
          standard_fields.InputDataFields
          .groundtruth_area] = filtered_groundtruth_area
    if (standard_fields.InputDataFields.groundtruth_keypoint_visibilities in
        groundtruth_dict.keys()):
      groundtruth_keypoint_visibilities = groundtruth_dict[
          standard_fields.InputDataFields.groundtruth_keypoint_visibilities]
      filtered_groundtruth_keypoint_visibilities = np.take(
          groundtruth_keypoint_visibilities, class_indices, axis=0)
      filtered_groundtruth_keypoint_visibilities = np.take(
          filtered_groundtruth_keypoint_visibilities,
          self._keypoint_ids,
          axis=1)
      filtered_groundtruth_dict[
          standard_fields.InputDataFields.
          groundtruth_keypoint_visibilities] = filtered_groundtruth_keypoint_visibilities

    super(CocoKeypointEvaluator,
          self).add_single_ground_truth_image_info(image_id,
                                                   filtered_groundtruth_dict)
  def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image and the specific category for which keypoints are evaluated.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` detection boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.
        DetectionResultFields.detection_keypoints: float numpy array of
          keypoints with shape [num_boxes, num_keypoints, 2].

    Raises:
      ValueError: If groundtruth for the image_id is not available.
    """
    # Keep only the detections for our category and its keypoints.
    detection_classes = detections_dict[
        standard_fields.DetectionResultFields.detection_classes]
    detection_boxes = detections_dict[
        standard_fields.DetectionResultFields.detection_boxes]
    detection_scores = detections_dict[
        standard_fields.DetectionResultFields.detection_scores]
    detection_keypoints = detections_dict[
        standard_fields.DetectionResultFields.detection_keypoints]
    class_indices = [
        idx for idx, class_id in enumerate(detection_classes)
        if class_id == self._category_id
    ]
    filtered_detection_classes = np.take(
        detection_classes, class_indices, axis=0)
    filtered_detection_boxes = np.take(detection_boxes, class_indices, axis=0)
    filtered_detection_scores = np.take(detection_scores, class_indices, axis=0)
    # Filter detections by class (axis 0), then keypoints by id (axis 1).
    filtered_detection_keypoints = np.take(
        detection_keypoints, class_indices, axis=0)
    filtered_detection_keypoints = np.take(
        filtered_detection_keypoints, self._keypoint_ids, axis=1)

    filtered_detections_dict = {}
    filtered_detections_dict[standard_fields.DetectionResultFields
                             .detection_classes] = filtered_detection_classes
    filtered_detections_dict[standard_fields.DetectionResultFields
                             .detection_boxes] = filtered_detection_boxes
    filtered_detections_dict[standard_fields.DetectionResultFields
                             .detection_scores] = filtered_detection_scores
    filtered_detections_dict[standard_fields.DetectionResultFields.
                             detection_keypoints] = filtered_detection_keypoints

    super(CocoKeypointEvaluator,
          self).add_single_detected_image_info(image_id,
                                               filtered_detections_dict)
  def evaluate(self):
    """Evaluates the keypoints and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'Keypoints_Precision/mAP': mean average precision over classes
        averaged over OKS thresholds ranging from .5 to .95 with .05
        increments.
      'Keypoints_Precision/mAP@.50IOU': mean average precision at 50% OKS
      'Keypoints_Precision/mAP@.75IOU': mean average precision at 75% OKS
      'Keypoints_Precision/mAP (medium)': mean average precision for medium
        sized objects (32^2 pixels < area < 96^2 pixels).
      'Keypoints_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'Keypoints_Recall/AR@1': average recall with 1 detection.
      'Keypoints_Recall/AR@10': average recall with 10 detections.
      'Keypoints_Recall/AR@100': average recall with 100 detections.
      'Keypoints_Recall/AR@100 (medium)': average recall for medium objects
        with 100.
      'Keypoints_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.
    """
    tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        'images': [{'id': image_id} for image_id in self._image_ids],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(
        groundtruth_dict, detection_type='bbox')
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_boxes_list)
    # Evaluate with OKS ('keypoints' iou_type) instead of box IOU.
    keypoint_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth,
        coco_wrapped_detections,
        agnostic_mode=False,
        iou_type='keypoints',
        oks_sigmas=self._oks_sigmas)
    keypoint_metrics, _ = keypoint_evaluator.ComputeMetrics(
        include_metrics_per_category=False, all_metrics_per_category=False)
    keypoint_metrics = {
        'Keypoints_' + key: value
        for key, value in iter(keypoint_metrics.items())
    }
    return keypoint_metrics
  def add_eval_dict(self, eval_dict):
    """Observes an evaluation result dict for a single example.

    When executing eagerly, once all observations have been observed by this
    method you can use `.evaluate()` to get the final metrics.

    When using `tf.estimator.Estimator` for evaluation this function is used by
    `get_estimator_eval_metric_ops()` to construct the metric update op.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating an object
        detection model, returned from
        eval_util.result_dict_for_single_example().

    Returns:
      None when executing eagerly, or an update_op that can be used to update
      the eval metrics in `tf.estimator.EstimatorSpec`.
    """

    def update_op(
        image_id_batched,
        groundtruth_boxes_batched,
        groundtruth_classes_batched,
        groundtruth_is_crowd_batched,
        groundtruth_area_batched,
        groundtruth_keypoints_batched,
        groundtruth_keypoint_visibilities_batched,
        num_gt_boxes_per_image,
        detection_boxes_batched,
        detection_scores_batched,
        detection_classes_batched,
        detection_keypoints_batched,
        num_det_boxes_per_image,
        is_annotated_batched):
      """Update operation for adding batch of images to Coco evaluator."""
      # Per-image tensors arrive padded to the batch maximum; the
      # num_gt_box / num_det_box counts slice out the valid rows before each
      # image is handed to the numpy-side accumulators.
      for (image_id, gt_box, gt_class, gt_is_crowd, gt_area, gt_keyp,
           gt_keyp_vis, num_gt_box, det_box, det_score, det_class, det_keyp,
           num_det_box, is_annotated) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_is_crowd_batched,
               groundtruth_area_batched, groundtruth_keypoints_batched,
               groundtruth_keypoint_visibilities_batched,
               num_gt_boxes_per_image, detection_boxes_batched,
               detection_scores_batched, detection_classes_batched,
               detection_keypoints_batched, num_det_boxes_per_image,
               is_annotated_batched):
        # Images flagged as not annotated contribute neither groundtruth nor
        # detections to the evaluation.
        if is_annotated:
          self.add_single_ground_truth_image_info(
              image_id, {
                  'groundtruth_boxes': gt_box[:num_gt_box],
                  'groundtruth_classes': gt_class[:num_gt_box],
                  'groundtruth_is_crowd': gt_is_crowd[:num_gt_box],
                  'groundtruth_area': gt_area[:num_gt_box],
                  'groundtruth_keypoints': gt_keyp[:num_gt_box],
                  'groundtruth_keypoint_visibilities': gt_keyp_vis[:num_gt_box]
              })
          self.add_single_detected_image_info(
              image_id, {
                  'detection_boxes': det_box[:num_det_box],
                  'detection_scores': det_score[:num_det_box],
                  'detection_classes': det_class[:num_det_box],
                  'detection_keypoints': det_keyp[:num_det_box],
              })

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
                                         None)
    groundtruth_area = eval_dict.get(input_data_fields.groundtruth_area, None)
    groundtruth_keypoints = eval_dict[input_data_fields.groundtruth_keypoints]
    groundtruth_keypoint_visibilities = eval_dict.get(
        input_data_fields.groundtruth_keypoint_visibilities, None)
    detection_boxes = eval_dict[detection_fields.detection_boxes]
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_keypoints = eval_dict[detection_fields.detection_keypoints]
    num_gt_boxes_per_image = eval_dict.get(
        'num_groundtruth_boxes_per_image', None)
    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
    is_annotated = eval_dict.get('is_annotated', None)
    # Missing optional fields default to "not crowd" / zero area.
    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
    if groundtruth_area is None:
      groundtruth_area = tf.zeros_like(groundtruth_classes, dtype=tf.float32)

    # A scalar image id means the eval_dict holds a single, unbatched example.
    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      groundtruth_area = tf.expand_dims(groundtruth_area, 0)
      groundtruth_keypoints = tf.expand_dims(groundtruth_keypoints, 0)
      detection_boxes = tf.expand_dims(detection_boxes, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_keypoints = tf.expand_dims(detection_keypoints, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)

      if is_annotated is None:
        is_annotated = tf.constant([True])
      else:
        is_annotated = tf.expand_dims(is_annotated, 0)

      if groundtruth_keypoint_visibilities is None:
        # NOTE(review): fill value 2 appears to follow the COCO keypoint
        # convention "labeled and visible" -- confirm against coco_tools.
        groundtruth_keypoint_visibilities = tf.fill([
            tf.shape(groundtruth_boxes)[1],
            tf.shape(groundtruth_keypoints)[2]
        ], tf.constant(2, dtype=tf.int32))
      groundtruth_keypoint_visibilities = tf.expand_dims(
          groundtruth_keypoint_visibilities, 0)
    else:
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_boxes)[1:2],
            multiples=tf.shape(detection_boxes)[0:1])
      if is_annotated is None:
        is_annotated = tf.ones_like(image_id, dtype=tf.bool)
      if groundtruth_keypoint_visibilities is None:
        groundtruth_keypoint_visibilities = tf.fill([
            tf.shape(groundtruth_keypoints)[1],
            tf.shape(groundtruth_keypoints)[2]
        ], tf.constant(2, dtype=tf.int32))
        groundtruth_keypoint_visibilities = tf.tile(
            tf.expand_dims(groundtruth_keypoint_visibilities, 0),
            multiples=[tf.shape(groundtruth_keypoints)[0], 1, 1])

    # tf.py_func evaluates the tensors and hands numpy values to update_op.
    return tf.py_func(update_op, [
        image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd,
        groundtruth_area, groundtruth_keypoints,
        groundtruth_keypoint_visibilities, num_gt_boxes_per_image,
        detection_boxes, detection_scores, detection_classes,
        detection_keypoints, num_det_boxes_per_image, is_annotated
    ], [])
def get_estimator_eval_metric_ops(self, eval_dict):
"""Returns a dictionary of eval metric ops.
Note that once value_op is called, the detections and groundtruth added via
update_op are cleared.
This function can take in groundtruth and detections for a batch of images,
or for a single image. For the latter case, the batch dimension for input
tensors need not be present.
Args:
eval_dict: A dictionary that holds tensors for evaluating object detection
performance. For single-image evaluation, this dictionary may be
produced from eval_util.result_dict_for_single_example(). If multi-image
evaluation, `eval_dict` should contain the fields
'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
properly unpad the tensors from the batch.
Returns:
a dictionary of metric names to tuple of value_op and update_op that can
be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
update ops must be run together and similarly all value ops must be run
together to guarantee correct behaviour.
"""
update_op = self.add_eval_dict(eval_dict)
category = self._category_name
metric_names = [
'Keypoints_Precision/mAP ByCategory/{}'.format(category),
'Keypoints_Precision/mAP@.50IOU ByCategory/{}'.format(category),
'Keypoints_Precision/mAP@.75IOU ByCategory/{}'.format(category),
'Keypoints_Precision/mAP (large) ByCategory/{}'.format(category),
'Keypoints_Precision/mAP (medium) ByCategory/{}'.format(category),
'Keypoints_Recall/AR@1 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@10 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 (large) ByCategory/{}'.format(category),
'Keypoints_Recall/AR@100 (medium) ByCategory/{}'.format(category)
]
def first_value_func():
self._metrics = self.evaluate()
self.clear()
return np.float32(self._metrics[metric_names[0]])
def value_func_factory(metric_name):
def value_func():
return np.float32(self._metrics[metric_name])
return value_func
# Ensure that the metrics are only evaluated once.
first_value_op = tf.py_func(first_value_func, [], tf.float32)
eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
with tf.control_dependencies([first_value_op]):
for metric_name in metric_names[1:]:
eval_metric_ops[metric_name] = (tf.py_func(
value_func_factory(metric_name), [], np.float32), update_op)
return eval_metric_ops
class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Class to evaluate COCO instance segmentation (mask) metrics."""

  def __init__(self, categories, include_metrics_per_category=False):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      include_metrics_per_category: If True, include metrics for each category.
    """
    super(CocoMaskEvaluator, self).__init__(categories)
    # Maps image id -> shape of its groundtruth mask tensor; also serves as
    # the record of which images have groundtruth added.
    self._image_id_to_mask_shape_map = {}
    self._image_ids_with_detections = set()
    self._groundtruth_list = []
    self._detection_masks_list = []
    self._category_id_set = {cat['id'] for cat in self._categories}
    # COCO annotation ids are 1-indexed and unique across the whole dataset.
    self._annotation_id = 1
    self._include_metrics_per_category = include_metrics_per_category

  def clear(self):
    """Clears the state to prepare for a fresh evaluation."""
    self._image_id_to_mask_shape_map.clear()
    self._image_ids_with_detections.clear()
    self._groundtruth_list = []
    self._detection_masks_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    If the image has already been added, a warning is logged, and groundtruth is
    ignored.

    Args:
      image_id: A unique string/integer identifier for the image.
      groundtruth_dict: A dictionary containing -
        InputDataFields.groundtruth_boxes: float32 numpy array of shape
          [num_boxes, 4] containing `num_boxes` groundtruth boxes of the format
          [ymin, xmin, ymax, xmax] in absolute image coordinates.
        InputDataFields.groundtruth_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed groundtruth classes for the boxes.
        InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
          [num_boxes, image_height, image_width] containing groundtruth masks
          corresponding to the boxes. The elements of the array must be in
          {0, 1}.
    """
    if image_id in self._image_id_to_mask_shape_map:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_instance_masks = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks]
    groundtruth_instance_masks = convert_masks_to_binary(
        groundtruth_instance_masks)
    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_masks=groundtruth_instance_masks))
    # Advance the running annotation id by the number of boxes just exported
    # so ids stay unique across images.
    self._annotation_id += groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes].shape[0]
    self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks].shape

  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    If a detection has already been added for this image id, a warning is
    logged, and the detection is skipped.

    Args:
      image_id: A unique string/integer identifier for the image.
      detections_dict: A dictionary containing -
        DetectionResultFields.detection_scores: float32 numpy array of shape
          [num_boxes] containing detection scores for the boxes.
        DetectionResultFields.detection_classes: integer numpy array of shape
          [num_boxes] containing 1-indexed detection classes for the boxes.
        DetectionResultFields.detection_masks: optional uint8 numpy array of
          shape [num_boxes, image_height, image_width] containing instance
          masks corresponding to the boxes. The elements of the array must be
          in {0, 1}.

    Raises:
      ValueError: If groundtruth for the image_id is not available or if
        spatial shapes of groundtruth_instance_masks and detection_masks are
        incompatible.
    """
    if image_id not in self._image_id_to_mask_shape_map:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if image_id in self._image_ids_with_detections:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
    # Only the spatial dimensions must agree; the number of masks may differ
    # between groundtruth and detections.
    if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
      raise ValueError('Spatial shape of groundtruth masks and detection masks '
                       'are incompatible: {} vs {}'.format(
                           groundtruth_masks_shape,
                           detection_masks.shape))
    detection_masks = convert_masks_to_binary(detection_masks)
    self._detection_masks_list.extend(
        coco_tools.ExportSingleImageDetectionMasksToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_masks=detection_masks,
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes]))
    self._image_ids_with_detections.update([image_id])

  def dump_detections_to_json_file(self, json_output_path):
    """Saves the detections into json_output_path in the format used by MS COCO.

    Args:
      json_output_path: String containing the output file's path. It can be also
        None. In that case nothing will be written to the output file.
    """
    # A truthiness check covers both the None and empty-string cases.
    if json_output_path:
      tf.logging.info('Dumping detections to output json file.')
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        json_utils.Dump(
            obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the detection masks and returns a dictionary of coco metrics.

    Returns:
      A dictionary holding -

      1. summary_metrics:
      'DetectionMasks_Precision/mAP': mean average precision over classes
        averaged over IOU thresholds ranging from .5 to .95 with .05 increments.
      'DetectionMasks_Precision/mAP@.50IOU': mean average precision at 50% IOU.
      'DetectionMasks_Precision/mAP@.75IOU': mean average precision at 75% IOU.
      'DetectionMasks_Precision/mAP (small)': mean average precision for small
        objects (area < 32^2 pixels).
      'DetectionMasks_Precision/mAP (medium)': mean average precision for medium
        sized objects (32^2 pixels < area < 96^2 pixels).
      'DetectionMasks_Precision/mAP (large)': mean average precision for large
        objects (96^2 pixels < area < 10000^2 pixels).
      'DetectionMasks_Recall/AR@1': average recall with 1 detection.
      'DetectionMasks_Recall/AR@10': average recall with 10 detections.
      'DetectionMasks_Recall/AR@100': average recall with 100 detections.
      'DetectionMasks_Recall/AR@100 (small)': average recall for small objects
        with 100 detections.
      'DetectionMasks_Recall/AR@100 (medium)': average recall for medium objects
        with 100 detections.
      'DetectionMasks_Recall/AR@100 (large)': average recall for large objects
        with 100 detections.

      2. per_category_ap: if include_metrics_per_category is True, category
      specific results with keys of the form:
      'Precision mAP ByCategory/category' (without the supercategory part if
      no supercategories exist). For backward compatibility
      'PerformanceByCategory' is included in the output regardless of
      all_metrics_per_category.
    """
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        # Mask shapes are [num_masks, height, width], hence indices 1 and 2.
        'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
                   for image_id, shape in
                   self._image_id_to_mask_shape_map.items()],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(
        groundtruth_dict, detection_type='segmentation')
    coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_masks_list)
    mask_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detection_masks,
        agnostic_mode=False, iou_type='segm')
    mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category)
    mask_metrics.update(mask_per_category_ap)
    mask_metrics = {'DetectionMasks_' + key: value
                    for key, value in mask_metrics.items()}
    return mask_metrics

  def add_eval_dict(self, eval_dict):
    """Observes an evaluation result dict for a single example.

    When executing eagerly, once all observations have been observed by this
    method you can use `.evaluate()` to get the final metrics.

    When using `tf.estimator.Estimator` for evaluation this function is used by
    `get_estimator_eval_metric_ops()` to construct the metric update op.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating an object
        detection model, returned from
        eval_util.result_dict_for_single_example().

    Returns:
      None when executing eagerly, or an update_op that can be used to update
      the eval metrics in `tf.estimator.EstimatorSpec`.
    """

    def update_op(image_id_batched, groundtruth_boxes_batched,
                  groundtruth_classes_batched,
                  groundtruth_instance_masks_batched,
                  groundtruth_is_crowd_batched, num_gt_boxes_per_image,
                  detection_scores_batched, detection_classes_batched,
                  detection_masks_batched, num_det_boxes_per_image):
      """Update op for metrics."""
      # Slice away per-batch padding before handing each image to the
      # numpy-side accumulators.
      for (image_id, groundtruth_boxes, groundtruth_classes,
           groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box,
           detection_scores, detection_classes,
           detection_masks, num_det_box) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_instance_masks_batched,
               groundtruth_is_crowd_batched, num_gt_boxes_per_image,
               detection_scores_batched, detection_classes_batched,
               detection_masks_batched, num_det_boxes_per_image):
        self.add_single_ground_truth_image_info(
            image_id, {
                'groundtruth_boxes': groundtruth_boxes[:num_gt_box],
                'groundtruth_classes': groundtruth_classes[:num_gt_box],
                'groundtruth_instance_masks':
                    groundtruth_instance_masks[:num_gt_box],
                'groundtruth_is_crowd': groundtruth_is_crowd[:num_gt_box]
            })
        self.add_single_detected_image_info(
            image_id, {
                'detection_scores': detection_scores[:num_det_box],
                'detection_classes': detection_classes[:num_det_box],
                'detection_masks': detection_masks[:num_det_box]
            })

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_instance_masks = eval_dict[
        input_data_fields.groundtruth_instance_masks]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    num_gt_boxes_per_image = eval_dict.get(
        input_data_fields.num_groundtruth_boxes, None)
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_masks = eval_dict[detection_fields.detection_masks]
    num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
                                            None)
    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)

    # A scalar image id means the eval_dict holds a single, unbatched example.
    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_masks = tf.expand_dims(detection_masks, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_scores)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
    else:
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_scores)[1:2],
            multiples=tf.shape(detection_scores)[0:1])

    return tf.py_func(update_op, [
        image_id, groundtruth_boxes, groundtruth_classes,
        groundtruth_instance_masks, groundtruth_is_crowd,
        num_gt_boxes_per_image, detection_scores, detection_classes,
        detection_masks, num_det_boxes_per_image
    ], [])

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops.

    Note that once value_op is called, the detections and groundtruth added via
    update_op are cleared.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating object detection
        performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). If multi-image
        evaluation, `eval_dict` should contain the fields
        'num_groundtruth_boxes_per_image' and 'num_det_boxes_per_image' to
        properly unpad the tensors from the batch.

    Returns:
      a dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
      update ops must be run together and similarly all value ops must be run
      together to guarantee correct behaviour.
    """
    update_op = self.add_eval_dict(eval_dict)
    metric_names = ['DetectionMasks_Precision/mAP',
                    'DetectionMasks_Precision/mAP@.50IOU',
                    'DetectionMasks_Precision/mAP@.75IOU',
                    'DetectionMasks_Precision/mAP (large)',
                    'DetectionMasks_Precision/mAP (medium)',
                    'DetectionMasks_Precision/mAP (small)',
                    'DetectionMasks_Recall/AR@1',
                    'DetectionMasks_Recall/AR@10',
                    'DetectionMasks_Recall/AR@100',
                    'DetectionMasks_Recall/AR@100 (large)',
                    'DetectionMasks_Recall/AR@100 (medium)',
                    'DetectionMasks_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +
                            category_dict['name'])

    def first_value_func():
      # Runs the (expensive) COCO evaluation exactly once, caches the result,
      # and resets accumulated state.
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):

      def value_func():
        return np.float32(self._metrics[metric_name])

      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
class CocoPanopticSegmentationEvaluator(
object_detection_evaluation.DetectionEvaluator):
"""Class to evaluate PQ (panoptic quality) metric on COCO dataset.
More details about this metric: https://arxiv.org/pdf/1801.00868.pdf.
"""
  def __init__(self,
               categories,
               include_metrics_per_category=False,
               iou_threshold=0.5,
               ioa_threshold=0.5):
    """Constructor.

    Args:
      categories: A list of dicts, each of which has the following keys -
        'id': (required) an integer id uniquely identifying this category.
        'name': (required) string representing category name e.g., 'cat', 'dog'.
      include_metrics_per_category: If True, include metrics for each category.
      iou_threshold: intersection-over-union threshold for mask matching (with
        normal groundtruths).
      ioa_threshold: intersection-over-area threshold for mask matching with
        "is_crowd" groundtruths.
    """
    super(CocoPanopticSegmentationEvaluator, self).__init__(categories)
    # Per-image groundtruth state, keyed by image id.
    self._groundtruth_masks = {}
    self._groundtruth_class_labels = {}
    self._groundtruth_is_crowd = {}
    # Per-image prediction state, keyed by image id.
    self._predicted_masks = {}
    self._predicted_class_labels = {}
    self._include_metrics_per_category = include_metrics_per_category
    self._iou_threshold = iou_threshold
    self._ioa_threshold = ioa_threshold
def clear(self):
"""Clears the state to prepare for a fresh evaluation."""
self._groundtruth_masks.clear()
self._groundtruth_class_labels.clear()
self._groundtruth_is_crowd.clear()
self._predicted_masks.clear()
self._predicted_class_labels.clear()
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
"""Adds groundtruth for a single image to be used for evaluation.
If the image has already been added, a warning is logged, and groundtruth is
ignored.
Args:
image_id: A unique string/integer identifier for the image.
groundtruth_dict: A dictionary containing -
InputDataFields.groundtruth_classes: integer numpy array of shape
[num_masks] containing 1-indexed groundtruth classes for the mask.
InputDataFields.groundtruth_instance_masks: uint8 numpy array of shape
[num_masks, image_height, image_width] containing groundtruth masks.
The elements of the array must be in {0, 1}.
InputDataFields.groundtruth_is_crowd (optional): integer numpy array of
shape [num_boxes] containing iscrowd flag for groundtruth boxes.
"""
if image_id in self._groundtruth_masks:
tf.logging.warning(
'Ignoring groundtruth with image %s, since it has already been '
'added to the ground truth database.', image_id)
return
self._groundtruth_masks[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_instance_masks]
self._groundtruth_class_labels[image_id] = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes]
groundtruth_is_crowd = groundtruth_dict.get(
standard_fields.InputDataFields.groundtruth_is_crowd)
# Drop groundtruth_is_crowd if empty tensor.
if groundtruth_is_crowd is not None and not groundtruth_is_crowd.size > 0:
groundtruth_is_crowd = None
if groundtruth_is_crowd is not None:
self._groundtruth_is_crowd[image_id] = groundtruth_is_crowd
def add_single_detected_image_info(self, image_id, detections_dict):
"""Adds detections for a single image to be used for evaluation.
If a detection has already been added for this image id, a warning is
logged, and the detection is skipped.
Args:
image_id: A unique string/integer identifier for the image.
detections_dict: A dictionary containing -
DetectionResultFields.detection_classes: integer numpy array of shape
[num_masks] containing 1-indexed detection classes for the masks.
DetectionResultFields.detection_masks: optional uint8 numpy array of
shape [num_masks, image_height, image_width] containing instance
masks. The elements of the array must be in {0, 1}.
Raises:
ValueError: If results and groundtruth shape don't match.
"""
if image_id not in self._groundtruth_masks:
raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
detection_masks = detections_dict[
standard_fields.DetectionResultFields.detection_masks]
self._predicted_masks[image_id] = detection_masks
self._predicted_class_labels[image_id] = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
groundtruth_mask_shape = self._groundtruth_masks[image_id].shape
if groundtruth_mask_shape[1:] != detection_masks.shape[1:]:
raise ValueError("The shape of results doesn't match groundtruth.")
def evaluate(self):
"""Evaluates the detection masks and returns a dictionary of coco metrics.
Returns:
A dictionary holding -
1. summary_metric:
'PanopticQuality@%.2fIOU': mean panoptic quality averaged over classes at
the required IOU.
'SegmentationQuality@%.2fIOU': mean segmentation quality averaged over
classes at the required IOU.
'RecognitionQuality@%.2fIOU': mean recognition quality averaged over
classes at the required IOU.
'NumValidClasses': number of valid classes. A valid class should have at
least one normal (is_crowd=0) groundtruth mask or one predicted mask.
'NumTotalClasses': number of total classes.
2. per_category_pq: if include_metrics_per_category is True, category
specific results with keys of the form:
'PanopticQuality@%.2fIOU_ByCategory/category'.
"""
# Evaluate and accumulate the iou/tp/fp/fn.
sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn = self._evaluate_all_masks()
# Compute PQ metric for each category and average over all classes.
mask_metrics = self._compute_panoptic_metrics(sum_tp_iou, sum_num_tp,
sum_num_fp, sum_num_fn)
return mask_metrics
  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops.

    Note that once value_op is called, the detections and groundtruth added via
    update_op are cleared.

    Args:
      eval_dict: A dictionary that holds tensors for evaluating object detection
        performance. For single-image evaluation, this dictionary may be
        produced from eval_util.result_dict_for_single_example(). If multi-image
        evaluation, `eval_dict` should contain the fields
        'num_gt_masks_per_image' and 'num_det_masks_per_image' to properly unpad
        the tensors from the batch.

    Returns:
      a dictionary of metric names to tuple of value_op and update_op that can
      be used as eval metric ops in tf.estimator.EstimatorSpec. Note that all
      update ops must be run together and similarly all value ops must be run
      together to guarantee correct behaviour.
    """

    def update_op(image_id_batched, groundtruth_classes_batched,
                  groundtruth_instance_masks_batched,
                  groundtruth_is_crowd_batched, num_gt_masks_per_image,
                  detection_classes_batched, detection_masks_batched,
                  num_det_masks_per_image):
      """Update op for metrics."""
      # Per-image tensors are padded per batch; the num_gt_mask/num_det_mask
      # counts slice out the valid rows before accumulation.
      for (image_id, groundtruth_classes, groundtruth_instance_masks,
           groundtruth_is_crowd, num_gt_mask, detection_classes,
           detection_masks, num_det_mask) in zip(
               image_id_batched, groundtruth_classes_batched,
               groundtruth_instance_masks_batched, groundtruth_is_crowd_batched,
               num_gt_masks_per_image, detection_classes_batched,
               detection_masks_batched, num_det_masks_per_image):
        self.add_single_ground_truth_image_info(
            image_id, {
                'groundtruth_classes':
                    groundtruth_classes[:num_gt_mask],
                'groundtruth_instance_masks':
                    groundtruth_instance_masks[:num_gt_mask],
                'groundtruth_is_crowd':
                    groundtruth_is_crowd[:num_gt_mask]
            })
        self.add_single_detected_image_info(
            image_id, {
                'detection_classes': detection_classes[:num_det_mask],
                'detection_masks': detection_masks[:num_det_mask]
            })

    # Unpack items from the evaluation dictionary.
    (image_id, groundtruth_classes, groundtruth_instance_masks,
     groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
     detection_masks, num_det_masks_per_image
    ) = self._unpack_evaluation_dictionary_items(eval_dict)

    # The py_func op rebinds the name `update_op` from the Python function to
    # the resulting TF op.
    update_op = tf.py_func(update_op, [
        image_id, groundtruth_classes, groundtruth_instance_masks,
        groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
        detection_masks, num_det_masks_per_image
    ], [])

    metric_names = [
        'PanopticQuality@%.2fIOU' % self._iou_threshold,
        'SegmentationQuality@%.2fIOU' % self._iou_threshold,
        'RecognitionQuality@%.2fIOU' % self._iou_threshold
    ]
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('PanopticQuality@%.2fIOU_ByCategory/%s' %
                            (self._iou_threshold, category_dict['name']))

    def first_value_func():
      # Runs the evaluation once, caches the result, and resets state; the
      # remaining value funcs read the cached self._metrics.
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):

      def value_func():
        return np.float32(self._metrics[metric_name])

      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
  def _evaluate_all_masks(self):
    """Evaluate all masks and compute sum iou/TP/FP/FN.

    For every image: predictions are first matched 1-to-1 against normal
    (non-crowd) groundtruth using IoU at `self._iou_threshold`; matched
    predictions count as TP (their IoU is accumulated) and unmatched normal
    groundtruths as FN. The still-unmatched predictions are then matched
    (with replacement) against crowd groundtruth using IoA at
    `self._ioa_threshold`; predictions unmatched by both passes count as FP.

    Returns:
      A 4-tuple (sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn) of dicts,
      each keyed by category id.
    """
    # All four accumulators share the same category-id key set.
    sum_num_tp = {category['id']: 0 for category in self._categories}
    sum_num_fp = sum_num_tp.copy()
    sum_num_fn = sum_num_tp.copy()
    sum_tp_iou = sum_num_tp.copy()
    for image_id in self._groundtruth_class_labels:
      # Separate normal and is_crowd groundtruth
      crowd_gt_indices = self._groundtruth_is_crowd.get(image_id)
      (normal_gt_masks, normal_gt_classes, crowd_gt_masks,
       crowd_gt_classes) = self._separate_normal_and_crowd_labels(
           crowd_gt_indices, self._groundtruth_masks[image_id],
           self._groundtruth_class_labels[image_id])
      # Mask matching to normal GT.
      predicted_masks = self._predicted_masks[image_id]
      predicted_class_labels = self._predicted_class_labels[image_id]
      (overlaps, pred_matched,
       gt_matched) = self._match_predictions_to_groundtruths(
           predicted_masks,
           predicted_class_labels,
           normal_gt_masks,
           normal_gt_classes,
           self._iou_threshold,
           is_crowd=False,
           with_replacement=False)
      # Accumulate true positives.
      for (class_id, is_matched, overlap) in zip(predicted_class_labels,
                                                 pred_matched, overlaps):
        if is_matched:
          sum_num_tp[class_id] += 1
          sum_tp_iou[class_id] += overlap
      # Accumulate false negatives.
      for (class_id, is_matched) in zip(normal_gt_classes, gt_matched):
        if not is_matched:
          sum_num_fn[class_id] += 1
      # Match remaining predictions to crowd gt.
      remained_pred_indices = np.logical_not(pred_matched)
      remained_pred_masks = predicted_masks[remained_pred_indices, :, :]
      remained_pred_classes = predicted_class_labels[remained_pred_indices]
      # Crowd matching uses IoA and allows many predictions per groundtruth.
      _, pred_matched, _ = self._match_predictions_to_groundtruths(
          remained_pred_masks,
          remained_pred_classes,
          crowd_gt_masks,
          crowd_gt_classes,
          self._ioa_threshold,
          is_crowd=True,
          with_replacement=True)
      # Accumulate false positives
      for (class_id, is_matched) in zip(remained_pred_classes, pred_matched):
        if not is_matched:
          sum_num_fp[class_id] += 1
    return sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn
def _compute_panoptic_metrics(self, sum_tp_iou, sum_num_tp, sum_num_fp,
sum_num_fn):
"""Compute PQ metric for each category and average over all classes.
Args:
sum_tp_iou: dict, summed true positive intersection-over-union (IoU) for
each class, keyed by class_id.
sum_num_tp: the total number of true positives for each class, keyed by
class_id.
sum_num_fp: the total number of false positives for each class, keyed by
class_id.
sum_num_fn: the total number of false negatives for each class, keyed by
class_id.
Returns:
mask_metrics: a dictionary containing averaged metrics over all classes,
and per-category metrics if required.
"""
mask_metrics = {}
sum_pq = 0
sum_sq = 0
sum_rq = 0
num_valid_classes = 0
for category in self._categories:
class_id = category['id']
(panoptic_quality, segmentation_quality,
recognition_quality) = self._compute_panoptic_metrics_single_class(
sum_tp_iou[class_id], sum_num_tp[class_id], sum_num_fp[class_id],
sum_num_fn[class_id])
if panoptic_quality is not None:
sum_pq += panoptic_quality
sum_sq += segmentation_quality
sum_rq += recognition_quality
num_valid_classes += 1
if self._include_metrics_per_category:
mask_metrics['PanopticQuality@%.2fIOU_ByCategory/%s' %
(self._iou_threshold,
category['name'])] = panoptic_quality
mask_metrics['PanopticQuality@%.2fIOU' %
self._iou_threshold] = sum_pq / num_valid_classes
mask_metrics['SegmentationQuality@%.2fIOU' %
self._iou_threshold] = sum_sq / num_valid_classes
mask_metrics['RecognitionQuality@%.2fIOU' %
self._iou_threshold] = sum_rq / num_valid_classes
mask_metrics['NumValidClasses'] = num_valid_classes
mask_metrics['NumTotalClasses'] = len(self._categories)
return mask_metrics
def _compute_panoptic_metrics_single_class(self, sum_tp_iou, num_tp, num_fp,
num_fn):
"""Compute panoptic metrics: panoptic/segmentation/recognition quality.
More computation details in https://arxiv.org/pdf/1801.00868.pdf.
Args:
sum_tp_iou: summed true positive intersection-over-union (IoU) for a
specific class.
num_tp: the total number of true positives for a specific class.
num_fp: the total number of false positives for a specific class.
num_fn: the total number of false negatives for a specific class.
Returns:
panoptic_quality: sum_tp_iou / (num_tp + 0.5*num_fp + 0.5*num_fn).
segmentation_quality: sum_tp_iou / num_tp.
recognition_quality: num_tp / (num_tp + 0.5*num_fp + 0.5*num_fn).
"""
denominator = num_tp + 0.5 * num_fp + 0.5 * num_fn
# Calculate metric only if there is at least one GT or one prediction.
if denominator > 0:
recognition_quality = num_tp / denominator
if num_tp > 0:
segmentation_quality = sum_tp_iou / num_tp
else:
# If there is no TP for this category.
segmentation_quality = 0
panoptic_quality = segmentation_quality * recognition_quality
return panoptic_quality, segmentation_quality, recognition_quality
else:
return None, None, None
def _separate_normal_and_crowd_labels(self, crowd_gt_indices,
groundtruth_masks, groundtruth_classes):
"""Separate normal and crowd groundtruth class_labels and masks.
Args:
crowd_gt_indices: None or array of shape [num_groundtruths]. If None, all
groundtruths are treated as normal ones.
groundtruth_masks: array of shape [num_groundtruths, height, width].
groundtruth_classes: array of shape [num_groundtruths].
Returns:
normal_gt_masks: array of shape [num_normal_groundtruths, height, width].
normal_gt_classes: array of shape [num_normal_groundtruths].
crowd_gt_masks: array of shape [num_crowd_groundtruths, height, width].
crowd_gt_classes: array of shape [num_crowd_groundtruths].
Raises:
ValueError: if the shape of groundtruth classes doesn't match groundtruth
masks or if the shape of crowd_gt_indices.
"""
if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
raise ValueError(
"The number of masks doesn't match the number of labels.")
if crowd_gt_indices is None:
# All gts are treated as normal
crowd_gt_indices = np.zeros(groundtruth_masks.shape, dtype=np.bool)
else:
if groundtruth_masks.shape[0] != crowd_gt_indices.shape[0]:
raise ValueError(
"The number of masks doesn't match the number of is_crowd labels.")
crowd_gt_indices = crowd_gt_indices.astype(np.bool)
normal_gt_indices = np.logical_not(crowd_gt_indices)
if normal_gt_indices.size:
normal_gt_masks = groundtruth_masks[normal_gt_indices, :, :]
normal_gt_classes = groundtruth_classes[normal_gt_indices]
crowd_gt_masks = groundtruth_masks[crowd_gt_indices, :, :]
crowd_gt_classes = groundtruth_classes[crowd_gt_indices]
else:
# No groundtruths available, groundtruth_masks.shape = (0, h, w)
normal_gt_masks = groundtruth_masks
normal_gt_classes = groundtruth_classes
crowd_gt_masks = groundtruth_masks
crowd_gt_classes = groundtruth_classes
return normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes
def _match_predictions_to_groundtruths(self,
predicted_masks,
predicted_classes,
groundtruth_masks,
groundtruth_classes,
matching_threshold,
is_crowd=False,
with_replacement=False):
"""Match the predicted masks to groundtruths.
Args:
predicted_masks: array of shape [num_predictions, height, width].
predicted_classes: array of shape [num_predictions].
groundtruth_masks: array of shape [num_groundtruths, height, width].
groundtruth_classes: array of shape [num_groundtruths].
matching_threshold: if the overlap between a prediction and a groundtruth
is larger than this threshold, the prediction is true positive.
is_crowd: whether the groundtruths are crowd annotation or not. If True,
use intersection over area (IoA) as the overlapping metric; otherwise
use intersection over union (IoU).
with_replacement: whether a groundtruth can be matched to multiple
predictions. By default, for normal groundtruths, only 1-1 matching is
allowed for normal groundtruths; for crowd groundtruths, 1-to-many must
be allowed.
Returns:
best_overlaps: array of shape [num_predictions]. Values representing the
IoU
or IoA with best matched groundtruth.
pred_matched: array of shape [num_predictions]. Boolean value representing
whether the ith prediction is matched to a groundtruth.
gt_matched: array of shape [num_groundtruth]. Boolean value representing
whether the ith groundtruth is matched to a prediction.
Raises:
ValueError: if the shape of groundtruth/predicted masks doesn't match
groundtruth/predicted classes.
"""
if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
raise ValueError(
"The number of GT masks doesn't match the number of labels.")
if predicted_masks.shape[0] != predicted_classes.shape[0]:
raise ValueError(
"The number of predicted masks doesn't match the number of labels.")
gt_matched = np.zeros(groundtruth_classes.shape, dtype=np.bool)
pred_matched = np.zeros(predicted_classes.shape, dtype=np.bool)
best_overlaps = np.zeros(predicted_classes.shape)
for pid in range(predicted_classes.shape[0]):
best_overlap = 0
matched_gt_id = -1
for gid in range(groundtruth_classes.shape[0]):
if predicted_classes[pid] == groundtruth_classes[gid]:
if (not with_replacement) and gt_matched[gid]:
continue
if not is_crowd:
overlap = np_mask_ops.iou(predicted_masks[pid:pid + 1],
groundtruth_masks[gid:gid + 1])[0, 0]
else:
overlap = np_mask_ops.ioa(groundtruth_masks[gid:gid + 1],
predicted_masks[pid:pid + 1])[0, 0]
if overlap >= matching_threshold and overlap > best_overlap:
matched_gt_id = gid
best_overlap = overlap
if matched_gt_id >= 0:
gt_matched[matched_gt_id] = True
pred_matched[pid] = True
best_overlaps[pid] = best_overlap
return best_overlaps, pred_matched, gt_matched
  def _unpack_evaluation_dictionary_items(self, eval_dict):
    """Unpack items from the evaluation dictionary.

    Reads the groundtruth/detection tensors out of `eval_dict` (keyed by
    standard_fields names), fills in defaults for optional entries
    (is_crowd defaults to all-False; per-image counts default to the full
    padded dimension), and ensures every tensor has a leading batch
    dimension.

    Args:
      eval_dict: dictionary of tensors, keyed by standard_fields names.

    Returns:
      An 8-tuple (image_id, groundtruth_classes, groundtruth_instance_masks,
      groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
      detection_masks, num_det_masks_per_image), all batched.
    """
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_instance_masks = eval_dict[
        input_data_fields.groundtruth_instance_masks]
    groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
                                         None)
    num_gt_masks_per_image = eval_dict.get(
        input_data_fields.num_groundtruth_boxes, None)
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_masks = eval_dict[detection_fields.detection_masks]
    num_det_masks_per_image = eval_dict.get(detection_fields.num_detections,
                                            None)
    if groundtruth_is_crowd is None:
      # Default: no groundtruth is crowd-annotated.
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
    # A scalar image_id indicates unbatched inputs.
    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_masks = tf.expand_dims(detection_masks, 0)
      if num_gt_masks_per_image is None:
        # Use the (padded) groundtruth dimension as the per-image count.
        num_gt_masks_per_image = tf.shape(groundtruth_classes)[1:2]
      else:
        num_gt_masks_per_image = tf.expand_dims(num_gt_masks_per_image, 0)
      if num_det_masks_per_image is None:
        num_det_masks_per_image = tf.shape(detection_classes)[1:2]
      else:
        num_det_masks_per_image = tf.expand_dims(num_det_masks_per_image, 0)
    else:
      if num_gt_masks_per_image is None:
        # Replicate the padded count for every batch element.
        num_gt_masks_per_image = tf.tile(
            tf.shape(groundtruth_classes)[1:2],
            multiples=tf.shape(groundtruth_classes)[0:1])
      if num_det_masks_per_image is None:
        num_det_masks_per_image = tf.tile(
            tf.shape(detection_classes)[1:2],
            multiples=tf.shape(detection_classes)[0:1])
    return (image_id, groundtruth_classes, groundtruth_instance_masks,
            groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
            detection_masks, num_det_masks_per_image)
| 46.6689 | 102 | 0.696964 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
from object_detection.core import standard_fields
from object_detection.metrics import coco_tools
from object_detection.utils import json_utils
from object_detection.utils import np_mask_ops
from object_detection.utils import object_detection_evaluation
class CocoDetectionEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Evaluates detection boxes against groundtruth using COCO box metrics."""

  def __init__(self,
               categories,
               include_metrics_per_category=False,
               all_metrics_per_category=False):
    """Constructor.

    Args:
      categories: list of dicts, each with keys 'id' (required int, unique
        category id) and 'name' (required string category name).
      include_metrics_per_category: if True, also report per-category AP.
      all_metrics_per_category: if True, include all summary metrics for each
        category; can produce a very large number of metrics.
    """
    super(CocoDetectionEvaluator, self).__init__(categories)
    # Maps image id -> bool; True once detections have been added for it.
    self._image_ids = {}
    self._groundtruth_list = []
    self._detection_boxes_list = []
    self._category_id_set = set([cat['id'] for cat in self._categories])
    # COCO annotation ids must be globally unique; incremented per GT box.
    self._annotation_id = 1
    self._metrics = None
    self._include_metrics_per_category = include_metrics_per_category
    self._all_metrics_per_category = all_metrics_per_category

  def clear(self):
    """Clears stored groundtruth and detections so evaluation can restart."""
    self._image_ids.clear()
    self._groundtruth_list = []
    self._detection_boxes_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image; duplicates are skipped with a warning.

    Args:
      image_id: unique image identifier.
      groundtruth_dict: dict with required keys 'groundtruth_boxes' and
        'groundtruth_classes', and optional keys 'groundtruth_is_crowd',
        'groundtruth_area', 'groundtruth_keypoints' and
        'groundtruth_keypoint_visibilities' (standard_fields names).
    """
    if image_id in self._image_ids:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return
    groundtruth_is_crowd = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_is_crowd)
    groundtruth_area = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_area)
    groundtruth_keypoints = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_keypoints)
    groundtruth_keypoint_visibilities = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_keypoint_visibilities)
    # Treat optional fields that are present but empty as absent.
    if groundtruth_is_crowd is not None and not groundtruth_is_crowd.shape[0]:
      groundtruth_is_crowd = None
    if groundtruth_area is not None and not groundtruth_area.shape[0]:
      groundtruth_area = None
    if groundtruth_keypoints is not None and not groundtruth_keypoints.shape[0]:
      groundtruth_keypoints = None
    if groundtruth_keypoint_visibilities is not None and not groundtruth_keypoint_visibilities.shape[
        0]:
      groundtruth_keypoint_visibilities = None
    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_is_crowd=groundtruth_is_crowd,
            groundtruth_area=groundtruth_area,
            groundtruth_keypoints=groundtruth_keypoints,
            groundtruth_keypoint_visibilities=groundtruth_keypoint_visibilities)
    )
    self._annotation_id += groundtruth_dict[standard_fields.InputDataFields.
                                            groundtruth_boxes].shape[0]
    # False indicates that no detections have been added for this image yet.
    self._image_ids[image_id] = False

  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image; duplicates are skipped with a warning.

    Args:
      image_id: unique image identifier.
      detections_dict: dict with required keys 'detection_boxes',
        'detection_scores' and 'detection_classes', and an optional
        'detection_keypoints' key.

    Raises:
      ValueError: if groundtruth for `image_id` was never added.
    """
    if image_id not in self._image_ids:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))
    if self._image_ids[image_id]:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return
    detection_keypoints = detections_dict.get(
        standard_fields.DetectionResultFields.detection_keypoints)
    if detection_keypoints is not None and not detection_keypoints.shape[0]:
      detection_keypoints = None
    self._detection_boxes_list.extend(
        coco_tools.ExportSingleImageDetectionBoxesToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_boxes=detections_dict[
                standard_fields.DetectionResultFields.detection_boxes],
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes],
            detection_keypoints=detection_keypoints))
    self._image_ids[image_id] = True

  def dump_detections_to_json_file(self, json_output_path):
    """Saves the accumulated detections to a JSON file; no-op if path is empty.

    Args:
      json_output_path: path to the output file, or None/empty to skip.
    """
    if json_output_path and json_output_path is not None:
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        tf.logging.info('Dumping detections to output json file.')
        json_utils.Dump(
            obj=self._detection_boxes_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the accumulated detection boxes.

    Returns:
      A dict of COCO box metrics, each key prefixed with 'DetectionBoxes_'
      (per-category AP entries included when enabled in the constructor).
    """
    tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        'images': [{'id': image_id} for image_id in self._image_ids],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(groundtruth_dict)
    coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_boxes_list)
    box_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detections, agnostic_mode=False)
    box_metrics, box_per_category_ap = box_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category,
        all_metrics_per_category=self._all_metrics_per_category)
    box_metrics.update(box_per_category_ap)
    box_metrics = {'DetectionBoxes_'+ key: value
                   for key, value in iter(box_metrics.items())}
    return box_metrics

  def add_eval_dict(self, eval_dict):
    """Observes an evaluation result dict for a single example (or batch).

    Args:
      eval_dict: dict of tensors keyed by standard_fields names; optional
        entries 'num_groundtruth_boxes_per_image', 'num_det_boxes_per_image'
        and 'is_annotated' control per-image slicing and inclusion.

    Returns:
      A tf.py_func update op that accumulates the example(s) into this
      evaluator.
    """
    # Runs eagerly inside tf.py_func over the (batched) numpy inputs.
    def update_op(
        image_id_batched,
        groundtruth_boxes_batched,
        groundtruth_classes_batched,
        groundtruth_is_crowd_batched,
        num_gt_boxes_per_image,
        detection_boxes_batched,
        detection_scores_batched,
        detection_classes_batched,
        num_det_boxes_per_image,
        is_annotated_batched):
      for (image_id, gt_box, gt_class, gt_is_crowd, num_gt_box, det_box,
           det_score, det_class, num_det_box, is_annotated) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_is_crowd_batched,
               num_gt_boxes_per_image,
               detection_boxes_batched, detection_scores_batched,
               detection_classes_batched, num_det_boxes_per_image,
               is_annotated_batched):
        if is_annotated:
          # Slice off the zero-padding using the per-image counts.
          self.add_single_ground_truth_image_info(
              image_id, {
                  'groundtruth_boxes': gt_box[:num_gt_box],
                  'groundtruth_classes': gt_class[:num_gt_box],
                  'groundtruth_is_crowd': gt_is_crowd[:num_gt_box]
              })
          self.add_single_detected_image_info(
              image_id,
              {'detection_boxes': det_box[:num_det_box],
               'detection_scores': det_score[:num_det_box],
               'detection_classes': det_class[:num_det_box]})
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    detection_boxes = eval_dict[detection_fields.detection_boxes]
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    num_gt_boxes_per_image = eval_dict.get(
        'num_groundtruth_boxes_per_image', None)
    num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
    is_annotated = eval_dict.get('is_annotated', None)
    if groundtruth_is_crowd is None:
      # Default: no groundtruth is crowd-annotated.
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
    # A scalar image_id indicates unbatched inputs; add a batch dimension.
    if not image_id.shape.as_list():
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_boxes = tf.expand_dims(detection_boxes, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      if num_gt_boxes_per_image is None:
        # Default to the full (padded) dimension as the per-image count.
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
      if is_annotated is None:
        is_annotated = tf.constant([True])
      else:
        is_annotated = tf.expand_dims(is_annotated, 0)
    else:
      if num_gt_boxes_per_image is None:
        # Replicate the padded count for every batch element.
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_boxes)[1:2],
            multiples=tf.shape(detection_boxes)[0:1])
      if is_annotated is None:
        is_annotated = tf.ones_like(image_id, dtype=tf.bool)
    return tf.py_func(update_op, [image_id,
                                  groundtruth_boxes,
                                  groundtruth_classes,
                                  groundtruth_is_crowd,
                                  num_gt_boxes_per_image,
                                  detection_boxes,
                                  detection_scores,
                                  detection_classes,
                                  num_det_boxes_per_image,
                                  is_annotated], [])

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns tf.estimator-compatible eval metric ops for the box metrics.

    Args:
      eval_dict: dict of tensors as accepted by add_eval_dict.

    Returns:
      A dict mapping metric name -> (value_op, update_op).
    """
    update_op = self.add_eval_dict(eval_dict)
    metric_names = ['DetectionBoxes_Precision/mAP',
                    'DetectionBoxes_Precision/mAP@.50IOU',
                    'DetectionBoxes_Precision/mAP@.75IOU',
                    'DetectionBoxes_Precision/mAP (large)',
                    'DetectionBoxes_Precision/mAP (medium)',
                    'DetectionBoxes_Precision/mAP (small)',
                    'DetectionBoxes_Recall/AR@1',
                    'DetectionBoxes_Recall/AR@10',
                    'DetectionBoxes_Recall/AR@100',
                    'DetectionBoxes_Recall/AR@100 (large)',
                    'DetectionBoxes_Recall/AR@100 (medium)',
                    'DetectionBoxes_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionBoxes_PerformanceByCategory/mAP/' +
                            category_dict['name'])
    # The first metric op triggers the (expensive) evaluation once; the other
    # value ops just read the cached results.
    def first_value_func():
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])
    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
def convert_masks_to_binary(masks):
  """Converts masks to {0, 1} values of dtype uint8 (nonzero -> 1)."""
  return np.where(masks > 0, 1, 0).astype(np.uint8)
class CocoKeypointEvaluator(CocoDetectionEvaluator):
def __init__(self,
category_id,
category_keypoints,
class_text,
oks_sigmas=None):
self._category_id = category_id
self._category_name = class_text
self._keypoint_ids = sorted(
[keypoint['id'] for keypoint in category_keypoints])
kpt_id_to_name = {kpt['id']: kpt['name'] for kpt in category_keypoints}
if oks_sigmas:
self._oks_sigmas = np.array([
oks_sigmas[kpt_id_to_name[idx]] for idx in self._keypoint_ids
])
else:
self._oks_sigmas = np.full((len(self._keypoint_ids)), 0.05)
tf.logging.warning('No default keypoint OKS sigmas provided. Will use '
'0.05')
tf.logging.info('Using the following keypoint OKS sigmas: {}'.format(
self._oks_sigmas))
self._metrics = None
super(CocoKeypointEvaluator, self).__init__([{
'id': self._category_id,
'name': class_text
}])
def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
groundtruth_classes = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_classes]
groundtruth_boxes = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_boxes]
groundtruth_keypoints = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_keypoints]
class_indices = [
idx for idx, gt_class_id in enumerate(groundtruth_classes)
if gt_class_id == self._category_id
]
filtered_groundtruth_classes = np.take(
groundtruth_classes, class_indices, axis=0)
filtered_groundtruth_boxes = np.take(
groundtruth_boxes, class_indices, axis=0)
filtered_groundtruth_keypoints = np.take(
groundtruth_keypoints, class_indices, axis=0)
filtered_groundtruth_keypoints = np.take(
filtered_groundtruth_keypoints, self._keypoint_ids, axis=1)
filtered_groundtruth_dict = {}
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_classes] = filtered_groundtruth_classes
filtered_groundtruth_dict[standard_fields.InputDataFields
.groundtruth_boxes] = filtered_groundtruth_boxes
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_keypoints] = filtered_groundtruth_keypoints
if (standard_fields.InputDataFields.groundtruth_is_crowd in
groundtruth_dict.keys()):
groundtruth_is_crowd = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_is_crowd]
filtered_groundtruth_is_crowd = np.take(groundtruth_is_crowd,
class_indices, 0)
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_is_crowd] = filtered_groundtruth_is_crowd
if (standard_fields.InputDataFields.groundtruth_area in
groundtruth_dict.keys()):
groundtruth_area = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_area]
filtered_groundtruth_area = np.take(groundtruth_area, class_indices, 0)
filtered_groundtruth_dict[
standard_fields.InputDataFields
.groundtruth_area] = filtered_groundtruth_area
if (standard_fields.InputDataFields.groundtruth_keypoint_visibilities in
groundtruth_dict.keys()):
groundtruth_keypoint_visibilities = groundtruth_dict[
standard_fields.InputDataFields.groundtruth_keypoint_visibilities]
filtered_groundtruth_keypoint_visibilities = np.take(
groundtruth_keypoint_visibilities, class_indices, axis=0)
filtered_groundtruth_keypoint_visibilities = np.take(
filtered_groundtruth_keypoint_visibilities,
self._keypoint_ids,
axis=1)
filtered_groundtruth_dict[
standard_fields.InputDataFields.
groundtruth_keypoint_visibilities] = filtered_groundtruth_keypoint_visibilities
super(CocoKeypointEvaluator,
self).add_single_ground_truth_image_info(image_id,
filtered_groundtruth_dict)
def add_single_detected_image_info(self, image_id, detections_dict):
detection_classes = detections_dict[
standard_fields.DetectionResultFields.detection_classes]
detection_boxes = detections_dict[
standard_fields.DetectionResultFields.detection_boxes]
detection_scores = detections_dict[
standard_fields.DetectionResultFields.detection_scores]
detection_keypoints = detections_dict[
standard_fields.DetectionResultFields.detection_keypoints]
class_indices = [
idx for idx, class_id in enumerate(detection_classes)
if class_id == self._category_id
]
filtered_detection_classes = np.take(
detection_classes, class_indices, axis=0)
filtered_detection_boxes = np.take(detection_boxes, class_indices, axis=0)
filtered_detection_scores = np.take(detection_scores, class_indices, axis=0)
filtered_detection_keypoints = np.take(
detection_keypoints, class_indices, axis=0)
filtered_detection_keypoints = np.take(
filtered_detection_keypoints, self._keypoint_ids, axis=1)
filtered_detections_dict = {}
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_classes] = filtered_detection_classes
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_boxes] = filtered_detection_boxes
filtered_detections_dict[standard_fields.DetectionResultFields
.detection_scores] = filtered_detection_scores
filtered_detections_dict[standard_fields.DetectionResultFields.
detection_keypoints] = filtered_detection_keypoints
super(CocoKeypointEvaluator,
self).add_single_detected_image_info(image_id,
filtered_detections_dict)
def evaluate(self):
tf.logging.info('Performing evaluation on %d images.', len(self._image_ids))
groundtruth_dict = {
'annotations': self._groundtruth_list,
'images': [{'id': image_id} for image_id in self._image_ids],
'categories': self._categories
}
coco_wrapped_groundtruth = coco_tools.COCOWrapper(
groundtruth_dict, detection_type='bbox')
coco_wrapped_detections = coco_wrapped_groundtruth.LoadAnnotations(
self._detection_boxes_list)
keypoint_evaluator = coco_tools.COCOEvalWrapper(
coco_wrapped_groundtruth,
coco_wrapped_detections,
agnostic_mode=False,
iou_type='keypoints',
oks_sigmas=self._oks_sigmas)
keypoint_metrics, _ = keypoint_evaluator.ComputeMetrics(
include_metrics_per_category=False, all_metrics_per_category=False)
keypoint_metrics = {
'Keypoints_' + key: value
for key, value in iter(keypoint_metrics.items())
}
return keypoint_metrics
def add_eval_dict(self, eval_dict):
def update_op(
image_id_batched,
groundtruth_boxes_batched,
groundtruth_classes_batched,
groundtruth_is_crowd_batched,
groundtruth_area_batched,
groundtruth_keypoints_batched,
groundtruth_keypoint_visibilities_batched,
num_gt_boxes_per_image,
detection_boxes_batched,
detection_scores_batched,
detection_classes_batched,
detection_keypoints_batched,
num_det_boxes_per_image,
is_annotated_batched):
for (image_id, gt_box, gt_class, gt_is_crowd, gt_area, gt_keyp,
gt_keyp_vis, num_gt_box, det_box, det_score, det_class, det_keyp,
num_det_box, is_annotated) in zip(
image_id_batched, groundtruth_boxes_batched,
groundtruth_classes_batched, groundtruth_is_crowd_batched,
groundtruth_area_batched, groundtruth_keypoints_batched,
groundtruth_keypoint_visibilities_batched,
num_gt_boxes_per_image, detection_boxes_batched,
detection_scores_batched, detection_classes_batched,
detection_keypoints_batched, num_det_boxes_per_image,
is_annotated_batched):
if is_annotated:
self.add_single_ground_truth_image_info(
image_id, {
'groundtruth_boxes': gt_box[:num_gt_box],
'groundtruth_classes': gt_class[:num_gt_box],
'groundtruth_is_crowd': gt_is_crowd[:num_gt_box],
'groundtruth_area': gt_area[:num_gt_box],
'groundtruth_keypoints': gt_keyp[:num_gt_box],
'groundtruth_keypoint_visibilities': gt_keyp_vis[:num_gt_box]
})
self.add_single_detected_image_info(
image_id, {
'detection_boxes': det_box[:num_det_box],
'detection_scores': det_score[:num_det_box],
'detection_classes': det_class[:num_det_box],
'detection_keypoints': det_keyp[:num_det_box],
})
input_data_fields = standard_fields.InputDataFields
detection_fields = standard_fields.DetectionResultFields
image_id = eval_dict[input_data_fields.key]
groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
None)
groundtruth_area = eval_dict.get(input_data_fields.groundtruth_area, None)
groundtruth_keypoints = eval_dict[input_data_fields.groundtruth_keypoints]
groundtruth_keypoint_visibilities = eval_dict.get(
input_data_fields.groundtruth_keypoint_visibilities, None)
detection_boxes = eval_dict[detection_fields.detection_boxes]
detection_scores = eval_dict[detection_fields.detection_scores]
detection_classes = eval_dict[detection_fields.detection_classes]
detection_keypoints = eval_dict[detection_fields.detection_keypoints]
num_gt_boxes_per_image = eval_dict.get(
'num_groundtruth_boxes_per_image', None)
num_det_boxes_per_image = eval_dict.get('num_det_boxes_per_image', None)
is_annotated = eval_dict.get('is_annotated', None)
if groundtruth_is_crowd is None:
groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)
if groundtruth_area is None:
groundtruth_area = tf.zeros_like(groundtruth_classes, dtype=tf.float32)
if not image_id.shape.as_list():
image_id = tf.expand_dims(image_id, 0)
groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
groundtruth_area = tf.expand_dims(groundtruth_area, 0)
groundtruth_keypoints = tf.expand_dims(groundtruth_keypoints, 0)
detection_boxes = tf.expand_dims(detection_boxes, 0)
detection_scores = tf.expand_dims(detection_scores, 0)
detection_classes = tf.expand_dims(detection_classes, 0)
detection_keypoints = tf.expand_dims(detection_keypoints, 0)
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
else:
num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.shape(detection_boxes)[1:2]
else:
num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
if is_annotated is None:
is_annotated = tf.constant([True])
else:
is_annotated = tf.expand_dims(is_annotated, 0)
if groundtruth_keypoint_visibilities is None:
groundtruth_keypoint_visibilities = tf.fill([
tf.shape(groundtruth_boxes)[1],
tf.shape(groundtruth_keypoints)[2]
], tf.constant(2, dtype=tf.int32))
groundtruth_keypoint_visibilities = tf.expand_dims(
groundtruth_keypoint_visibilities, 0)
else:
if num_gt_boxes_per_image is None:
num_gt_boxes_per_image = tf.tile(
tf.shape(groundtruth_boxes)[1:2],
multiples=tf.shape(groundtruth_boxes)[0:1])
if num_det_boxes_per_image is None:
num_det_boxes_per_image = tf.tile(
tf.shape(detection_boxes)[1:2],
multiples=tf.shape(detection_boxes)[0:1])
if is_annotated is None:
is_annotated = tf.ones_like(image_id, dtype=tf.bool)
if groundtruth_keypoint_visibilities is None:
groundtruth_keypoint_visibilities = tf.fill([
tf.shape(groundtruth_keypoints)[1],
tf.shape(groundtruth_keypoints)[2]
], tf.constant(2, dtype=tf.int32))
groundtruth_keypoint_visibilities = tf.tile(
tf.expand_dims(groundtruth_keypoint_visibilities, 0),
multiples=[tf.shape(groundtruth_keypoints)[0], 1, 1])
return tf.py_func(update_op, [
image_id, groundtruth_boxes, groundtruth_classes, groundtruth_is_crowd,
groundtruth_area, groundtruth_keypoints,
groundtruth_keypoint_visibilities, num_gt_boxes_per_image,
detection_boxes, detection_scores, detection_classes,
detection_keypoints, num_det_boxes_per_image, is_annotated
], [])
def get_estimator_eval_metric_ops(self, eval_dict):
  """Returns a dictionary of eval metric ops for use with tf.estimator.

  Args:
    eval_dict: A dictionary of evaluation tensors accepted by
      add_eval_dict().

  Returns:
    A dictionary mapping metric names to (value_op, update_op) tuples.
    Running the first value op evaluates all accumulated examples and then
    clears the accumulated state; the remaining value ops only read the
    cached results.
  """
  update_op = self.add_eval_dict(eval_dict)
  category_name = self._category_name
  # Every reported keypoint metric follows the same naming pattern, so the
  # full names are derived from a list of suffixes.
  metric_suffixes = [
      'Precision/mAP',
      'Precision/mAP@.50IOU',
      'Precision/mAP@.75IOU',
      'Precision/mAP (large)',
      'Precision/mAP (medium)',
      'Recall/AR@1',
      'Recall/AR@10',
      'Recall/AR@100',
      'Recall/AR@100 (large)',
      'Recall/AR@100 (medium)',
  ]
  metric_names = ['Keypoints_%s ByCategory/%s' % (suffix, category_name)
                  for suffix in metric_suffixes]

  def read_first_metric():
    # Evaluating clears the accumulated state, so this runs exactly once and
    # caches the full metric dict for the per-metric readers below.
    self._metrics = self.evaluate()
    self.clear()
    return np.float32(self._metrics[metric_names[0]])

  def make_metric_reader(name):
    def read_metric():
      return np.float32(self._metrics[name])
    return read_metric

  first_value_op = tf.py_func(read_first_metric, [], tf.float32)
  eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
  # All other readers must run after first_value_op has populated
  # self._metrics.
  with tf.control_dependencies([first_value_op]):
    for name in metric_names[1:]:
      reader_op = tf.py_func(make_metric_reader(name), [], np.float32)
      eval_metric_ops[name] = (reader_op, update_op)
  return eval_metric_ops
class CocoMaskEvaluator(object_detection_evaluation.DetectionEvaluator):
  """Evaluates COCO instance-segmentation (mask) detection metrics."""

  def __init__(self, categories, include_metrics_per_category=False):
    """Constructor.

    Args:
      categories: A list of dicts, each holding at least
        'id': an integer id uniquely identifying this category, and
        'name': a string category name, e.g. 'cat' or 'dog'.
      include_metrics_per_category: If True, evaluate() additionally reports
        per-category metrics.
    """
    super(CocoMaskEvaluator, self).__init__(categories)
    # Maps image id -> shape of its groundtruth instance-mask array; doubles
    # as the record of which images have groundtruth added.
    self._image_id_to_mask_shape_map = {}
    self._image_ids_with_detections = set([])
    self._groundtruth_list = []
    self._detection_masks_list = []
    self._category_id_set = set([cat['id'] for cat in self._categories])
    # COCO annotation ids must be unique; start counting at 1.
    self._annotation_id = 1
    self._include_metrics_per_category = include_metrics_per_category

  def clear(self):
    """Clears all accumulated state to start a fresh evaluation."""
    self._image_id_to_mask_shape_map.clear()
    self._image_ids_with_detections.clear()
    self._groundtruth_list = []
    self._detection_masks_list = []

  def add_single_ground_truth_image_info(self,
                                         image_id,
                                         groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    A second call with an already-seen image_id is ignored with a warning.

    Args:
      image_id: A unique identifier for the image.
      groundtruth_dict: A dictionary containing
        InputDataFields.groundtruth_boxes: numpy array of shape
          [num_instances, 4],
        InputDataFields.groundtruth_classes: numpy array of shape
          [num_instances],
        InputDataFields.groundtruth_instance_masks: numpy array of shape
          [num_instances, height, width].
    """
    if image_id in self._image_id_to_mask_shape_map:
      tf.logging.warning('Ignoring ground truth with image id %s since it was '
                         'previously added', image_id)
      return

    # COCO mask evaluation expects strictly binary masks.
    groundtruth_instance_masks = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks]
    groundtruth_instance_masks = convert_masks_to_binary(
        groundtruth_instance_masks)
    self._groundtruth_list.extend(
        coco_tools.ExportSingleImageGroundtruthToCoco(
            image_id=image_id,
            next_annotation_id=self._annotation_id,
            category_id_set=self._category_id_set,
            groundtruth_boxes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_boxes],
            groundtruth_classes=groundtruth_dict[
                standard_fields.InputDataFields.groundtruth_classes],
            groundtruth_masks=groundtruth_instance_masks))
    # Keep annotation ids unique across all images.
    self._annotation_id += groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_boxes].shape[0]
    self._image_id_to_mask_shape_map[image_id] = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks].shape

  def add_single_detected_image_info(self,
                                     image_id,
                                     detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique identifier for the image; groundtruth for it must
        already have been added.
      detections_dict: A dictionary containing
        DetectionResultFields.detection_scores: numpy array of shape
          [num_detections],
        DetectionResultFields.detection_classes: numpy array of shape
          [num_detections],
        DetectionResultFields.detection_masks: numpy array of shape
          [num_detections, height, width].

    Raises:
      ValueError: If no groundtruth was added for image_id, or if the spatial
        shape of the detection masks disagrees with the groundtruth masks.
    """
    if image_id not in self._image_id_to_mask_shape_map:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    if image_id in self._image_ids_with_detections:
      tf.logging.warning('Ignoring detection with image id %s since it was '
                         'previously added', image_id)
      return

    groundtruth_masks_shape = self._image_id_to_mask_shape_map[image_id]
    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
    # Only the spatial dimensions must match; instance counts may differ.
    if groundtruth_masks_shape[1:] != detection_masks.shape[1:]:
      raise ValueError('Spatial shape of groundtruth masks and detection masks '
                       'are incompatible: {} vs {}'.format(
                           groundtruth_masks_shape,
                           detection_masks.shape))
    detection_masks = convert_masks_to_binary(detection_masks)
    self._detection_masks_list.extend(
        coco_tools.ExportSingleImageDetectionMasksToCoco(
            image_id=image_id,
            category_id_set=self._category_id_set,
            detection_masks=detection_masks,
            detection_scores=detections_dict[
                standard_fields.DetectionResultFields.detection_scores],
            detection_classes=detections_dict[
                standard_fields.DetectionResultFields.detection_classes]))
    self._image_ids_with_detections.update([image_id])

  def dump_detections_to_json_file(self, json_output_path):
    """Saves the accumulated detections into a COCO-format json file.

    Args:
      json_output_path: Output path for the json file; a falsy value (None or
        empty string) disables dumping.
    """
    # The original condition `json_output_path and json_output_path is not
    # None` was redundant: truthiness already excludes None.
    if json_output_path:
      tf.logging.info('Dumping detections to output json file.')
      with tf.gfile.GFile(json_output_path, 'w') as fid:
        json_utils.Dump(
            obj=self._detection_masks_list, fid=fid, float_digits=4, indent=2)

  def evaluate(self):
    """Evaluates the accumulated detection masks.

    Returns:
      A dictionary of metrics, each prefixed with 'DetectionMasks_'.
    """
    groundtruth_dict = {
        'annotations': self._groundtruth_list,
        # Mask shapes are [num_instances, height, width].
        'images': [{'id': image_id, 'height': shape[1], 'width': shape[2]}
                   for image_id, shape
                   in self._image_id_to_mask_shape_map.items()],
        'categories': self._categories
    }
    coco_wrapped_groundtruth = coco_tools.COCOWrapper(
        groundtruth_dict, detection_type='segmentation')
    coco_wrapped_detection_masks = coco_wrapped_groundtruth.LoadAnnotations(
        self._detection_masks_list)
    mask_evaluator = coco_tools.COCOEvalWrapper(
        coco_wrapped_groundtruth, coco_wrapped_detection_masks,
        agnostic_mode=False, iou_type='segm')
    mask_metrics, mask_per_category_ap = mask_evaluator.ComputeMetrics(
        include_metrics_per_category=self._include_metrics_per_category)
    mask_metrics.update(mask_per_category_ap)
    mask_metrics = {'DetectionMasks_' + key: value
                    for key, value in mask_metrics.items()}
    return mask_metrics

  def add_eval_dict(self, eval_dict):
    """Builds an op that feeds a (possibly batched) eval_dict into self.

    Wraps add_single_ground_truth_image_info() and
    add_single_detected_image_info() in a tf.py_func so they can be driven
    from a TensorFlow graph. Unbatched inputs are given a batch dimension;
    missing per-image box counts default to the full tensor extent.

    Args:
      eval_dict: A dictionary of evaluation tensors keyed by
        standard_fields.InputDataFields / DetectionResultFields names.

    Returns:
      An update op (tf.py_func with no outputs).
    """
    def update_op(image_id_batched, groundtruth_boxes_batched,
                  groundtruth_classes_batched,
                  groundtruth_instance_masks_batched,
                  groundtruth_is_crowd_batched, num_gt_boxes_per_image,
                  detection_scores_batched, detection_classes_batched,
                  detection_masks_batched, num_det_boxes_per_image):
      """Records groundtruth and detections for every image in the batch."""
      for (image_id, groundtruth_boxes, groundtruth_classes,
           groundtruth_instance_masks, groundtruth_is_crowd, num_gt_box,
           detection_scores, detection_classes,
           detection_masks, num_det_box) in zip(
               image_id_batched, groundtruth_boxes_batched,
               groundtruth_classes_batched, groundtruth_instance_masks_batched,
               groundtruth_is_crowd_batched, num_gt_boxes_per_image,
               detection_scores_batched, detection_classes_batched,
               detection_masks_batched, num_det_boxes_per_image):
        # Slice away padding beyond the per-image valid counts.
        self.add_single_ground_truth_image_info(
            image_id, {
                'groundtruth_boxes':
                    groundtruth_boxes[:num_gt_box],
                'groundtruth_classes':
                    groundtruth_classes[:num_gt_box],
                'groundtruth_instance_masks':
                    groundtruth_instance_masks[:num_gt_box],
                'groundtruth_is_crowd':
                    groundtruth_is_crowd[:num_gt_box]
            })
        self.add_single_detected_image_info(
            image_id, {
                'detection_scores': detection_scores[:num_det_box],
                'detection_classes': detection_classes[:num_det_box],
                'detection_masks': detection_masks[:num_det_box]
            })

    # Unpack items from the evaluation dictionary.
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_boxes = eval_dict[input_data_fields.groundtruth_boxes]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_instance_masks = eval_dict[
        input_data_fields.groundtruth_instance_masks]
    groundtruth_is_crowd = eval_dict.get(
        input_data_fields.groundtruth_is_crowd, None)
    num_gt_boxes_per_image = eval_dict.get(
        input_data_fields.num_groundtruth_boxes, None)
    detection_scores = eval_dict[detection_fields.detection_scores]
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_masks = eval_dict[detection_fields.detection_masks]
    num_det_boxes_per_image = eval_dict.get(detection_fields.num_detections,
                                            None)

    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)

    if not image_id.shape.as_list():
      # Scalar image id: the inputs are unbatched, so add a batch dimension
      # of size 1 to every tensor.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_boxes = tf.expand_dims(groundtruth_boxes, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_scores = tf.expand_dims(detection_scores, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_masks = tf.expand_dims(detection_masks, 0)

      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.shape(groundtruth_boxes)[1:2]
      else:
        num_gt_boxes_per_image = tf.expand_dims(num_gt_boxes_per_image, 0)

      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.shape(detection_scores)[1:2]
      else:
        num_det_boxes_per_image = tf.expand_dims(num_det_boxes_per_image, 0)
    else:
      # Batched inputs: default missing per-image counts to the full padded
      # extent, replicated across the batch.
      if num_gt_boxes_per_image is None:
        num_gt_boxes_per_image = tf.tile(
            tf.shape(groundtruth_boxes)[1:2],
            multiples=tf.shape(groundtruth_boxes)[0:1])
      if num_det_boxes_per_image is None:
        num_det_boxes_per_image = tf.tile(
            tf.shape(detection_scores)[1:2],
            multiples=tf.shape(detection_scores)[0:1])

    return tf.py_func(update_op, [
        image_id, groundtruth_boxes, groundtruth_classes,
        groundtruth_instance_masks, groundtruth_is_crowd,
        num_gt_boxes_per_image, detection_scores, detection_classes,
        detection_masks, num_det_boxes_per_image
    ], [])

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops for use with tf.estimator.

    Args:
      eval_dict: A dictionary of evaluation tensors accepted by
        add_eval_dict().

    Returns:
      A dictionary mapping metric names to (value_op, update_op) tuples.
      Running the first value op evaluates all accumulated examples and then
      clears the accumulated state; the other value ops read cached results.
    """
    update_op = self.add_eval_dict(eval_dict)
    metric_names = ['DetectionMasks_Precision/mAP',
                    'DetectionMasks_Precision/mAP@.50IOU',
                    'DetectionMasks_Precision/mAP@.75IOU',
                    'DetectionMasks_Precision/mAP (large)',
                    'DetectionMasks_Precision/mAP (medium)',
                    'DetectionMasks_Precision/mAP (small)',
                    'DetectionMasks_Recall/AR@1',
                    'DetectionMasks_Recall/AR@10',
                    'DetectionMasks_Recall/AR@100',
                    'DetectionMasks_Recall/AR@100 (large)',
                    'DetectionMasks_Recall/AR@100 (medium)',
                    'DetectionMasks_Recall/AR@100 (small)']
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('DetectionMasks_PerformanceByCategory/mAP/' +
                            category_dict['name'])

    def first_value_func():
      # Evaluating clears the accumulated state, so this runs exactly once
      # and caches the full metric dict for the readers below.
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func

    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    # All other value ops must run after first_value_op has populated
    # self._metrics.
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops
class CocoPanopticSegmentationEvaluator(
    object_detection_evaluation.DetectionEvaluator):
  """Evaluates panoptic-quality style metrics (PQ/SQ/RQ) from instance masks."""

  def __init__(self,
               categories,
               include_metrics_per_category=False,
               iou_threshold=0.5,
               ioa_threshold=0.5):
    """Constructor.

    Args:
      categories: A list of dicts, each holding at least
        'id': an integer id uniquely identifying this category, and
        'name': a string category name.
      include_metrics_per_category: If True, additionally report the
        per-category panoptic quality.
      iou_threshold: Minimum IoU for matching predictions to normal
        (non-crowd) groundtruth masks.
      ioa_threshold: Minimum IoA (intersection over groundtruth area) for
        matching remaining predictions to is_crowd groundtruth masks.
    """
    super(CocoPanopticSegmentationEvaluator, self).__init__(categories)
    self._groundtruth_masks = {}
    self._groundtruth_class_labels = {}
    self._groundtruth_is_crowd = {}
    self._predicted_masks = {}
    self._predicted_class_labels = {}
    self._include_metrics_per_category = include_metrics_per_category
    self._iou_threshold = iou_threshold
    self._ioa_threshold = ioa_threshold

  def clear(self):
    """Clears all accumulated state to start a fresh evaluation."""
    self._groundtruth_masks.clear()
    self._groundtruth_class_labels.clear()
    self._groundtruth_is_crowd.clear()
    self._predicted_masks.clear()
    self._predicted_class_labels.clear()

  def add_single_ground_truth_image_info(self, image_id, groundtruth_dict):
    """Adds groundtruth for a single image to be used for evaluation.

    A second call with an already-seen image_id is ignored with a warning.

    Args:
      image_id: A unique identifier for the image.
      groundtruth_dict: A dictionary containing
        InputDataFields.groundtruth_instance_masks: numpy array of shape
          [num_instances, height, width],
        InputDataFields.groundtruth_classes: numpy array of shape
          [num_instances], and optionally
        InputDataFields.groundtruth_is_crowd: numpy array of shape
          [num_instances].
    """
    if image_id in self._groundtruth_masks:
      tf.logging.warning(
          'Ignoring groundtruth with image %s, since it has already been '
          'added to the ground truth database.', image_id)
      return

    self._groundtruth_masks[image_id] = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_instance_masks]
    self._groundtruth_class_labels[image_id] = groundtruth_dict[
        standard_fields.InputDataFields.groundtruth_classes]
    groundtruth_is_crowd = groundtruth_dict.get(
        standard_fields.InputDataFields.groundtruth_is_crowd)
    # Drop empty is_crowd arrays; treat them the same as absent ones.
    if groundtruth_is_crowd is not None and not groundtruth_is_crowd.size > 0:
      groundtruth_is_crowd = None
    if groundtruth_is_crowd is not None:
      self._groundtruth_is_crowd[image_id] = groundtruth_is_crowd

  def add_single_detected_image_info(self, image_id, detections_dict):
    """Adds detections for a single image to be used for evaluation.

    Args:
      image_id: A unique identifier for the image; groundtruth for it must
        already have been added.
      detections_dict: A dictionary containing
        DetectionResultFields.detection_masks: numpy array of shape
          [num_detections, height, width], and
        DetectionResultFields.detection_classes: numpy array of shape
          [num_detections].

    Raises:
      ValueError: If no groundtruth was added for image_id, or if the shape
        of the detection masks disagrees with the groundtruth masks.
    """
    if image_id not in self._groundtruth_masks:
      raise ValueError('Missing groundtruth for image id: {}'.format(image_id))

    detection_masks = detections_dict[
        standard_fields.DetectionResultFields.detection_masks]
    self._predicted_masks[image_id] = detection_masks
    self._predicted_class_labels[image_id] = detections_dict[
        standard_fields.DetectionResultFields.detection_classes]
    groundtruth_mask_shape = self._groundtruth_masks[image_id].shape
    if groundtruth_mask_shape[1:] != detection_masks.shape[1:]:
      raise ValueError("The shape of results doesn't match groundtruth.")

  def evaluate(self):
    """Computes the panoptic metrics over all accumulated images.

    Returns:
      A dictionary of PQ/SQ/RQ metrics (see _compute_panoptic_metrics).
    """
    # Evaluate and accumulate the iou/tp/fp/fn.
    sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn = self._evaluate_all_masks()
    # Compute PQ metric for each category and average over all classes.
    mask_metrics = self._compute_panoptic_metrics(sum_tp_iou, sum_num_tp,
                                                  sum_num_fp, sum_num_fn)
    return mask_metrics

  def get_estimator_eval_metric_ops(self, eval_dict):
    """Returns a dictionary of eval metric ops for use with tf.estimator.

    Args:
      eval_dict: A dictionary of evaluation tensors keyed by
        standard_fields.InputDataFields / DetectionResultFields names.

    Returns:
      A dictionary mapping metric names to (value_op, update_op) tuples.
      Running the first value op evaluates all accumulated examples and then
      clears the accumulated state; the other value ops read cached results.
    """
    def update_op(image_id_batched, groundtruth_classes_batched,
                  groundtruth_instance_masks_batched,
                  groundtruth_is_crowd_batched, num_gt_masks_per_image,
                  detection_classes_batched, detection_masks_batched,
                  num_det_masks_per_image):
      """Records groundtruth and detections for every image in the batch."""
      for (image_id, groundtruth_classes, groundtruth_instance_masks,
           groundtruth_is_crowd, num_gt_mask, detection_classes,
           detection_masks, num_det_mask) in zip(
               image_id_batched, groundtruth_classes_batched,
               groundtruth_instance_masks_batched, groundtruth_is_crowd_batched,
               num_gt_masks_per_image, detection_classes_batched,
               detection_masks_batched, num_det_masks_per_image):
        # Slice away padding beyond the per-image valid counts.
        self.add_single_ground_truth_image_info(
            image_id, {
                'groundtruth_classes':
                    groundtruth_classes[:num_gt_mask],
                'groundtruth_instance_masks':
                    groundtruth_instance_masks[:num_gt_mask],
                'groundtruth_is_crowd':
                    groundtruth_is_crowd[:num_gt_mask]
            })
        self.add_single_detected_image_info(
            image_id, {
                'detection_classes': detection_classes[:num_det_mask],
                'detection_masks': detection_masks[:num_det_mask]
            })

    # Unpack items from the evaluation dictionary.
    (image_id, groundtruth_classes, groundtruth_instance_masks,
     groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
     detection_masks, num_det_masks_per_image
    ) = self._unpack_evaluation_dictionary_items(eval_dict)

    update_op = tf.py_func(update_op, [
        image_id, groundtruth_classes, groundtruth_instance_masks,
        groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
        detection_masks, num_det_masks_per_image
    ], [])

    metric_names = [
        'PanopticQuality@%.2fIOU' % self._iou_threshold,
        'SegmentationQuality@%.2fIOU' % self._iou_threshold,
        'RecognitionQuality@%.2fIOU' % self._iou_threshold
    ]
    if self._include_metrics_per_category:
      for category_dict in self._categories:
        metric_names.append('PanopticQuality@%.2fIOU_ByCategory/%s' %
                            (self._iou_threshold, category_dict['name']))

    def first_value_func():
      self._metrics = self.evaluate()
      self.clear()
      return np.float32(self._metrics[metric_names[0]])

    def value_func_factory(metric_name):
      def value_func():
        return np.float32(self._metrics[metric_name])
      return value_func

    # Ensure that the metrics are only evaluated once.
    first_value_op = tf.py_func(first_value_func, [], tf.float32)
    eval_metric_ops = {metric_names[0]: (first_value_op, update_op)}
    with tf.control_dependencies([first_value_op]):
      for metric_name in metric_names[1:]:
        eval_metric_ops[metric_name] = (tf.py_func(
            value_func_factory(metric_name), [], np.float32), update_op)
    return eval_metric_ops

  def _evaluate_all_masks(self):
    """Matches predictions to groundtruths over all accumulated images.

    Returns:
      A tuple of four dicts keyed by category id:
        sum_tp_iou: summed IoU of all true positives.
        sum_num_tp: number of true positives.
        sum_num_fp: number of false positives.
        sum_num_fn: number of false negatives.
    """
    sum_num_tp = {category['id']: 0 for category in self._categories}
    sum_num_fp = sum_num_tp.copy()
    sum_num_fn = sum_num_tp.copy()
    sum_tp_iou = sum_num_tp.copy()

    for image_id in self._groundtruth_class_labels:
      # Separate normal and is_crowd groundtruth
      crowd_gt_indices = self._groundtruth_is_crowd.get(image_id)
      (normal_gt_masks, normal_gt_classes, crowd_gt_masks,
       crowd_gt_classes) = self._separate_normal_and_crowd_labels(
           crowd_gt_indices, self._groundtruth_masks[image_id],
           self._groundtruth_class_labels[image_id])

      # Mask matching to normal GT.
      predicted_masks = self._predicted_masks[image_id]
      predicted_class_labels = self._predicted_class_labels[image_id]
      (overlaps, pred_matched,
       gt_matched) = self._match_predictions_to_groundtruths(
           predicted_masks,
           predicted_class_labels,
           normal_gt_masks,
           normal_gt_classes,
           self._iou_threshold,
           is_crowd=False,
           with_replacement=False)

      # Accumulate true positives.
      for (class_id, is_matched, overlap) in zip(predicted_class_labels,
                                                 pred_matched, overlaps):
        if is_matched:
          sum_num_tp[class_id] += 1
          sum_tp_iou[class_id] += overlap

      # Accumulate false negatives.
      for (class_id, is_matched) in zip(normal_gt_classes, gt_matched):
        if not is_matched:
          sum_num_fn[class_id] += 1

      # Match remaining predictions to crowd gt.
      remained_pred_indices = np.logical_not(pred_matched)
      remained_pred_masks = predicted_masks[remained_pred_indices, :, :]
      remained_pred_classes = predicted_class_labels[remained_pred_indices]
      _, pred_matched, _ = self._match_predictions_to_groundtruths(
          remained_pred_masks,
          remained_pred_classes,
          crowd_gt_masks,
          crowd_gt_classes,
          self._ioa_threshold,
          is_crowd=True,
          with_replacement=True)

      # Accumulate false positives
      for (class_id, is_matched) in zip(remained_pred_classes, pred_matched):
        if not is_matched:
          sum_num_fp[class_id] += 1
    return sum_tp_iou, sum_num_tp, sum_num_fp, sum_num_fn

  def _compute_panoptic_metrics(self, sum_tp_iou, sum_num_tp, sum_num_fp,
                                sum_num_fn):
    """Averages the per-class panoptic metrics over all valid classes.

    Args:
      sum_tp_iou: Dict mapping category id to summed true-positive IoU.
      sum_num_tp: Dict mapping category id to true-positive count.
      sum_num_fp: Dict mapping category id to false-positive count.
      sum_num_fn: Dict mapping category id to false-negative count.

    Returns:
      A dictionary with PQ/SQ/RQ metrics (averaged over classes that have at
      least one groundtruth or prediction), the number of valid classes and
      the total number of classes.
    """
    mask_metrics = {}
    sum_pq = 0
    sum_sq = 0
    sum_rq = 0
    num_valid_classes = 0
    for category in self._categories:
      class_id = category['id']
      (panoptic_quality, segmentation_quality,
       recognition_quality) = self._compute_panoptic_metrics_single_class(
           sum_tp_iou[class_id], sum_num_tp[class_id], sum_num_fp[class_id],
           sum_num_fn[class_id])
      if panoptic_quality is not None:
        sum_pq += panoptic_quality
        sum_sq += segmentation_quality
        sum_rq += recognition_quality
        num_valid_classes += 1
        if self._include_metrics_per_category:
          mask_metrics['PanopticQuality@%.2fIOU_ByCategory/%s' %
                       (self._iou_threshold,
                        category['name'])] = panoptic_quality
    if num_valid_classes > 0:
      mask_metrics['PanopticQuality@%.2fIOU' %
                   self._iou_threshold] = sum_pq / num_valid_classes
      mask_metrics['SegmentationQuality@%.2fIOU' %
                   self._iou_threshold] = sum_sq / num_valid_classes
      mask_metrics['RecognitionQuality@%.2fIOU' %
                   self._iou_threshold] = sum_rq / num_valid_classes
    else:
      # No class had any groundtruth or prediction; report zeros instead of
      # raising ZeroDivisionError.
      mask_metrics['PanopticQuality@%.2fIOU' % self._iou_threshold] = 0
      mask_metrics['SegmentationQuality@%.2fIOU' % self._iou_threshold] = 0
      mask_metrics['RecognitionQuality@%.2fIOU' % self._iou_threshold] = 0
    mask_metrics['NumValidClasses'] = num_valid_classes
    mask_metrics['NumTotalClasses'] = len(self._categories)
    return mask_metrics

  def _compute_panoptic_metrics_single_class(self, sum_tp_iou, num_tp, num_fp,
                                             num_fn):
    """Computes PQ/SQ/RQ for a single class.

    Args:
      sum_tp_iou: Summed IoU of the true positives of this class.
      num_tp: Number of true positives.
      num_fp: Number of false positives.
      num_fn: Number of false negatives.

    Returns:
      A (panoptic_quality, segmentation_quality, recognition_quality) tuple,
      or (None, None, None) if the class has no groundtruth and no
      predictions.
    """
    denominator = num_tp + 0.5 * num_fp + 0.5 * num_fn
    # Calculate metric only if there is at least one GT or one prediction.
    if denominator > 0:
      recognition_quality = num_tp / denominator
      if num_tp > 0:
        segmentation_quality = sum_tp_iou / num_tp
      else:
        # If there is no TP for this category.
        segmentation_quality = 0
      panoptic_quality = segmentation_quality * recognition_quality
      return panoptic_quality, segmentation_quality, recognition_quality
    else:
      return None, None, None

  def _separate_normal_and_crowd_labels(self, crowd_gt_indices,
                                        groundtruth_masks, groundtruth_classes):
    """Separates the normal and the is_crowd groundtruth labels.

    Args:
      crowd_gt_indices: Boolean-convertible numpy array of shape
        [num_instances] flagging crowd instances, or None when no is_crowd
        information is available (every instance is then treated as normal).
      groundtruth_masks: numpy array of shape [num_instances, height, width].
      groundtruth_classes: numpy array of shape [num_instances].

    Returns:
      A (normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes)
      tuple.

    Raises:
      ValueError: If the array lengths are inconsistent.
    """
    if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
      raise ValueError(
          "The number of masks doesn't match the number of labels.")
    if crowd_gt_indices is None:
      # BUGFIX: this previously built a boolean array with the full mask
      # shape ([num_instances, height, width]), which broke the per-instance
      # indexing below; the flags must be one per instance. `np.bool` is also
      # removed from modern NumPy, so the builtin `bool` is used instead.
      crowd_gt_indices = np.zeros(groundtruth_masks.shape[0], dtype=bool)
    else:
      if groundtruth_masks.shape[0] != crowd_gt_indices.shape[0]:
        raise ValueError(
            "The number of masks doesn't match the number of is_crowd labels.")
      crowd_gt_indices = crowd_gt_indices.astype(bool)
    normal_gt_indices = np.logical_not(crowd_gt_indices)
    if normal_gt_indices.size:
      normal_gt_masks = groundtruth_masks[normal_gt_indices, :, :]
      normal_gt_classes = groundtruth_classes[normal_gt_indices]
      crowd_gt_masks = groundtruth_masks[crowd_gt_indices, :, :]
      crowd_gt_classes = groundtruth_classes[crowd_gt_indices]
    else:
      # No groundtruths available, groundtruth_masks.shape = (0, h, w)
      normal_gt_masks = groundtruth_masks
      normal_gt_classes = groundtruth_classes
      crowd_gt_masks = groundtruth_masks
      crowd_gt_classes = groundtruth_classes
    return normal_gt_masks, normal_gt_classes, crowd_gt_masks, crowd_gt_classes

  def _match_predictions_to_groundtruths(self,
                                         predicted_masks,
                                         predicted_classes,
                                         groundtruth_masks,
                                         groundtruth_classes,
                                         matching_threshold,
                                         is_crowd=False,
                                         with_replacement=False):
    """Greedily matches predictions to groundtruths of the same class.

    Args:
      predicted_masks: numpy array of shape [num_predictions, height, width].
      predicted_classes: numpy array of shape [num_predictions].
      groundtruth_masks: numpy array of shape [num_groundtruths, height,
        width].
      groundtruth_classes: numpy array of shape [num_groundtruths].
      matching_threshold: Minimum overlap required to accept a match.
      is_crowd: If True, overlap is measured as IoA of the groundtruth with
        the prediction; otherwise as IoU.
      with_replacement: If True, a groundtruth can be matched by several
        predictions; otherwise each groundtruth is matched at most once.

    Returns:
      A tuple (best_overlaps, pred_matched, gt_matched) where best_overlaps
      holds, per prediction, the overlap with its matched groundtruth (0 if
      unmatched) and the two boolean arrays flag matched predictions and
      groundtruths.

    Raises:
      ValueError: If mask and label array lengths are inconsistent.
    """
    if groundtruth_masks.shape[0] != groundtruth_classes.shape[0]:
      raise ValueError(
          "The number of GT masks doesn't match the number of labels.")
    if predicted_masks.shape[0] != predicted_classes.shape[0]:
      raise ValueError(
          "The number of predicted masks doesn't match the number of labels.")
    # `bool` instead of `np.bool`: the alias was removed from modern NumPy.
    gt_matched = np.zeros(groundtruth_classes.shape, dtype=bool)
    pred_matched = np.zeros(predicted_classes.shape, dtype=bool)
    best_overlaps = np.zeros(predicted_classes.shape)
    for pid in range(predicted_classes.shape[0]):
      best_overlap = 0
      matched_gt_id = -1
      for gid in range(groundtruth_classes.shape[0]):
        if predicted_classes[pid] == groundtruth_classes[gid]:
          if (not with_replacement) and gt_matched[gid]:
            continue
          if not is_crowd:
            overlap = np_mask_ops.iou(predicted_masks[pid:pid + 1],
                                      groundtruth_masks[gid:gid + 1])[0, 0]
          else:
            overlap = np_mask_ops.ioa(groundtruth_masks[gid:gid + 1],
                                      predicted_masks[pid:pid + 1])[0, 0]
          if overlap >= matching_threshold and overlap > best_overlap:
            matched_gt_id = gid
            best_overlap = overlap
      if matched_gt_id >= 0:
        gt_matched[matched_gt_id] = True
        pred_matched[pid] = True
        best_overlaps[pid] = best_overlap
    return best_overlaps, pred_matched, gt_matched

  def _unpack_evaluation_dictionary_items(self, eval_dict):
    """Unpacks and normalizes the items of an evaluation dictionary.

    Unbatched tensors are given a batch dimension of 1; missing per-image
    mask counts default to the full padded extent; a missing is_crowd tensor
    defaults to all-False.

    Args:
      eval_dict: A dictionary of evaluation tensors keyed by
        standard_fields.InputDataFields / DetectionResultFields names.

    Returns:
      The tuple (image_id, groundtruth_classes, groundtruth_instance_masks,
      groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
      detection_masks, num_det_masks_per_image).
    """
    input_data_fields = standard_fields.InputDataFields
    detection_fields = standard_fields.DetectionResultFields
    image_id = eval_dict[input_data_fields.key]
    groundtruth_classes = eval_dict[input_data_fields.groundtruth_classes]
    groundtruth_instance_masks = eval_dict[
        input_data_fields.groundtruth_instance_masks]
    groundtruth_is_crowd = eval_dict.get(input_data_fields.groundtruth_is_crowd,
                                         None)
    num_gt_masks_per_image = eval_dict.get(
        input_data_fields.num_groundtruth_boxes, None)
    detection_classes = eval_dict[detection_fields.detection_classes]
    detection_masks = eval_dict[detection_fields.detection_masks]
    num_det_masks_per_image = eval_dict.get(detection_fields.num_detections,
                                            None)
    if groundtruth_is_crowd is None:
      groundtruth_is_crowd = tf.zeros_like(groundtruth_classes, dtype=tf.bool)

    if not image_id.shape.as_list():
      # Apply a batch dimension to all tensors.
      image_id = tf.expand_dims(image_id, 0)
      groundtruth_classes = tf.expand_dims(groundtruth_classes, 0)
      groundtruth_instance_masks = tf.expand_dims(groundtruth_instance_masks, 0)
      groundtruth_is_crowd = tf.expand_dims(groundtruth_is_crowd, 0)
      detection_classes = tf.expand_dims(detection_classes, 0)
      detection_masks = tf.expand_dims(detection_masks, 0)

      if num_gt_masks_per_image is None:
        num_gt_masks_per_image = tf.shape(groundtruth_classes)[1:2]
      else:
        num_gt_masks_per_image = tf.expand_dims(num_gt_masks_per_image, 0)

      if num_det_masks_per_image is None:
        num_det_masks_per_image = tf.shape(detection_classes)[1:2]
      else:
        num_det_masks_per_image = tf.expand_dims(num_det_masks_per_image, 0)
    else:
      if num_gt_masks_per_image is None:
        num_gt_masks_per_image = tf.tile(
            tf.shape(groundtruth_classes)[1:2],
            multiples=tf.shape(groundtruth_classes)[0:1])
      if num_det_masks_per_image is None:
        num_det_masks_per_image = tf.tile(
            tf.shape(detection_classes)[1:2],
            multiples=tf.shape(detection_classes)[0:1])
    return (image_id, groundtruth_classes, groundtruth_instance_masks,
            groundtruth_is_crowd, num_gt_masks_per_image, detection_classes,
            detection_masks, num_det_masks_per_image)
| true | true |
f721bc197ab237261a546eeac5adc901979a3a4e | 487 | py | Python | libs/external_libs/Genshi-0.5.1/examples/turbogears/genshitest/tests/test_controllers.py | google-code-export/django-hotclub | d783a5bbcc06816289565f3eae6d99461188ca4a | [
"MIT"
] | 3 | 2015-12-25T14:45:36.000Z | 2016-11-28T09:58:03.000Z | libs/external_libs/Genshi-0.5.1/examples/turbogears/genshitest/tests/test_controllers.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | libs/external_libs/Genshi-0.5.1/examples/turbogears/genshitest/tests/test_controllers.py | indro/t2c | 56482ad4aed150f29353e054db2c97b567243bf8 | [
"MIT"
] | null | null | null | from turbogears import testutil
from genshitest.controllers import Root
import cherrypy
# Mount the application's root controller so that the testutil requests
# issued by the tests below are routed to it.
cherrypy.root = Root()
def test_method():
    """The index method should return a string called now."""
    import types
    # testutil.call invokes the controller method directly and returns the
    # template variables it produced.
    response = testutil.call(cherrypy.root.index)
    # Exact type check on purpose (Py2 str); isinstance would also accept
    # subclasses, which is not what the original asserted.
    assert type(response["now"]) == types.StringType
def test_indextitle():
    """The main page should have the right title."""
    # Issue a full request for the root URL and inspect the rendered body.
    testutil.createRequest("/")
    body = cherrypy.response.body[0]
    assert "<TITLE>Welcome to TurboGears</TITLE>" in body
| 28.647059 | 78 | 0.73922 | from turbogears import testutil
from genshitest.controllers import Root
import cherrypy
cherrypy.root = Root()
def test_method():
import types
result = testutil.call(cherrypy.root.index)
assert type(result["now"]) == types.StringType
def test_indextitle():
testutil.createRequest("/")
assert "<TITLE>Welcome to TurboGears</TITLE>" in cherrypy.response.body[0]
| true | true |
f721bd50781f7bcc154d691ea0d6153c17a983a4 | 6,230 | py | Python | tools/parse_llvm_coverage.py | AsdMonio/rr-external_skia | 3839e72932bcef2f26a4f8826bb92b195f6cc396 | [
"Apache-2.0"
] | 5,964 | 2016-09-27T03:46:29.000Z | 2022-03-31T16:25:27.000Z | third_party/skia/tools/parse_llvm_coverage.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 459 | 2016-09-29T00:51:38.000Z | 2022-03-07T14:37:46.000Z | third_party/skia/tools/parse_llvm_coverage.py | w4454962/miniblink49 | b294b6eacb3333659bf7b94d670d96edeeba14c0 | [
"Apache-2.0"
] | 1,006 | 2016-09-27T05:17:27.000Z | 2022-03-30T02:46:51.000Z | #!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse an LLVM coverage report to generate useable results."""
import argparse
import json
import os
import re
import subprocess
import sys
def _fix_filename(filename):
"""Return a filename which we can use to identify the file.
The file paths printed by llvm-cov take the form:
/path/to/repo/out/dir/../../src/filename.cpp
And then they're truncated to 22 characters with leading ellipses:
...../../src/filename.cpp
This makes it really tough to determine whether the file actually belongs in
the Skia repo. This function strips out the leading junk so that, if the file
exists in the repo, the returned string matches the end of some relative path
in the repo. This doesn't guarantee correctness, but it's about as close as
we can get.
"""
return filename.split('..')[-1].lstrip('./')
def _file_in_repo(filename, all_files):
  """Return the name of the checked-in file matching the given filename.

  Use suffix matching to determine which checked-in files the given filename
  matches. If there are no matches or multiple matches, return None.

  Args:
    filename: path as printed by llvm-cov (possibly mangled, see
      _fix_filename).
    all_files: iterable of repo-relative paths of all checked-in files.
  """
  new_file = _fix_filename(filename)
  matched = [f for f in all_files if f.endswith(new_file)]
  if len(matched) == 1:
    return matched[0]
  if len(matched) > 1:
    # BUGFIX: this used the Python-2-only `print >> sys.stderr` statement,
    # which is a SyntaxError under Python 3; sys.stderr.write works in both.
    sys.stderr.write('WARNING: multiple matches for %s; skipping:\n\t%s\n'
                     % (new_file, '\n\t'.join(matched)))
  return None
def _get_per_file_per_line_coverage(report):
  """Return a dict whose keys are file names and values are coverage data.

  Values are lists which take the form (lineno, coverage, code).
  """
  # All checked-in files, used to resolve the mangled paths in the report.
  all_files = subprocess.check_output(['git', 'ls-files']).splitlines()
  lines = report.splitlines()
  current_file = None
  file_lines = []
  files = {}
  not_checked_in = '%'  # Use this as the file name for not-checked-in files.
  for line in lines:
    # A line of the form 'path/to/file.cpp:' starts a new file section.
    m = re.match('([a-zA-Z0-9\./_-]+):', line)
    if m:
      # Flush the lines collected for the previous (checked-in) file.
      if current_file and current_file != not_checked_in:
        files[current_file] = file_lines
      match_filename = _file_in_repo(m.groups()[0], all_files)
      current_file = match_filename or not_checked_in
      file_lines = []
    else:
      if current_file != not_checked_in:
        # Skip separator rows and continuation rows of the report table.
        skip = re.match('^\s{2}-+$|^\s{2}\|.+$', line)
        if line and not skip:
          # Data rows look like '<count>|<lineno>|<code>'.
          cov, linenum, code = line.split('|', 2)
          cov = cov.strip()
          if cov:
            cov = int(cov)
          else:
            cov = None  # We don't care about coverage for this line.
          linenum = int(linenum.strip())
          # Report lines must be contiguous and 1-indexed per file.
          assert linenum == len(file_lines) + 1
          # NOTE(review): .decode assumes `code` is a byte string, i.e. this
          # runs under Python 2 (check_output and splitlines yield str/bytes
          # there). Under Python 3 this line would fail — confirm before
          # porting.
          file_lines.append((linenum, cov, code.decode('utf-8', 'replace')))
  # NOTE(review): the final file section is never flushed into `files` here;
  # its lines are only kept if another 'file:' header follows. Confirm
  # whether llvm-cov reports always end with a summary header.
  return files
def _testname(filename):
"""Transform the file name into an ingestible test name."""
return re.sub(r'[^a-zA-Z0-9]', '_', filename)
def _nanobench_json(results, properties, key):
  """Return the results in JSON format like that produced by nanobench.

  Args:
    results: list of (percent_covered, lines_not_covered, filename) tuples.
    properties: dict of build properties, copied into the output.
    key: dict identifying the bot, stored under 'key'.
  """
  per_test = {}
  for percent, not_covered_lines, f in results:
    per_test[_testname(f)] = {
        'coverage': {
            'percent': percent,
            'lines_not_covered': not_covered_lines,
            'options': {
                'fullname': f,
                'dir': os.path.dirname(f),
                'source_type': 'coverage',
            },
        },
    }
  # Copy the properties first, then set 'key' and 'results', so that those
  # two entries win if the caller's properties dict also contains them.
  rv = dict(properties)
  rv['key'] = key
  rv['results'] = per_test
  return rv
def _parse_key_value(kv_list):
"""Return a dict whose key/value pairs are derived from the given list.
For example:
['k1', 'v1', 'k2', 'v2']
becomes:
{'k1': 'v1',
'k2': 'v2'}
"""
if len(kv_list) % 2 != 0:
raise Exception('Invalid key/value pairs: %s' % kv_list)
rv = {}
for i in xrange(len(kv_list) / 2):
rv[kv_list[i*2]] = kv_list[i*2+1]
return rv
def _get_per_file_summaries(line_by_line):
"""Summarize the full line-by-line coverage report by file."""
per_file = []
for filepath, lines in line_by_line.iteritems():
total_lines = 0
covered_lines = 0
for _, cov, _ in lines:
if cov is not None:
total_lines += 1
if cov > 0:
covered_lines += 1
if total_lines > 0:
per_file.append((float(covered_lines)/float(total_lines)*100.0,
total_lines - covered_lines,
filepath))
return per_file
def main():
  """Generate useful data from a coverage report.

  Reads the llvm-cov report named by --report, optionally writes the raw
  line-by-line data as JSON (--linebyline) and/or a nanobench-style summary
  (--nanobench, which also requires --key and --properties).
  """
  # Parse args.
  parser = argparse.ArgumentParser()
  parser.add_argument('--report', help='input file; an llvm coverage report.',
                      required=True)
  parser.add_argument('--nanobench', help='output file for nanobench data.')
  parser.add_argument(
      '--key', metavar='key_or_value', nargs='+',
      help='key/value pairs identifying this bot.')
  parser.add_argument(
      '--properties', metavar='key_or_value', nargs='+',
      help='key/value pairs representing properties of this build.')
  parser.add_argument('--linebyline',
                      help='output file for line-by-line JSON data.')
  args = parser.parse_args()

  # --nanobench output cannot be produced without the bot identity.
  if args.nanobench and not (args.key and args.properties):
    raise Exception('--key and --properties are required with --nanobench')

  with open(args.report) as f:
    report = f.read()

  line_by_line = _get_per_file_per_line_coverage(report)

  if args.linebyline:
    with open(args.linebyline, 'w') as f:
      json.dump(line_by_line, f)

  if args.nanobench:
    # Parse the key and properties for use in the nanobench JSON output.
    key = _parse_key_value(args.key)
    properties = _parse_key_value(args.properties)

    # Get per-file summaries.
    per_file = _get_per_file_summaries(line_by_line)

    # Write results.
    format_results = _nanobench_json(per_file, properties, key)
    with open(args.nanobench, 'w') as f:
      json.dump(format_results, f)
# Run as a script; importing the module for tests has no side effects.
if __name__ == '__main__':
  main()
| 30.390244 | 80 | 0.648957 |
import argparse
import json
import os
import re
import subprocess
import sys
def _fix_filename(filename):
return filename.split('..')[-1].lstrip('./')
def _file_in_repo(filename, all_files):
new_file = _fix_filename(filename)
matched = []
for f in all_files:
if f.endswith(new_file):
matched.append(f)
if len(matched) == 1:
return matched[0]
elif len(matched) > 1:
print >> sys.stderr, ('WARNING: multiple matches for %s; skipping:\n\t%s'
% (new_file, '\n\t'.join(matched)))
return None
def _get_per_file_per_line_coverage(report):
    """Return a dict mapping repo file paths to per-line coverage data.

    Each value is a list of (lineno, coverage, code) tuples parsed from the
    textual llvm coverage *report*; ``coverage`` is an int hit count or None
    for non-instrumented lines.  Files not tracked by git are skipped.
    """
    all_files = subprocess.check_output(['git', 'ls-files']).splitlines()
    lines = report.splitlines()
    current_file = None
    file_lines = []
    files = {}
    not_checked_in = '%'  # Sentinel for report sections not tracked in the repo.
    for line in lines:
        m = re.match('([a-zA-Z0-9\./_-]+):', line)
        if m:
            # A new file header starts: flush the previous section.
            if current_file and current_file != not_checked_in:
                files[current_file] = file_lines
            match_filename = _file_in_repo(m.groups()[0], all_files)
            current_file = match_filename or not_checked_in
            file_lines = []
        else:
            if current_file != not_checked_in:
                # Skip ruler/annotation lines in the report body.
                skip = re.match('^\s{2}-+$|^\s{2}\|.+$', line)
                if line and not skip:
                    cov, linenum, code = line.split('|', 2)
                    cov = cov.strip()
                    if cov:
                        cov = int(cov)
                    else:
                        # Line not instrumented (blank line, comment, ...).
                        cov = None
                    linenum = int(linenum.strip())
                    assert linenum == len(file_lines) + 1
                    file_lines.append((linenum, cov, code.decode('utf-8', 'replace')))
    # BUGFIX: flush the final file section; the loop above only flushes
    # when a *new* header is encountered, so the last file's data used to
    # be silently dropped.
    if current_file and current_file != not_checked_in:
        files[current_file] = file_lines
    return files
def _testname(filename):
return re.sub(r'[^a-zA-Z0-9]', '_', filename)
def _nanobench_json(results, properties, key):
    """Return coverage *results* formatted like nanobench JSON output.

    *results* is a list of (percent, lines_not_covered, filepath) tuples;
    *properties* and *key* are dicts describing the build and the bot.
    """
    # Copy the properties first so that the explicit 'key'/'results'
    # entries below win if the caller's dict happens to contain them.
    formatted = dict(properties)
    formatted['key'] = key
    per_test = {}
    for percent, not_covered_lines, f in results:
        per_test[_testname(f)] = {
            'coverage': {
                'percent': percent,
                'lines_not_covered': not_covered_lines,
                'options': {
                    'fullname': f,
                    'dir': os.path.dirname(f),
                    'source_type': 'coverage',
                },
            },
        }
    formatted['results'] = per_test
    return formatted
def _parse_key_value(kv_list):
if len(kv_list) % 2 != 0:
raise Exception('Invalid key/value pairs: %s' % kv_list)
rv = {}
for i in xrange(len(kv_list) / 2):
rv[kv_list[i*2]] = kv_list[i*2+1]
return rv
def _get_per_file_summaries(line_by_line):
per_file = []
for filepath, lines in line_by_line.iteritems():
total_lines = 0
covered_lines = 0
for _, cov, _ in lines:
if cov is not None:
total_lines += 1
if cov > 0:
covered_lines += 1
if total_lines > 0:
per_file.append((float(covered_lines)/float(total_lines)*100.0,
total_lines - covered_lines,
filepath))
return per_file
def main():
    """Generate useful data from a coverage report."""
    # Parse args.
    parser = argparse.ArgumentParser()
    parser.add_argument('--report', help='input file; an llvm coverage report.',
                        required=True)
    parser.add_argument('--nanobench', help='output file for nanobench data.')
    parser.add_argument(
        '--key', metavar='key_or_value', nargs='+',
        help='key/value pairs identifying this bot.')
    parser.add_argument(
        '--properties', metavar='key_or_value', nargs='+',
        help='key/value pairs representing properties of this build.')
    parser.add_argument('--linebyline',
                        help='output file for line-by-line JSON data.')
    args = parser.parse_args()
    # --nanobench output needs key/properties metadata to be meaningful.
    if args.nanobench and not (args.key and args.properties):
        raise Exception('--key and --properties are required with --nanobench')
    with open(args.report) as f:
        report = f.read()
    # Parse the report into per-file, per-line coverage data.
    line_by_line = _get_per_file_per_line_coverage(report)
    if args.linebyline:
        with open(args.linebyline, 'w') as f:
            json.dump(line_by_line, f)
    if args.nanobench:
        # Parse the key and properties for use in the nanobench JSON output.
        key = _parse_key_value(args.key)
        properties = _parse_key_value(args.properties)
        # Get per-file summaries.
        per_file = _get_per_file_summaries(line_by_line)
        # Write results.
        format_results = _nanobench_json(per_file, properties, key)
        with open(args.nanobench, 'w') as f:
            json.dump(format_results, f)
# Allow use both as a standalone script and as an importable module.
if __name__ == '__main__':
    main()
| true | true |
f721bd62e54683913fbde95759866ef0c91bb0fc | 45,876 | py | Python | tests/test_url.py | Laerte/w3lib | d9763db408c474dd4872d788398db41e5c7773ae | [
"BSD-3-Clause"
] | 1 | 2020-07-15T19:41:36.000Z | 2020-07-15T19:41:36.000Z | tests/test_url.py | Laerte/w3lib | d9763db408c474dd4872d788398db41e5c7773ae | [
"BSD-3-Clause"
] | 4 | 2021-03-11T12:09:35.000Z | 2021-11-15T08:39:21.000Z | tests/test_url.py | zanachka/w3lib | d9763db408c474dd4872d788398db41e5c7773ae | [
"BSD-3-Clause"
] | null | null | null | import os
import unittest
from urllib.parse import urlparse
import pytest
from w3lib.url import (
add_or_replace_parameter,
add_or_replace_parameters,
any_to_uri,
canonicalize_url,
file_uri_to_path,
is_url,
parse_data_uri,
parse_url,
path_to_file_uri,
safe_download_url,
safe_url_string,
url_query_parameter,
url_query_cleaner,
)
class UrlTests(unittest.TestCase):
    def test_safe_url_string(self):
        """Non-ASCII characters are UTF-8 percent-escaped; escaping is idempotent."""
        # Motoko Kusanagi (Cyborg from Ghost in the Shell)
        motoko = "\u8349\u8599 \u7d20\u5b50"
        self.assertEqual(
            safe_url_string(motoko),  # note the %20 for space
            "%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90",
        )
        # Applying safe_url_string twice gives the same result (idempotence).
        self.assertEqual(
            safe_url_string(motoko), safe_url_string(safe_url_string(motoko))
        )
        self.assertEqual(safe_url_string("©"), "%C2%A9")  # copyright symbol
        # page-encoding does not affect URL path
        self.assertEqual(safe_url_string("©", "iso-8859-1"), "%C2%A9")
        # path_encoding does
        self.assertEqual(safe_url_string("©", path_encoding="iso-8859-1"), "%A9")
        self.assertEqual(
            safe_url_string("http://www.example.org/"), "http://www.example.org/"
        )
        alessi = "/ecommerce/oggetto/Te \xf2/tea-strainer/1273"
        self.assertEqual(
            safe_url_string(alessi), "/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273"
        )
        self.assertEqual(
            safe_url_string(
                "http://www.example.com/test?p(29)url(http://www.another.net/page)"
            ),
            "http://www.example.com/test?p(29)url(http://www.another.net/page)",
        )
        self.assertEqual(
            safe_url_string(
                "http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"
            ),
            "http://www.example.com/Brochures_&_Paint_Cards&PageSize=200",
        )
        # page-encoding does not affect URL path
        # we still end up UTF-8 encoding characters before percent-escaping
        safeurl = safe_url_string("http://www.example.com/£")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
        safeurl = safe_url_string("http://www.example.com/£", encoding="utf-8")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
        safeurl = safe_url_string("http://www.example.com/£", encoding="latin-1")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
        safeurl = safe_url_string("http://www.example.com/£", path_encoding="latin-1")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%A3")
        self.assertTrue(isinstance(safe_url_string(b"http://example.com/"), str))
    def test_safe_url_string_remove_ascii_tab_and_newlines(self):
        """ASCII tab, CR and LF are removed from URLs entirely."""
        self.assertEqual(
            safe_url_string("http://example.com/test\n.html"),
            "http://example.com/test.html",
        )
        self.assertEqual(
            safe_url_string("http://example.com/test\t.html"),
            "http://example.com/test.html",
        )
        self.assertEqual(
            safe_url_string("http://example.com/test\r.html"),
            "http://example.com/test.html",
        )
        self.assertEqual(
            safe_url_string("http://example.com/test\r.html\n"),
            "http://example.com/test.html",
        )
        self.assertEqual(
            safe_url_string("http://example.com/test\r\n.html\t"),
            "http://example.com/test.html",
        )
        # Other control characters (here \a == 0x07) are escaped, not removed.
        self.assertEqual(
            safe_url_string("http://example.com/test\a\n.html"),
            "http://example.com/test%07.html",
        )
    def test_safe_url_string_unsafe_chars(self):
        """Unsafe characters are escaped; ',', '|', '[', ']' are left as-is."""
        safeurl = safe_url_string(
            r"http://localhost:8001/unwise{,},|,\,^,[,],`?|=[]&[]=|"
        )
        self.assertEqual(
            safeurl, r"http://localhost:8001/unwise%7B,%7D,|,%5C,%5E,[,],%60?|=[]&[]=|"
        )
    def test_safe_url_string_quote_path(self):
        """quote_path toggles percent-escaping of the path; it defaults to on."""
        safeurl = safe_url_string('http://google.com/"hello"', quote_path=True)
        self.assertEqual(safeurl, "http://google.com/%22hello%22")
        safeurl = safe_url_string('http://google.com/"hello"', quote_path=False)
        self.assertEqual(safeurl, 'http://google.com/"hello"')
        safeurl = safe_url_string('http://google.com/"hello"')
        self.assertEqual(safeurl, "http://google.com/%22hello%22")
    def test_safe_url_string_with_query(self):
        """`encoding` applies to the query string; `path_encoding` to the path only."""
        safeurl = safe_url_string("http://www.example.com/£?unit=µ")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
        safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="utf-8")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
        safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="latin-1")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
        safeurl = safe_url_string(
            "http://www.example.com/£?unit=µ", path_encoding="latin-1"
        )
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
        safeurl = safe_url_string(
            "http://www.example.com/£?unit=µ",
            encoding="latin-1",
            path_encoding="latin-1",
        )
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
    def test_safe_url_string_misc(self):
        """Mixed Unicode and pre-escaped sequences normalize to the same URL."""
        # mixing Unicode and percent-escaped sequences
        safeurl = safe_url_string("http://www.example.com/£?unit=%C2%B5")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
        safeurl = safe_url_string("http://www.example.com/%C2%A3?unit=µ")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
    def test_safe_url_string_bytes_input(self):
        """Bytes input is decoded (UTF-8 by default) and the result is always str."""
        safeurl = safe_url_string(b"http://www.example.com/")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/")
        # bytes input is assumed to be UTF-8
        safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
        # page-encoding encoded bytes still end up as UTF-8 sequences in path
        safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding="latin1")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
        safeurl = safe_url_string(
            b"http://www.example.com/\xa3?unit=\xb5", encoding="latin1"
        )
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
    def test_safe_url_string_bytes_input_nonutf8(self):
        """Bytes that are not valid UTF-8 are percent-escaped byte-for-byte."""
        # latin1
        safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
        # cp1251
        # >>> 'Россия'.encode('cp1251')
        # '\xd0\xee\xf1\xf1\xe8\xff'
        safeurl = safe_url_string(
            b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff"
        )
        self.assertTrue(isinstance(safeurl, str))
        self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
    def test_safe_url_idna(self):
        """Non-ASCII hosts are IDNA/punycode-encoded; paths and queries escaped."""
        # adapted from:
        # https://ssl.icu-project.org/icu-bin/idnbrowser
        # http://unicode.org/faq/idn.html
        # + various others
        websites = (
            (
                "http://www.färgbolaget.nu/färgbolaget",
                "http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget",
            ),
            (
                "http://www.räksmörgås.se/?räksmörgås=yes",
                "http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes",
            ),
            (
                "http://www.brændendekærlighed.com/brændende/kærlighed",
                "http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed",
            ),
            ("http://www.예비교사.com", "http://www.xn--9d0bm53a3xbzui.com"),
            ("http://理容ナカムラ.com", "http://xn--lck1c3crb1723bpq4a.com"),
            ("http://あーるいん.com", "http://xn--l8je6s7a45b.com"),
            # --- real websites ---
            # in practice, this redirect (301) to http://www.buecher.de/?q=b%C3%BCcher
            (
                "http://www.bücher.de/?q=bücher",
                "http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
            ),
            # Japanese
            (
                "http://はじめよう.みんな/?query=サ&maxResults=5",
                "http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5",
            ),
            # Russian
            ("http://кто.рф/", "http://xn--j1ail.xn--p1ai/"),
            (
                "http://кто.рф/index.php?domain=Что",
                "http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE",
            ),
            # Korean
            ("http://내도메인.한국/", "http://xn--220b31d95hq8o.xn--3e0b707e/"),
            (
                "http://맨체스터시티축구단.한국/",
                "http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/",
            ),
            # Arabic
            ("http://nic.شبكة", "http://nic.xn--ngbc5azd"),
            # Chinese
            ("https://www.贷款.在线", "https://www.xn--0kwr83e.xn--3ds443g"),
            ("https://www2.xn--0kwr83e.在线", "https://www2.xn--0kwr83e.xn--3ds443g"),
            ("https://www3.贷款.xn--3ds443g", "https://www3.xn--0kwr83e.xn--3ds443g"),
        )
        for idn_input, safe_result in websites:
            safeurl = safe_url_string(idn_input)
            self.assertEqual(safeurl, safe_result)
        # make sure the safe URL is unchanged when made safe a 2nd time
        for _, safe_result in websites:
            safeurl = safe_url_string(safe_result)
            self.assertEqual(safeurl, safe_result)
    def test_safe_url_idna_encoding_failure(self):
        """Hosts that cannot be IDNA-encoded are kept; path/query still escaped."""
        # missing DNS label
        self.assertEqual(
            safe_url_string("http://.example.com/résumé?q=résumé"),
            "http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        # DNS label too long
        self.assertEqual(
            safe_url_string(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
            f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
    def test_safe_url_port_number(self):
        """Explicit port numbers are preserved; an empty trailing ':' is dropped."""
        self.assertEqual(
            safe_url_string("http://www.example.com:80/résumé?q=résumé"),
            "http://www.example.com:80/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        self.assertEqual(
            safe_url_string("http://www.example.com:/résumé?q=résumé"),
            "http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
    def test_safe_url_string_preserve_nonfragment_hash(self):
        """Pre-encoded %23 sequences are never decoded back to a raw '#'."""
        # don't decode `%23` to `#`
        self.assertEqual(
            safe_url_string("http://www.example.com/path/to/%23/foo/bar"),
            "http://www.example.com/path/to/%23/foo/bar",
        )
        self.assertEqual(
            safe_url_string("http://www.example.com/path/to/%23/foo/bar#frag"),
            "http://www.example.com/path/to/%23/foo/bar#frag",
        )
        self.assertEqual(
            safe_url_string(
                "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
            ),
            "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
        )
        self.assertEqual(
            safe_url_string(
                "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
            ),
            "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
        )
    def test_safe_download_url(self):
        """safe_download_url resolves dot segments and guarantees a '/' path."""
        self.assertEqual(
            safe_download_url("http://www.example.org"), "http://www.example.org/"
        )
        self.assertEqual(
            safe_download_url("http://www.example.org/../"), "http://www.example.org/"
        )
        self.assertEqual(
            safe_download_url("http://www.example.org/../../images/../image"),
            "http://www.example.org/image",
        )
        self.assertEqual(
            safe_download_url("http://www.example.org/dir/"),
            "http://www.example.org/dir/",
        )
        self.assertEqual(
            safe_download_url(b"http://www.example.org/dir/"),
            "http://www.example.org/dir/",
        )
        # Encoding related tests
        self.assertEqual(
            safe_download_url(
                b"http://www.example.org?\xa3",
                encoding="latin-1",
                path_encoding="latin-1",
            ),
            "http://www.example.org/?%A3",
        )
        self.assertEqual(
            safe_download_url(
                b"http://www.example.org?\xc2\xa3",
                encoding="utf-8",
                path_encoding="utf-8",
            ),
            "http://www.example.org/?%C2%A3",
        )
        self.assertEqual(
            safe_download_url(
                b"http://www.example.org/\xc2\xa3?\xc2\xa3",
                encoding="utf-8",
                path_encoding="latin-1",
            ),
            "http://www.example.org/%A3?%C2%A3",
        )
def test_is_url(self):
self.assertTrue(is_url("http://www.example.org"))
self.assertTrue(is_url("https://www.example.org"))
self.assertTrue(is_url("file:///some/path"))
self.assertFalse(is_url("foo://bar"))
self.assertFalse(is_url("foo--bar"))
    def test_url_query_parameter(self):
        """url_query_parameter extracts a query value, honoring default/blank options."""
        self.assertEqual(
            url_query_parameter("product.html?id=200&foo=bar", "id"), "200"
        )
        self.assertEqual(
            url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
            "mydefault",
        )
        # Blank values count as missing unless keep_blank_values is truthy.
        self.assertEqual(url_query_parameter("product.html?id=", "id"), None)
        self.assertEqual(
            url_query_parameter("product.html?id=", "id", keep_blank_values=1), ""
        )
def test_url_query_parameter_2(self):
"""
This problem was seen several times in the feeds. Sometime affiliate URLs contains
nested encoded affiliate URL with direct URL as parameters. For example:
aff_url1 = 'http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1'
the typical code to extract needed URL from it is:
aff_url2 = url_query_parameter(aff_url1, 'url')
after this aff2_url is:
'http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's gardenfurniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1'
the direct URL extraction is
url = url_query_parameter(aff_url2, 'referredURL')
but this will not work, because aff_url2 contains ' (comma sign encoded in the feed)
and the URL extraction will fail, current workaround was made in the spider,
just a replace for ' to %27
"""
return # FIXME: this test should pass but currently doesnt
# correct case
aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1",
)
# weird case
aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
# fails, prod_url is None now
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1",
)
def test_add_or_replace_parameter(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameter(url, "arg", "v"), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg3", "nv3"),
"http://domain/test?arg1=v1&arg2=v2&arg3=nv3",
)
self.assertEqual(
add_or_replace_parameter(
"http://domain/moreInfo.asp?prodID=", "prodID", "20"
),
"http://domain/moreInfo.asp?prodID=20",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue",
)
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(
add_or_replace_parameter(url, "version", "2"),
"http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2",
)
self.assertEqual(
add_or_replace_parameter(url, "pageurl", "test"),
"http://example.com/?version=1&pageurl=test¶m2=value2",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg1", "v3"),
"http://domain/test?arg1=v3&arg2=v2",
)
    # Known limitation: ';'-separated query pairs are not handled.
    @pytest.mark.xfail(reason="https://github.com/scrapy/w3lib/issues/164")
    def test_add_or_replace_parameter_fail(self):
        """Semicolon-delimited query strings should be handled (currently xfail)."""
        self.assertEqual(
            add_or_replace_parameter(
                "http://domain/test?arg1=v1;arg2=v2", "arg1", "v3"
            ),
            "http://domain/test?arg1=v3&arg2=v2",
        )
    def test_add_or_replace_parameters(self):
        """add_or_replace_parameters applies a dict of query-arg updates at once."""
        url = "http://domain/test"
        self.assertEqual(
            add_or_replace_parameters(url, {"arg": "v"}), "http://domain/test?arg=v"
        )
        url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
        self.assertEqual(
            add_or_replace_parameters(url, {"arg4": "v4"}),
            "http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
        )
        self.assertEqual(
            add_or_replace_parameters(url, {"arg4": "v4", "arg3": "v3new"}),
            "http://domain/test?arg1=v1&arg2=v2&arg3=v3new&arg4=v4",
        )
        # Duplicate keys: adding keeps duplicates; replacing collapses them.
        url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
        self.assertEqual(
            add_or_replace_parameters(url, {"arg4": "v4"}),
            "http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
        )
        self.assertEqual(
            add_or_replace_parameters(url, {"arg1": "v3"}),
            "http://domain/test?arg1=v3&arg2=v2",
        )
    def test_add_or_replace_parameters_does_not_change_input_param(self):
        """The parameters dict passed by the caller must not be mutated."""
        url = "http://domain/test?arg=original"
        input_param = {"arg": "value"}
        add_or_replace_parameters(url, input_param)  # noqa
        self.assertEqual(input_param, {"arg": "value"})
    def test_url_query_cleaner(self):
        """url_query_cleaner keeps or removes query args, with unique/remove modes."""
        self.assertEqual("product.html", url_query_cleaner("product.html?"))
        self.assertEqual("product.html", url_query_cleaner("product.html?&"))
        self.assertEqual(
            "product.html?id=200",
            url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id"]),
        )
        self.assertEqual(
            "product.html?id=200",
            url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ["id"]),
        )
        self.assertEqual(
            "product.html", url_query_cleaner("product.html?foo=bar&name=wired", ["id"])
        )
        self.assertEqual(
            "product.html?id=200&name=wired",
            url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id", "name"]),
        )
        self.assertEqual(
            "product.html?id",
            url_query_cleaner("product.html?id&other=3&novalue=", ["id"]),
        )
        # default is to remove duplicate keys
        self.assertEqual(
            "product.html?d=1",
            url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ["d"]),
        )
        # unique=False disables duplicate keys filtering
        self.assertEqual(
            "product.html?d=1&d=2&d=3",
            url_query_cleaner(
                "product.html?d=1&e=b&d=2&d=3&other=other", ["d"], unique=False
            ),
        )
        # Fragments are dropped by default.
        self.assertEqual(
            "product.html?id=200&foo=bar",
            url_query_cleaner(
                "product.html?id=200&foo=bar&name=wired#id20", ["id", "foo"]
            ),
        )
        # remove=True inverts the filter: listed args are stripped instead.
        self.assertEqual(
            "product.html?foo=bar&name=wired",
            url_query_cleaner(
                "product.html?id=200&foo=bar&name=wired", ["id"], remove=True
            ),
        )
        self.assertEqual(
            "product.html?name=wired",
            url_query_cleaner(
                "product.html?id=2&foo=bar&name=wired", ["id", "foo"], remove=True
            ),
        )
        self.assertEqual(
            "product.html?foo=bar&name=wired",
            url_query_cleaner(
                "product.html?id=2&foo=bar&name=wired", ["id", "footo"], remove=True
            ),
        )
        self.assertEqual(
            "product.html", url_query_cleaner("product.html", ["id"], remove=True)
        )
        self.assertEqual(
            "product.html", url_query_cleaner("product.html?&", ["id"], remove=True)
        )
        # A single string selects exactly that parameter name (no prefix match).
        self.assertEqual(
            "product.html?foo=bar",
            url_query_cleaner("product.html?foo=bar&name=wired", "foo"),
        )
        self.assertEqual(
            "product.html?foobar=wired",
            url_query_cleaner("product.html?foo=bar&foobar=wired", "foobar"),
        )
    def test_url_query_cleaner_keep_fragments(self):
        """keep_fragments=True preserves the URL fragment while cleaning the query."""
        self.assertEqual(
            "product.html?id=200#foo",
            url_query_cleaner(
                "product.html?id=200&foo=bar&name=wired#foo",
                ["id"],
                keep_fragments=True,
            ),
        )
    def test_path_to_file_uri(self):
        """path_to_file_uri builds file:/// URIs and round-trips via file_uri_to_path."""
        if os.name == "nt":
            # NOTE(review): r"C:\\windows\clock.avi" is a raw string, so the
            # double backslash stays literal; presumably "C:\windows" was
            # intended — verify on an actual Windows run.
            self.assertEqual(
                path_to_file_uri(r"C:\\windows\clock.avi"),
                "file:///C:/windows/clock.avi",
            )
        else:
            self.assertEqual(
                path_to_file_uri("/some/path.txt"), "file:///some/path.txt"
            )
        fn = "test.txt"
        x = path_to_file_uri(fn)
        self.assertTrue(x.startswith("file:///"))
        # Case-insensitive compare to tolerate Windows drive-letter casing.
        self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
    def test_file_uri_to_path(self):
        """file_uri_to_path converts file:/// URIs (and passes plain paths through)."""
        if os.name == "nt":
            # NOTE(review): r"C:\\windows\clock.avi" keeps a literal double
            # backslash (raw string); verify the intent on a Windows run.
            self.assertEqual(
                file_uri_to_path("file:///C:/windows/clock.avi"),
                r"C:\\windows\clock.avi",
            )
            uri = "file:///C:/windows/clock.avi"
            uri2 = path_to_file_uri(file_uri_to_path(uri))
            self.assertEqual(uri, uri2)
        else:
            self.assertEqual(
                file_uri_to_path("file:///path/to/test.txt"), "/path/to/test.txt"
            )
            # Non-URI input is returned unchanged.
            self.assertEqual(file_uri_to_path("/path/to/test.txt"), "/path/to/test.txt")
            uri = "file:///path/to/test.txt"
            uri2 = path_to_file_uri(file_uri_to_path(uri))
            self.assertEqual(uri, uri2)
        self.assertEqual(file_uri_to_path("test.txt"), "test.txt")
    def test_any_to_uri(self):
        """any_to_uri turns filesystem paths into file:/// URIs and keeps URIs as-is."""
        if os.name == "nt":
            self.assertEqual(
                any_to_uri(r"C:\\windows\clock.avi"), "file:///C:/windows/clock.avi"
            )
        else:
            self.assertEqual(any_to_uri("/some/path.txt"), "file:///some/path.txt")
        self.assertEqual(any_to_uri("file:///some/path.txt"), "file:///some/path.txt")
        self.assertEqual(
            any_to_uri("http://www.example.com/some/path.txt"),
            "http://www.example.com/some/path.txt",
        )
class CanonicalizeUrlTest(unittest.TestCase):
    def test_canonicalize_url(self):
        """An already-canonical URL is returned unchanged."""
        # simplest case
        self.assertEqual(
            canonicalize_url("http://www.example.com/"), "http://www.example.com/"
        )
    def test_return_str(self):
        """canonicalize_url returns str for both str and bytes input."""
        assert isinstance(canonicalize_url("http://www.example.com"), str)
        assert isinstance(canonicalize_url(b"http://www.example.com"), str)
    def test_append_missing_path(self):
        """A URL with no path gets '/' appended."""
        self.assertEqual(
            canonicalize_url("http://www.example.com"), "http://www.example.com/"
        )
    def test_typical_usage(self):
        """Query arguments are sorted by key; empty '&' separators are dropped."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
            "http://www.example.com/do?a=1&b=2&c=3",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
            "http://www.example.com/do?a=3&b=2&c=1",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?&a=1"),
            "http://www.example.com/do?a=1",
        )
    def test_port_number(self):
        """Explicit port numbers are preserved; empty trailing ports are removed."""
        self.assertEqual(
            canonicalize_url("http://www.example.com:8888/do?a=1&b=2&c=3"),
            "http://www.example.com:8888/do?a=1&b=2&c=3",
        )
        # trailing empty ports are removed
        self.assertEqual(
            canonicalize_url("http://www.example.com:/do?a=1&b=2&c=3"),
            "http://www.example.com/do?a=1&b=2&c=3",
        )
    def test_sorting(self):
        """Query arguments are sorted by key, then by value (duplicates kept)."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
            "http://www.example.com/do?a=50&b=2&b=5&c=3",
        )
    def test_keep_blank_values(self):
        """keep_blank_values (default True) controls whether empty args survive."""
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/do?b=&a=2", keep_blank_values=False
            ),
            "http://www.example.com/do?a=2",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?b=&a=2"),
            "http://www.example.com/do?a=2&b=",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/do?b=&c&a=2", keep_blank_values=False
            ),
            "http://www.example.com/do?a=2",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?b=&c&a=2"),
            "http://www.example.com/do?a=2&b=&c=",
        )
        # A bare value-less query string becomes a blank-valued argument.
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?1750,4"),
            "http://www.example.com/do?1750%2C4=",
        )
    def test_spaces(self):
        """Spaces in query values — raw, '+', or %20 — normalize to '+'."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?q=a space&a=1"),
            "http://www.example.com/do?a=1&q=a+space",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
            "http://www.example.com/do?a=1&q=a+space",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
            "http://www.example.com/do?a=1&q=a+space",
        )
    def test_canonicalize_url_unicode_path(self):
        """Non-ASCII path characters are percent-encoded as UTF-8."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/résumé"),
            "http://www.example.com/r%C3%A9sum%C3%A9",
        )
    def test_canonicalize_url_unicode_query_string(self):
        """The `encoding` parameter affects the query string only, not the path."""
        # default encoding for path and query is UTF-8
        self.assertEqual(
            canonicalize_url("http://www.example.com/résumé?q=résumé"),
            "http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        # passed encoding will affect query string
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/résumé?q=résumé", encoding="latin1"
            ),
            "http://www.example.com/r%C3%A9sum%C3%A9?q=r%E9sum%E9",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/résumé?country=Россия", encoding="cp1251"
            ),
            "http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%EE%F1%F1%E8%FF",
        )
    def test_canonicalize_url_unicode_query_string_wrong_encoding(self):
        """Characters unencodable in the requested encoding fall back to UTF-8."""
        # trying to encode with wrong encoding
        # fallback to UTF-8
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/résumé?currency=€", encoding="latin1"
            ),
            "http://www.example.com/r%C3%A9sum%C3%A9?currency=%E2%82%AC",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/résumé?country=Россия", encoding="latin1"
            ),
            "http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%A0%D0%BE%D1%81%D1%81%D0%B8%D1%8F",
        )
    def test_normalize_percent_encoding_in_paths(self):
        """UTF-8 escapes in paths are upper-cased; non-UTF-8 escapes kept untouched."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/r%c3%a9sum%c3%a9"),
            "http://www.example.com/r%C3%A9sum%C3%A9",
        )
        # non-UTF8 encoded sequences: they should be kept untouched, only upper-cased
        # 'latin1'-encoded sequence in path
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do"),
            "http://www.example.com/a%A3do",
        )
        # 'latin1'-encoded path, UTF-8 encoded query string
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9"),
            "http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
        )
        # 'latin1'-encoded path and query string
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do?q=r%e9sum%e9"),
            "http://www.example.com/a%A3do?q=r%E9sum%E9",
        )
    def test_normalize_percent_encoding_in_query_arguments(self):
        """Percent-escapes in query arguments are normalized to upper case."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?k=b%a3"),
            "http://www.example.com/do?k=b%A3",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?k=r%c3%a9sum%c3%a9"),
            "http://www.example.com/do?k=r%C3%A9sum%C3%A9",
        )
    def test_non_ascii_percent_encoding_in_paths(self):
        """Raw spaces and non-ASCII path characters are percent-encoded."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/a do?a=1"),
            "http://www.example.com/a%20do?a=1",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/a %20do?a=1"),
            "http://www.example.com/a%20%20do?a=1",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/a do£.html?a=1"),
            "http://www.example.com/a%20do%C2%A3.html?a=1",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
            "http://www.example.com/a%20do%C2%A3.html?a=1",
        )
    def test_non_ascii_percent_encoding_in_query_arguments(self):
        """Non-ASCII characters in query args (keys and values) are escaped as UTF-8."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?price=£500&a=5&z=3"),
            "http://www.example.com/do?a=5&price=%C2%A3500&z=3",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
            "http://www.example.com/do?a=5&price=%C2%A3500&z=3",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
            "http://www.example.com/do?a=1&price%28%C2%A3%29=500",
        )
    def test_urls_with_auth_and_ports(self):
        """User credentials and explicit ports survive canonicalization."""
        self.assertEqual(
            canonicalize_url("http://user:pass@www.example.com:81/do?now=1"),
            "http://user:pass@www.example.com:81/do?now=1",
        )
def test_remove_fragments(self):
self.assertEqual(
canonicalize_url("http://user:pass@www.example.com/do?a=1#frag"),
"http://user:pass@www.example.com/do?a=1",
)
self.assertEqual(
canonicalize_url(
"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True
),
"http://user:pass@www.example.com/do?a=1#frag",
)
def test_dont_convert_safe_characters(self):
# dont convert safe characters to percent encoding representation
self.assertEqual(
canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"
),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html",
)
def test_safe_characters_unicode(self):
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(
canonicalize_url("http://www.example.com/caf%E9-con-leche.htm"),
"http://www.example.com/caf%E9-con-leche.htm",
)
def test_domains_are_case_insensitive(self):
self.assertEqual(
canonicalize_url("http://www.EXAMPLE.com/"), "http://www.example.com/"
)
def test_canonicalize_idns(self):
self.assertEqual(
canonicalize_url("http://www.bücher.de?q=bücher"),
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
)
# Japanese (+ reordering query parameters)
self.assertEqual(
canonicalize_url("http://はじめよう.みんな/?query=サ&maxResults=5"),
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?maxResults=5&query=%E3%82%B5",
)
def test_quoted_slash_and_question_sign(self):
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1",
)
self.assertEqual(
canonicalize_url("http://foo.com/AC%2FDC/"), "http://foo.com/AC%2FDC/"
)
def test_canonicalize_urlparsed(self):
# canonicalize_url() can be passed an already urlparse'd URL
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(urlparse("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
urlparse("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_parse_url(self):
# parse_url() wraps urlparse and is used in link extractors
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/résumé?q=résumé")),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
canonicalize_url(parse_url("http://www.example.com/caf%e9-con-leche.htm")),
"http://www.example.com/caf%E9-con-leche.htm",
)
self.assertEqual(
canonicalize_url(
parse_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
),
"http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_idempotence(self):
for url, enc in [
("http://www.bücher.de/résumé?q=résumé", "utf8"),
("http://www.example.com/résumé?q=résumé", "latin1"),
("http://www.example.com/résumé?country=Россия", "cp1251"),
("http://はじめよう.みんな/?query=サ&maxResults=5", "iso2022jp"),
]:
canonicalized = canonicalize_url(url, encoding=enc)
# if we canonicalize again, we ge the same result
self.assertEqual(
canonicalize_url(canonicalized, encoding=enc), canonicalized
)
# without encoding, already canonicalized URL is canonicalized identically
self.assertEqual(canonicalize_url(canonicalized), canonicalized)
def test_canonicalize_url_idna_exceptions(self):
# missing DNS label
self.assertEqual(
canonicalize_url("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# DNS label too long
self.assertEqual(
canonicalize_url(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_preserve_nonfragment_hash(self):
# don't decode `%23` to `#`
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar#frag", keep_fragments=True
),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
keep_fragments=True,
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
class DataURITests(unittest.TestCase):
def test_default_mediatype_charset(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "US-ASCII"})
self.assertEqual(result.data, b"A brief note")
def test_text_uri(self):
result = parse_data_uri("data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_bytes_uri(self):
result = parse_data_uri(b"data:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
def test_unicode_uri(self):
result = parse_data_uri("data:,é")
self.assertEqual(result.data, "é".encode())
def test_default_mediatype(self):
result = parse_data_uri("data:;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_text_charset(self):
result = parse_data_uri("data:text/plain;charset=iso-8859-7,%be%d3%be")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
self.assertEqual(result.data, b"\xbe\xd3\xbe")
def test_mediatype_parameters(self):
result = parse_data_uri(
"data:text/plain;"
"foo=%22foo;bar%5C%22%22;"
"charset=utf-8;"
"bar=%22foo;%5C%22foo%20;/%20,%22,"
"%CE%8E%CE%A3%CE%8E"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(
result.media_type_parameters,
{"charset": "utf-8", "foo": 'foo;bar"', "bar": 'foo;"foo ;/ ,'},
)
self.assertEqual(result.data, b"\xce\x8e\xce\xa3\xce\x8e")
def test_base64(self):
result = parse_data_uri("data:text/plain;base64," "SGVsbG8sIHdvcmxkLg%3D%3D")
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_base64_spaces(self):
result = parse_data_uri(
"data:text/plain;base64,SGVsb%20G8sIH%0A%20%20"
"dvcm%20%20%20xk%20Lg%3D%0A%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
result = parse_data_uri(
"data:text/plain;base64,SGVsb G8sIH\n " "dvcm xk Lg%3D\n%3D"
)
self.assertEqual(result.media_type, "text/plain")
self.assertEqual(result.data, b"Hello, world.")
def test_wrong_base64_param(self):
with self.assertRaises(ValueError):
parse_data_uri("data:text/plain;baes64,SGVsbG8sIHdvcmxkLg%3D%3D")
def test_missing_comma(self):
with self.assertRaises(ValueError):
parse_data_uri("data:A%20brief%20note")
def test_missing_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("text/plain,A%20brief%20note")
def test_wrong_scheme(self):
with self.assertRaises(ValueError):
parse_data_uri("http://example.com/")
def test_scheme_case_insensitive(self):
result = parse_data_uri("DATA:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
result = parse_data_uri("DaTa:,A%20brief%20note")
self.assertEqual(result.data, b"A brief note")
if __name__ == "__main__":
unittest.main()
| 41.629764 | 497 | 0.593884 | import os
import unittest
from urllib.parse import urlparse
import pytest
from w3lib.url import (
add_or_replace_parameter,
add_or_replace_parameters,
any_to_uri,
canonicalize_url,
file_uri_to_path,
is_url,
parse_data_uri,
parse_url,
path_to_file_uri,
safe_download_url,
safe_url_string,
url_query_parameter,
url_query_cleaner,
)
class UrlTests(unittest.TestCase):
def test_safe_url_string(self):
motoko = "\u8349\u8599 \u7d20\u5b50"
self.assertEqual(
safe_url_string(motoko),
"%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90",
)
self.assertEqual(
safe_url_string(motoko), safe_url_string(safe_url_string(motoko))
)
self.assertEqual(safe_url_string("©"), "%C2%A9")
self.assertEqual(safe_url_string("©", "iso-8859-1"), "%C2%A9")
self.assertEqual(safe_url_string("©", path_encoding="iso-8859-1"), "%A9")
self.assertEqual(
safe_url_string("http://www.example.org/"), "http://www.example.org/"
)
alessi = "/ecommerce/oggetto/Te \xf2/tea-strainer/1273"
self.assertEqual(
safe_url_string(alessi), "/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273"
)
self.assertEqual(
safe_url_string(
"http://www.example.com/test?p(29)url(http://www.another.net/page)"
),
"http://www.example.com/test?p(29)url(http://www.another.net/page)",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"
),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200",
)
safeurl = safe_url_string("http://www.example.com/£")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
safeurl = safe_url_string("http://www.example.com/£", path_encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
self.assertTrue(isinstance(safe_url_string(b"http://example.com/"), str))
def test_safe_url_string_remove_ascii_tab_and_newlines(self):
self.assertEqual(
safe_url_string("http://example.com/test\n.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\t.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r.html\n"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\r\n.html\t"),
"http://example.com/test.html",
)
self.assertEqual(
safe_url_string("http://example.com/test\a\n.html"),
"http://example.com/test%07.html",
)
def test_safe_url_string_unsafe_chars(self):
safeurl = safe_url_string(
r"http://localhost:8001/unwise{,},|,\,^,[,],`?|=[]&[]=|"
)
self.assertEqual(
safeurl, r"http://localhost:8001/unwise%7B,%7D,|,%5C,%5E,[,],%60?|=[]&[]=|"
)
def test_safe_url_string_quote_path(self):
safeurl = safe_url_string('http://google.com/"hello"', quote_path=True)
self.assertEqual(safeurl, "http://google.com/%22hello%22")
safeurl = safe_url_string('http://google.com/"hello"', quote_path=False)
self.assertEqual(safeurl, 'http://google.com/"hello"')
safeurl = safe_url_string('http://google.com/"hello"')
self.assertEqual(safeurl, "http://google.com/%22hello%22")
def test_safe_url_string_with_query(self):
safeurl = safe_url_string("http://www.example.com/£?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="utf-8")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/£?unit=µ", encoding="latin-1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ", path_encoding="latin-1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%C2%B5")
safeurl = safe_url_string(
"http://www.example.com/£?unit=µ",
encoding="latin-1",
path_encoding="latin-1",
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
def test_safe_url_string_misc(self):
safeurl = safe_url_string("http://www.example.com/£?unit=%C2%B5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
safeurl = safe_url_string("http://www.example.com/%C2%A3?unit=µ")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%C2%B5")
def test_safe_url_string_bytes_input(self):
safeurl = safe_url_string(b"http://www.example.com/")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/")
safeurl = safe_url_string(b"http://www.example.com/\xc2\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(b"http://www.example.com/\xb5", encoding="latin1")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%B5")
safeurl = safe_url_string(
b"http://www.example.com/\xa3?unit=\xb5", encoding="latin1"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3?unit=%B5")
def test_safe_url_string_bytes_input_nonutf8(self):
safeurl = safe_url_string(b"http://www.example.com/\xa3?unit=\xb5")
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3?unit=%B5")
safeurl = safe_url_string(
b"http://www.example.com/country/\xd0\xee\xf1\xf1\xe8\xff"
)
self.assertTrue(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/country/%D0%EE%F1%F1%E8%FF")
def test_safe_url_idna(self):
websites = (
(
"http://www.färgbolaget.nu/färgbolaget",
"http://www.xn--frgbolaget-q5a.nu/f%C3%A4rgbolaget",
),
(
"http://www.räksmörgås.se/?räksmörgås=yes",
"http://www.xn--rksmrgs-5wao1o.se/?r%C3%A4ksm%C3%B6rg%C3%A5s=yes",
),
(
"http://www.brændendekærlighed.com/brændende/kærlighed",
"http://www.xn--brndendekrlighed-vobh.com/br%C3%A6ndende/k%C3%A6rlighed",
),
("http://www.예비교사.com", "http://www.xn--9d0bm53a3xbzui.com"),
("http://理容ナカムラ.com", "http://xn--lck1c3crb1723bpq4a.com"),
("http://あーるいん.com", "http://xn--l8je6s7a45b.com"),
(
"http://www.bücher.de/?q=bücher",
"http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
),
(
"http://はじめよう.みんな/?query=サ&maxResults=5",
"http://xn--p8j9a0d9c9a.xn--q9jyb4c/?query=%E3%82%B5&maxResults=5",
),
("http://кто.рф/", "http://xn--j1ail.xn--p1ai/"),
(
"http://кто.рф/index.php?domain=Что",
"http://xn--j1ail.xn--p1ai/index.php?domain=%D0%A7%D1%82%D0%BE",
),
("http://내도메인.한국/", "http://xn--220b31d95hq8o.xn--3e0b707e/"),
(
"http://맨체스터시티축구단.한국/",
"http://xn--2e0b17htvgtvj9haj53ccob62ni8d.xn--3e0b707e/",
),
("http://nic.شبكة", "http://nic.xn--ngbc5azd"),
("https://www.贷款.在线", "https://www.xn--0kwr83e.xn--3ds443g"),
("https://www2.xn--0kwr83e.在线", "https://www2.xn--0kwr83e.xn--3ds443g"),
("https://www3.贷款.xn--3ds443g", "https://www3.xn--0kwr83e.xn--3ds443g"),
)
for idn_input, safe_result in websites:
safeurl = safe_url_string(idn_input)
self.assertEqual(safeurl, safe_result)
for _, safe_result in websites:
safeurl = safe_url_string(safe_result)
self.assertEqual(safeurl, safe_result)
def test_safe_url_idna_encoding_failure(self):
self.assertEqual(
safe_url_string("http://.example.com/résumé?q=résumé"),
"http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
safe_url_string(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_port_number(self):
self.assertEqual(
safe_url_string("http://www.example.com:80/résumé?q=résumé"),
"http://www.example.com:80/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
self.assertEqual(
safe_url_string("http://www.example.com:/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
def test_safe_url_string_preserve_nonfragment_hash(self):
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar"),
"http://www.example.com/path/to/%23/foo/bar",
)
self.assertEqual(
safe_url_string("http://www.example.com/path/to/%23/foo/bar#frag"),
"http://www.example.com/path/to/%23/foo/bar#frag",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
)
self.assertEqual(
safe_url_string(
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
),
"http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
)
def test_safe_download_url(self):
self.assertEqual(
safe_download_url("http://www.example.org"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../"), "http://www.example.org/"
)
self.assertEqual(
safe_download_url("http://www.example.org/../../images/../image"),
"http://www.example.org/image",
)
self.assertEqual(
safe_download_url("http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
self.assertEqual(
safe_download_url(b"http://www.example.org/dir/"),
"http://www.example.org/dir/",
)
# Encoding related tests
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xa3",
encoding="latin-1",
path_encoding="latin-1",
),
"http://www.example.org/?%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org?\xc2\xa3",
encoding="utf-8",
path_encoding="utf-8",
),
"http://www.example.org/?%C2%A3",
)
self.assertEqual(
safe_download_url(
b"http://www.example.org/\xc2\xa3?\xc2\xa3",
encoding="utf-8",
path_encoding="latin-1",
),
"http://www.example.org/%A3?%C2%A3",
)
def test_is_url(self):
self.assertTrue(is_url("http://www.example.org"))
self.assertTrue(is_url("https://www.example.org"))
self.assertTrue(is_url("file:///some/path"))
self.assertFalse(is_url("foo://bar"))
self.assertFalse(is_url("foo--bar"))
def test_url_query_parameter(self):
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "id"), "200"
)
self.assertEqual(
url_query_parameter("product.html?id=200&foo=bar", "notthere", "mydefault"),
"mydefault",
)
self.assertEqual(url_query_parameter("product.html?id=", "id"), None)
self.assertEqual(
url_query_parameter("product.html?id=", "id", keep_blank_values=1), ""
)
def test_url_query_parameter_2(self):
return # FIXME: this test should pass but currently doesnt
# correct case
aff_url1 = "http://www.anrdoezrs.net/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EGarden+table+and+chair+sets%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357199%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Garden table and chair sets&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357199%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357199&langId=-1",
)
# weird case
aff_url1 = "http://www.tkqlhce.com/click-2590032-10294381?url=http%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FArgosCreateReferral%3FstoreId%3D10001%26langId%3D-1%26referrer%3DCOJUN%26params%3Dadref%253DGarden+and+DIY-%3EGarden+furniture-%3EChildren%26%2339%3Bs+garden+furniture%26referredURL%3Dhttp%3A%2F%2Fwww.argos.co.uk%2Fwebapp%2Fwcs%2Fstores%2Fservlet%2FProductDisplay%253FstoreId%253D10001%2526catalogId%253D1500001501%2526productId%253D1500357023%2526langId%253D-1"
aff_url2 = url_query_parameter(aff_url1, "url")
self.assertEqual(
aff_url2,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ArgosCreateReferral?storeId=10001&langId=-1&referrer=COJUN¶ms=adref%3DGarden and DIY->Garden furniture->Children's garden furniture&referredURL=http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay%3FstoreId%3D10001%26catalogId%3D1500001501%26productId%3D1500357023%26langId%3D-1",
)
prod_url = url_query_parameter(aff_url2, "referredURL")
# fails, prod_url is None now
self.assertEqual(
prod_url,
"http://www.argos.co.uk/webapp/wcs/stores/servlet/ProductDisplay?storeId=10001&catalogId=1500001501&productId=1500357023&langId=-1",
)
def test_add_or_replace_parameter(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameter(url, "arg", "v"), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg3", "nv3"),
"http://domain/test?arg1=v1&arg2=v2&arg3=nv3",
)
self.assertEqual(
add_or_replace_parameter(
"http://domain/moreInfo.asp?prodID=", "prodID", "20"
),
"http://domain/moreInfo.asp?prodID=20",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2%2C60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?BCat=2,60&CatID=60"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue&CatID=60",
)
url = "http://rmc-offers.co.uk/productlist.asp?"
self.assertEqual(
add_or_replace_parameter(url, "BCat", "newvalue"),
"http://rmc-offers.co.uk/productlist.asp?BCat=newvalue",
)
url = "http://example.com/?version=1&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2"
self.assertEqual(
add_or_replace_parameter(url, "version", "2"),
"http://example.com/?version=2&pageurl=http%3A%2F%2Fwww.example.com%2Ftest%2F%23fragment%3Dy¶m2=value2",
)
self.assertEqual(
add_or_replace_parameter(url, "pageurl", "test"),
"http://example.com/?version=1&pageurl=test¶m2=value2",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameter(url, "arg4", "v4"),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameter(url, "arg1", "v3"),
"http://domain/test?arg1=v3&arg2=v2",
)
@pytest.mark.xfail(reason="https://github.com/scrapy/w3lib/issues/164")
def test_add_or_replace_parameter_fail(self):
self.assertEqual(
add_or_replace_parameter(
"http://domain/test?arg1=v1;arg2=v2", "arg1", "v3"
),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters(self):
url = "http://domain/test"
self.assertEqual(
add_or_replace_parameters(url, {"arg": "v"}), "http://domain/test?arg=v"
)
url = "http://domain/test?arg1=v1&arg2=v2&arg3=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4", "arg3": "v3new"}),
"http://domain/test?arg1=v1&arg2=v2&arg3=v3new&arg4=v4",
)
url = "http://domain/test?arg1=v1&arg2=v2&arg1=v3"
self.assertEqual(
add_or_replace_parameters(url, {"arg4": "v4"}),
"http://domain/test?arg1=v1&arg2=v2&arg1=v3&arg4=v4",
)
self.assertEqual(
add_or_replace_parameters(url, {"arg1": "v3"}),
"http://domain/test?arg1=v3&arg2=v2",
)
def test_add_or_replace_parameters_does_not_change_input_param(self):
url = "http://domain/test?arg=original"
input_param = {"arg": "value"}
add_or_replace_parameters(url, input_param) # noqa
self.assertEqual(input_param, {"arg": "value"})
def test_url_query_cleaner(self):
self.assertEqual("product.html", url_query_cleaner("product.html?"))
self.assertEqual("product.html", url_query_cleaner("product.html?&"))
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html?id=200",
url_query_cleaner("product.html?&id=200&&foo=bar&name=wired", ["id"]),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?foo=bar&name=wired", ["id"])
)
self.assertEqual(
"product.html?id=200&name=wired",
url_query_cleaner("product.html?id=200&foo=bar&name=wired", ["id", "name"]),
)
self.assertEqual(
"product.html?id",
url_query_cleaner("product.html?id&other=3&novalue=", ["id"]),
)
# default is to remove duplicate keys
self.assertEqual(
"product.html?d=1",
url_query_cleaner("product.html?d=1&e=b&d=2&d=3&other=other", ["d"]),
)
# unique=False disables duplicate keys filtering
self.assertEqual(
"product.html?d=1&d=2&d=3",
url_query_cleaner(
"product.html?d=1&e=b&d=2&d=3&other=other", ["d"], unique=False
),
)
self.assertEqual(
"product.html?id=200&foo=bar",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#id20", ["id", "foo"]
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired", ["id"], remove=True
),
)
self.assertEqual(
"product.html?name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "foo"], remove=True
),
)
self.assertEqual(
"product.html?foo=bar&name=wired",
url_query_cleaner(
"product.html?id=2&foo=bar&name=wired", ["id", "footo"], remove=True
),
)
self.assertEqual(
"product.html", url_query_cleaner("product.html", ["id"], remove=True)
)
self.assertEqual(
"product.html", url_query_cleaner("product.html?&", ["id"], remove=True)
)
self.assertEqual(
"product.html?foo=bar",
url_query_cleaner("product.html?foo=bar&name=wired", "foo"),
)
self.assertEqual(
"product.html?foobar=wired",
url_query_cleaner("product.html?foo=bar&foobar=wired", "foobar"),
)
def test_url_query_cleaner_keep_fragments(self):
self.assertEqual(
"product.html?id=200#foo",
url_query_cleaner(
"product.html?id=200&foo=bar&name=wired#foo",
["id"],
keep_fragments=True,
),
)
def test_path_to_file_uri(self):
if os.name == "nt":
self.assertEqual(
path_to_file_uri(r"C:\\windows\clock.avi"),
"file:///C:/windows/clock.avi",
)
else:
self.assertEqual(
path_to_file_uri("/some/path.txt"), "file:///some/path.txt"
)
fn = "test.txt"
x = path_to_file_uri(fn)
self.assertTrue(x.startswith("file:///"))
self.assertEqual(file_uri_to_path(x).lower(), os.path.abspath(fn).lower())
def test_file_uri_to_path(self):
if os.name == "nt":
self.assertEqual(
file_uri_to_path("file:///C:/windows/clock.avi"),
r"C:\\windows\clock.avi",
)
uri = "file:///C:/windows/clock.avi"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
else:
self.assertEqual(
file_uri_to_path("file:///path/to/test.txt"), "/path/to/test.txt"
)
self.assertEqual(file_uri_to_path("/path/to/test.txt"), "/path/to/test.txt")
uri = "file:///path/to/test.txt"
uri2 = path_to_file_uri(file_uri_to_path(uri))
self.assertEqual(uri, uri2)
self.assertEqual(file_uri_to_path("test.txt"), "test.txt")
def test_any_to_uri(self):
if os.name == "nt":
self.assertEqual(
any_to_uri(r"C:\\windows\clock.avi"), "file:///C:/windows/clock.avi"
)
else:
self.assertEqual(any_to_uri("/some/path.txt"), "file:///some/path.txt")
self.assertEqual(any_to_uri("file:///some/path.txt"), "file:///some/path.txt")
self.assertEqual(
any_to_uri("http://www.example.com/some/path.txt"),
"http://www.example.com/some/path.txt",
)
class CanonicalizeUrlTest(unittest.TestCase):
def test_canonicalize_url(self):
# simplest case
self.assertEqual(
canonicalize_url("http://www.example.com/"), "http://www.example.com/"
)
def test_return_str(self):
assert isinstance(canonicalize_url("http://www.example.com"), str)
assert isinstance(canonicalize_url(b"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com"), "http://www.example.com/"
)
def test_typical_usage(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1",
)
def test_port_number(self):
self.assertEqual(
canonicalize_url("http://www.example.com:8888/do?a=1&b=2&c=3"),
"http://www.example.com:8888/do?a=1&b=2&c=3",
)
# trailing empty ports are removed
self.assertEqual(
canonicalize_url("http://www.example.com:/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3",
)
def test_sorting(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3",
)
def test_keep_blank_values(self):
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/do?b=&c&a=2", keep_blank_values=False
),
"http://www.example.com/do?a=2",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?1750,4"),
"http://www.example.com/do?1750%2C4=",
)
def test_spaces(self):
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
self.assertEqual(
canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space",
)
def test_canonicalize_url_unicode_path(self):
self.assertEqual(
canonicalize_url("http://www.example.com/résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9",
)
def test_canonicalize_url_unicode_query_string(self):
# default encoding for path and query is UTF-8
self.assertEqual(
canonicalize_url("http://www.example.com/résumé?q=résumé"),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
)
# passed encoding will affect query string
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?q=résumé", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?q=r%E9sum%E9",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="cp1251"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%EE%F1%F1%E8%FF",
)
def test_canonicalize_url_unicode_query_string_wrong_encoding(self):
# trying to encode with wrong encoding
# fallback to UTF-8
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?currency=€", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?currency=%E2%82%AC",
)
self.assertEqual(
canonicalize_url(
"http://www.example.com/résumé?country=Россия", encoding="latin1"
),
"http://www.example.com/r%C3%A9sum%C3%A9?country=%D0%A0%D0%BE%D1%81%D1%81%D0%B8%D1%8F",
)
    def test_normalize_percent_encoding_in_paths(self):
        """Percent-escapes in paths are upper-cased; sequences that are not
        valid UTF-8 are kept verbatim (apart from the case change)."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/r%c3%a9sum%c3%a9"),
            "http://www.example.com/r%C3%A9sum%C3%A9",
        )
        # non-UTF8 encoded sequences: they should be kept untouched, only upper-cased
        # 'latin1'-encoded sequence in path
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do"),
            "http://www.example.com/a%A3do",
        )
        # 'latin1'-encoded path, UTF-8 encoded query string
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9"),
            "http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
        )
        # 'latin1'-encoded path and query string
        self.assertEqual(
            canonicalize_url("http://www.example.com/a%a3do?q=r%e9sum%e9"),
            "http://www.example.com/a%A3do?q=r%E9sum%E9",
        )
    def test_normalize_percent_encoding_in_query_arguments(self):
        """Percent-escapes in query values are upper-cased but not decoded."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?k=b%a3"),
            "http://www.example.com/do?k=b%A3",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?k=r%c3%a9sum%c3%a9"),
            "http://www.example.com/do?k=r%C3%A9sum%C3%A9",
        )
    def test_non_ascii_percent_encoding_in_paths(self):
        """Raw spaces and non-ASCII characters in paths (str or bytes input)
        are percent-encoded as UTF-8."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/a do?a=1"),
            "http://www.example.com/a%20do?a=1",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/a %20do?a=1"),
            "http://www.example.com/a%20%20do?a=1",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/a do£.html?a=1"),
            "http://www.example.com/a%20do%C2%A3.html?a=1",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/a do\xc2\xa3.html?a=1"),
            "http://www.example.com/a%20do%C2%A3.html?a=1",
        )
    def test_non_ascii_percent_encoding_in_query_arguments(self):
        """Non-ASCII query characters (str or bytes input) are
        percent-encoded as UTF-8, in keys as well as values."""
        self.assertEqual(
            canonicalize_url("http://www.example.com/do?price=£500&a=5&z=3"),
            "http://www.example.com/do?a=5&price=%C2%A3500&z=3",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
            "http://www.example.com/do?a=5&price=%C2%A3500&z=3",
        )
        self.assertEqual(
            canonicalize_url(b"http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
            "http://www.example.com/do?a=1&price%28%C2%A3%29=500",
        )
    def test_urls_with_auth_and_ports(self):
        """User credentials and explicit port numbers are preserved."""
        self.assertEqual(
            canonicalize_url("http://user:pass@www.example.com:81/do?now=1"),
            "http://user:pass@www.example.com:81/do?now=1",
        )
    def test_remove_fragments(self):
        """Fragments are stripped by default and kept with
        ``keep_fragments=True``."""
        self.assertEqual(
            canonicalize_url("http://user:pass@www.example.com/do?a=1#frag"),
            "http://user:pass@www.example.com/do?a=1",
        )
        self.assertEqual(
            canonicalize_url(
                "http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True
            ),
            "http://user:pass@www.example.com/do?a=1#frag",
        )
    def test_dont_convert_safe_characters(self):
        """URL-safe characters (such as ``:``) must not be percent-encoded."""
        # dont convert safe characters to percent encoding representation
        self.assertEqual(
            canonicalize_url(
                "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"
            ),
            "http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html",
        )
    def test_safe_characters_unicode(self):
        """Percent-escapes that do not decode as UTF-8 are left untouched."""
        # urllib.quote uses a mapping cache of encoded characters. when parsing
        # an already percent-encoded url, it will fail if that url was not
        # percent-encoded as utf-8, that's why canonicalize_url must always
        # keep percent-encoded sequences it cannot decode as UTF-8 unchanged.
        self.assertEqual(
            canonicalize_url("http://www.example.com/caf%E9-con-leche.htm"),
            "http://www.example.com/caf%E9-con-leche.htm",
        )
    def test_domains_are_case_insensitive(self):
        """Host names are lower-cased during canonicalization."""
        self.assertEqual(
            canonicalize_url("http://www.EXAMPLE.com/"), "http://www.example.com/"
        )
    def test_canonicalize_idns(self):
        """Internationalized domain names are converted to their IDNA
        (punycode) ASCII form."""
        self.assertEqual(
            canonicalize_url("http://www.bücher.de?q=bücher"),
            "http://www.xn--bcher-kva.de/?q=b%C3%BCcher",
        )
        self.assertEqual(
            canonicalize_url("http://はじめよう.みんな/?query=サ&maxResults=5"),
            "http://xn--p8j9a0d9c9a.xn--q9jyb4c/?maxResults=5&query=%E3%82%B5",
        )
    def test_quoted_slash_and_question_sign(self):
        """Encoded ``/`` (``%2F``) and ``?`` (``%3F``) must stay encoded;
        decoding them would change the URL structure."""
        self.assertEqual(
            canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
            "http://foo.com/AC%2FDC+rocks%3F/?yeah=1",
        )
        self.assertEqual(
            canonicalize_url("http://foo.com/AC%2FDC/"), "http://foo.com/AC%2FDC/"
        )
    def test_canonicalize_urlparsed(self):
        """``canonicalize_url`` accepts ``urlparse()`` results as input."""
        self.assertEqual(
            canonicalize_url(urlparse("http://www.example.com/résumé?q=résumé")),
            "http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        self.assertEqual(
            canonicalize_url(urlparse("http://www.example.com/caf%e9-con-leche.htm")),
            "http://www.example.com/caf%E9-con-leche.htm",
        )
        self.assertEqual(
            canonicalize_url(
                urlparse("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
            ),
            "http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
        )
    def test_canonicalize_parse_url(self):
        """``canonicalize_url`` accepts ``parse_url()`` results as input."""
        # parse_url() wraps urlparse and is used in link extractors
        self.assertEqual(
            canonicalize_url(parse_url("http://www.example.com/résumé?q=résumé")),
            "http://www.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        self.assertEqual(
            canonicalize_url(parse_url("http://www.example.com/caf%e9-con-leche.htm")),
            "http://www.example.com/caf%E9-con-leche.htm",
        )
        self.assertEqual(
            canonicalize_url(
                parse_url("http://www.example.com/a%a3do?q=r%c3%a9sum%c3%a9")
            ),
            "http://www.example.com/a%A3do?q=r%C3%A9sum%C3%A9",
        )
    def test_canonicalize_url_idempotence(self):
        """Canonicalizing an already-canonical URL is a no-op, with or
        without the original encoding argument."""
        for url, enc in [
            ("http://www.bücher.de/résumé?q=résumé", "utf8"),
            ("http://www.example.com/résumé?q=résumé", "latin1"),
            ("http://www.example.com/résumé?country=Россия", "cp1251"),
            ("http://はじめよう.みんな/?query=サ&maxResults=5", "iso2022jp"),
        ]:
            canonicalized = canonicalize_url(url, encoding=enc)
            # if we canonicalize again, we get the same result
            self.assertEqual(
                canonicalize_url(canonicalized, encoding=enc), canonicalized
            )
            # without encoding, already canonicalized URL is canonicalized identically
            self.assertEqual(canonicalize_url(canonicalized), canonicalized)
    def test_canonicalize_url_idna_exceptions(self):
        """Hostnames that IDNA cannot encode (empty or over-long labels)
        are left as-is instead of raising."""
        # missing DNS label
        self.assertEqual(
            canonicalize_url("http://.example.com/résumé?q=résumé"),
            "http://.example.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
        # DNS label too long
        self.assertEqual(
            canonicalize_url(f"http://www.{'example' * 11}.com/résumé?q=résumé"),
            f"http://www.{'example' * 11}.com/r%C3%A9sum%C3%A9?q=r%C3%A9sum%C3%A9",
        )
    def test_preserve_nonfragment_hash(self):
        """A percent-encoded hash (``%23``) in the path or in a query value
        is data, not a fragment delimiter, and must survive untouched."""
        # don't decode `%23` to `#`: that would turn path/query data into a
        # fragment delimiter
        self.assertEqual(
            canonicalize_url("http://www.example.com/path/to/%23/foo/bar"),
            "http://www.example.com/path/to/%23/foo/bar",
        )
        self.assertEqual(
            canonicalize_url("http://www.example.com/path/to/%23/foo/bar#frag"),
            "http://www.example.com/path/to/%23/foo/bar",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/path/to/%23/foo/bar#frag", keep_fragments=True
            ),
            "http://www.example.com/path/to/%23/foo/bar#frag",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo"
            ),
            "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag"
            ),
            "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo",
        )
        self.assertEqual(
            canonicalize_url(
                "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
                keep_fragments=True,
            ),
            "http://www.example.com/path/to/%23/foo/bar?url=http%3A%2F%2Fwww.example.com%2F%2Fpath%2Fto%2F%23%2Fbar%2Ffoo#frag",
        )
class DataURITests(unittest.TestCase):
    """Tests for ``parse_data_uri`` (RFC 2397 ``data:`` URI parsing)."""
    def test_default_mediatype_charset(self):
        """An omitted media type defaults to text/plain;charset=US-ASCII."""
        result = parse_data_uri("data:,A%20brief%20note")
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.media_type_parameters, {"charset": "US-ASCII"})
        self.assertEqual(result.data, b"A brief note")
    def test_text_uri(self):
        """Percent-escapes in the payload decode to bytes."""
        result = parse_data_uri("data:,A%20brief%20note")
        self.assertEqual(result.data, b"A brief note")
    def test_bytes_uri(self):
        """A bytes data URI is accepted as input."""
        result = parse_data_uri(b"data:,A%20brief%20note")
        self.assertEqual(result.data, b"A brief note")
    def test_unicode_uri(self):
        """Non-ASCII payload characters decode to their UTF-8 bytes."""
        result = parse_data_uri("data:,é")
        self.assertEqual(result.data, "é".encode())
    def test_default_mediatype(self):
        """A charset parameter alone keeps the text/plain default type."""
        result = parse_data_uri("data:;charset=iso-8859-7,%be%d3%be")
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
        self.assertEqual(result.data, b"\xbe\xd3\xbe")
    def test_text_charset(self):
        """Explicit media type and charset are both reported."""
        result = parse_data_uri("data:text/plain;charset=iso-8859-7,%be%d3%be")
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.media_type_parameters, {"charset": "iso-8859-7"})
        self.assertEqual(result.data, b"\xbe\xd3\xbe")
    def test_mediatype_parameters(self):
        """Quoted and escaped media-type parameter values are unescaped."""
        result = parse_data_uri(
            "data:text/plain;"
            "foo=%22foo;bar%5C%22%22;"
            "charset=utf-8;"
            "bar=%22foo;%5C%22foo%20;/%20,%22,"
            "%CE%8E%CE%A3%CE%8E"
        )
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(
            result.media_type_parameters,
            {"charset": "utf-8", "foo": 'foo;bar"', "bar": 'foo;"foo ;/ ,'},
        )
        self.assertEqual(result.data, b"\xce\x8e\xce\xa3\xce\x8e")
    def test_base64(self):
        """A ;base64 payload is base64-decoded (including %-escapes)."""
        result = parse_data_uri("data:text/plain;base64," "SGVsbG8sIHdvcmxkLg%3D%3D")
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.data, b"Hello, world.")
    def test_base64_spaces(self):
        """Whitespace (raw or percent-encoded) inside base64 data is ignored."""
        result = parse_data_uri(
            "data:text/plain;base64,SGVsb%20G8sIH%0A%20%20"
            "dvcm%20%20%20xk%20Lg%3D%0A%3D"
        )
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.data, b"Hello, world.")
        result = parse_data_uri(
            "data:text/plain;base64,SGVsb G8sIH\n  " "dvcm   xk Lg%3D\n%3D"
        )
        self.assertEqual(result.media_type, "text/plain")
        self.assertEqual(result.data, b"Hello, world.")
    def test_wrong_base64_param(self):
        """A misspelt 'base64' parameter raises ValueError."""
        with self.assertRaises(ValueError):
            parse_data_uri("data:text/plain;baes64,SGVsbG8sIHdvcmxkLg%3D%3D")
    def test_missing_comma(self):
        """A data URI without the mandatory comma raises ValueError."""
        with self.assertRaises(ValueError):
            parse_data_uri("data:A%20brief%20note")
    def test_missing_scheme(self):
        """Input without any scheme raises ValueError."""
        with self.assertRaises(ValueError):
            parse_data_uri("text/plain,A%20brief%20note")
    def test_wrong_scheme(self):
        """Non-``data:`` schemes raise ValueError."""
        with self.assertRaises(ValueError):
            parse_data_uri("http://example.com/")
    def test_scheme_case_insensitive(self):
        """The ``data:`` scheme is matched case-insensitively."""
        result = parse_data_uri("DATA:,A%20brief%20note")
        self.assertEqual(result.data, b"A brief note")
        result = parse_data_uri("DaTa:,A%20brief%20note")
        self.assertEqual(result.data, b"A brief note")
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| true | true |
f721bde404ba19ca373a5664a9ea44c898ea8a20 | 5,264 | py | Python | python/readsPerGeneScript.sup4.edited.py | Asplund-Samuelsson/ribopipe | a27c09f990757116871345b0748015507dd3e8e8 | [
"MIT"
] | 2 | 2019-11-11T18:32:56.000Z | 2020-10-26T10:39:22.000Z | python/readsPerGeneScript.sup4.edited.py | Asplund-Samuelsson/ribopipe | a27c09f990757116871345b0748015507dd3e8e8 | [
"MIT"
] | null | null | null | python/readsPerGeneScript.sup4.edited.py | Asplund-Samuelsson/ribopipe | a27c09f990757116871345b0748015507dd3e8e8 | [
"MIT"
] | 1 | 2020-05-21T19:28:48.000Z | 2020-05-21T19:28:48.000Z | #!/usr/bin/python2.7
from Bio import SeqIO
"""
Supplementary Note 4: Read density per gene
Authors: Eugene Oh
Modified by: Johannes Asplund-Samuelsson (KTH)
inputFileP:
read density file for plus strand (Supplementary Note 2)
col0: position along genome
col1: read density at that position
inputFileM:
read density file for minus strand (Supplementary Note 2)
col0: position along genome
col1: read density at that position
inputListP:
E. coli MC4100 gene list of the plus strand
col0: gene name
col1: start coordinate of gene
col2: stop coordinate of gene
inputListM
E. coli MC4100 gene list of the minus strand
col0: gene name
col1: start coordinate of gene
col2: stop coordinate of gene
outputFileP:
read densities per gene on plus strand
col0: gene name
col1: start coordinate of gene
col2: stop coordinate of gene
col3: sum of read densities
outputFileM:
read densities per gene on minus strand
col0: gene name
col1: start coordinate of gene
col2: stop coordinate of gene
col3: sum of read densities
genomeLength:
length of genome sequence for negative gene position handling
int
"""
def expression(inputFileP, inputFileM, inputListP, inputListM, outputFileP, \
               outputFileM, inputFileG):
    """Sum read densities per gene for both strands.

    Reads per-position density files and gene lists for the plus and minus
    strands, sums the densities over every gene's coordinate range, and
    writes one tab-separated output file per strand with the gene row plus
    a final column holding the summed density (0 for genes without reads).
    See the module docstring for the exact file formats.
    """
    # Per-strand read density dictionaries: {reference_id: {position: density}}
    DictP = {}
    DictM = {}
    # Load reference sequence lengths (used to wrap negative gene coordinates)
    # and initialize the per-reference density dictionaries.
    # NOTE: the 'rU' open mode was deprecated and removed in Python 3.11;
    # plain 'r' is the Python 3 equivalent (universal newlines by default).
    with open(inputFileG, 'r') as FastaFile:
        for rec in SeqIO.parse(FastaFile, 'fasta'):
            ref_lengths[rec.id] = len(rec) if False else len(rec)
            DictP[rec.id] = {}
            DictM[rec.id] = {}
    # (dict comprehension kept explicit above for parity with ref_lengths)
    def load_input(infile, Dict):
        """Read a density file into Dict[reference][position] = density."""
        with open(infile, 'r') as inFile:
            for line in inFile:
                fields = line.split()
                if not fields:
                    # Skip blank lines (e.g. a trailing newline at EOF)
                    continue
                try:
                    ref = str(fields[-3])
                except IndexError:
                    # No reference column: assume the fasta holds one sequence
                    ref = list(Dict.keys())[0]
                Dict[ref][int(fields[-2])] = float(fields[-1])
    def assign_gene_reads(inputList, Dict, outputFile):
        """Sum densities over each gene's coordinates and write the result."""
        geneDict = {}   # gene name -> summed read density
        geneList = []   # rows of the input gene list, kept in input order
        with open(inputList, 'r') as inFile:
            for line in inFile:
                fields = line.split()
                if not fields:
                    continue
                geneList.append(fields)
                ref = str(fields[0])    # reference sequence ID
                gene = str(fields[1])   # gene name
                start = int(fields[2])  # start coordinate
                stop = int(fields[3])   # stop coordinate
                for Z in range(start, stop + 1):
                    if Z < 0:
                        # Negative/zero coordinates wrap around the end of
                        # the (circular) reference sequence.
                        Z = ref_lengths[ref] + Z + 1
                    if Z in Dict[ref]:
                        geneDict[gene] = geneDict.get(gene, 0) + Dict[ref][Z]
        # Append each gene's total density; a dict lookup replaces the
        # original O(genes^2) scan over geneDict.items(). Genes without any
        # reads get 0, exactly as before.
        for J in geneList:
            J.append(geneDict.get(J[1], 0))
        # Context manager guarantees the output is flushed and closed even
        # on error (the original relied on garbage collection).
        with open(outputFile, 'w') as outFile:
            for J in geneList:
                outFile.write('\t'.join(str(x) for x in J) + '\n')
    ### PLUS STRAND ###
    load_input(inputFileP, DictP)
    assign_gene_reads(inputListP, DictP, outputFileP)
    ### MINUS STRAND ###
    load_input(inputFileM, DictM)
    assign_gene_reads(inputListM, DictM, outputFileM)
if __name__ == '__main__':
    # Command-line entry point: collect the seven file paths and run the
    # per-gene read density computation.
    import argparse
    parser = argparse.ArgumentParser()
    for flag, description in [
            ('--inP', 'Input file P.'),
            ('--inM', 'Input file M.'),
            ('--listP', 'Input list P.'),
            ('--listM', 'Input list M.'),
            ('--outP', 'Output file P.'),
            ('--outM', 'Output file M.'),
            ('--inG', 'Genome fasta.')]:
        parser.add_argument(flag, help=description)
    args = parser.parse_args()
    expression(args.inP, args.inM, args.listP, args.listM,
               args.outP, args.outM, args.inG)
| 28.923077 | 100 | 0.612842 |
from Bio import SeqIO
def expression(inputFileP, inputFileM, inputListP, inputListM, outputFileP, \
outputFileM, inputFileG):
DictP = {}
DictM = {}
ref_lengths = {}
FastaFile = open(inputFileG, 'rU')
for rec in SeqIO.parse(FastaFile, 'fasta'):
ref_lengths[rec.id] = len(rec)
DictP[rec.id] = {}
DictM[rec.id] = {}
FastaFile.close()
inFile = open(infile, 'r')
line = inFile.readline()
while line != '':
fields = line.split()
try:
col0 = str(fields[-3])
except IndexError:
col0 = list(Dict.keys())[0]
col1 = int(fields[-2])
col2 = float(fields[-1])
Dict[col0][col1] = col2
line = inFile.readline()
load_input(inputFileP, DictP)
def assign_gene_reads(inputList, Dict, outputFile):
geneDict = {}
geneList = []
inFile = open(inputList, 'r')
line = inFile.readline()
while line != '':
fields = line.split()
geneList.append(fields)
ref = str(fields[0])
gene = str(fields[1])
start = int(fields[2])
stop = int(fields[3])
for Z in range(start, stop + 1):
if Z < 0:
Z = ref_lengths[ref] + Z + 1
if Z in Dict[ref] and gene in geneDict:
geneDict[gene] += Dict[ref][Z]
elif Z in Dict[ref]:
geneDict[gene] = Dict[ref][Z]
line = inFile.readline()
tupledlist = geneDict.items()
for J in geneList:
match = 0
for K in tupledlist:
if J[1] == K[0]:
match = 1
J.append(K[1])
if match == 0:
J.append(0)
# Output file for plus strand
outFile = open(outputFile, 'w')
for J in geneList:
output = '\t'.join([str(x) for x in J]) + '\n'
outFile.write(output)
assign_gene_reads(inputListP, DictP, outputFileP)
### MINUS STRAND ###
# Upload read density file from minus strand as a dictionary
load_input(inputFileM, DictM)
# Upload minus strand gene list as a dictionary and list
assign_gene_reads(inputListM, DictM, outputFileM)
if __name__ == '__main__':
# Parse commandline arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--inP', help='Input file P.')
parser.add_argument('--inM', help='Input file M.')
parser.add_argument('--listP', help='Input list P.')
parser.add_argument('--listM', help='Input list M.')
parser.add_argument('--outP', help='Output file P.')
parser.add_argument('--outM', help='Output file M.')
parser.add_argument('--inG', help='Genome fasta.')
args = parser.parse_args()
inputFileP = args.inP
inputFileM = args.inM
inputListP = args.listP
inputListM = args.listM
outputFileP = args.outP
outputFileM = args.outM
inputFileG = args.inG
expression(inputFileP, inputFileM, inputListP, inputListM, outputFileP, outputFileM, inputFileG)
| true | true |
f721beb5e7c054fc46f9b84585695379792bee59 | 426 | py | Python | tests/2017/test_22_sporifica_virus.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 20 | 2019-10-15T07:33:13.000Z | 2022-01-19T13:40:36.000Z | tests/2017/test_22_sporifica_virus.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 5 | 2019-02-01T23:31:27.000Z | 2021-12-03T06:55:58.000Z | tests/2017/test_22_sporifica_virus.py | wimglenn/advent-of-code-wim | 6308c3fa5d29b318680419f877fd5b8ac1359b5d | [
"WTFPL"
] | 8 | 2019-12-03T15:41:23.000Z | 2021-12-06T17:13:57.000Z | import pytest
from aoc_wim.aoc2017.q22 import mutate
test_data = """\
..#
#..
...
"""
@pytest.mark.parametrize("n,expected,part", [
    (7, 5, "a"),
    (70, 41, "a"),
    (10000, 5587, "a"),
    (100, 26, "b"),
    (10000000, 2511944, "b")
], ids=["a_short", "a_medium", "a_long", "b_medium", "b_long_slow"])
def test_virus_mutation(n, expected, part):
    """After ``n`` bursts on the example grid, the virus must have caused
    ``expected`` infections (AoC 2017 day 22, part ``a`` or ``b``)."""
    assert mutate(test_data, n_iterations=n, part=part) == expected
| 20.285714 | 68 | 0.600939 | import pytest
from aoc_wim.aoc2017.q22 import mutate
test_data = """\
..#
#..
...
"""
@pytest.mark.parametrize("n,expected,part", [
(7, 5, "a"),
(70, 41, "a"),
(10000, 5587, "a"),
(100, 26, "b"),
(10000000, 2511944, "b")
], ids=["a_short", "a_medium", "a_long", "b_medium", "b_long_slow"])
def test_virus_mutation(n, expected, part):
assert mutate(test_data, n_iterations=n, part=part) == expected
| true | true |
f721c009fbb629ad735fa8472ac120fec2b6de3e | 7,073 | py | Python | menpo/math/linalg.py | yutiansut/menpo | 62af28606bc55985ab764f8ad38d239d1572bf1e | [
"BSD-3-Clause"
] | null | null | null | menpo/math/linalg.py | yutiansut/menpo | 62af28606bc55985ab764f8ad38d239d1572bf1e | [
"BSD-3-Clause"
] | 1 | 2019-03-09T16:01:46.000Z | 2019-03-09T16:01:46.000Z | menpo/math/linalg.py | yutiansut/menpo | 62af28606bc55985ab764f8ad38d239d1572bf1e | [
"BSD-3-Clause"
] | 1 | 2020-05-01T09:55:57.000Z | 2020-05-01T09:55:57.000Z | from itertools import islice
import numpy as np
from menpo.visualize import print_progress, bytes_str, print_dynamic
def dot_inplace_left(a, b, block_size=1000):
    r"""
    Memory-efficient blocked dot product ``c = a.dot(b)`` where the result
    is written back into the leading columns of ``a``.

    Parameters
    ----------
    a : ``(n_big, k)`` `ndarray`
        The large left operand. Its storage is reused for the output, so
        its contents are destroyed by this call.
    b : ``(k, n_small)`` `ndarray`, ``n_small <= k``
        The small right operand. ``n_small`` must not exceed ``k`` so the
        result fits inside the memory already allocated for ``a``.
    block_size : `int`, optional
        Number of rows of ``a`` processed per iteration. Larger blocks run
        faster at the cost of more temporary memory.

    Returns
    -------
    c : ``(n_big, n_small)`` `ndarray`
        A view onto ``a`` (``a[:, :n_small]``) holding the product.
    """
    n_big, k_a = a.shape
    k_b, n_small = b.shape
    if k_a != k_b:
        raise ValueError('Cannot dot {} * {}'.format(a.shape, b.shape))
    if n_small > k_a:
        raise ValueError('Cannot dot inplace left - '
                         'b.shape[1] ({}) > a.shape[1] '
                         '({})'.format(n_small, k_a))
    # Process `a` one row-block at a time, overwriting the leading columns
    # of each block with its product against `b`.
    start = 0
    while start < n_big:
        stop = start + block_size
        a[start:stop, :n_small] = a[start:stop].dot(b)
        start = stop
    return a[:, :n_small]
def dot_inplace_right(a, b, block_size=1000):
    r"""
    Memory-efficient blocked dot product ``c = a.dot(b)`` where the result
    is written back into the leading rows of ``b``.

    Parameters
    ----------
    a : ``(n_small, k)`` `ndarray`, ``n_small <= k``
        The small left operand. ``n_small`` must not exceed ``k`` so the
        result fits inside the memory already allocated for ``b``.
    b : ``(k, n_big)`` `ndarray`
        The large right operand. Its storage is reused for the output, so
        its contents are destroyed by this call.
    block_size : `int`, optional
        Number of columns of ``b`` processed per iteration. Larger blocks
        run faster at the cost of more temporary memory.

    Returns
    -------
    c : ``(n_small, n_big)`` `ndarray`
        A view onto ``b`` (``b[:n_small]``) holding the product.
    """
    n_small, k_a = a.shape
    k_b, n_big = b.shape
    if k_a != k_b:
        raise ValueError('Cannot dot {} * {}'.format(a.shape, b.shape))
    if n_small > k_b:
        raise ValueError('Cannot dot inplace right - '
                         'a.shape[1] ({}) > b.shape[0] '
                         '({})'.format(n_small, k_b))
    # Process `b` one column-block at a time, overwriting the leading rows
    # of each block with the product of `a` against it.
    start = 0
    while start < n_big:
        stop = start + block_size
        b[:n_small, start:stop] = a.dot(b[:, start:stop])
        start = stop
    return b[:n_small]
def as_matrix(vectorizables, length=None, return_template=False, verbose=False):
    r"""
    Create a matrix from a list/generator of :map:`Vectorizable` objects.
    All the objects in the list **must** be the same size when vectorized.
    Consider using a generator if the matrix you are creating is large and
    passing the length of the generator explicitly.
    Parameters
    ----------
    vectorizables : `list` or generator of :map:`Vectorizable` objects
        A list or generator of objects that supports the vectorizable interface
    length : `int`, optional
        Length of the vectorizable list. Useful if you are passing a generator
        with a known length.
    verbose : `bool`, optional
        If ``True``, will print the progress of building the matrix.
    return_template : `bool`, optional
        If ``True``, will return the first element of the list/generator, which
        was used as the template. Useful if you need to map back from the
        matrix to a list of vectorizable objects.
    Returns
    -------
    M : (length, n_features) `ndarray`
        Every row is an element of the list.
    template : :map:`Vectorizable`, optional
        If ``return_template == True``, will return the template used to
        build the matrix `M`.
    Raises
    ------
    ValueError
        ``vectorizables`` terminates in fewer than ``length`` iterations
    """
    # The first element acts as the template: it fixes the dtype and the
    # number of features (columns) of the data matrix.
    if length is None:
        # samples is a list: take its length and slice the template off
        length = len(vectorizables)
        template = vectorizables[0]
        vectorizables = vectorizables[1:]
    else:
        # samples is an iterator: consume one item as the template
        template = next(vectorizables)
    n_features = template.n_parameters
    template_vector = template.as_vector()
    data = np.zeros((length, n_features), dtype=template_vector.dtype)
    if verbose:
        print('Allocated data matrix of size {} '
              '({} samples)'.format(bytes_str(data.nbytes), length))
    # Row 0 comes from the template itself; drop the temporary reference so
    # the matrix is the only thing keeping that memory alive.
    data[0] = template_vector
    del template_vector
    # ensure we take at most the remaining length - 1 elements
    vectorizables = islice(vectorizables, length - 1)
    if verbose:
        vectorizables = print_progress(vectorizables, n_items=length, offset=1,
                                       prefix='Building data matrix',
                                       end_with_newline=False)
    # Enumerate from 1 since row 0 already holds the template vector. `i` is
    # pre-seeded to 0 so the completeness check below also works when the
    # iterable yields nothing (e.g. length == 1).
    i = 0
    for i, sample in enumerate(vectorizables, 1):
        data[i] = sample.as_vector()
    # The iterable is exhausted: the final `i` must be length - 1, i.e. the
    # template plus (length - 1) further samples. Otherwise the caller's
    # `length` over-stated the iterator and the matrix tail is all zeros.
    if i != length - 1:  # total items consumed so far is i + 1
        raise ValueError('Incomplete data matrix due to early iterator '
                         'termination (expected {} items, got {})'.format(
                             length, i + 1))
    if return_template:
        return data, template
    else:
        return data
def from_matrix(matrix, template):
    r"""
    Lazily rebuild :map:`Vectorizable` objects from the rows of a matrix.

    Every row is handed to ``template.from_vector`` to reconstruct one
    object. Wrap the returned generator in ``list()`` if a list is needed.

    Parameters
    ----------
    matrix : (n_items, n_features) `ndarray`
        A matrix whereby every *row* represents the data of a vectorizable
        object.
    template : :map:`Vectorizable`
        The template object used to reconstruct each row of the matrix.

    Returns
    -------
    vectorizables : generator of :map:`Vectorizable`
        One reconstructed object per row of the matrix.
    """
    for row in matrix:
        yield template.from_vector(row)
| 37.42328 | 80 | 0.61247 | from itertools import islice
import numpy as np
from menpo.visualize import print_progress, bytes_str, print_dynamic
def dot_inplace_left(a, b, block_size=1000):
(n_big, k_a), (k_b, n_small) = a.shape, b.shape
if k_a != k_b:
raise ValueError('Cannot dot {} * {}'.format(a.shape, b.shape))
if n_small > k_a:
raise ValueError('Cannot dot inplace left - '
'b.shape[1] ({}) > a.shape[1] '
'({})'.format(n_small, k_a))
for i in range(0, n_big, block_size):
j = i + block_size
a[i:j, :n_small] = a[i:j].dot(b)
return a[:, :n_small]
def dot_inplace_right(a, b, block_size=1000):
(n_small, k_a), (k_b, n_big) = a.shape, b.shape
if k_a != k_b:
raise ValueError('Cannot dot {} * {}'.format(a.shape, b.shape))
if n_small > k_b:
raise ValueError('Cannot dot inplace right - '
'a.shape[1] ({}) > b.shape[0] '
'({})'.format(n_small, k_b))
for i in range(0, n_big, block_size):
j = i + block_size
b[:n_small, i:j] = a.dot(b[:, i:j])
return b[:n_small]
def as_matrix(vectorizables, length=None, return_template=False, verbose=False):
if length is None:
length = len(vectorizables)
template = vectorizables[0]
vectorizables = vectorizables[1:]
else:
template = next(vectorizables)
n_features = template.n_parameters
template_vector = template.as_vector()
data = np.zeros((length, n_features), dtype=template_vector.dtype)
if verbose:
print('Allocated data matrix of size {} '
'({} samples)'.format(bytes_str(data.nbytes), length))
data[0] = template_vector
del template_vector
vectorizables = islice(vectorizables, length - 1)
if verbose:
vectorizables = print_progress(vectorizables, n_items=length, offset=1,
prefix='Building data matrix',
end_with_newline=False)
i = 0
for i, sample in enumerate(vectorizables, 1):
data[i] = sample.as_vector()
if i != length - 1:
raise ValueError('Incomplete data matrix due to early iterator '
'termination (expected {} items, got {})'.format(
length, i + 1))
if return_template:
return data, template
else:
return data
def from_matrix(matrix, template):
return (template.from_vector(row) for row in matrix)
| true | true |
f721c0352c2ce9e62a832ade6a760d74538547cf | 247 | py | Python | answers/Siddhant Saxena/Day 5/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Siddhant Saxena/Day 5/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Siddhant Saxena/Day 5/Question1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | c=1
for i in range(5):
if i==1:
print("*",end="")
for j in range(i):
if c > 1:
for i in range(2,c):
if (c % i) == 0:
print("*",end="")
break
else:
print("#",end="")
c+=1
print()
| 13.722222 | 27 | 0.37247 | c=1
for i in range(5):
if i==1:
print("*",end="")
for j in range(i):
if c > 1:
for i in range(2,c):
if (c % i) == 0:
print("*",end="")
break
else:
print("#",end="")
c+=1
print()
| true | true |
f721c0db5e07bedca61cf25c2a2f37316d73a074 | 77 | py | Python | mainDraw.py | smdth/mimLab | 78a49c17a4e103841f49cd4b880561a490682864 | [
"0BSD"
] | null | null | null | mainDraw.py | smdth/mimLab | 78a49c17a4e103841f49cd4b880561a490682864 | [
"0BSD"
] | null | null | null | mainDraw.py | smdth/mimLab | 78a49c17a4e103841f49cd4b880561a490682864 | [
"0BSD"
] | null | null | null | #!/bin/env python
# Python 2 demo script: draw a line from (0, 0) to (1, 1) sampled at 10
# points using the project's mimDrawer helpers, and print the result.
from mimDrawer import *
print drawLine([0,0], [1,1], 10)
| 12.833333 | 32 | 0.649351 |
from mimDrawer import *
print drawLine([0,0], [1,1], 10)
| false | true |
f721c245101ff24635224c6a90a3a8df3d443626 | 5,281 | py | Python | src/olympia/amo/monitors.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 843 | 2016-02-09T13:00:37.000Z | 2022-03-20T19:17:06.000Z | src/olympia/amo/monitors.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 10,187 | 2016-02-05T23:51:05.000Z | 2022-03-31T15:24:44.000Z | src/olympia/amo/monitors.py | covariant/addons-server | 41e6ee9e426facb19a1e1ca8d40277cb6f94a7da | [
"BSD-3-Clause"
] | 551 | 2016-02-08T20:32:16.000Z | 2022-03-15T16:49:24.000Z | import os
import io
import socket
import traceback
from django.conf import settings
import requests
from kombu import Connection
from PIL import Image
import olympia.core.logger
from olympia.amo import search
from olympia.amo.templatetags.jinja_helpers import user_media_path
monitor_log = olympia.core.logger.getLogger('z.monitor')
def memcache():
    """Check connectivity to every configured memcached server.

    Returns
    -------
    status : str
        '' when everything looks healthy, otherwise a human-readable
        description of the last problem found (also logged).
    memcache_results : list of (ip, port, bool)
        One entry per configured server with the connection outcome.
        Empty when memcached is not configured at all.
    """
    memcache = getattr(settings, 'CACHES', {}).get('default')
    memcache_results = []
    status = ''
    if memcache and 'memcache' in memcache['BACKEND']:
        hosts = memcache['LOCATION']
        using_twemproxy = False
        if not isinstance(hosts, (tuple, list)):
            hosts = [hosts]
        for host in hosts:
            ip, port = host.split(':')
            if ip == '127.0.0.1':
                # A loopback address implies a local twemproxy fronting the
                # real servers, so the 2+ server check below is skipped.
                using_twemproxy = True
            # Create the socket *outside* the try block: in the original
            # code, if socket() itself raised, the finally clause would
            # close an unbound (or a previous iteration's) `s`.
            s = socket.socket()
            try:
                s.connect((ip, int(port)))
            except Exception as e:
                result = False
                status = f'Failed to connect to memcached ({host}): {e}'
                monitor_log.critical(status)
            else:
                result = True
            finally:
                s.close()
            memcache_results.append((ip, port, result))
        if not using_twemproxy and len(memcache_results) < 2:
            status = ('2+ memcache servers are required. %s available') % len(
                memcache_results
            )
            monitor_log.warning(status)
    if not memcache_results:
        status = 'Memcache is not configured'
        monitor_log.info(status)
    return status, memcache_results
def libraries():
    """Verify that required imaging libraries work (PIL with JPEG support).

    Returns ``(status, results)`` where ``status`` is '' on success and
    ``results`` is a list of ``(name, ok, message)`` tuples.
    """
    results = []
    status = ''
    try:
        # Round-trip a tiny image through the JPEG encoder to prove the
        # codec is available.
        Image.new('RGB', (16, 16)).save(io.BytesIO(), 'JPEG')
        results.append(('PIL+JPEG', True, 'Got it!'))
    except Exception as e:
        results.append(('PIL+JPEG', False, 'Failed to create a jpeg image: %s' % e))
    missing = [name for name, ok, _ in results if not ok]
    if missing:
        status = 'missing libs: %s' % ','.join(missing)
    return status, results
def elastic():
    """Report Elasticsearch cluster health.

    Returns ``(status, result)``: ``status`` is non-empty only when the
    cluster reports red; ``result`` is the health dict, or a dict holding
    the traceback if the health check itself failed.
    """
    status = ''
    try:
        health = search.get_es().cluster.health()
        if health['status'] == 'red':
            status = 'ES is red'
        result = health
    except Exception:
        result = {'exception': traceback.format_exc()}
    return status, result
def path():
    """Check that required storage paths exist with correct permissions.

    Returns:
        (status, filepath_results): ``status`` is '' when everything
        checks out; ``filepath_results`` is a list of
        (path, exists, has_required_perms, notes) tuples.
    """
    # Paths the app must be able to both read and write.
    read_and_write = (
        settings.TMP_PATH,
        settings.MEDIA_ROOT,
        user_media_path('addons'),
        user_media_path('guarded_addons'),
        user_media_path('addon_icons'),
        user_media_path('previews'),
        user_media_path('userpics'),
    )
    # Paths that only need to be readable.
    read_only = [os.path.join(settings.ROOT, 'locale')]

    filepaths = [
        (path, os.R_OK | os.W_OK, 'We want read + write') for path in read_and_write
    ]
    filepaths += [(path, os.R_OK, 'We want read') for path in read_only]
    filepath_results = []
    filepath_status = True

    for path, perms, notes in filepaths:
        path_exists = os.path.exists(path)
        path_perms = os.access(path, perms)
        filepath_status = filepath_status and path_exists and path_perms
        if not isinstance(path, bytes):
            notes += ' / should be a bytestring!'
        filepath_results.append((path, path_exists, path_perms, notes))

    # BUGFIX: removed the dead assignment `status = filepath_status` that
    # was immediately overwritten by the line below in the original.
    status = ''
    if not filepath_status:
        status = 'check main status page for broken perms / values'

    return status, filepath_results
def rabbitmq():
    """Check that the Celery broker (rabbitmq) accepts connections.

    Returns (status, results) where results is a list of
    (hostname, reachable) tuples.
    """
    results = []
    status = ''
    with Connection(settings.CELERY_BROKER_URL, connect_timeout=2) as broker:
        hostname = broker.hostname
        try:
            broker.connect()
        except Exception as exc:
            status = f'Failed to chat with rabbitmq {hostname}: {exc}'
            monitor_log.critical(status)
            results.append((hostname, False))
        else:
            results.append((hostname, True))
    return status, results
def signer():
    """Ping the autograph signing service's heartbeat endpoint.

    Returns (status, ok): status is '' on success or an error string,
    ok is True only when the heartbeat answered with HTTP 200.
    """
    status = ''
    autograph_url = settings.AUTOGRAPH_CONFIG['server_url']

    # Guard clause: no configured endpoint means an immediate failure.
    if not autograph_url:
        status = 'server_url in AUTOGRAPH_CONFIG is not set'
        monitor_log.critical(status)
        return status, False

    try:
        response = requests.get(
            f'{autograph_url}/__heartbeat__',
            timeout=settings.SIGNING_SERVER_MONITORING_TIMEOUT,
        )
        if response.status_code != 200:
            status = (
                'Failed to chat with signing service. '
                'Invalid HTTP response code.'
            )
            monitor_log.critical(status)
            return status, False
    except Exception as exc:
        status = 'Failed to chat with signing service: %s' % exc
        monitor_log.critical(status)
        return status, False

    return status, True
| 29.338889 | 84 | 0.599508 | import os
import io
import socket
import traceback
from django.conf import settings
import requests
from kombu import Connection
from PIL import Image
import olympia.core.logger
from olympia.amo import search
from olympia.amo.templatetags.jinja_helpers import user_media_path
monitor_log = olympia.core.logger.getLogger('z.monitor')
def memcache():
memcache = getattr(settings, 'CACHES', {}).get('default')
memcache_results = []
status = ''
if memcache and 'memcache' in memcache['BACKEND']:
hosts = memcache['LOCATION']
using_twemproxy = False
if not isinstance(hosts, (tuple, list)):
hosts = [hosts]
for host in hosts:
ip, port = host.split(':')
if ip == '127.0.0.1':
using_twemproxy = True
try:
s = socket.socket()
s.connect((ip, int(port)))
except Exception as e:
result = False
status = f'Failed to connect to memcached ({host}): {e}'
monitor_log.critical(status)
else:
result = True
finally:
s.close()
memcache_results.append((ip, port, result))
if not using_twemproxy and len(memcache_results) < 2:
status = ('2+ memcache servers are required. %s available') % len(
memcache_results
)
monitor_log.warning(status)
if not memcache_results:
status = 'Memcache is not configured'
monitor_log.info(status)
return status, memcache_results
def libraries():
libraries_results = []
status = ''
try:
Image.new('RGB', (16, 16)).save(io.BytesIO(), 'JPEG')
libraries_results.append(('PIL+JPEG', True, 'Got it!'))
except Exception as e:
msg = 'Failed to create a jpeg image: %s' % e
libraries_results.append(('PIL+JPEG', False, msg))
missing_libs = [lib for lib, success, _ in libraries_results if not success]
if missing_libs:
status = 'missing libs: %s' % ','.join(missing_libs)
return status, libraries_results
def elastic():
elastic_results = None
status = ''
try:
es = search.get_es()
health = es.cluster.health()
if health['status'] == 'red':
status = 'ES is red'
elastic_results = health
except Exception:
elastic_results = {'exception': traceback.format_exc()}
return status, elastic_results
def path():
read_and_write = (
settings.TMP_PATH,
settings.MEDIA_ROOT,
user_media_path('addons'),
user_media_path('guarded_addons'),
user_media_path('addon_icons'),
user_media_path('previews'),
user_media_path('userpics'),
)
read_only = [os.path.join(settings.ROOT, 'locale')]
filepaths = [
(path, os.R_OK | os.W_OK, 'We want read + write') for path in read_and_write
]
filepaths += [(path, os.R_OK, 'We want read') for path in read_only]
filepath_results = []
filepath_status = True
for path, perms, notes in filepaths:
path_exists = os.path.exists(path)
path_perms = os.access(path, perms)
filepath_status = filepath_status and path_exists and path_perms
if not isinstance(path, bytes):
notes += ' / should be a bytestring!'
filepath_results.append((path, path_exists, path_perms, notes))
status = filepath_status
status = ''
if not filepath_status:
status = 'check main status page for broken perms / values'
return status, filepath_results
def rabbitmq():
rabbitmq_results = []
status = ''
with Connection(settings.CELERY_BROKER_URL, connect_timeout=2) as broker:
hostname = broker.hostname
try:
broker.connect()
rabbitmq_results.append((hostname, True))
except Exception as e:
rabbitmq_results.append((hostname, False))
status = f'Failed to chat with rabbitmq {hostname}: {e}'
monitor_log.critical(status)
return status, rabbitmq_results
def signer():
signer_results = None
status = ''
autograph_url = settings.AUTOGRAPH_CONFIG['server_url']
if autograph_url:
try:
response = requests.get(
f'{autograph_url}/__heartbeat__',
timeout=settings.SIGNING_SERVER_MONITORING_TIMEOUT,
)
if response.status_code != 200:
status = (
'Failed to chat with signing service. '
'Invalid HTTP response code.'
)
monitor_log.critical(status)
signer_results = False
else:
signer_results = True
except Exception as exc:
status = 'Failed to chat with signing service: %s' % exc
monitor_log.critical(status)
signer_results = False
else:
status = 'server_url in AUTOGRAPH_CONFIG is not set'
monitor_log.critical(status)
signer_results = False
return status, signer_results
| true | true |
f721c3109896f56431a1bd85112d79800d195b90 | 4,113 | py | Python | ironic/drivers/irmc.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | null | null | null | ironic/drivers/irmc.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | null | null | null | ironic/drivers/irmc.py | NaohiroTamura/ironic | 1fcb6c52a22c9c025dbf27931720ce2eda08704f | [
"Apache-2.0"
] | 1 | 2022-03-25T14:26:10.000Z | 2022-03-25T14:26:10.000Z | # Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
iRMC Driver for managing FUJITSU PRIMERGY BX S4 or RX S8 generation
of FUJITSU PRIMERGY servers, and above servers.
"""
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
from ironic.drivers import generic
from ironic.drivers.modules import agent
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import boot
from ironic.drivers.modules.irmc import inspect
from ironic.drivers.modules.irmc import management
from ironic.drivers.modules.irmc import power
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
class IRMCVirtualMediaIscsiDriver(base.BaseDriver):
    """iRMC driver using SCCI, with virtual-media boot and iSCSI deploy.

    Power management is provided by
    :class:`ironic.drivers.modules.irmc.power.IRMCPower` and deployment
    by :class:`ironic.drivers.modules.iscsi_deploy.ISCSIDeploy`.
    """

    def __init__(self):
        # The iRMC interfaces talk to the BMC via python-scciclient; fail
        # the driver load with a descriptive reason if it is missing.
        if importutils.try_import('scciclient.irmc.scci') is None:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to import python-scciclient library"))
        self.power = power.IRMCPower()
        self.console = ipmitool.IPMIShellinaboxConsole()
        self.boot = boot.IRMCVirtualMediaBoot()
        self.deploy = iscsi_deploy.ISCSIDeploy()
        self.management = management.IRMCManagement()
        self.inspect = inspect.IRMCInspect()
class IRMCVirtualMediaAgentDriver(base.BaseDriver):
    """iRMC driver using SCCI, with virtual-media boot and agent deploy.

    Power management is provided by
    :class:`ironic.drivers.modules.irmc.power.IRMCPower` and deployment
    by :class:`ironic.drivers.modules.agent.AgentDeploy` (the original
    docstring cited a nonexistent ``irmc.deploy`` path).
    """

    def __init__(self):
        # python-scciclient is required by every iRMC interface; abort
        # the driver load early with a clear reason when it is absent.
        if importutils.try_import('scciclient.irmc.scci') is None:
            raise exception.DriverLoadError(
                driver=self.__class__.__name__,
                reason=_("Unable to import python-scciclient library"))
        self.power = power.IRMCPower()
        self.console = ipmitool.IPMIShellinaboxConsole()
        self.boot = boot.IRMCVirtualMediaBoot()
        self.deploy = agent.AgentDeploy()
        self.management = management.IRMCManagement()
        self.inspect = inspect.IRMCInspect()
class IRMCHardware(generic.GenericHardware):
    """iRMC hardware type.

    Targets FUJITSU PRIMERGY servers equipped with the iRMC S4
    management system. Each property below enumerates the interface
    implementations this hardware type supports; list contents and
    order are preserved from the original definition.
    """

    @property
    def supported_boot_interfaces(self):
        """List of supported boot interfaces."""
        return [boot.IRMCVirtualMediaBoot, pxe.PXEBoot]

    @property
    def supported_console_interfaces(self):
        """List of supported console interfaces."""
        return [
            ipmitool.IPMISocatConsole,
            ipmitool.IPMIShellinaboxConsole,
            noop.NoConsole,
        ]

    @property
    def supported_inspect_interfaces(self):
        """List of supported inspect interfaces."""
        return [inspect.IRMCInspect, inspector.Inspector, noop.NoInspect]

    @property
    def supported_management_interfaces(self):
        """List of supported management interfaces."""
        return [management.IRMCManagement]

    @property
    def supported_power_interfaces(self):
        """List of supported power interfaces."""
        return [power.IRMCPower]
| 35.153846 | 77 | 0.722587 |
from oslo_utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
from ironic.drivers import generic
from ironic.drivers.modules import agent
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules.irmc import boot
from ironic.drivers.modules.irmc import inspect
from ironic.drivers.modules.irmc import management
from ironic.drivers.modules.irmc import power
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import pxe
class IRMCVirtualMediaIscsiDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('scciclient.irmc.scci'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import python-scciclient library"))
self.power = power.IRMCPower()
self.boot = boot.IRMCVirtualMediaBoot()
self.deploy = iscsi_deploy.ISCSIDeploy()
self.console = ipmitool.IPMIShellinaboxConsole()
self.management = management.IRMCManagement()
self.inspect = inspect.IRMCInspect()
class IRMCVirtualMediaAgentDriver(base.BaseDriver):
def __init__(self):
if not importutils.try_import('scciclient.irmc.scci'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_("Unable to import python-scciclient library"))
self.power = power.IRMCPower()
self.boot = boot.IRMCVirtualMediaBoot()
self.deploy = agent.AgentDeploy()
self.console = ipmitool.IPMIShellinaboxConsole()
self.management = management.IRMCManagement()
self.inspect = inspect.IRMCInspect()
class IRMCHardware(generic.GenericHardware):
@property
def supported_boot_interfaces(self):
return [boot.IRMCVirtualMediaBoot, pxe.PXEBoot]
@property
def supported_console_interfaces(self):
return [ipmitool.IPMISocatConsole, ipmitool.IPMIShellinaboxConsole,
noop.NoConsole]
@property
def supported_inspect_interfaces(self):
return [inspect.IRMCInspect, inspector.Inspector,
noop.NoInspect]
@property
def supported_management_interfaces(self):
return [management.IRMCManagement]
@property
def supported_power_interfaces(self):
return [power.IRMCPower]
| true | true |
f721c578750ba0a7105c0bada589a4631a8b372e | 1,845 | py | Python | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | 2 | 2019-12-22T13:05:08.000Z | 2020-02-02T13:05:31.000Z | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | 71 | 2016-10-31T15:41:10.000Z | 2022-03-21T14:26:22.000Z | pili/email.py | pilosus/pili | 8eb51e79420b7a2e4148f3b819e787cf6711e8cd | [
"MIT"
] | null | null | null | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from pili.app import celery, mail
def send_email(to, subject, template, **kwargs):
    """Dispatch an email asynchronously via Celery or a worker thread.

    The CELERY_INSTEAD_THREADING config flag selects the mechanism.
    """
    app = current_app._get_current_object()
    use_celery = app.config['CELERY_INSTEAD_THREADING']
    if use_celery:
        send_email_celery(to, subject, template, countdown=None, **kwargs)
    else:
        send_email_thread(to, subject, template, **kwargs)
def send_async_email(app, msg):
    """Send *msg* with Flask-Mail inside *app*'s application context.

    Runs in a worker thread, which has no app context of its own, so one
    must be pushed explicitly before Flask-Mail can read the config.
    """
    with app.app_context():
        mail.send(msg)
def send_email_thread(to, subject, template, **kwargs):
    """Render and send an email on a background thread.

    Returns the started Thread so callers (and tests) can join() it.
    """
    app = current_app._get_current_object()
    subject_line = app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(
        subject_line,
        sender=app.config['PILI_MAIL_SENDER'],
        recipients=[to],
    )
    # Both a plain-text and an HTML part are rendered from templates
    # sharing the same base name.
    msg.body = render_template('{}.txt'.format(template), **kwargs)
    msg.html = render_template('{}.html'.format(template), **kwargs)
    worker = Thread(target=send_async_email, args=[app, msg])
    worker.start()
    return worker
@celery.task(serializer='pickle')
def send_celery_async_email(msg):
    """Celery task: deliver a pre-built Flask-Mail Message.

    Pickle serialization is used because Message objects are not
    JSON-serializable.
    """
    mail.send(msg)
# NOTE rename to send_email in production if Thread support is not needed
def send_email_celery(to, subject, template, countdown=None, **kwargs):
    """Send an async email via the Celery worker.

    *countdown*, when given, delays delivery by that many seconds.
    """
    app = current_app._get_current_object()
    subject_line = app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject
    msg = Message(
        subject_line,
        sender=app.config['PILI_MAIL_SENDER'],
        recipients=[to],
    )
    # Text and HTML parts come from templates sharing the same base name.
    msg.body = render_template('{}.txt'.format(template), **kwargs)
    msg.html = render_template('{}.html'.format(template), **kwargs)
    send_celery_async_email.apply_async(args=[msg], countdown=countdown)
| 30.75 | 74 | 0.688347 | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from pili.app import celery, mail
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
if app.config['CELERY_INSTEAD_THREADING']:
send_email_celery(to, subject, template, countdown=None, **kwargs)
else:
send_email_thread(to, subject, template, **kwargs)
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email_thread(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(
app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['PILI_MAIL_SENDER'],
recipients=[to],
)
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
@celery.task(serializer='pickle')
def send_celery_async_email(msg):
mail.send(msg)
def send_email_celery(to, subject, template, countdown=None, **kwargs):
app = current_app._get_current_object()
msg = Message(
app.config['PILI_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['PILI_MAIL_SENDER'],
recipients=[to],
)
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
send_celery_async_email.apply_async(args=[msg], countdown=countdown)
| true | true |
f721c5dd97e769836ffa40231cf86c6f09797352 | 3,757 | py | Python | day1/kapua-python-client/swagger_client/models/kapua_data_payload.py | liang-faan/SmartIOT-Diec | 8336a4b558295295f10a82cf350d8b7ff3fb9f5c | [
"MIT"
] | 5 | 2019-05-30T02:55:16.000Z | 2020-03-03T14:18:23.000Z | day1/kapua-python-client/swagger_client/models/kapua_data_payload.py | liang-faan/SmartIOT-Diec | 8336a4b558295295f10a82cf350d8b7ff3fb9f5c | [
"MIT"
] | 3 | 2019-12-27T00:53:23.000Z | 2020-02-17T05:29:19.000Z | day1/kapua-python-client/swagger_client/models/kapua_data_payload.py | liang-faan/SmartIOT-Diec | 8336a4b558295295f10a82cf350d8b7ff3fb9f5c | [
"MIT"
] | 4 | 2019-06-04T06:26:14.000Z | 2021-01-07T04:25:32.000Z | # coding: utf-8
"""
Eclipse Kapua REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class KapuaDataPayload(object):
    """Model of a Kapua data payload (generated from the Swagger spec).

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the definition.
    """
    swagger_types = {
        'metrics': 'dict(str, object)',
        'body': 'str'
    }

    attribute_map = {
        'metrics': 'metrics',
        'body': 'body'
    }

    def __init__(self, metrics=None, body=None):  # noqa: E501
        """KapuaDataPayload - a model defined in Swagger."""
        self._metrics = None
        self._body = None
        self.discriminator = None
        if metrics is not None:
            self.metrics = metrics
        if body is not None:
            self.body = body

    @property
    def metrics(self):
        """Gets the metrics of this KapuaDataPayload.

        :rtype: dict(str, object)
        """
        return self._metrics

    @metrics.setter
    def metrics(self, metrics):
        """Sets the metrics of this KapuaDataPayload."""
        self._metrics = metrics

    @property
    def body(self):
        """Gets the body of this KapuaDataPayload.

        :rtype: str
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this KapuaDataPayload."""
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict."""
        result = {}
        # Iterate declared attributes; nested models expose to_dict and
        # are serialized recursively, including inside lists and dicts.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        if issubclass(KapuaDataPayload, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal."""
        if not isinstance(other, KapuaDataPayload):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal."""
        return not self == other
| 26.457746 | 119 | 0.556029 |
import pprint
import re
import six
class KapuaDataPayload(object):
swagger_types = {
'metrics': 'dict(str, object)',
'body': 'str'
}
attribute_map = {
'metrics': 'metrics',
'body': 'body'
}
def __init__(self, metrics=None, body=None):
self._metrics = None
self._body = None
self.discriminator = None
if metrics is not None:
self.metrics = metrics
if body is not None:
self.body = body
@property
def metrics(self):
return self._metrics
@metrics.setter
def metrics(self, metrics):
self._metrics = metrics
@property
def body(self):
return self._body
@body.setter
def body(self, body):
self._body = body
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(KapuaDataPayload, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, KapuaDataPayload):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f721c6532584cb15c212bf15c6507558686dc532 | 125 | py | Python | starlette_inertia/__init__.py | roganartu/fastapi-inertia | 2e0e902b9e0369d00dab628d23ba74bb3fccd3d5 | [
"MIT"
] | 2 | 2022-02-25T21:28:36.000Z | 2022-03-05T14:42:56.000Z | starlette_inertia/__init__.py | roganartu/starlette-inertia | 2e0e902b9e0369d00dab628d23ba74bb3fccd3d5 | [
"MIT"
] | null | null | null | starlette_inertia/__init__.py | roganartu/starlette-inertia | 2e0e902b9e0369d00dab628d23ba74bb3fccd3d5 | [
"MIT"
] | null | null | null | from starlette_inertia.inertia import InertiaMiddleware, InertiaResponse
__all__ = ["InertiaMiddleware", "InertiaResponse"]
| 31.25 | 72 | 0.84 | from starlette_inertia.inertia import InertiaMiddleware, InertiaResponse
__all__ = ["InertiaMiddleware", "InertiaResponse"]
| true | true |
f721c699cb9f49dec818c048ce6c2572c20ac6d0 | 4,179 | py | Python | src/python/pants/backend/jvm/subsystems/java.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/subsystems/java.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/jvm/subsystems/java.py | AllClearID/pants | c4fdf00a3bdf9f26f876e85c46909d0729f7132c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.subsystems.zinc_language_mixin import ZincLanguageMixin
from pants.backend.jvm.targets.tools_jar import ToolsJar
from pants.build_graph.address import Address
from pants.build_graph.injectables_mixin import InjectablesMixin
from pants.subsystem.subsystem import Subsystem
# TODO: Sort out JVM compile config model: https://github.com/pantsbuild/pants/issues/4483.
class Java(JvmToolMixin, ZincLanguageMixin, InjectablesMixin, Subsystem):
  """A subsystem to encapsulate compile-time settings and features for the Java language.

  Runtime options are captured by the JvmPlatform subsystem.
  """
  options_scope = 'java'

  # Name and default target spec for the optional javac tool (see
  # register_options below).
  _javac_tool_name = 'javac'
  _default_javac_spec = '//:{}'.format(_javac_tool_name)

  @classmethod
  def register_options(cls, register):
    """Register the javac tool and javac-plugin options on this scope."""
    super(Java, cls).register_options(register)
    # This target, if specified, serves as both a tool (for compiling java code) and a
    # dependency (for javac plugins). See below for the different methods for accessing
    # classpath entries (in the former case) or target specs (in the latter case).
    #
    # Javac plugins can access basically all of the compiler internals, so we don't shade anything.
    # Hence the unspecified main= argument. This tool is optional, hence the empty classpath list.
    cls.register_jvm_tool(register,
                          cls._javac_tool_name,
                          classpath=[],
                          help='Java compiler to use. If unspecified, we use the compiler '
                               'embedded in the Java distribution we run on.')

    register('--javac-plugins', advanced=True, type=list, fingerprint=True,
             help='Use these javac plugins.')
    register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
             help='Map from javac plugin name to list of arguments for that plugin.')
    cls.register_jvm_tool(register, 'javac-plugin-dep', classpath=[],
                          help='Search for javac plugins here, as well as in any '
                               'explicit dependencies.')

  def injectables(self, build_graph):
    """Inject a synthetic ToolsJar target unless one already exists.

    Raises if a non-synthetic target occupies the tools-jar address.
    """
    tools_jar_address = Address.parse(self._tools_jar_spec)
    if not build_graph.contains_address(tools_jar_address):
      build_graph.inject_synthetic_target(tools_jar_address, ToolsJar)
    elif not build_graph.get_target(tools_jar_address).is_synthetic:
      raise build_graph.ManualSyntheticTargetError(tools_jar_address)

  @property
  def injectables_spec_mapping(self):
    """Map injectable names to the target specs that provide them."""
    return {
      # Zinc directly accesses the javac tool.
      'javac': [self._javac_spec],
      # The ProvideToolsJar task will first attempt to use the (optional) configured
      # javac tool, and then fall back to injecting a classpath entry linking to the current
      # distribution's `tools.jar`.
      'tools.jar': [self._tools_jar_spec],
    }

  @classmethod
  def global_javac_classpath(cls, products):
    """Returns a classpath entry for the java compiler library, useable as a tool.

    If no javac library is specified, will return an empty list. The caller must handle
    this case by defaulting to the JDK's tools.jar. We can't provide that jar here
    because we'd have to know about a Distribution.
    """
    return cls.global_instance().javac_classpath(products)

  def __init__(self, *args, **kwargs):
    super(Java, self).__init__(*args, **kwargs)
    opts = self.get_options()
    # TODO: These checks are a continuation of the hack that allows tests to pass without
    # caring about this subsystem.
    self._javac_spec = getattr(opts, 'javac', self._default_javac_spec)
    self._tools_jar_spec = '//:tools-jar-synthetic'

  def javac_classpath(self, products):
    """Resolve the registered javac tool's classpath from *products*."""
    return self.tool_classpath_from_products(products, 'javac', self.options_scope)
| 47.488636 | 99 | 0.718832 |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.subsystems.zinc_language_mixin import ZincLanguageMixin
from pants.backend.jvm.targets.tools_jar import ToolsJar
from pants.build_graph.address import Address
from pants.build_graph.injectables_mixin import InjectablesMixin
from pants.subsystem.subsystem import Subsystem
class Java(JvmToolMixin, ZincLanguageMixin, InjectablesMixin, Subsystem):
options_scope = 'java'
_javac_tool_name = 'javac'
_default_javac_spec = '//:{}'.format(_javac_tool_name)
@classmethod
def register_options(cls, register):
super(Java, cls).register_options(register)
# Hence the unspecified main= argument. This tool is optional, hence the empty classpath list.
cls.register_jvm_tool(register,
cls._javac_tool_name,
classpath=[],
help='Java compiler to use. If unspecified, we use the compiler '
'embedded in the Java distribution we run on.')
register('--javac-plugins', advanced=True, type=list, fingerprint=True,
help='Use these javac plugins.')
register('--javac-plugin-args', advanced=True, type=dict, default={}, fingerprint=True,
help='Map from javac plugin name to list of arguments for that plugin.')
cls.register_jvm_tool(register, 'javac-plugin-dep', classpath=[],
help='Search for javac plugins here, as well as in any '
'explicit dependencies.')
def injectables(self, build_graph):
tools_jar_address = Address.parse(self._tools_jar_spec)
if not build_graph.contains_address(tools_jar_address):
build_graph.inject_synthetic_target(tools_jar_address, ToolsJar)
elif not build_graph.get_target(tools_jar_address).is_synthetic:
raise build_graph.ManualSyntheticTargetError(tools_jar_address)
@property
def injectables_spec_mapping(self):
return {
# Zinc directly accesses the javac tool.
'javac': [self._javac_spec],
# The ProvideToolsJar task will first attempt to use the (optional) configured
# javac tool, and then fall back to injecting a classpath entry linking to the current
# distribution's `tools.jar`.
'tools.jar': [self._tools_jar_spec],
}
@classmethod
def global_javac_classpath(cls, products):
return cls.global_instance().javac_classpath(products)
def __init__(self, *args, **kwargs):
super(Java, self).__init__(*args, **kwargs)
opts = self.get_options()
self._javac_spec = getattr(opts, 'javac', self._default_javac_spec)
self._tools_jar_spec = '//:tools-jar-synthetic'
def javac_classpath(self, products):
return self.tool_classpath_from_products(products, 'javac', self.options_scope)
| true | true |
f721c756563d3d333e7e005fc2811ca05bf35a8b | 1,131 | py | Python | taxdata/puf/preppuf.py | jdebacker/taxdata | c32d401a10a6c8f6e889d87c6cc72fd4338017b2 | [
"CC0-1.0"
] | 12 | 2019-02-07T14:06:28.000Z | 2021-12-04T19:19:50.000Z | taxdata/puf/preppuf.py | jdebacker/taxdata | c32d401a10a6c8f6e889d87c6cc72fd4338017b2 | [
"CC0-1.0"
] | 230 | 2015-10-20T18:38:10.000Z | 2018-12-05T16:04:04.000Z | taxdata/puf/preppuf.py | jdebacker/taxdata | c32d401a10a6c8f6e889d87c6cc72fd4338017b2 | [
"CC0-1.0"
] | 19 | 2015-12-21T18:25:11.000Z | 2018-11-10T16:53:38.000Z | """
Scripts to clean up the raw PUF before matching
"""
import numpy as np
# RECIDs for aggregate variables by PUF year
AGG_VARS = {
2009: [999999],
2010: [999998, 999999],
2011: [999996, 999997, 999998, 999999],
}
def preppuf(puf, year):
    """Prepare the PUF for matching.

    Args:
        puf (DataFrame): the raw PUF file
        year (int): PUF year, used to look up that year's aggregate
            record IDs in AGG_VARS

    Returns:
        DataFrame: cleaned copy of the PUF with derived variables added
    """
    puf.columns = map(str.lower, puf.columns)
    # drop aggregate variables
    puf = puf[~puf["recid"].isin(AGG_VARS[year])].copy()
    puf["filer"] = 1
    # total dependents across the four dependent-count columns
    puf["depne"] = puf[["xocah", "xocawh", "xoodep", "xopar"]].sum(axis=1)
    # statutory adjustments to income; chained addition (unlike
    # DataFrame.sum) propagates NaN, which appears intentional
    adjust = (
        puf["e03150"]
        + puf["e03210"]
        + puf["e03220"]
        + puf["e03230"]
        + puf["e03260"]
        + puf["e03270"]
        + puf["e03240"]
        + puf["e03290"]
        + puf["e03300"]
        + puf["e03400"]
        + puf["e03500"]
    )
    # total income: AGI (e00100) plus adjustments
    puf["totincx"] = puf["e00100"] + adjust
    puf["sequence"] = puf.index + 1
    puf["soiseq"] = puf.index + 1
    # weights are stored multiplied by 100: rescale, then apply a 3%
    # bump (presumably an extrapolation factor — TODO confirm)
    puf["s006"] /= 100
    puf["s006"] *= 1.03
    puf["dep_stat"] = puf["dsi"]
    # flag records with positive e02400 (presumably an age-related
    # indicator derived from social security income — verify)
    puf["agede"] = np.where(puf["e02400"] > 0, 1, 0)
    return puf
| 22.62 | 74 | 0.531388 | import numpy as np
AGG_VARS = {
2009: [999999],
2010: [999998, 999999],
2011: [999996, 999997, 999998, 999999],
}
def preppuf(puf, year):
puf.columns = map(str.lower, puf.columns)
puf = puf[~puf["recid"].isin(AGG_VARS[year])].copy()
puf["filer"] = 1
puf["depne"] = puf[["xocah", "xocawh", "xoodep", "xopar"]].sum(axis=1)
adjust = (
puf["e03150"]
+ puf["e03210"]
+ puf["e03220"]
+ puf["e03230"]
+ puf["e03260"]
+ puf["e03270"]
+ puf["e03240"]
+ puf["e03290"]
+ puf["e03300"]
+ puf["e03400"]
+ puf["e03500"]
)
puf["totincx"] = puf["e00100"] + adjust
puf["sequence"] = puf.index + 1
puf["soiseq"] = puf.index + 1
puf["s006"] /= 100
puf["s006"] *= 1.03
puf["dep_stat"] = puf["dsi"]
puf["agede"] = np.where(puf["e02400"] > 0, 1, 0)
return puf
| true | true |
f721c76e15725be651a3bdc5d01f71ceb6748e0f | 3,046 | py | Python | nova/scheduler/filters/io_ops_filter.py | bopopescu/nested_quota_final | 7c3454883de9f5368fa943924540eebe157a319d | [
"Apache-2.0"
] | 5 | 2017-06-23T07:37:39.000Z | 2020-10-21T07:07:50.000Z | nova/scheduler/filters/io_ops_filter.py | bopopescu/nested_quota_final | 7c3454883de9f5368fa943924540eebe157a319d | [
"Apache-2.0"
] | null | null | null | nova/scheduler/filters/io_ops_filter.py | bopopescu/nested_quota_final | 7c3454883de9f5368fa943924540eebe157a319d | [
"Apache-2.0"
] | 4 | 2017-06-23T07:37:43.000Z | 2020-12-28T09:57:22.000Z | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
# Module-level logger for this scheduler-filter module.
LOG = logging.getLogger(__name__)

# Upper bound on concurrent "heavy" operations a host may have before the
# I/O-ops filters reject it; registered on the global CONF object below.
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
        default=8,
        help="Tells filters to ignore hosts that have "
            "this many or more instances currently in "
            "build, resize, snapshot, migrate, rescue or unshelve "
            "task states")

CONF = cfg.CONF
CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
    """Reject hosts whose count of in-flight I/O-heavy operations
    (``host_state.num_io_ops``) meets or exceeds the configured maximum.
    """

    def _get_max_io_ops_per_host(self, host_state, filter_properties):
        # Global limit; subclasses may override with a per-host lookup.
        return CONF.max_io_ops_per_host

    def host_passes(self, host_state, filter_properties):
        """Use information about current vm and task states collected from
        compute node statistics to decide whether to filter.
        """
        limit = self._get_max_io_ops_per_host(host_state, filter_properties)
        passes = host_state.num_io_ops < limit
        if not passes:
            LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
                      "is set to %(max_io_ops)s",
                      {'host_state': host_state,
                       'max_io_ops': limit})
        return passes
class AggregateIoOpsFilter(IoOpsFilter):
    """Per-aggregate variant of :class:`IoOpsFilter`.

    Reads ``max_io_ops_per_host`` from the host's aggregate metadata and
    falls back to the global option when no per-aggregate value is found.
    """

    def _get_max_io_ops_per_host(self, host_state, filter_properties):
        # TODO(uni): DB query in filter is a performance hit, especially for
        # system with lots of hosts. Will need a general solution here to fix
        # all filters with aggregate DB call things.
        vals = utils.aggregate_values_from_db(
            filter_properties['context'],
            host_state.host,
            'max_io_ops_per_host')
        try:
            return utils.validate_num_values(
                vals, CONF.max_io_ops_per_host, cast_to=int)
        except ValueError as e:
            # Unparsable metadata: warn and use the global configuration.
            LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
            return CONF.max_io_ops_per_host
| 38.075 | 78 | 0.68155 |
from oslo_config import cfg
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
# Module-level logger for this scheduler-filter module.
LOG = logging.getLogger(__name__)

# Upper bound on concurrent "heavy" operations a host may have before the
# I/O-ops filters reject it; registered on the global CONF object below.
max_io_ops_per_host_opt = cfg.IntOpt("max_io_ops_per_host",
        default=8,
        help="Tells filters to ignore hosts that have "
        "this many or more instances currently in "
        "build, resize, snapshot, migrate, rescue or unshelve "
        "task states")

CONF = cfg.CONF
CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
    """Reject hosts whose count of in-flight I/O-heavy operations
    (``host_state.num_io_ops``) meets or exceeds the configured maximum."""
    def _get_max_io_ops_per_host(self, host_state, filter_properties):
        # Global limit; subclasses may override with a per-host lookup.
        return CONF.max_io_ops_per_host
    def host_passes(self, host_state, filter_properties):
        """Return True when the host is below the max-I/O-ops threshold."""
        num_io_ops = host_state.num_io_ops
        max_io_ops = self._get_max_io_ops_per_host(
            host_state, filter_properties)
        passes = num_io_ops < max_io_ops
        if not passes:
            # Log the rejection with the limit that was applied.
            LOG.debug("%(host_state)s fails I/O ops check: Max IOs per host "
                      "is set to %(max_io_ops)s",
                      {'host_state': host_state,
                       'max_io_ops': max_io_ops})
        return passes
class AggregateIoOpsFilter(IoOpsFilter):
    """Per-aggregate variant: reads ``max_io_ops_per_host`` from the host's
    aggregate metadata, falling back to the global option when unset."""
    def _get_max_io_ops_per_host(self, host_state, filter_properties):
        # Per-aggregate lookup (note: issues a DB query on every call).
        aggregate_vals = utils.aggregate_values_from_db(
            filter_properties['context'],
            host_state.host,
            'max_io_ops_per_host')
        try:
            # Validate/cast the metadata values; the global option is the
            # default when no per-aggregate value exists.
            value = utils.validate_num_values(
                aggregate_vals, CONF.max_io_ops_per_host, cast_to=int)
        except ValueError as e:
            # Unparsable metadata: warn and use the global configuration.
            LOG.warning(_LW("Could not decode max_io_ops_per_host: '%s'"), e)
            value = CONF.max_io_ops_per_host
        return value
f721c81f71df81d9b3a0633b6dd15524b15378d2 | 511 | py | Python | src/web/toga_web/widgets/box.py | luizoti/toga | 3c49e685f325f1aba2ce048b253402d7e4519f97 | [
"BSD-3-Clause"
] | 1,261 | 2019-03-31T16:28:47.000Z | 2022-03-31T09:01:23.000Z | src/web/toga_web/widgets/box.py | luizoti/toga | 3c49e685f325f1aba2ce048b253402d7e4519f97 | [
"BSD-3-Clause"
] | 597 | 2019-04-02T20:02:42.000Z | 2022-03-30T10:28:47.000Z | src/web/toga_web/widgets/box.py | luizoti/toga | 3c49e685f325f1aba2ce048b253402d7e4519f97 | [
"BSD-3-Clause"
] | 318 | 2019-03-31T18:32:00.000Z | 2022-03-30T18:07:13.000Z | from .base import Widget
class Box(Widget):
    """Web backend implementation of the toga ``Box`` container.

    The box renders itself as a ``<div>`` whose body is the HTML of its
    children's implementations, joined line by line.
    """

    def __html__(self):
        # Render every child widget first, then splice into the template.
        rendered_children = "\n".join(
            child._impl.__html__()
            for child in self.interface.children
        )
        return """
            <div id="toga_{id}" class="toga box container" style="{style}">
                {content}
            </div>
        """.format(
            id=self.interface.id,
            content=rendered_children,
            style=''
        )

    def create(self):
        # Nothing to construct eagerly; markup is produced in __html__.
        pass

    def add_child(self, child):
        # Children are read from self.interface.children at render time.
        pass
| 21.291667 | 75 | 0.46771 | from .base import Widget
class Box(Widget):
    """Web backend implementation of the toga ``Box`` container widget."""
    def __html__(self):
        # Render the container div; children's HTML fragments are joined
        # with newlines and spliced into the template body.
        return """
            <div id="toga_{id}" class="toga box container" style="{style}">
                {content}
            </div>
        """.format(
            id=self.interface.id,
            content="\n".join(
                child._impl.__html__()
                for child in self.interface.children
            ),
            style=''
        )
    def create(self):
        # No eager DOM construction; rendering happens in __html__.
        pass
    def add_child(self, child):
        # Children are read from self.interface.children at render time.
        pass
| true | true |
f721c883b5846e17b7af1b266eb11d6fa07f59b0 | 1,769 | py | Python | tests/test_chop_chains_with_plotting.py | jorgellop/orbitize | 82826ac3ddf345198f58cfaaf0662d4e6f5bd135 | [
"BSD-3-Clause-Clear"
] | 60 | 2018-01-12T17:16:53.000Z | 2022-02-14T01:39:39.000Z | tests/test_chop_chains_with_plotting.py | jorgellop/orbitize | 82826ac3ddf345198f58cfaaf0662d4e6f5bd135 | [
"BSD-3-Clause-Clear"
] | 278 | 2018-01-12T17:25:47.000Z | 2022-03-31T21:28:27.000Z | tests/test_chop_chains_with_plotting.py | jorgellop/orbitize | 82826ac3ddf345198f58cfaaf0662d4e6f5bd135 | [
"BSD-3-Clause-Clear"
] | 49 | 2018-10-30T19:34:05.000Z | 2021-10-31T13:28:45.000Z | '''
Make sure orbit plotting can still occur after chopping chains.
'''
import orbitize
from orbitize import driver, DATADIR
import multiprocessing as mp
def verify_results_data(res, sys):
    """Check that a Results object carries the System's data and can plot.

    Verifies that:
      * the ``data`` attribute from the System is carried forward to the
        Results object,
      * the two data tables are equivalent, and
      * plotting orbits for the first companion raises no error.

    Raises:
        AssertionError: if the data is missing or the tables differ.
        Exception: (chained from the original error) if plotting fails.
    """
    # Make sure data attribute from System is carried forward to Results
    assert res.data is not None

    # Make sure the data tables are equivalent between Result and System
    res_data = res.data.to_pandas()
    sys_data = sys.data_table.to_pandas()
    assert res_data.equals(sys_data)

    # Keep the try block narrow: only the plot call itself is guarded.
    epochs = sys.data_table['epoch']
    try:
        res.plot_orbits(
            object_to_plot=1,
            num_orbits_to_plot=10,
            start_mjd=epochs[0]
        )
    except Exception as err:
        # Chain the original error so the real failure stays visible.
        raise Exception("Plotting orbits failed.") from err
def test_chop_chains():
    """Run the MCMC sampler on the HD 4747 data, chop the chains, and
    verify that the trimmed results still carry the data and can plot.
    """
    filename = "{}/HD4747.csv".format(DATADIR)

    # System setup: one companion.
    num_secondary_bodies = 1
    system_mass = 0.84
    plx = 53.18
    mass_err = 0.04
    plx_err = 0.12

    # Sampler configuration: kept short so the chains are cheap to produce.
    num_temps = 5
    num_walkers = 40
    num_threads = mp.cpu_count()
    total_orbits = 5000
    burn_steps = 10
    thin = 2

    my_driver = driver.Driver(
        filename, 'MCMC', num_secondary_bodies, system_mass, plx,
        mass_err=mass_err, plx_err=plx_err,
        system_kwargs={'fit_secondary_mass': True, 'tau_ref_epoch': 0},
        mcmc_kwargs={'num_temps': num_temps, 'num_walkers': num_walkers,
                     'num_threads': num_threads})

    sampler = my_driver.sampler
    sampler.run_sampler(total_orbits, burn_steps=burn_steps, thin=thin)

    # Discard burn-in and trim the tail of each chain, then verify the
    # results object is still fully usable.
    sampler.chop_chains(burn=25, trim=25)
    verify_results_data(sampler.results, my_driver.system)


if __name__ == '__main__':
    test_chop_chains()
| 27.215385 | 96 | 0.726964 | import orbitize
from orbitize import driver, DATADIR
import multiprocessing as mp
def verify_results_data(res, sys):
    """Check that ``res`` (a Results object) carries the same data table
    as ``sys`` (a System object) and that orbit plotting succeeds.
    """
    # The data attribute from System must be carried forward to Results.
    assert res.data is not None
    # The two data tables must be equivalent.
    res_data = res.data.to_pandas()
    sys_data = sys.data_table.to_pandas()
    assert res_data.equals(sys_data) == True
    # Plotting the final orbits must not raise.
    # NOTE(review): the bare ``except`` below discards the original
    # traceback; consider ``except Exception as e: raise ... from e``.
    try:
        epochs = sys.data_table['epoch']
        res.plot_orbits(
            object_to_plot = 1,
            num_orbits_to_plot = 10,
            start_mjd = epochs[0]
        )
    except:
        raise Exception("Plotting orbits failed.")
def test_chop_chains():
    """Run a short MCMC fit on the HD 4747 data, call ``chop_chains`` on
    the sampler afterwards, and verify the results still plot correctly.
    """
    filename = "{}/HD4747.csv".format(DATADIR)
    # System parameters for the fit (one companion).
    num_secondary_bodies = 1
    system_mass = 0.84
    plx = 53.18
    mass_err = 0.04
    plx_err = 0.12
    # MCMC sampler configuration (kept small so the test runs quickly).
    num_temps = 5
    num_walkers = 40
    num_threads = mp.cpu_count()
    total_orbits = 5000
    burn_steps = 10
    thin = 2
    my_driver = driver.Driver(
        filename, 'MCMC', num_secondary_bodies, system_mass, plx, mass_err=mass_err, plx_err=plx_err,
        system_kwargs={'fit_secondary_mass':True, 'tau_ref_epoch':0},
        mcmc_kwargs={'num_temps':num_temps, 'num_walkers':num_walkers, 'num_threads':num_threads})
    my_driver.sampler.run_sampler(total_orbits, burn_steps=burn_steps, thin=thin)
    # Discard burn-in and trim the tail of each walker's chain.
    my_driver.sampler.chop_chains(burn=25, trim=25)
    mcmc_sys = my_driver.system
    mcmc_result = my_driver.sampler.results
    # The chopped results must still carry the data and support plotting.
    verify_results_data(mcmc_result, mcmc_sys)
if __name__ == '__main__':
    test_chop_chains()
| true | true |
f721caa6d4640590b6074dd36aeb070aab2ffcff | 627 | py | Python | minoristaAPI/minorista/models.py | OttoOctavius/Mayorium | 3389b0950047a8b0ae9441f6c0c4c283c319f998 | [
"MIT"
] | null | null | null | minoristaAPI/minorista/models.py | OttoOctavius/Mayorium | 3389b0950047a8b0ae9441f6c0c4c283c319f998 | [
"MIT"
] | 4 | 2020-09-26T12:57:31.000Z | 2020-10-10T14:29:38.000Z | minoristaAPI/minorista/models.py | OttoOctavius/Mayorium | 3389b0950047a8b0ae9441f6c0c4c283c319f998 | [
"MIT"
] | null | null | null | from djongo import models
from django.contrib.auth.models import User
class Minorista(models.Model):
    """Retailer (minorista) profile linked one-to-one to a Django auth ``User``.

    Identity fields (first/last name, username, email, password) live on
    the related ``User``; this model only adds retailer-specific data.
    """

    # NOTE(review): ``readonly_fields`` is a ModelAdmin option and has no
    # effect on a Model; kept for compatibility, but it likely belongs in
    # an admin class instead.
    readonly_fields = ('id',)
    # ``on_delete`` is required since Django 2.0.  CASCADE reproduces the
    # pre-2.0 implicit default: remove the profile with its user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Contact info for the retailer (presumably a phone number -- confirm).
    contacto = models.CharField(max_length=30, default="", editable=False)
#password = models.CharField(max_length=50, default="", editable=False) | 52.25 | 78 | 0.735247 | from djongo import models
from django.contrib.auth.models import User
class Minorista(models.Model):
    """Retailer (minorista) profile linked one-to-one to an auth ``User``."""
    # NOTE(review): ``readonly_fields`` is a ModelAdmin option and has no
    # effect on a Model; confirm whether it belongs in an admin class.
    readonly_fields = ('id',)
    # NOTE(review): Django >= 2.0 requires an ``on_delete`` argument here.
    user = models.OneToOneField(User)
    # Contact info for the retailer (presumably a phone number -- confirm).
    contacto = models.CharField(max_length=30, default="", editable=False)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.