hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7086e93ee1b53d1996a2bb80c4d634d913be312
| 1,136
|
py
|
Python
|
diarypro/diarypro/urls.py
|
abhim4536/remote_repository
|
abbe3eb4fef1eb2b7ca08b98261354c913a1a171
|
[
"MIT"
] | null | null | null |
diarypro/diarypro/urls.py
|
abhim4536/remote_repository
|
abbe3eb4fef1eb2b7ca08b98261354c913a1a171
|
[
"MIT"
] | null | null | null |
diarypro/diarypro/urls.py
|
abhim4536/remote_repository
|
abbe3eb4fef1eb2b7ca08b98261354c913a1a171
|
[
"MIT"
] | null | null | null |
"""diarypro URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from diaryapp import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.home_view, name='home'),
path('event/',views.diary_view, name='create event'),
path('update/<pk>',views.update_diary, name='update'),
path('delete/<pk>',views.delete_diary, name='delete')
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| 36.645161
| 77
| 0.71831
|
from django.contrib import admin
from django.urls import path
from diaryapp import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',views.home_view, name='home'),
path('event/',views.diary_view, name='create event'),
path('update/<pk>',views.update_diary, name='update'),
path('delete/<pk>',views.delete_diary, name='delete')
] + static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| true
| true
|
f708705384503395a107c1f697f4709d5586f514
| 27
|
py
|
Python
|
dataset_generator/learning/imitation/iil-dagger/algorithms/__init__.py
|
rjean/duckie-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | 1
|
2021-02-03T02:23:34.000Z
|
2021-02-03T02:23:34.000Z
|
dataset_generator/learning/imitation/iil-dagger/algorithms/__init__.py
|
rjean/mobile-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | null | null | null |
dataset_generator/learning/imitation/iil-dagger/algorithms/__init__.py
|
rjean/mobile-segmentation
|
5e720e1a96ef61c4560823030549ac1d5d16e2a4
|
[
"Apache-2.0"
] | null | null | null |
from .dagger import DAgger
| 13.5
| 26
| 0.814815
|
from .dagger import DAgger
| true
| true
|
f708712d2f1c53c86869ad65b24cc045d49f7d91
| 16,068
|
py
|
Python
|
src/biotite/sequence/io/gff/file.py
|
padix-key/biopython2experimental
|
d88ab895469f0ab0911056cc5fa16dde5d07fd63
|
[
"BSD-3-Clause"
] | null | null | null |
src/biotite/sequence/io/gff/file.py
|
padix-key/biopython2experimental
|
d88ab895469f0ab0911056cc5fa16dde5d07fd63
|
[
"BSD-3-Clause"
] | null | null | null |
src/biotite/sequence/io/gff/file.py
|
padix-key/biopython2experimental
|
d88ab895469f0ab0911056cc5fa16dde5d07fd63
|
[
"BSD-3-Clause"
] | null | null | null |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
__name__ = "biotite.sequence.io.gff"
__author__ = "Patrick Kunzmann"
__all__ = ["GFFFile"]
import copy
import string
from urllib.parse import quote, unquote
import warnings
from ....file import TextFile, InvalidFileError
from ...annotation import Location
# All punctuation characters except
# percent, semicolon, equals, ampersand, comma
_NOT_QUOTED = "".join(
[char for char in string.punctuation if char not in "%;=&,"]
) + " "
class GFFFile(TextFile):
"""
This class represents a file in *Generic Feature Format 3*
(`GFF3 <https://github.com/The-Sequence-Ontology/Specifications/blob/master/gff3.md>`_)
format.
Similar to GenBank files, GFF3 files contain information about
features of a reference sequence, but in a more concise and better
parsable way.
However, it does not provide additional meta information.
This class serves as low-level API for accessing GFF3 files.
It is used as a sequence of entries, where each entry is defined as
a non-comment and non-directive line.
Each entry consists of values corresponding to the 9 columns of
GFF3:
============== =============================== ==========================================================
**seqid** ``str`` The ID of the reference sequence
**source** ``str`` Source of the data (e.g. ``Genbank``)
**type** ``str`` Type of the feature (e.g. ``CDS``)
**start** ``int`` Start coordinate of feature on the reference sequence
**end** ``int`` End coordinate of feature on the reference sequence
**score** ``float`` or ``None`` Optional score (e.g. an E-value)
**strand** ``Location.Strand`` or ``None`` Strand of the feature, ``None`` if feature is not stranded
**phase** ``int`` or ``None`` Reading frame shift, ``None`` for non-CDS features
**attributes** ``dict`` Additional properties of the feature
============== =============================== ==========================================================
Note that the entry index may not be equal to the line index,
because GFF3 files can contain comment and directive lines.
Notes
-----
Although the GFF3 specification allows mixing in reference sequence
data in FASTA format via the ``##FASTA`` directive, this class does
not support extracting the sequence information.
The content after the ``##FASTA`` directive is simply ignored.
Please provide the sequence via a separate file or read the FASTA
data directly via the :attr:`lines` attribute:
>>> import os.path
>>> from io import StringIO
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "indexing_test.gff3"))
>>> fasta_start_index = None
>>> for directive, line_index in gff_file.directives():
... if directive == "FASTA":
... fasta_start_index = line_index + 1
>>> fasta_data = StringIO("\\n".join(gff_file.lines[fasta_start_index:]))
>>> fasta_file = FastaFile.read(fasta_data)
>>> for seq_string in fasta_file.values():
... print(seq_string[:60] + "...")
TACGTAGCTAGCTGATCGATGTTGTGTGTATCGATCTAGCTAGCTAGCTGACTACACAAT...
Examples
--------
Reading and editing of an existing GFF3 file:
>>> import os.path
>>> gff_file = GFFFile.read(os.path.join(path_to_sequences, "gg_avidin.gff3"))
>>> # Get content of first entry
>>> seqid, source, type, start, end, score, strand, phase, attrib = gff_file[0]
>>> print(seqid)
AJ311647.1
>>> print(source)
EMBL
>>> print(type)
region
>>> print(start)
1
>>> print(end)
1224
>>> print(score)
None
>>> print(strand)
Strand.FORWARD
>>> print(phase)
None
>>> print(attrib)
{'ID': 'AJ311647.1:1..1224', 'Dbxref': 'taxon:9031', 'Name': 'Z', 'chromosome': 'Z', 'gbkey': 'Src', 'mol_type': 'genomic DNA'}
>>> # Edit the first entry: Simply add a score
>>> score = 1.0
>>> gff_file[0] = seqid, source, type, start, end, score, strand, phase, attrib
>>> # Delete first entry
>>> del gff_file[0]
Writing a new GFF3 file:
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> gff_file.append(
... "SomeSeqID", "Biotite", "CDS", 1, 99,
... None, Location.Strand.FORWARD, 0,
... {"ID": "FeatureID", "product":"A protein"}
... )
>>> print(gff_file) #doctest: +NORMALIZE_WHITESPACE
##gff-version 3
##Example directive param1 param2
SomeSeqID Biotite CDS 1 99 . + 0 ID=FeatureID;product=A protein
"""
def __init__(self):
super().__init__()
# Maps entry indices to line indices
self._entries = None
# Stores the directives as (directive text, line index)-tuple
self._directives = None
# Stores whether the file has FASTA data
self._has_fasta = None
self._index_entries()
self.append_directive("gff-version", "3")
@classmethod
def read(cls, file):
"""
Read a GFF3 file.
Parameters
----------
file : file-like object or str
The file to be read.
Alternatively a file path can be supplied.
Returns
-------
file_object : GFFFile
The parsed file.
"""
file = super().read(file)
file._index_entries()
return file
def insert(self, index, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Insert an entry at the given index.
Parameters
----------
index : int
Index where the entry is inserted.
If the index is equal to the length of the file, the entry
is appended at the end of the file.
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if index == len(self):
self.append(seqid, source, type, start, end,
score, strand, phase, attributes)
else:
line_index = self._entries[index]
line = GFFFile._create_line(
seqid, source, type, start, end,
score, strand, phase, attributes
)
self.lines.insert(line_index, line)
self._index_entries()
def append(self, seqid, source, type, start, end,
score, strand, phase, attributes=None):
"""
Append an entry to the end of the file.
Parameters
----------
seqid : str
The ID of the reference sequence.
source : str
Source of the data (e.g. ``Genbank``).
type : str
Type of the feature (e.g. ``CDS``).
start : int
Start coordinate of feature on the reference sequence.
end : int
End coordinate of feature on the reference sequence.
score : float or None
Optional score (e.g. an E-value).
strand : Location.Strand or None
Strand of the feature, ``None`` if feature is not stranded.
phase : int or None
Reading frame shift, ``None`` for non-CDS features.
attributes : dict, optional
Additional properties of the feature.
"""
if self._has_fasta:
raise NotImplementedError(
"Cannot append feature entries, "
"as this file contains additional FASTA data"
)
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attributes
)
self.lines.append(line)
# Fast update of entry index by adding last line
self._entries.append(len(self.lines) - 1)
def append_directive(self, directive, *args):
"""
Append a directive line to the end of the file.
Parameters
----------
directive : str
Name of the directive.
*args : str
Optional parameters for the directive.
Each argument is simply appended to the directive, separated
by a single space character.
Raises
------
NotImplementedError
If the ``##FASTA`` directive is used, which is not
supported.
Examples
--------
>>> gff_file = GFFFile()
>>> gff_file.append_directive("Example directive", "param1", "param2")
>>> print(gff_file)
##gff-version 3
##Example directive param1 param2
"""
if directive.startswith("FASTA"):
raise NotImplementedError(
"Adding FASTA information is not supported"
)
directive_line = "##" + directive + " " + " ".join(args)
self._directives.append((directive_line[2:], len(self.lines)))
self.lines.append(directive_line)
def directives(self):
"""
Get the directives in the file.
Returns
-------
directives : list of tuple(str, int)
A list of directives, sorted by their line order.
The first element of each tuple is the name of the
directive (without ``##``), the second element is the index
of the corresponding line.
"""
# Sort in line order
return sorted(self._directives, key=lambda directive: directive[1])
def __setitem__(self, index, item):
seqid, source, type, start, end, score, strand, phase, attrib = item
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attrib
)
line_index = self._entries[index]
self.lines[line_index] = line
def __getitem__(self, index):
if (index >= 0 and index >= len(self)) or \
(index < 0 and -index > len(self)):
raise IndexError(
f"Index {index} is out of range for GFFFile with "
f"{len(self)} entries"
)
line_index = self._entries[index]
# Columns are tab separated
s = self.lines[line_index].strip().split("\t")
if len(s) != 9:
raise InvalidFileError(f"Expected 9 columns, but got {len(s)}")
seqid, source, type, start, end, score, strand, phase, attrib = s
seqid = unquote(seqid)
source = unquote(source)
type = unquote(type)
start = int(start)
end = int(end)
score = None if score == "." else float(score)
if strand == "+":
strand = Location.Strand.FORWARD
elif strand == "-":
strand = Location.Strand.REVERSE
else:
strand = None
phase = None if phase == "." else int(phase)
attrib = GFFFile._parse_attributes(attrib)
return seqid, source, type, start, end, score, strand, phase, attrib
def __delitem__(self, index):
line_index = self._entries[index]
del self.lines[line_index]
self._index_entries()
def __len__(self):
return len(self._entries)
def _index_entries(self):
"""
Parse the file for comment and directive lines.
Count these lines cumulatively, so that entry indices can be
mapped onto line indices.
Additionally track the line index of directive lines.
"""
self._directives = []
# Worst case allocation -> all lines contain actual entries
self._entries = [None] * len(self.lines)
self._has_fasta = False
entry_counter = 0
for line_i, line in enumerate(self.lines):
if len(line) == 0 or line[0] == " ":
# Empty line -> do nothing
pass
elif line.startswith("#"):
# Comment or directive
if line.startswith("##"):
# Directive
# Omit the leading '##'
self._directives.append((line[2:], line_i))
if line[2:] == "FASTA":
self._has_fasta = True
# This parser does not support bundled FASTA
# data
warnings.warn(
"Biotite does not support FASTA data mixed into "
"GFF files, the FASTA data will be ignored"
)
# To ignore the following FASTA data, stop
# parsing at this point
break
else:
# Actual entry
self._entries[entry_counter] = line_i
entry_counter += 1
# Trim to correct size
self._entries = self._entries[:entry_counter]
@staticmethod
def _create_line(seqid, source, type, start, end,
score, strand, phase, attributes):
"""
Create a line for a newly created entry.
"""
seqid = quote(seqid.strip(), safe=_NOT_QUOTED) \
if seqid is not None else "."
source = quote(source.strip(), safe=_NOT_QUOTED) \
if source is not None else "."
type = type.strip()
# Perform checks
if len(seqid) == 0:
raise ValueError("'seqid' must not be empty")
if len(source) == 0:
raise ValueError("'source' must not be empty")
if len(type) == 0:
raise ValueError("'type' must not be empty")
if seqid[0] == ">":
raise ValueError("'seqid' must not start with '>'")
score = str(score) if score is not None else "."
if strand == Location.Strand.FORWARD:
strand = "+"
elif strand == Location.Strand.REVERSE:
strand = "-"
else:
strand = "."
phase = str(phase) if phase is not None else "."
attributes = ";".join(
[quote(key, safe=_NOT_QUOTED) + "=" + quote(val, safe=_NOT_QUOTED)
for key, val in attributes.items()]
) if attributes is not None and len(attributes) > 0 else "."
return "\t".join(
[seqid, source, type, str(start), str(end),
str(score), strand, phase, attributes]
)
@staticmethod
def _parse_attributes(attributes):
"""
Parse the *attributes* string into a dictionary.
"""
if attributes == ".":
return {}
attrib_dict = {}
attrib_entries = attributes.split(";")
for entry in attrib_entries:
compounds = entry.split("=")
if len(compounds) != 2:
raise InvalidFileError(
f"Attribute entry '{entry}' is invalid"
)
key, val = compounds
attrib_dict[unquote(key)] = unquote(val)
return attrib_dict
| 37.023041
| 131
| 0.545681
|
__name__ = "biotite.sequence.io.gff"
__author__ = "Patrick Kunzmann"
__all__ = ["GFFFile"]
import copy
import string
from urllib.parse import quote, unquote
import warnings
from ....file import TextFile, InvalidFileError
from ...annotation import Location
_NOT_QUOTED = "".join(
[char for char in string.punctuation if char not in "%;=&,"]
) + " "
class GFFFile(TextFile):
def __init__(self):
super().__init__()
self._entries = None
self._directives = None
self._has_fasta = None
self._index_entries()
self.append_directive("gff-version", "3")
@classmethod
def read(cls, file):
file = super().read(file)
file._index_entries()
return file
def insert(self, index, seqid, source, type, start, end,
score, strand, phase, attributes=None):
if index == len(self):
self.append(seqid, source, type, start, end,
score, strand, phase, attributes)
else:
line_index = self._entries[index]
line = GFFFile._create_line(
seqid, source, type, start, end,
score, strand, phase, attributes
)
self.lines.insert(line_index, line)
self._index_entries()
def append(self, seqid, source, type, start, end,
score, strand, phase, attributes=None):
if self._has_fasta:
raise NotImplementedError(
"Cannot append feature entries, "
"as this file contains additional FASTA data"
)
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attributes
)
self.lines.append(line)
self._entries.append(len(self.lines) - 1)
def append_directive(self, directive, *args):
if directive.startswith("FASTA"):
raise NotImplementedError(
"Adding FASTA information is not supported"
)
directive_line = "##" + directive + " " + " ".join(args)
self._directives.append((directive_line[2:], len(self.lines)))
self.lines.append(directive_line)
def directives(self):
return sorted(self._directives, key=lambda directive: directive[1])
def __setitem__(self, index, item):
seqid, source, type, start, end, score, strand, phase, attrib = item
line = GFFFile._create_line(
seqid, source, type, start, end, score, strand, phase, attrib
)
line_index = self._entries[index]
self.lines[line_index] = line
def __getitem__(self, index):
if (index >= 0 and index >= len(self)) or \
(index < 0 and -index > len(self)):
raise IndexError(
f"Index {index} is out of range for GFFFile with "
f"{len(self)} entries"
)
line_index = self._entries[index]
s = self.lines[line_index].strip().split("\t")
if len(s) != 9:
raise InvalidFileError(f"Expected 9 columns, but got {len(s)}")
seqid, source, type, start, end, score, strand, phase, attrib = s
seqid = unquote(seqid)
source = unquote(source)
type = unquote(type)
start = int(start)
end = int(end)
score = None if score == "." else float(score)
if strand == "+":
strand = Location.Strand.FORWARD
elif strand == "-":
strand = Location.Strand.REVERSE
else:
strand = None
phase = None if phase == "." else int(phase)
attrib = GFFFile._parse_attributes(attrib)
return seqid, source, type, start, end, score, strand, phase, attrib
def __delitem__(self, index):
line_index = self._entries[index]
del self.lines[line_index]
self._index_entries()
def __len__(self):
return len(self._entries)
def _index_entries(self):
self._directives = []
self._entries = [None] * len(self.lines)
self._has_fasta = False
entry_counter = 0
for line_i, line in enumerate(self.lines):
if len(line) == 0 or line[0] == " ":
pass
elif line.startswith("#"):
if line.startswith("##"):
self._directives.append((line[2:], line_i))
if line[2:] == "FASTA":
self._has_fasta = True
warnings.warn(
"Biotite does not support FASTA data mixed into "
"GFF files, the FASTA data will be ignored"
)
break
else:
self._entries[entry_counter] = line_i
entry_counter += 1
self._entries = self._entries[:entry_counter]
@staticmethod
def _create_line(seqid, source, type, start, end,
score, strand, phase, attributes):
seqid = quote(seqid.strip(), safe=_NOT_QUOTED) \
if seqid is not None else "."
source = quote(source.strip(), safe=_NOT_QUOTED) \
if source is not None else "."
type = type.strip()
if len(seqid) == 0:
raise ValueError("'seqid' must not be empty")
if len(source) == 0:
raise ValueError("'source' must not be empty")
if len(type) == 0:
raise ValueError("'type' must not be empty")
if seqid[0] == ">":
raise ValueError("'seqid' must not start with '>'")
score = str(score) if score is not None else "."
if strand == Location.Strand.FORWARD:
strand = "+"
elif strand == Location.Strand.REVERSE:
strand = "-"
else:
strand = "."
phase = str(phase) if phase is not None else "."
attributes = ";".join(
[quote(key, safe=_NOT_QUOTED) + "=" + quote(val, safe=_NOT_QUOTED)
for key, val in attributes.items()]
) if attributes is not None and len(attributes) > 0 else "."
return "\t".join(
[seqid, source, type, str(start), str(end),
str(score), strand, phase, attributes]
)
@staticmethod
def _parse_attributes(attributes):
if attributes == ".":
return {}
attrib_dict = {}
attrib_entries = attributes.split(";")
for entry in attrib_entries:
compounds = entry.split("=")
if len(compounds) != 2:
raise InvalidFileError(
f"Attribute entry '{entry}' is invalid"
)
key, val = compounds
attrib_dict[unquote(key)] = unquote(val)
return attrib_dict
| true
| true
|
f708725b346619d5750d7805256a417ecccab059
| 3,436
|
py
|
Python
|
tests/st/auto_monad/test_auto_monad_layer.py
|
PowerOlive/mindspore
|
bda20724a94113cedd12c3ed9083141012da1f15
|
[
"Apache-2.0"
] | 3,200
|
2020-02-17T12:45:41.000Z
|
2022-03-31T20:21:16.000Z
|
tests/st/auto_monad/test_auto_monad_layer.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 176
|
2020-02-12T02:52:11.000Z
|
2022-03-28T22:15:55.000Z
|
tests/st/auto_monad/test_auto_monad_layer.py
|
zimo-geek/mindspore
|
665ec683d4af85c71b2a1f0d6829356f2bc0e1ff
|
[
"Apache-2.0"
] | 621
|
2020-03-09T01:31:41.000Z
|
2022-03-30T03:43:19.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tqdm import tqdm
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.dataset import NumpySlicesDataset
from mindspore import context, Tensor
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class AutoEncoderTrainNetwork(nn.Cell):
def __init__(self):
super(AutoEncoderTrainNetwork, self).__init__()
self.loss_fun = nn.MSELoss()
self.net = nn.CellList([nn.Dense(2, 32), nn.Dense(32, 2)])
self.relu = nn.ReLU()
def reconstruct_sample(self, x: Tensor):
for _, layer in enumerate(self.net):
x = layer(x)
x = self.relu(x)
return x
def construct(self, x: Tensor):
recon_x = self.reconstruct_sample(x)
return self.loss_fun(recon_x, x)
def sample_2d_data(self, n_normals=2000, n_outliers=400):
z = np.random.randn(n_normals, 2)
outliers = np.random.uniform(low=-6, high=6, size=(n_outliers, 2))
centers = np.array([(2., 0), (-2., 0)])
sigma = 0.3
normal_points = sigma * z + centers[np.random.randint(len(centers), size=(n_normals,))]
return np.vstack((normal_points, outliers))
def create_synthetic_dataset(self):
transformed_dataset = self.sample_2d_data()
for dim in range(transformed_dataset.shape[1]):
min_val = transformed_dataset[:, dim].min()
max_val = transformed_dataset[:, dim].max()
if min_val != max_val:
transformed_dataset[:, dim] = (transformed_dataset[:, dim] - min_val) / (max_val - min_val)
elif min_val != 1:
transformed_dataset[:, dim] = transformed_dataset[:, dim] / min_val
transformed_dataset = transformed_dataset.astype(np.float32)
return transformed_dataset
def test_auto_monad_layer():
ae_with_loss = AutoEncoderTrainNetwork()
transformed_dataset = ae_with_loss.create_synthetic_dataset()
dataloader = NumpySlicesDataset(data=(transformed_dataset,), shuffle=True)
dataloader = dataloader.batch(batch_size=16)
optim = nn.RMSProp(params=ae_with_loss.trainable_params(), learning_rate=0.002,)
train_net = nn.TrainOneStepCell(ae_with_loss, optim)
train_net.set_train()
gen_samples = dict()
num_epoch = 21
for epoch in tqdm(range(num_epoch)):
loss = []
for _, (batch,) in enumerate(dataloader):
batch = Tensor(batch, dtype=ms.float32)
loss_ = train_net(batch)
loss.append(loss_.asnumpy())
avg_loss = np.array(loss).mean()
if epoch % 10 == 0:
gen_samples[epoch] = ae_with_loss.reconstruct_sample(Tensor(transformed_dataset)).asnumpy()
print(f"epoch: {epoch}/{num_epoch}, avg loss: {avg_loss}")
| 41.902439
| 107
| 0.659488
|
from tqdm import tqdm
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.dataset import NumpySlicesDataset
from mindspore import context, Tensor
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class AutoEncoderTrainNetwork(nn.Cell):
def __init__(self):
super(AutoEncoderTrainNetwork, self).__init__()
self.loss_fun = nn.MSELoss()
self.net = nn.CellList([nn.Dense(2, 32), nn.Dense(32, 2)])
self.relu = nn.ReLU()
def reconstruct_sample(self, x: Tensor):
for _, layer in enumerate(self.net):
x = layer(x)
x = self.relu(x)
return x
def construct(self, x: Tensor):
recon_x = self.reconstruct_sample(x)
return self.loss_fun(recon_x, x)
def sample_2d_data(self, n_normals=2000, n_outliers=400):
z = np.random.randn(n_normals, 2)
outliers = np.random.uniform(low=-6, high=6, size=(n_outliers, 2))
centers = np.array([(2., 0), (-2., 0)])
sigma = 0.3
normal_points = sigma * z + centers[np.random.randint(len(centers), size=(n_normals,))]
return np.vstack((normal_points, outliers))
def create_synthetic_dataset(self):
transformed_dataset = self.sample_2d_data()
for dim in range(transformed_dataset.shape[1]):
min_val = transformed_dataset[:, dim].min()
max_val = transformed_dataset[:, dim].max()
if min_val != max_val:
transformed_dataset[:, dim] = (transformed_dataset[:, dim] - min_val) / (max_val - min_val)
elif min_val != 1:
transformed_dataset[:, dim] = transformed_dataset[:, dim] / min_val
transformed_dataset = transformed_dataset.astype(np.float32)
return transformed_dataset
def test_auto_monad_layer():
ae_with_loss = AutoEncoderTrainNetwork()
transformed_dataset = ae_with_loss.create_synthetic_dataset()
dataloader = NumpySlicesDataset(data=(transformed_dataset,), shuffle=True)
dataloader = dataloader.batch(batch_size=16)
optim = nn.RMSProp(params=ae_with_loss.trainable_params(), learning_rate=0.002,)
train_net = nn.TrainOneStepCell(ae_with_loss, optim)
train_net.set_train()
gen_samples = dict()
num_epoch = 21
for epoch in tqdm(range(num_epoch)):
loss = []
for _, (batch,) in enumerate(dataloader):
batch = Tensor(batch, dtype=ms.float32)
loss_ = train_net(batch)
loss.append(loss_.asnumpy())
avg_loss = np.array(loss).mean()
if epoch % 10 == 0:
gen_samples[epoch] = ae_with_loss.reconstruct_sample(Tensor(transformed_dataset)).asnumpy()
print(f"epoch: {epoch}/{num_epoch}, avg loss: {avg_loss}")
| true
| true
|
f708726a644aec2224002f120ebd80965a6d2684
| 2,156
|
py
|
Python
|
Receipt.py
|
michael-canaran/python-practice
|
cdd99db85be39e5b6d3241fe84f2501fba64f567
|
[
"MIT"
] | null | null | null |
Receipt.py
|
michael-canaran/python-practice
|
cdd99db85be39e5b6d3241fe84f2501fba64f567
|
[
"MIT"
] | null | null | null |
Receipt.py
|
michael-canaran/python-practice
|
cdd99db85be39e5b6d3241fe84f2501fba64f567
|
[
"MIT"
] | 1
|
2020-01-05T06:49:03.000Z
|
2020-01-05T06:49:03.000Z
|
from datetime import *
class Receipt:
def __init__(self, member_number):
# Initialize the receipt as a list for future modifications (adding and removing items)
self.member_items = []
self.member_number = member_number
self.total = 0.0
self.total_tax = 0.0
# Adds items to the member's receipt and displays the current total with tax
def add_item(self, item):
self.member_items.append(item)
item_price = item.floatPrice
self.total_tax += item.tax
self.total += item_price + item.tax
print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
print("TAX: %26.2f" % (self.total_tax))
print("TOTAL: %24.2f" % (self.total))
# Removes items from the receipt and displays the current total with tax
def remove_item(self, item):
if item in self.member_items:
self.member_items.remove(item)
self.total_tax -= item.tax
self.total -= item.floatPrice
print("REMOVED")
print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
print("TAX: %26.2f" % (self.total_tax))
print("TOTAL: %24.2f" % (self.total))
elif len(self.member_items) == 0:
print("No items in the receipt")
else:
print("Item does not exist in member's receipt")
# Finalizes the receipt string and returns it to the POS
def finalize_receipt(self):
# Initialize the receipt string
final_receipt = " RECEIPT\nMembership Number: " + (self.member_number + "\n")
total = 0.0
total_tax = 0.0
final_receipt += "ITEMS:\n"
for item in self.member_items:
final_receipt += ("{0:<20} {1:>10}\n".format(item.name, str(item.floatPrice)))
total_tax += item.tax
total += total_tax + item.floatPrice
final_receipt += ("\nTAX: %26.2f\n" % (self.total_tax))
final_receipt += ("TOTAL: %24.2f\n" % (self.total))
final_receipt += str(date.today())
return final_receipt
| 39.925926
| 98
| 0.579314
|
from datetime import *
class Receipt:
    """In-memory POS receipt: tracks items, a running total and accumulated tax.

    Items are any objects exposing ``name``, ``floatPrice`` and ``tax``
    attributes (duck-typed; confirmed by how add_item/remove_item use them).
    """

    def __init__(self, member_number):
        self.member_items = []          # items currently on the receipt
        self.member_number = member_number
        self.total = 0.0                # running total *including* tax
        self.total_tax = 0.0            # accumulated tax only

    def add_item(self, item):
        """Append *item* and print the line item plus running totals."""
        self.member_items.append(item)
        self.total_tax += item.tax
        self.total += item.floatPrice + item.tax
        print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
        print("TAX: %26.2f" % (self.total_tax))
        print("TOTAL: %24.2f" % (self.total))

    def remove_item(self, item):
        """Remove *item*, rolling back exactly what add_item contributed."""
        if item in self.member_items:
            self.member_items.remove(item)
            self.total_tax -= item.tax
            # BUG FIX: add_item added price *plus* tax, so subtract both
            # (the original only subtracted the price).
            self.total -= item.floatPrice + item.tax
            print("REMOVED")
            print("{0:<20} {1:>10}".format(item.name, str(item.floatPrice)))
            print("TAX: %26.2f" % (self.total_tax))
            print("TOTAL: %24.2f" % (self.total))
        elif len(self.member_items) == 0:
            print("No items in the receipt")
        else:
            print("Item does not exist in member's receipt")

    def finalize_receipt(self):
        """Return the full printable receipt as one string.

        Uses the running self.total / self.total_tax; the previous local
        re-accumulation was dead code that compounded tax and has been removed.
        """
        final_receipt = " RECEIPT\nMembership Number: " + (self.member_number + "\n")
        final_receipt += "ITEMS:\n"
        for item in self.member_items:
            final_receipt += ("{0:<20} {1:>10}\n".format(item.name, str(item.floatPrice)))
        final_receipt += ("\nTAX: %26.2f\n" % (self.total_tax))
        final_receipt += ("TOTAL: %24.2f\n" % (self.total))
        final_receipt += str(date.today())  # date-stamp the receipt
        return final_receipt
| true
| true
|
f708727a0170e03a37c3bf2596268f06778aa9bf
| 15,458
|
py
|
Python
|
tests/hikari/internal/test_routes.py
|
Lunarmagpie/hikari
|
3f4fed67f76c655845d379066f9d192e7dffd0b0
|
[
"MIT"
] | null | null | null |
tests/hikari/internal/test_routes.py
|
Lunarmagpie/hikari
|
3f4fed67f76c655845d379066f9d192e7dffd0b0
|
[
"MIT"
] | null | null | null |
tests/hikari/internal/test_routes.py
|
Lunarmagpie/hikari
|
3f4fed67f76c655845d379066f9d192e7dffd0b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import mock
import pytest
from hikari import files
from hikari.internal import routes
from tests.hikari import hikari_test_helpers
class TestCompiledRoute:
    """Tests for routes.CompiledRoute: URL construction and bucket hashing."""
    @pytest.fixture()
    def compiled_route(self):
        # A GET route compiled to /some/endpoint with major-param hash "abc123".
        return routes.CompiledRoute(
            major_param_hash="abc123", route=mock.Mock(method="GET"), compiled_path="/some/endpoint"
        )
    def test_method(self, compiled_route):
        # .method is delegated to the underlying Route (mocked here).
        assert compiled_route.method == "GET"
    def test_create_url(self, compiled_route):
        # Base URL + compiled path, joined verbatim.
        assert compiled_route.create_url("https://some.url/api") == "https://some.url/api/some/endpoint"
    def test_create_real_bucket_hash(self, compiled_route):
        # Real bucket hash format: "<initial hash>;<major param hash>".
        assert compiled_route.create_real_bucket_hash("UNKNOWN") == "UNKNOWN;abc123"
    def test__str__(self, compiled_route):
        # str() renders "<METHOD> <path>".
        assert str(compiled_route) == "GET /some/endpoint"
class TestRoute:
    """Tests for routes.Route: major-param detection and compilation."""
    @pytest.mark.parametrize(
        ("route", "params"),
        [
            (routes.DELETE_CHANNEL, frozenset(("channel",))),
            (routes.PATCH_GUILD, frozenset(("guild",))),
            (routes.POST_WEBHOOK_WITH_TOKEN, frozenset(("webhook", "token"))),
            # Routes with no major params report None rather than an empty set.
            (routes.GET_INVITE, None),
        ],
    )
    def test_major_params(self, route, params):
        assert route.major_params == params
    def test_compile_with_no_major_params(self):
        # Non-major params still substitute into the path; hash stays "-".
        route = routes.Route(method="GET", path_template="/some/endpoint/{baguette}")
        expected = routes.CompiledRoute(route=route, compiled_path="/some/endpoint/1234", major_param_hash="-")
        assert route.compile(baguette=1234) == expected
    def test_compile_with_channel_major_params(self):
        route = routes.Route(method="GET", path_template="/channels/{channel}")
        expected = routes.CompiledRoute(route=route, compiled_path="/channels/4325", major_param_hash="4325")
        assert route.compile(channel=4325) == expected
    def test_compile_with_guild_major_params(self):
        route = routes.Route(method="GET", path_template="/guilds/{guild}")
        expected = routes.CompiledRoute(route=route, compiled_path="/guilds/5555", major_param_hash="5555")
        assert route.compile(guild=5555) == expected
    def test_compile_with_webhook_major_params(self):
        # Multiple major params are joined into the hash with ":".
        route = routes.Route(method="GET", path_template="/webhooks/{webhook}/{token}")
        expected = routes.CompiledRoute(
            route=route, compiled_path="/webhooks/123/okfdkdfkdf", major_param_hash="123:okfdkdfkdf"
        )
        assert route.compile(webhook=123, token="okfdkdfkdf") == expected
    def test__str__(self):
        # str(Route) is the raw path template (no method prefix).
        assert str(routes.Route(method="GET", path_template="/some/endpoint/{channel}")) == "/some/endpoint/{channel}"
class TestCDNRoute:
    """Tests for routes.CDNRoute: format validation, animated-hash rules,
    size validation/query-string handling, and compile/compile_to_file."""
    # --- valid_formats construction and normalization ---
    def test_zero_formats_results_in_error(self):
        with pytest.raises(ValueError, match="/foo/bar must have at least one valid format set"):
            routes.CDNRoute("/foo/bar", set())
    def test_any_formats_results_in_no_error(self):
        routes.CDNRoute("/foo/bar", {"do", "ray", "me"})
    def test_formats_converted_to_frozenset(self):
        route = routes.CDNRoute("/foo/bar", {"i", "really", "like", "cats"})
        assert isinstance(route.valid_formats, frozenset)
        assert route.valid_formats == {"i", "really", "like", "cats"}
    def test_formats_converted_to_lower(self):
        route = routes.CDNRoute("/foo/bar", {"FOO", "BaR", "bAz", "bork"})
        assert route.valid_formats == {"foo", "bar", "baz", "bork"}
    # --- equality/hash are driven by path_template only ---
    def test_eq_operator__considers_path_template_only(self):
        route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
        route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
        route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
        route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
        assert route1 == route2
        assert route1 == route3
        assert route1 != route4
        assert route2 == route3
        assert route2 != route4
        assert route3 != route4
    def test_hash_operator_considers_path_template_only(self):
        route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
        route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
        route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
        route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
        assert hash(route1) == hash(route2)
        assert hash(route1) == hash(route3)
        assert hash(route1) != hash(route4)
        assert hash(route2) == hash(route3)
        assert hash(route2) != hash(route4)
        assert hash(route3) != hash(route4)
    # --- file_format handling in compile() ---
    @pytest.mark.parametrize(
        ("input_file_format", "expected_file_format"),
        [
            ("jpg", "jpg"),
            ("JPG", "jpg"),
            ("png", "png"),
            ("PNG", "png"),
        ],
    )
    def test_compile_uses_lowercase_file_format_always(self, input_file_format, expected_file_format):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
        compiled_url = route.compile("http://example.com", file_format=input_file_format)
        assert compiled_url.endswith(f".{expected_file_format}"), f"compiled_url={compiled_url}"
    def test_disallowed_file_format_raises_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
        with pytest.raises(TypeError):
            route.compile("http://example.com", file_format="gif")
    def test_allowed_file_format_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
        route.compile("http://example.com", file_format="png")
    # --- gif is only valid for animated hashes (those prefixed "a_") ---
    def test_requesting_gif_on_non_animated_hash_raises_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
        with pytest.raises(TypeError):
            route.compile("http://example.com", file_format="gif", hash="boooob")
    @pytest.mark.parametrize("format", ["png", "jpg", "webp"])
    def test_requesting_non_gif_on_non_animated_hash_does_not_raise_TypeError(self, format):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
        route.compile("http://example.com", file_format=format, hash="boooob")
    @pytest.mark.parametrize("format", ["png", "jpg", "webp"])
    def test_requesting_non_gif_on_animated_hash_does_not_raise_TypeError(self, format):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
        route.compile("http://example.com", file_format=format, hash="a_boooob")
    def test_requesting_gif_on_animated_hash_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
        route.compile("http://example.com", file_format="gif", hash="a_boooob")
    def test_requesting_gif_without_passing_hash_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
        route.compile("http://example.com", file_format="gif")
    # --- size parameter: only allowed on sizable routes ---
    def test_passing_size_on_non_sizable_raises_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
        with pytest.raises(TypeError):
            route.compile("http://example.com", file_format="png", hash="boooob", size=128)
    def test_passing_size_on_sizable_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        route.compile("http://example.com", file_format="png", hash="boooob", size=128)
    def test_passing_no_size_on_non_sizable_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
        route.compile("http://example.com", file_format="png", hash="boooob")
    def test_passing_no_size_on_sizable_does_not_raise_TypeError(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        route.compile("http://example.com", file_format="png", hash="boooob")
    # --- size must be a power of 2 in [16, 4096] ---
    @pytest.mark.parametrize("size", [*range(17, 32)])
    def test_passing_non_power_of_2_sizes_to_sizable_raises_ValueError(self, size):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
            route.compile("http://example.com", file_format="png", hash="boooob", size=size)
    @pytest.mark.parametrize("size", [int(2 ** size) for size in [1, *range(17, 25)]])
    def test_passing_invalid_magnitude_sizes_to_sizable_raises_ValueError(self, size):
        # NOTE(review): {"png", "jpg", "png"} repeats "png" — possibly "gif"
        # was intended here and in the negative-size test below; confirm.
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
        with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
            route.compile("http://example.com", file_format="png", hash="boooob", size=size)
    @pytest.mark.parametrize("size", [*range(-10, 0)])
    def test_passing_negative_sizes_to_sizable_raises_ValueError(self, size):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
        with pytest.raises(ValueError, match="size must be positive"):
            route.compile("http://example.com", file_format="png", hash="boooob", size=size)
    @pytest.mark.parametrize("size", [int(2 ** size) for size in range(4, 13)])
    def test_passing_valid_sizes_to_sizable_does_not_raise_ValueError(self, size):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        route.compile("http://example.com", file_format="png", hash="boooob", size=size)
    # --- size renders as a ?size= query string; None/absent means no query ---
    def test_passing_size_adds_query_string(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=128)
        assert compiled_url.endswith(".png?size=128"), f"compiled_url={compiled_url}"
    def test_passing_None_size_does_not_add_query_string(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=None)
        assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
    def test_passing_no_size_does_not_add_query_string(self):
        route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
        compiled_url = route.compile("http://example.com", file_format="png", hash="boooob")
        assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
    # --- end-to-end URL construction (templating, %-encoding, extension, query) ---
    @pytest.mark.parametrize(
        ("base_url", "template", "format", "size_kwds", "foo", "bar", "expected_url"),
        [
            (
                "http://example.com",
                "/{foo}/{bar}",
                "PNG",
                {"size": 128},
                "baz",
                "bork qux",
                "http://example.com/baz/bork%20qux.png?size=128",
            ),
            (
                "http://example.com",
                "/{foo}/bar",
                "jpg",
                {"size": 128},
                "baz",
                "bork qux",
                "http://example.com/baz/bar.jpg?size=128",
            ),
            (
                "http://example.com",
                "/{foo}/{bar}",
                "WEBP",
                {"size": None},
                "baz",
                123456,
                "http://example.com/baz/123456.webp",
            ),
            (
                "http://example.com",
                "/{foo}/bar",
                "GIF",
                {"size": None},
                "baz",
                "bork qux",
                "http://example.com/baz/bar.gif",
            ),
            (
                "http://example.com",
                "/{foo}/{bar}",
                "WEBP",
                {},
                "baz",
                "bork qux",
                "http://example.com/baz/bork%20qux.webp",
            ),
            (
                "http://example.com",
                "/{foo}/bar",
                "GIF",
                {},
                "baz",
                "bork qux",
                "http://example.com/baz/bar.gif",
            ),
        ],
    )
    def test_compile_generates_expected_url(self, base_url, template, format, size_kwds, foo, bar, expected_url):
        route = routes.CDNRoute(template, {"png", "gif", "jpg", "webp"}, sizable=True)
        actual_url = route.compile(base_url=base_url, file_format=format, foo=foo, bar=bar, **size_kwds)
        assert actual_url == expected_url
    # --- compile_to_file delegates to compile() and wraps the result in files.URL ---
    @pytest.mark.parametrize("format", ["png", "jpg"])
    @pytest.mark.parametrize("size", [64, 256, 2048])
    def test_compile_to_file_calls_compile(self, format, size):
        with mock.patch.object(files, "URL", autospec=files.URL):
            route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
                "/hello/world", {"png", "jpg"}, sizable=True
            )
            route.compile = mock.Mock(spec_set=route.compile)
            route.compile_to_file("https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb")
            route.compile.assert_called_once_with(
                "https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb"
            )
    def test_compile_to_file_passes_compile_result_to_URL_and_returns_constructed_url(self):
        resultant_url_str = "http://blep.com/hello/world/weeb/oyy%20lumo"
        resultant_url = files.URL("http://blep.com/hello/world/weeb/oyy%20lumo")
        with mock.patch.object(files, "URL", autospec=files.URL, return_value=resultant_url) as URL:
            route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
                "/hello/world/{nya}/{boop}", {"png", "jpg"}, sizable=True
            )
            route.compile = mock.Mock(spec_set=route.compile, return_value=resultant_url_str)
            result = route.compile_to_file("https://blep.com", file_format="png", size=64, boop="oyy lumo", nya="weeb")
            URL.assert_called_once_with(resultant_url_str)
            assert result is resultant_url
| 47.709877
| 119
| 0.630677
|
import mock
import pytest
from hikari import files
from hikari.internal import routes
from tests.hikari import hikari_test_helpers
class TestCompiledRoute:
@pytest.fixture()
def compiled_route(self):
return routes.CompiledRoute(
major_param_hash="abc123", route=mock.Mock(method="GET"), compiled_path="/some/endpoint"
)
def test_method(self, compiled_route):
assert compiled_route.method == "GET"
def test_create_url(self, compiled_route):
assert compiled_route.create_url("https://some.url/api") == "https://some.url/api/some/endpoint"
def test_create_real_bucket_hash(self, compiled_route):
assert compiled_route.create_real_bucket_hash("UNKNOWN") == "UNKNOWN;abc123"
def test__str__(self, compiled_route):
assert str(compiled_route) == "GET /some/endpoint"
class TestRoute:
@pytest.mark.parametrize(
("route", "params"),
[
(routes.DELETE_CHANNEL, frozenset(("channel",))),
(routes.PATCH_GUILD, frozenset(("guild",))),
(routes.POST_WEBHOOK_WITH_TOKEN, frozenset(("webhook", "token"))),
(routes.GET_INVITE, None),
],
)
def test_major_params(self, route, params):
assert route.major_params == params
def test_compile_with_no_major_params(self):
route = routes.Route(method="GET", path_template="/some/endpoint/{baguette}")
expected = routes.CompiledRoute(route=route, compiled_path="/some/endpoint/1234", major_param_hash="-")
assert route.compile(baguette=1234) == expected
def test_compile_with_channel_major_params(self):
route = routes.Route(method="GET", path_template="/channels/{channel}")
expected = routes.CompiledRoute(route=route, compiled_path="/channels/4325", major_param_hash="4325")
assert route.compile(channel=4325) == expected
def test_compile_with_guild_major_params(self):
route = routes.Route(method="GET", path_template="/guilds/{guild}")
expected = routes.CompiledRoute(route=route, compiled_path="/guilds/5555", major_param_hash="5555")
assert route.compile(guild=5555) == expected
def test_compile_with_webhook_major_params(self):
route = routes.Route(method="GET", path_template="/webhooks/{webhook}/{token}")
expected = routes.CompiledRoute(
route=route, compiled_path="/webhooks/123/okfdkdfkdf", major_param_hash="123:okfdkdfkdf"
)
assert route.compile(webhook=123, token="okfdkdfkdf") == expected
def test__str__(self):
assert str(routes.Route(method="GET", path_template="/some/endpoint/{channel}")) == "/some/endpoint/{channel}"
class TestCDNRoute:
def test_zero_formats_results_in_error(self):
with pytest.raises(ValueError, match="/foo/bar must have at least one valid format set"):
routes.CDNRoute("/foo/bar", set())
def test_any_formats_results_in_no_error(self):
routes.CDNRoute("/foo/bar", {"do", "ray", "me"})
def test_formats_converted_to_frozenset(self):
route = routes.CDNRoute("/foo/bar", {"i", "really", "like", "cats"})
assert isinstance(route.valid_formats, frozenset)
assert route.valid_formats == {"i", "really", "like", "cats"}
def test_formats_converted_to_lower(self):
route = routes.CDNRoute("/foo/bar", {"FOO", "BaR", "bAz", "bork"})
assert route.valid_formats == {"foo", "bar", "baz", "bork"}
def test_eq_operator__considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert route1 == route2
assert route1 == route3
assert route1 != route4
assert route2 == route3
assert route2 != route4
assert route3 != route4
def test_hash_operator_considers_path_template_only(self):
route1 = routes.CDNRoute("/foo/bar", {"hello", "world"}, sizable=False)
route2 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=True)
route3 = routes.CDNRoute("/foo/bar", {"i", "said", "meow"}, sizable=False)
route4 = routes.CDNRoute("/foo/bar/baz", {"i", "said", "meow"}, sizable=True)
assert hash(route1) == hash(route2)
assert hash(route1) == hash(route3)
assert hash(route1) != hash(route4)
assert hash(route2) == hash(route3)
assert hash(route2) != hash(route4)
assert hash(route3) != hash(route4)
@pytest.mark.parametrize(
("input_file_format", "expected_file_format"),
[
("jpg", "jpg"),
("JPG", "jpg"),
("png", "png"),
("PNG", "png"),
],
)
def test_compile_uses_lowercase_file_format_always(self, input_file_format, expected_file_format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
compiled_url = route.compile("http://example.com", file_format=input_file_format)
assert compiled_url.endswith(f".{expected_file_format}"), f"compiled_url={compiled_url}"
def test_disallowed_file_format_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif")
def test_allowed_file_format_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg"}, sizable=False)
route.compile("http://example.com", file_format="png")
def test_requesting_gif_on_non_animated_hash_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="gif", hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_non_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="boooob")
@pytest.mark.parametrize("format", ["png", "jpg", "webp"])
def test_requesting_non_gif_on_animated_hash_does_not_raise_TypeError(self, format):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "webp", "gif"}, sizable=False)
route.compile("http://example.com", file_format=format, hash="a_boooob")
def test_requesting_gif_on_animated_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif", hash="a_boooob")
def test_requesting_gif_without_passing_hash_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="gif")
def test_passing_size_on_non_sizable_raises_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
with pytest.raises(TypeError):
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=128)
def test_passing_no_size_on_non_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=False)
route.compile("http://example.com", file_format="png", hash="boooob")
def test_passing_no_size_on_sizable_does_not_raise_TypeError(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob")
@pytest.mark.parametrize("size", [*range(17, 32)])
def test_passing_non_power_of_2_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in [1, *range(17, 25)]])
def test_passing_invalid_magnitude_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
with pytest.raises(ValueError, match="size must be an integer power of 2 between 16 and 4096 inclusive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [*range(-10, 0)])
def test_passing_negative_sizes_to_sizable_raises_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "png"}, sizable=True)
with pytest.raises(ValueError, match="size must be positive"):
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
@pytest.mark.parametrize("size", [int(2 ** size) for size in range(4, 13)])
def test_passing_valid_sizes_to_sizable_does_not_raise_ValueError(self, size):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
route.compile("http://example.com", file_format="png", hash="boooob", size=size)
def test_passing_size_adds_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=128)
assert compiled_url.endswith(".png?size=128"), f"compiled_url={compiled_url}"
def test_passing_None_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob", size=None)
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
def test_passing_no_size_does_not_add_query_string(self):
route = routes.CDNRoute("/foo/bar", {"png", "jpg", "gif"}, sizable=True)
compiled_url = route.compile("http://example.com", file_format="png", hash="boooob")
assert "?size=" not in compiled_url, f"compiled_url={compiled_url}"
@pytest.mark.parametrize(
("base_url", "template", "format", "size_kwds", "foo", "bar", "expected_url"),
[
(
"http://example.com",
"/{foo}/{bar}",
"PNG",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.png?size=128",
),
(
"http://example.com",
"/{foo}/bar",
"jpg",
{"size": 128},
"baz",
"bork qux",
"http://example.com/baz/bar.jpg?size=128",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{"size": None},
"baz",
123456,
"http://example.com/baz/123456.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{"size": None},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
(
"http://example.com",
"/{foo}/{bar}",
"WEBP",
{},
"baz",
"bork qux",
"http://example.com/baz/bork%20qux.webp",
),
(
"http://example.com",
"/{foo}/bar",
"GIF",
{},
"baz",
"bork qux",
"http://example.com/baz/bar.gif",
),
],
)
def test_compile_generates_expected_url(self, base_url, template, format, size_kwds, foo, bar, expected_url):
route = routes.CDNRoute(template, {"png", "gif", "jpg", "webp"}, sizable=True)
actual_url = route.compile(base_url=base_url, file_format=format, foo=foo, bar=bar, **size_kwds)
assert actual_url == expected_url
@pytest.mark.parametrize("format", ["png", "jpg"])
@pytest.mark.parametrize("size", [64, 256, 2048])
def test_compile_to_file_calls_compile(self, format, size):
with mock.patch.object(files, "URL", autospec=files.URL):
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile)
route.compile_to_file("https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb")
route.compile.assert_called_once_with(
"https://blep.com", file_format=format, size=size, boop="oyy lumo", nya="weeb"
)
def test_compile_to_file_passes_compile_result_to_URL_and_returns_constructed_url(self):
resultant_url_str = "http://blep.com/hello/world/weeb/oyy%20lumo"
resultant_url = files.URL("http://blep.com/hello/world/weeb/oyy%20lumo")
with mock.patch.object(files, "URL", autospec=files.URL, return_value=resultant_url) as URL:
route = hikari_test_helpers.mock_class_namespace(routes.CDNRoute, slots_=False)(
"/hello/world/{nya}/{boop}", {"png", "jpg"}, sizable=True
)
route.compile = mock.Mock(spec_set=route.compile, return_value=resultant_url_str)
result = route.compile_to_file("https://blep.com", file_format="png", size=64, boop="oyy lumo", nya="weeb")
URL.assert_called_once_with(resultant_url_str)
assert result is resultant_url
| true
| true
|
f70872dd11437754839b9ea7a1f1e156077dfca1
| 3,023
|
py
|
Python
|
linked-list/linked_list.py
|
souparnabose99/data-structures-python
|
a98dd261644a59438fb75d7dc2b21cf159d5c41b
|
[
"MIT"
] | null | null | null |
linked-list/linked_list.py
|
souparnabose99/data-structures-python
|
a98dd261644a59438fb75d7dc2b21cf159d5c41b
|
[
"MIT"
] | null | null | null |
linked-list/linked_list.py
|
souparnabose99/data-structures-python
|
a98dd261644a59438fb75d7dc2b21cf159d5c41b
|
[
"MIT"
] | null | null | null |
class Node:
    """Single element of the singly linked list."""
    def __init__(self, data):
        self.data = data        # payload (any type)
        self.next_node = None   # link to the next node, or None at the tail
class LinkedList:
    """Singly linked list with O(1) head insertion and a tracked node count."""
    def __init__(self):
        self.head = None
        self.no_of_nodes = 0
    # O(1) for insertion at the start of LL
    def insert_at_start(self, data):
        self.no_of_nodes = self.no_of_nodes + 1
        new_node = Node(data)
        # Works for the empty list too: new_node.next_node just stays None.
        new_node.next_node = self.head
        self.head = new_node
    # O(N) for insertion at the end of LL
    def insert_at_end(self, data):
        self.no_of_nodes = self.no_of_nodes + 1
        new_node = Node(data)
        # BUG FIX: the original unconditionally walked self.head.next_node
        # and raised AttributeError when the list was empty.
        if self.head is None:
            self.head = new_node
            return
        actual_node = self.head
        while actual_node.next_node is not None:
            actual_node = actual_node.next_node
        actual_node.next_node = new_node
    # O(1)
    def size_of_ll(self):
        return self.no_of_nodes
    # O(N): print every payload, head to tail
    def traverse_ll(self):
        actual_node = self.head
        while actual_node is not None:
            print(actual_node.data)
            actual_node = actual_node.next_node
    def remove_from_ll(self, data):
        """Remove the first node whose data equals *data*; no-op if absent."""
        if self.head is None:
            return
        actual_node = self.head
        previous_node = None
        while actual_node is not None and actual_node.data != data:
            previous_node = actual_node
            actual_node = actual_node.next_node
        # Item not present in Linked List
        if actual_node is None:
            return
        # Decrease node count for deletion
        self.no_of_nodes = self.no_of_nodes - 1
        if previous_node is None:
            self.head = actual_node.next_node
        else:
            previous_node.next_node = actual_node.next_node
    # O(N) runtime complexity: slow/fast pointer technique.
    # NOTE(review): assumes a non-empty list — crashes on an empty one.
    def find_middle_node(self):
        fast_pointer = self.head
        slow_pointer = self.head
        while fast_pointer.next_node and fast_pointer.next_node.next_node:
            fast_pointer = fast_pointer.next_node.next_node
            slow_pointer = slow_pointer.next_node
        return slow_pointer.data
    # O(N) runtime complexity: reverse links in place, head becomes old tail
    def reverse_ll_in_place(self):
        previous_node = None
        current_node = self.head
        while current_node is not None:
            next_node = current_node.next_node
            current_node.next_node = previous_node
            previous_node = current_node
            current_node = next_node
        self.head = previous_node
        return
# Demo / manual smoke test: exercises insertion at both ends, traversal,
# middle lookup and in-place reversal.  Output goes to stdout.
ll = LinkedList()
ll.insert_at_start(15)
ll.insert_at_start(8)
ll.insert_at_start(5)
ll.insert_at_end(6)
ll.insert_at_end(76)
ll.insert_at_end(43)
# Mixed payload types are allowed — nodes hold arbitrary data.
ll.insert_at_start("Yo")
ll.traverse_ll()
print("Size : ", ll.size_of_ll())
# ll.remove_from_ll(8)
print("---------")
# ll.traverse_ll()
print("Size : ", ll.size_of_ll())
print(ll.find_middle_node())
ll.reverse_ll_in_place()
ll.traverse_ll()
| 26.752212
| 75
| 0.604036
|
class Node:
def __init__(self, data):
self.data = data
self.next_node = None
class LinkedList:
def __init__(self):
self.head = None
self.no_of_nodes = 0
def insert_at_start(self, data):
self.no_of_nodes = self.no_of_nodes + 1
new_node = Node(data)
if self.head is None:
self.head = new_node
else:
new_node.next_node = self.head
self.head = new_node
def insert_at_end(self, data):
self.no_of_nodes = self.no_of_nodes + 1
new_node = Node(data)
actual_node = self.head
while actual_node.next_node is not None:
actual_node = actual_node.next_node
actual_node.next_node = new_node
def size_of_ll(self):
return self.no_of_nodes
def traverse_ll(self):
actual_node = self.head
while actual_node is not None:
print(actual_node.data)
actual_node = actual_node.next_node
def remove_from_ll(self, data):
if self.head is None:
return
actual_node = self.head
previous_node = None
while actual_node is not None and actual_node.data != data:
previous_node = actual_node
actual_node = actual_node.next_node
if actual_node is None:
return
self.no_of_nodes = self.no_of_nodes - 1
if previous_node is None:
self.head = actual_node.next_node
else:
previous_node.next_node = actual_node.next_node
def find_middle_node(self):
fast_pointer = self.head
slow_pointer = self.head
while fast_pointer.next_node and fast_pointer.next_node.next_node:
fast_pointer = fast_pointer.next_node.next_node
slow_pointer = slow_pointer.next_node
return slow_pointer.data
def reverse_ll_in_place(self):
previous_node = None
current_node = self.head
next_node = None
while current_node is not None:
next_node = current_node.next_node
current_node.next_node = previous_node
previous_node = current_node
current_node = next_node
self.head = previous_node
return
ll = LinkedList()
ll.insert_at_start(15)
ll.insert_at_start(8)
ll.insert_at_start(5)
ll.insert_at_end(6)
ll.insert_at_end(76)
ll.insert_at_end(43)
ll.insert_at_start("Yo")
ll.traverse_ll()
print("Size : ", ll.size_of_ll())
print("---------")
print("Size : ", ll.size_of_ll())
print(ll.find_middle_node())
ll.reverse_ll_in_place()
ll.traverse_ll()
| true
| true
|
f70873fcb4f81fac7415f87caf48b171944f2b25
| 4,959
|
py
|
Python
|
src/reporter/query_NTNENA.py
|
cnoelle/ngsi-timeseries-api
|
77ed420c0a7532bcc13d941c0402f457cc40407a
|
[
"MIT"
] | null | null | null |
src/reporter/query_NTNENA.py
|
cnoelle/ngsi-timeseries-api
|
77ed420c0a7532bcc13d941c0402f457cc40407a
|
[
"MIT"
] | null | null | null |
src/reporter/query_NTNENA.py
|
cnoelle/ngsi-timeseries-api
|
77ed420c0a7532bcc13d941c0402f457cc40407a
|
[
"MIT"
] | null | null | null |
from exceptions.exceptions import NGSIUsageError
from utils.jsondict import lookup_string_match
from flask import request
from reporter.reporter import _validate_query_params
from translators.crate import CrateTranslatorInstance
import logging
from .geo_query_handler import handle_geo_query
def query_NTNENA(id_=None,  # In Query
                 attrs=None,
                 type_=None,
                 aggr_method=None,
                 aggr_period=None,
                 aggr_scope=None,
                 options=None,
                 from_date=None,
                 to_date=None,
                 last_n=None,
                 limit=10000,
                 offset=0,
                 georel=None,
                 geometry=None,
                 coords=None):
    """
    See /v2/attrs in API Specification quantumleap.yml.

    Queries historical records and groups them first by attribute name and
    then by entity type.

    Returns either a dict ``{'attrs': [...]}`` on success, or an
    ``(error_body, status_code)`` tuple for validation errors (4xx),
    translator errors (400/500) and empty result sets (404).
    """
    r, c = _validate_query_params(attrs, aggr_period, aggr_method, aggr_scope,
                                  options)
    if c != 200:
        return r, c

    r, c, geo_query = handle_geo_query(georel, geometry, coords)
    if r:
        return r, c

    # The API accepts comma-separated attribute names as a single string.
    if attrs is not None:
        attrs = attrs.split(',')

    # Multi-tenancy headers (both optional).
    fiware_s = request.headers.get('fiware-service', None)
    fiware_sp = request.headers.get('fiware-servicepath', None)

    entities = None
    entity_ids = None
    if id_:
        entity_ids = [s.strip() for s in id_.split(',') if s]
    try:
        with CrateTranslatorInstance() as trans:
            entities = trans.query(attr_names=attrs,
                                   entity_type=type_,
                                   entity_ids=entity_ids,
                                   aggr_method=aggr_method,
                                   aggr_period=aggr_period,
                                   aggr_scope=aggr_scope,
                                   from_date=from_date,
                                   to_date=to_date,
                                   last_n=last_n,
                                   limit=limit,
                                   offset=offset,
                                   fiware_service=fiware_s,
                                   fiware_servicepath=fiware_sp,
                                   geo_query=geo_query)
    except NGSIUsageError as e:
        msg = "Bad Request Error: {}".format(e)
        logging.getLogger().error(msg, exc_info=True)
        return msg, 400
    except Exception as e:
        msg = "Something went wrong with QL. Error: {}".format(e)
        logging.getLogger().error(msg, exc_info=True)
        return msg, 500

    attrs_names = []
    attrs_values = []
    ignore = ('id', 'index', 'type')
    if entities:
        # Union of attribute names across all entities, keeping first-seen
        # order within each entity's alphabetically sorted keys.
        for e in entities:
            attrs = [at for at in sorted(e.keys()) if at not in ignore]
            for at in attrs:
                if at not in attrs_names:
                    attrs_names.append(at)

        for at in attrs_names:
            entity_type = []
            entity_types = []
            entity_value = []
            for e in entities:
                matched_attr = lookup_string_match(e, at)
                if matched_attr is not None:
                    # With a global aggregation (method but no period) the
                    # index collapses to the requested date range.
                    index = [from_date or '', to_date or ''] if aggr_method and not aggr_period else e['index']
                    entity = {
                        'entityId': e['id'],
                        'index': index,
                        'values': matched_attr['values'] if matched_attr else [],
                    }
                    if e['type'] not in entity_types:
                        # First entity of this type: open a new type bucket.
                        entity_value = [entity]
                        entity_ty = {
                            'entityType': e['type'],
                            'entities': entity_value
                        }
                        entity_type.append(entity_ty)
                        entity_types.append(e['type'])
                    else:
                        # Same type seen before: extend the current bucket and
                        # rebuild its dict entry.
                        # NOTE(review): if entity types interleave (A, B, A),
                        # this pops the *last* bucket, which may belong to a
                        # different type — this assumes results arrive grouped
                        # by type; verify the translator's ordering.
                        entity_value.append(entity)
                        entity_type.pop()
                        entity_ty = {
                            'entityType': e['type'],
                            'entities': entity_value
                        }
                        entity_type.append(entity_ty)
            attrs_value = {
                'attrName': at,
                'types': entity_type
            }
            attrs_values.append(attrs_value)
        res = {
            'attrs': attrs_values
        }
        return res

    r = {
        "error": "Not Found",
        "description": "No records were found for such query."
    }
    return r, 404
def query_NTNENA_value(*args, **kwargs):
    """Same as :func:`query_NTNENA`, but the payload key is ``values``
    instead of ``attrs``.  Error tuples pass through unchanged."""
    response = query_NTNENA(*args, **kwargs)
    if isinstance(response, dict):
        response['values'] = response['attrs']
        del response['attrs']
    return response
| 35.676259
| 111
| 0.461182
|
from exceptions.exceptions import NGSIUsageError
from utils.jsondict import lookup_string_match
from flask import request
from reporter.reporter import _validate_query_params
from translators.crate import CrateTranslatorInstance
import logging
from .geo_query_handler import handle_geo_query
def query_NTNENA(id_=None,
attrs=None,
type_=None,
aggr_method=None,
aggr_period=None,
aggr_scope=None,
options=None,
from_date=None,
to_date=None,
last_n=None,
limit=10000,
offset=0,
georel=None,
geometry=None,
coords=None):
r, c = _validate_query_params(attrs, aggr_period, aggr_method, aggr_scope,
options)
if c != 200:
return r, c
r, c, geo_query = handle_geo_query(georel, geometry, coords)
if r:
return r, c
if attrs is not None:
attrs = attrs.split(',')
fiware_s = request.headers.get('fiware-service', None)
fiware_sp = request.headers.get('fiware-servicepath', None)
entities = None
entity_ids = None
if id_:
entity_ids = [s.strip() for s in id_.split(',') if s]
try:
with CrateTranslatorInstance() as trans:
entities = trans.query(attr_names=attrs,
entity_type=type_,
entity_ids=entity_ids,
aggr_method=aggr_method,
aggr_period=aggr_period,
aggr_scope=aggr_scope,
from_date=from_date,
to_date=to_date,
last_n=last_n,
limit=limit,
offset=offset,
fiware_service=fiware_s,
fiware_servicepath=fiware_sp,
geo_query=geo_query)
except NGSIUsageError as e:
msg = "Bad Request Error: {}".format(e)
logging.getLogger().error(msg, exc_info=True)
return msg, 400
except Exception as e:
msg = "Something went wrong with QL. Error: {}".format(e)
logging.getLogger().error(msg, exc_info=True)
return msg, 500
attributes = []
entries = []
attrs_names = []
attrs_values = []
ignore = ('id', 'index', 'type')
if entities:
for e in entities:
attrs = [at for at in sorted(e.keys()) if at not in ignore]
for at in attrs:
if at not in attrs_names:
attrs_names.append(at)
for at in attrs_names:
entity_type = []
entity_types = []
entity_value = []
for e in entities:
matched_attr = lookup_string_match(e, at)
if matched_attr is not None:
index = [from_date or '', to_date or ''] if aggr_method and not aggr_period else e['index']
entity = {
'entityId': e['id'],
'index': index,
'values': matched_attr['values'] if matched_attr else [],
}
if e['type'] not in entity_types:
entity_value = []
entity_value.append(entity)
entity_ty = {
'entityType': e['type'],
'entities': entity_value
}
entity_type.append(entity_ty)
entity_types.append(e['type'])
else:
entity_value.append(entity)
entity_type.pop()
entity_ty = {
'entityType': e['type'],
'entities': entity_value
}
entity_type.append(entity_ty)
attrs_value = {
'attrName': at,
'types': entity_type
}
attrs_values.append(attrs_value)
res = {
'attrs': attrs_values
}
return res
r = {
"error": "Not Found",
"description": "No records were found for such query."
}
return r, 404
def query_NTNENA_value(*args, **kwargs):
res = query_NTNENA(*args, **kwargs)
if isinstance(res, dict):
res['values'] = res['attrs']
res.pop('attrs', None)
return res
| true
| true
|
f708740d1be7649c7d1e4311e0c6417e165b7497
| 5,009
|
py
|
Python
|
lab4/text_recognizer/models/line_cnn.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | 1
|
2021-03-16T11:00:42.000Z
|
2021-03-16T11:00:42.000Z
|
lab4/text_recognizer/models/line_cnn.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab4/text_recognizer/models/line_cnn.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
CONV_DIM = 64
FC_DIM = 128
WINDOW_WIDTH = 28
WINDOW_STRIDE = 28


class ConvBlock(nn.Module):
    """
    Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.
    """

    def __init__(self, input_channels: int, output_channels: int, kernel_size: int = 3, stride: int = 1) -> None:
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        x
            of dimensions (B, C, H, W)

        Returns
        -------
        torch.Tensor
            of dimensions (B, C, H, W)
        """
        c = self.conv(x)
        r = self.relu(c)
        return r


class LineCNN(nn.Module):
    """
    Model that uses a simple CNN to process an image of a line of characters with a window, outputting a sequence of logits.
    """

    def __init__(
        self,
        data_config: Dict[str, Any],
        args: argparse.Namespace = None,
    ) -> None:
        super().__init__()
        self.data_config = data_config
        self.args = vars(args) if args is not None else {}
        self.num_classes = len(data_config["mapping"])
        self.output_length = data_config["output_dims"][0]
        self.limit_output_length = self.args.get("limit_output_length", False)

        _C, H, _W = data_config["input_dims"]
        conv_dim = self.args.get("conv_dim", CONV_DIM)
        fc_dim = self.args.get("fc_dim", FC_DIM)
        self.WW = self.args.get("window_width", WINDOW_WIDTH)
        self.WS = self.args.get("window_stride", WINDOW_STRIDE)

        # Input is (1, H, W)
        self.conv1 = ConvBlock(1, conv_dim)
        self.conv2 = ConvBlock(conv_dim, conv_dim)
        self.conv3 = ConvBlock(conv_dim, conv_dim, stride=2)
        # Conv math! https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
        # OW = torch.floor((W // 2 - WW // 2) + 1)
        self.conv4 = ConvBlock(conv_dim, fc_dim, kernel_size=(H // 2, self.WW // 2), stride=(H // 2, self.WS // 2))
        self.dropout = nn.Dropout(0.25)
        self.fc1 = nn.Linear(fc_dim, fc_dim)
        self.fc2 = nn.Linear(fc_dim, self.num_classes)

        self._init_weights()

    def _init_weights(self):
        """
        A better weight initialization scheme than PyTorch default.

        See https://github.com/pytorch/pytorch/issues/18182
        """
        for m in self.modules():
            if type(m) in {
                nn.Conv2d,
                nn.Conv3d,
                nn.ConvTranspose2d,
                nn.ConvTranspose3d,
                nn.Linear,
            }:
                nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    _fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)
                    bound = 1 / math.sqrt(fan_out)
                    nn.init.normal_(m.bias, -bound, bound)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Parameters
        ----------
        x
            (B, 1, H, W) input image

        Returns
        -------
        torch.Tensor
            (B, C, S) logits, where S is the length of the sequence and C is the number of classes
            S can be computed from W and self.window_width
            C is self.num_classes
        """
        _B, _C, _H, W = x.shape
        x = self.conv1(x)  # -> (B, CONV_DIM, H, W)
        x = self.conv2(x)  # -> (B, CONV_DIM, H, W)
        x = self.conv3(x)  # -> (B, CONV_DIM, H//2, W//2)
        OW = math.floor((W // 2 + 2 - self.WW // 2) / (self.WS // 2) + 1)
        x = self.conv4(x)  # -> (B, FC_DIM, 1, OW)
        assert x.shape[-1] == OW
        # BUGFIX: squeeze only the height dim (dim 2). A bare squeeze() also
        # removed the batch dim when B == 1, crashing the permute below.
        x = x.squeeze(2).permute(0, 2, 1)  # -> (B, OW, FC_DIM)
        x = F.relu(self.fc1(x))  # -> (B, OW, FC_DIM)
        x = self.dropout(x)
        x = self.fc2(x)  # -> (B, OW, self.C)
        x = x.permute(0, 2, 1)  # -> (B, self.C, OW)
        if self.limit_output_length:
            x = x[:, :, : self.output_length]
        return x

    @staticmethod
    def add_to_argparse(parser):
        """Register this model's hyperparameters on an argparse parser."""
        parser.add_argument("--conv_dim", type=int, default=CONV_DIM)
        parser.add_argument("--fc_dim", type=int, default=FC_DIM)
        parser.add_argument(
            "--window_width",
            type=int,
            default=WINDOW_WIDTH,
            help="Width of the window that will slide over the input image.",
        )
        parser.add_argument(
            "--window_stride",
            type=int,
            default=WINDOW_STRIDE,
            help="Stride of the window that will slide over the input image.",
        )
        parser.add_argument("--limit_output_length", action="store_true", default=False)
        return parser
| 34.544828
| 124
| 0.5556
|
from typing import Any, Dict
import argparse
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
CONV_DIM = 64
FC_DIM = 128
WINDOW_WIDTH = 28
WINDOW_STRIDE = 28
class ConvBlock(nn.Module):
def __init__(self, input_channels: int, output_channels: int, kernel_size: int = 3, stride: int = 1) -> None:
super().__init__()
self.conv = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=stride, padding=1)
self.relu = nn.ReLU()
def forward(self, x: torch.Tensor) -> torch.Tensor:
c = self.conv(x)
r = self.relu(c)
return r
class LineCNN(nn.Module):
def __init__(
self,
data_config: Dict[str, Any],
args: argparse.Namespace = None,
) -> None:
super().__init__()
self.data_config = data_config
self.args = vars(args) if args is not None else {}
self.num_classes = len(data_config["mapping"])
self.output_length = data_config["output_dims"][0]
self.limit_output_length = self.args.get("limit_output_length", False)
_C, H, _W = data_config["input_dims"]
conv_dim = self.args.get("conv_dim", CONV_DIM)
fc_dim = self.args.get("fc_dim", FC_DIM)
self.WW = self.args.get("window_width", WINDOW_WIDTH)
self.WS = self.args.get("window_stride", WINDOW_STRIDE)
self.conv1 = ConvBlock(1, conv_dim)
self.conv2 = ConvBlock(conv_dim, conv_dim)
self.conv3 = ConvBlock(conv_dim, conv_dim, stride=2)
self.conv4 = ConvBlock(conv_dim, fc_dim, kernel_size=(H // 2, self.WW // 2), stride=(H // 2, self.WS // 2))
self.dropout = nn.Dropout(0.25)
self.fc1 = nn.Linear(fc_dim, fc_dim)
self.fc2 = nn.Linear(fc_dim, self.num_classes)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if type(m) in {
nn.Conv2d,
nn.Conv3d,
nn.ConvTranspose2d,
nn.ConvTranspose3d,
nn.Linear,
}:
nn.init.kaiming_normal_(m.weight.data, a=0, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
_fan_in, fan_out = nn.init._calculate_fan_in_and_fan_out(m.weight.data)
bound = 1 / math.sqrt(fan_out)
nn.init.normal_(m.bias, -bound, bound)
def forward(self, x: torch.Tensor) -> torch.Tensor:
_B, _C, _H, W = x.shape
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
OW = math.floor((W // 2 + 2 - self.WW // 2) / (self.WS // 2) + 1)
x = self.conv4(x)
assert x.shape[-1] == OW
x = x.squeeze().permute(0, 2, 1)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
x = x.permute(0, 2, 1)
if self.limit_output_length:
x = x[:, :, : self.output_length]
return x
@staticmethod
def add_to_argparse(parser):
parser.add_argument("--conv_dim", type=int, default=CONV_DIM)
parser.add_argument("--fc_dim", type=int, default=FC_DIM)
parser.add_argument(
"--window_width",
type=int,
default=WINDOW_WIDTH,
help="Width of the window that will slide over the input image.",
)
parser.add_argument(
"--window_stride",
type=int,
default=WINDOW_STRIDE,
help="Stride of the window that will slide over the input image.",
)
parser.add_argument("--limit_output_length", action="store_true", default=False)
return parser
| true
| true
|
f708744ed8de3fca2afaf6f8806fb9da1e654edb
| 3,110
|
py
|
Python
|
murano-7.0.0/contrib/plugins/cloudify_plugin/murano_cloudify_plugin/cloudify_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 91
|
2015-04-26T16:05:03.000Z
|
2021-12-28T07:12:33.000Z
|
murano-7.0.0/contrib/plugins/cloudify_plugin/murano_cloudify_plugin/cloudify_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
murano-7.0.0/contrib/plugins/cloudify_plugin/murano_cloudify_plugin/cloudify_client.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 61
|
2015-05-19T22:56:34.000Z
|
2021-06-01T05:38:53.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import cloudify_rest_client
import cloudify_rest_client.exceptions as cloudify_exceptions
from murano.dsl import dsl
from oslo_config import cfg as config
from yaql.language import specs
from yaql.language import yaqltypes
import cfg
CONF = config.CONF
archive_upload_lock = threading.Lock()
class CloudifyClient(object):
    """Murano-side wrapper around the Cloudify REST client.

    One instance is bound to a single application: the blueprint id is
    derived from the app's type name/version and the deployment id from
    the app's object id.
    """

    @specs.parameter('app', dsl.MuranoObjectParameter('io.murano.Application'))
    def __init__(self, app):
        manager_host = self.CONF.cloudify_manager
        self._client = cloudify_rest_client.CloudifyClient(manager_host)
        self._blueprint_id = '{0}-{1}'.format(app.type.name, app.type.version)
        self._deployment_id = app.id
        self._application_package = app.package

    @specs.parameter('entry_point', yaqltypes.String())
    def publish_blueprint(self, entry_point):
        """Upload the app's blueprint unless it is already registered."""
        if self._check_blueprint_exists():
            return
        blueprint_path = self._application_package.get_resource(entry_point)
        # Serialize uploads process-wide; a concurrent upload of the same
        # blueprint surfaces as a 409, which is not an error for us.
        with archive_upload_lock:
            try:
                self._client.blueprints.upload(
                    blueprint_path, self._blueprint_id)
            except cloudify_exceptions.CloudifyClientError as e:
                if e.status_code != 409:
                    raise

    def _check_blueprint_exists(self):
        """Return True when the blueprint is known to the manager."""
        try:
            self._client.blueprints.get(self._blueprint_id)
        except cloudify_exceptions.CloudifyClientError as e:
            if e.status_code == 404:
                return False
            raise
        else:
            return True

    @specs.parameter('parameters', dict)
    def create_deployment(self, parameters=None):
        """Create the deployment for this app from its blueprint."""
        self._client.deployments.create(
            self._blueprint_id, self._deployment_id, parameters)

    def delete_deployment(self):
        """Delete this app's deployment."""
        self._client.deployments.delete(self._deployment_id)

    def wait_deployment_ready(self):
        """Block (polling every 3 s) until no execution is pending/started,
        then return the deployment outputs."""
        while True:
            executions = self._client.executions.list(self._deployment_id)
            still_running = any(
                e.status in ('pending', 'started') for e in executions)
            if not still_running:
                deployment = self._client.deployments.get(self._deployment_id)
                return deployment.outputs
            time.sleep(3)

    @specs.parameter('name', yaqltypes.String())
    @specs.parameter('parameters', dict)
    def execute_workflow(self, name, parameters=None):
        """Start the named workflow on this app's deployment."""
        self._client.executions.start(self._deployment_id, name, parameters)

    @classmethod
    def init_plugin(cls):
        cls.CONF = cfg.init_config(CONF)
| 34.94382
| 79
| 0.688424
|
import threading
import time
import cloudify_rest_client
import cloudify_rest_client.exceptions as cloudify_exceptions
from murano.dsl import dsl
from oslo_config import cfg as config
from yaql.language import specs
from yaql.language import yaqltypes
import cfg
CONF = config.CONF
archive_upload_lock = threading.Lock()
class CloudifyClient(object):
@specs.parameter('app', dsl.MuranoObjectParameter('io.murano.Application'))
def __init__(self, app):
cloudify_manager = self.CONF.cloudify_manager
self._client = cloudify_rest_client.CloudifyClient(cloudify_manager)
self._blueprint_id = '{0}-{1}'.format(app.type.name, app.type.version)
self._deployment_id = app.id
self._application_package = app.package
@specs.parameter('entry_point', yaqltypes.String())
def publish_blueprint(self, entry_point):
global archive_upload_lock
if self._check_blueprint_exists():
return
path = self._application_package.get_resource(entry_point)
with archive_upload_lock:
try:
self._client.blueprints.upload(
path, self._blueprint_id)
except cloudify_exceptions.CloudifyClientError as e:
if e.status_code != 409:
raise
def _check_blueprint_exists(self):
try:
self._client.blueprints.get(self._blueprint_id)
return True
except cloudify_exceptions.CloudifyClientError as e:
if e.status_code == 404:
return False
raise
@specs.parameter('parameters', dict)
def create_deployment(self, parameters=None):
self._client.deployments.create(
self._blueprint_id, self._deployment_id, parameters)
def delete_deployment(self):
self._client.deployments.delete(self._deployment_id)
def wait_deployment_ready(self):
while True:
executions = self._client.executions.list(self._deployment_id)
if any(t.status in ('pending', 'started') for t in executions):
time.sleep(3)
else:
deployment = self._client.deployments.get(self._deployment_id)
return deployment.outputs
@specs.parameter('name', yaqltypes.String())
@specs.parameter('parameters', dict)
def execute_workflow(self, name, parameters=None):
self._client.executions.start(self._deployment_id, name, parameters)
@classmethod
def init_plugin(cls):
cls.CONF = cfg.init_config(CONF)
| true
| true
|
f708746a2f008b37d6cbf4a6b54de754b18a4b02
| 744
|
py
|
Python
|
get_some_food/users/forms.py
|
asergeenko/get_some_food
|
a9cfc776193287d2f375437420e985961688d6ed
|
[
"MIT"
] | null | null | null |
get_some_food/users/forms.py
|
asergeenko/get_some_food
|
a9cfc776193287d2f375437420e985961688d6ed
|
[
"MIT"
] | null | null | null |
get_some_food/users/forms.py
|
asergeenko/get_some_food
|
a9cfc776193287d2f375437420e985961688d6ed
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import forms as admin_forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class UserChangeForm(admin_forms.UserChangeForm):
    """Admin user-change form rebound to the project's custom user model."""

    class Meta(admin_forms.UserChangeForm.Meta):
        model = User
class UserCreationForm(admin_forms.UserCreationForm):
    """Admin user-creation form for the custom user model with translated
    field labels and a custom duplicate-username message."""

    class Meta(admin_forms.UserCreationForm.Meta):
        model = User
        # Override the default "unique" validation message for username.
        error_messages = {
            "username": {"unique": _("This username has already been taken.")}
        }
        # Human-readable, translatable labels for the form fields.
        labels = {
            'username': _('Name of the user'),
            'password': _('User password'),
            'email': _('User email'),
            'avatar': _('User avatar')
        }
| 28.615385
| 78
| 0.645161
|
from django.contrib.auth import forms as admin_forms
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class UserChangeForm(admin_forms.UserChangeForm):
class Meta(admin_forms.UserChangeForm.Meta):
model = User
class UserCreationForm(admin_forms.UserCreationForm):
class Meta(admin_forms.UserCreationForm.Meta):
model = User
error_messages = {
"username": {"unique": _("This username has already been taken.")}
}
labels = {
'username': _('Name of the user'),
'password': _('User password'),
'email': _('User email'),
'avatar': _('User avatar')
}
| true
| true
|
f7087490f6a7e5fd6864626d44154053d111408a
| 998
|
py
|
Python
|
Keyless Keyed Transpositional Cipher/compare index and list.py
|
AshwinBalaji52/Mobile-Computing-and-Security
|
a0404f0835169f3496f0b8be4ea20f953503b0a0
|
[
"MIT"
] | null | null | null |
Keyless Keyed Transpositional Cipher/compare index and list.py
|
AshwinBalaji52/Mobile-Computing-and-Security
|
a0404f0835169f3496f0b8be4ea20f953503b0a0
|
[
"MIT"
] | null | null | null |
Keyless Keyed Transpositional Cipher/compare index and list.py
|
AshwinBalaji52/Mobile-Computing-and-Security
|
a0404f0835169f3496f0b8be4ea20f953503b0a0
|
[
"MIT"
] | null | null | null |
"""Keyless transposition demo: shuffle a list, record where each original
element landed, then reconstruct the original order from those positions."""
from random import shuffle

words = ['B', 'A', 'L', 'K', 'J', 'I']

# Shuffled copy plays the role of the "encrypted" ordering.
newwords = words.copy()
shuffle(newwords)

# index[i] = position of words[i] inside newwords.
# NOTE: assumes the elements are unique (they are here); with duplicates the
# original nested-loop version recorded every matching position.
index = [newwords.index(letter) for letter in words]

print("Original list: ", words)
print("Index: ", index)
print("New list: ", newwords)

# Pair each shuffled letter with its position: [(0, 'K'), (1, 'B'), ...]
intermediate = list(enumerate(newwords))
print(intermediate)

# Reorder the (position, letter) pairs back into the original order, then
# strip the positions to recover the decrypted letters.
# (Renamed loop variables: the original shadowed the builtin `tuple`.)
res = [pair for pos in index for pair in intermediate if pair[0] == pos]
decrypt_list = [letter for _pos, letter in res]

print(res)
print(decrypt_list)
| 22.681818
| 72
| 0.643287
|
from random import shuffle
counter=1
index = []
decrypt_list = []
intermediate = []
words = ['B', 'A', 'L', 'K','J','I']
newwords = words.copy()
shuffle(newwords)
for i in range(len(words)):
for j in range(len(newwords)):
if(words[i]==newwords[j]):
index.append(j)
print("Original list: ",words)
print("Index: ",index)
print("New list: ",newwords)
for i in range(len(newwords)):
intermediate.append((i,newwords[i]))
print(intermediate)
res = [tuple for x in index for tuple in intermediate if tuple[0] == x]
for i in res:
tuples = i
alphabet = tuples[1]
decrypt_list.append(alphabet)
print(res)
print(decrypt_list)
| true
| true
|
f70875724f8a4b9bfc1141e67d5af7e864ca6a2f
| 637
|
py
|
Python
|
data/compute_rates.py
|
addschile/qtps
|
3220af82d409526463dc4fe9e4ea869d655c0bd8
|
[
"MIT"
] | null | null | null |
data/compute_rates.py
|
addschile/qtps
|
3220af82d409526463dc4fe9e4ea869d655c0bd8
|
[
"MIT"
] | null | null | null |
data/compute_rates.py
|
addschile/qtps
|
3220af82d409526463dc4fe9e4ea869d655c0bd8
|
[
"MIT"
] | null | null | null |
"""Compute the path partition function ratio Zab per trajectory for a given
observation time, write per-trajectory values plus mean and spread."""
import numpy as np
from sys import argv

# Observation time label: selects the tobs<N>/ data directory.
tobs = int(argv[1])

N_TRAJ = 10  # number of independent trajectories

p0 = np.zeros(N_TRAJ)
p2 = np.zeros(N_TRAJ)
p1 = np.zeros(N_TRAJ)
for i in range(N_TRAJ):
    da = np.loadtxt('tobs%d/reweighted_hist_%d.dat' % (tobs, i))
    # Last three rows hold free energies; exponentiate to get weights.
    # NOTE(review): assumes column 1 is -ln(P) for the three states — verify
    # against the histogram writer.
    p0[i] = np.exp(-da[-2, 1])
    p2[i] = np.exp(-da[-1, 1])
    p1[i] = np.exp(-da[-3, 1])

# Ratio of the transition-path weight to the basin weights.
Zab = p1 / (p0 + p2)

with open('tobs%d/path_partition_function_%d.dat' % (tobs, tobs), 'w') as f:
    for i in range(N_TRAJ):
        f.write('%d %.16f\n' % (i, Zab[i]))
    Zab_avg = Zab.mean()
    # Population standard deviation (ddof=0), matching the original
    # hand-rolled sqrt(sum((x - mean)^2) / N) computation.
    std_err = Zab.std()
    f.write('%.16f %.16f\n' % (Zab_avg, std_err))
| 21.965517
| 65
| 0.596546
|
import numpy as np
from sys import argv
tobs = int(argv[1])
p0 = np.zeros(10)
p2 = np.zeros(10)
p1 = np.zeros(10)
Zab = np.zeros(10)
rate = np.zeros(10)
for i in range(10):
da = np.loadtxt('tobs%d/reweighted_hist_%d.dat'%(tobs,i))
p0[i] = np.exp(-da[-2,1])
p2[i] = np.exp(-da[-1,1])
p1[i] = np.exp(-da[-3,1])
Zab = p1/(p0+p2)
f = open('tobs%d/path_partition_function_%d.dat'%(tobs,tobs),'w')
for i in range(10):
f.write('%d %.16f\n'%(i,Zab[i]))
Zab_avg = np.sum(Zab[:])/10.
for i in range(10):
Zab[i] -= Zab_avg
Zab *= Zab
std_err = np.sqrt(np.sum(Zab[:])/10.)
f.write('%.16f %.16f\n'%(Zab_avg,std_err))
f.close()
| true
| true
|
f70876132e6e96ac713b78db6125d8884610491b
| 1,428
|
py
|
Python
|
pythonforandroid/recipes/shapely/__init__.py
|
surbhicis/python-for-android
|
f8472bd3048b72e06ab5defea2f51ffc5c5e7bed
|
[
"MIT"
] | 4
|
2020-05-19T01:49:51.000Z
|
2021-11-08T09:41:05.000Z
|
pythonforandroid/recipes/shapely/__init__.py
|
basharbme/python-for-android
|
f8472bd3048b72e06ab5defea2f51ffc5c5e7bed
|
[
"MIT"
] | 6
|
2020-01-31T18:04:48.000Z
|
2021-06-05T10:53:55.000Z
|
pythonforandroid/recipes/shapely/__init__.py
|
basharbme/python-for-android
|
f8472bd3048b72e06ab5defea2f51ffc5c5e7bed
|
[
"MIT"
] | 8
|
2017-07-20T05:34:04.000Z
|
2021-08-03T08:21:32.000Z
|
from pythonforandroid.recipe import CythonRecipe
from os.path import join
class ShapelyRecipe(CythonRecipe):
    """python-for-android build recipe for the Shapely geometry library,
    linking it against our cross-compiled libgeos."""

    version = '1.7a1'
    url = 'https://github.com/Toblerity/Shapely/archive/{version}.tar.gz'
    depends = ['setuptools', 'libgeos']

    # Compiles/installs fine under python2, but importing the module on the
    # device fails with `[Errno 2] No such file or directory`.
    conflicts = ['python2']

    call_hostpython_via_targetpython = False

    # setup.patch: skip the libgeos check (it fails when cross-compiling),
    # inject our libgeos build environment (includes, lib paths, ...) and
    # make a failed cythonization abort instead of silently falling back.
    patches = ['setup.patch']

    def get_recipe_env(self, arch=None, with_flags_in_cc=True):
        env = super(ShapelyRecipe, self).get_recipe_env(arch)

        geos_prefix = join(
            self.get_recipe('libgeos', self.ctx).get_build_dir(arch.arch),
            'install_target')

        # Each GEOS_* variable must be a single string; multiple values are
        # passed comma-separated.
        env['GEOS_INCLUDE_DIRS'] = join(geos_prefix, 'include')
        env['GEOS_LIBRARY_DIRS'] = join(geos_prefix, 'lib')
        env['GEOS_LIBRARIES'] = 'geos_c,geos'

        return env


recipe = ShapelyRecipe()
| 35.7
| 77
| 0.686275
|
from pythonforandroid.recipe import CythonRecipe
from os.path import join
class ShapelyRecipe(CythonRecipe):
version = '1.7a1'
url = 'https://github.com/Toblerity/Shapely/archive/{version}.tar.gz'
depends = ['setuptools', 'libgeos']
conflicts = ['python2']
call_hostpython_via_targetpython = False
patches = ['setup.patch']
# Don't Force Cython
def get_recipe_env(self, arch=None, with_flags_in_cc=True):
env = super(ShapelyRecipe, self).get_recipe_env(arch)
libgeos_install = join(self.get_recipe(
'libgeos', self.ctx).get_build_dir(arch.arch), 'install_target')
env['GEOS_INCLUDE_DIRS'] = join(libgeos_install, 'include')
env['GEOS_LIBRARY_DIRS'] = join(libgeos_install, 'lib')
env['GEOS_LIBRARIES'] = 'geos_c,geos'
return env
recipe = ShapelyRecipe()
| true
| true
|
f708761adcd1ea623e38e155f985cfb34bf530d5
| 8,240
|
py
|
Python
|
tools/test.py
|
TillBeemelmanns/OpenPCDet
|
b7553c879d0ba36477931efe07a55adbc39823b9
|
[
"Apache-2.0"
] | null | null | null |
tools/test.py
|
TillBeemelmanns/OpenPCDet
|
b7553c879d0ba36477931efe07a55adbc39823b9
|
[
"Apache-2.0"
] | null | null | null |
tools/test.py
|
TillBeemelmanns/OpenPCDet
|
b7553c879d0ba36477931efe07a55adbc39823b9
|
[
"Apache-2.0"
] | null | null | null |
import os
import torch
from tensorboardX import SummaryWriter
import time
import glob
import re
import datetime
import argparse
from pathlib import Path
import torch.distributed as dist
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from eval_utils import eval_utils
def parse_config():
    """Parse CLI arguments and load the YAML experiment config.

    Returns the parsed ``argparse.Namespace`` and the populated global
    ``cfg`` object, with ``cfg.TAG`` / ``cfg.EXP_GROUP_PATH`` derived from
    the config file path.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')

    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()

    # Load the YAML into the global cfg and record where it came from.
    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])  # remove 'cfgs' and 'xxxx.yaml'

    if args.set_cfgs is not None:
        # Ad-hoc overrides of individual config keys from the command line.
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load the weights named by ``args.ckpt`` onto the GPU and run one
    evaluation epoch, writing results under ``eval_output_dir``."""
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()

    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger,
        dist_test=dist_test,
        result_dir=eval_output_dir,
        save_to_file=args.save_to_file,
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the oldest not-yet-evaluated checkpoint in ``ckpt_dir``.

    Checkpoints are matched by the ``*checkpoint_epoch_<id>.pth`` pattern and
    scanned in modification-time order.  Epochs already listed (as floats,
    one per line) in ``ckpt_record_file`` are skipped, as are optimizer
    snapshots and epochs below ``args.start_epoch``.

    Returns ``(epoch_id_str, ckpt_path)`` or ``(-1, None)`` when nothing
    remains to evaluate.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # Fix: close the record file deterministically (the original leaked the
    # handle via open(...).readlines()).
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        num_list = re.findall('checkpoint_epoch_(.*).pth', cur_ckpt)
        if not num_list:
            continue

        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            # Optimizer-state checkpoints are not evaluable models.
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Watch ``ckpt_dir`` and evaluate every new checkpoint as it appears.

    Evaluated epoch ids are appended to a record file, so restarting the
    watcher does not re-evaluate them.  The loop polls every 30 seconds and
    exits once no new checkpoint has appeared for ``args.max_waiting_mins``
    minutes — but never before at least one checkpoint has been evaluated
    (``first_eval`` guard).  Rank 0 also writes per-epoch scalars to
    TensorBoard.
    """
    # evaluated ckpt record
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass  # touch the file so later reads never fail

    # tensorboard log (only rank 0 writes; tb_log is only used under the
    # same rank-0 condition below)
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True

    while True:
        # check whether there is checkpoint which is not evaluated
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # Give up only after the waiting budget is exhausted AND at
            # least one checkpoint has already been evaluated.
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue

        # Found a fresh checkpoint: reset the waiting budget.
        total_time = 0
        first_eval = False

        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()

        # start evaluation
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )

        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)

        # record this epoch which has been evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: build dataloader/model from config and run evaluation.

    Evaluates either one checkpoint (``--ckpt``) or keeps polling the
    checkpoint directory (``--eval_all``).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
    else:
        # init_dist_pytorch / init_dist_slurm; returns per-GPU batch size
        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True

    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)

    eval_output_dir = output_dir / 'eval'

    if not args.eval_all:
        # derive the epoch id from digits in the checkpoint filename
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'

    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag

    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)

    # log to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)

    if dist_test:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)

    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'

    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )

    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# script entry point
if __name__ == '__main__':
    main()
| 42.916667
| 120
| 0.681917
|
import os
import torch
from tensorboardX import SummaryWriter
import time
import glob
import re
import datetime
import argparse
from pathlib import Path
import torch.distributed as dist
from pcdet.datasets import build_dataloader
from pcdet.models import build_network
from pcdet.utils import common_utils
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from eval_utils import eval_utils
def parse_config():
    """Parse command-line arguments and populate the global config.

    Loads the YAML file given by ``--cfg_file`` into the module-level ``cfg``,
    derives ``cfg.TAG`` (config file stem) and ``cfg.EXP_GROUP_PATH``
    (intermediate directories of the config path), and applies any
    ``--set`` overrides.

    Returns
    -------
    (args, cfg) : parsed argparse.Namespace and the populated config object.
    """
    parser = argparse.ArgumentParser(description='arg parser')
    parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
    parser.add_argument('--batch_size', type=int, default=16, required=False, help='batch size for training')
    parser.add_argument('--epochs', type=int, default=80, required=False, help='Number of epochs to train for')
    parser.add_argument('--workers', type=int, default=4, help='number of workers for dataloader')
    parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
    parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
    parser.add_argument('--mgpus', action='store_true', default=False, help='whether to use multiple gpu')
    parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
    # fixed typo in the user-facing help text ("distrbuted" -> "distributed")
    parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distributed training')
    parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
    parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
                        help='set extra config keys if needed')

    parser.add_argument('--max_waiting_mins', type=int, default=30, help='max waiting minutes')
    parser.add_argument('--start_epoch', type=int, default=0, help='')
    parser.add_argument('--eval_tag', type=str, default='default', help='eval tag for this experiment')
    parser.add_argument('--eval_all', action='store_true', default=False, help='whether to evaluate all checkpoints')
    parser.add_argument('--ckpt_dir', type=str, default=None, help='specify a ckpt directory to be evaluated if needed')
    parser.add_argument('--save_to_file', action='store_true', default=False, help='')
    args = parser.parse_args()

    cfg_from_yaml_file(args.cfg_file, cfg)
    cfg.TAG = Path(args.cfg_file).stem
    # e.g. 'cfgs/kitti_models/x.yaml' -> 'kitti_models' (drop root dir and filename)
    cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1])

    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs, cfg)

    return args, cfg
def eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=False):
    """Load ``args.ckpt`` into ``model`` and evaluate it once on test_loader."""
    # load checkpoint (to CPU first under distributed testing)
    model.load_params_from_file(filename=args.ckpt, logger=logger, to_cpu=dist_test)
    model.cuda()

    # start evaluation
    eval_utils.eval_one_epoch(
        cfg, model, test_loader, epoch_id, logger, dist_test=dist_test,
        result_dir=eval_output_dir, save_to_file=args.save_to_file
    )
def get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args):
    """Return the first checkpoint in ``ckpt_dir`` not yet evaluated.

    Checkpoints matching ``*checkpoint_epoch_*.pth`` are scanned in
    modification-time order. Epochs already listed in ``ckpt_record_file``
    (one float per line) or below ``args.start_epoch`` are skipped.

    Returns
    -------
    (epoch_id, ckpt_path) : (str, str) for the first pending checkpoint,
    or ``(-1, None)`` when nothing is pending.
    """
    ckpt_list = glob.glob(os.path.join(ckpt_dir, '*checkpoint_epoch_*.pth'))
    ckpt_list.sort(key=os.path.getmtime)
    # use a context manager so the record-file handle is closed promptly
    # (the original relied on the garbage collector)
    with open(ckpt_record_file, 'r') as f:
        evaluated_ckpt_list = [float(x.strip()) for x in f.readlines()]

    for cur_ckpt in ckpt_list:
        # raw string and escaped dot so '.pth' is matched literally
        num_list = re.findall(r'checkpoint_epoch_(.*)\.pth', cur_ckpt)
        if len(num_list) == 0:
            continue

        epoch_id = num_list[-1]
        if 'optim' in epoch_id:
            # optimizer-state checkpoints are not evaluable model weights
            continue
        if float(epoch_id) not in evaluated_ckpt_list and int(float(epoch_id)) >= args.start_epoch:
            return epoch_id, cur_ckpt

    return -1, None
def repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=False):
    """Poll ``ckpt_dir`` and evaluate each new checkpoint as it appears.

    Loops until no unevaluated checkpoint shows up for
    ``args.max_waiting_mins`` minutes (after at least one evaluation).
    Evaluated epoch ids are appended to a record file so restarts resume.
    """
    # touch the evaluated-checkpoint record file so later reads never fail
    ckpt_record_file = eval_output_dir / ('eval_list_%s.txt' % cfg.DATA_CONFIG.DATA_SPLIT['test'])
    with open(ckpt_record_file, 'a'):
        pass
    # only rank 0 creates (and below, writes) the tensorboard logger
    if cfg.LOCAL_RANK == 0:
        tb_log = SummaryWriter(log_dir=str(eval_output_dir / ('tensorboard_%s' % cfg.DATA_CONFIG.DATA_SPLIT['test'])))
    total_time = 0
    first_eval = True
    while True:
        # find the next checkpoint that has not been evaluated yet
        cur_epoch_id, cur_ckpt = get_no_evaluated_ckpt(ckpt_dir, ckpt_record_file, args)
        if cur_epoch_id == -1 or int(float(cur_epoch_id)) < args.start_epoch:
            wait_second = 30
            if cfg.LOCAL_RANK == 0:
                print('Wait %s seconds for next check (progress: %.1f / %d minutes): %s \r'
                      % (wait_second, total_time * 1.0 / 60, args.max_waiting_mins, ckpt_dir), end='', flush=True)
            time.sleep(wait_second)
            total_time += 30
            # give up only after the waiting budget is exhausted AND we
            # already evaluated at least one checkpoint
            if total_time > args.max_waiting_mins * 60 and (first_eval is False):
                break
            continue
        total_time = 0
        first_eval = False
        model.load_params_from_file(filename=cur_ckpt, logger=logger, to_cpu=dist_test)
        model.cuda()
        # evaluate this checkpoint into its own epoch directory
        cur_result_dir = eval_output_dir / ('epoch_%s' % cur_epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
        tb_dict = eval_utils.eval_one_epoch(
            cfg, model, test_loader, cur_epoch_id, logger, dist_test=dist_test,
            result_dir=cur_result_dir, save_to_file=args.save_to_file
        )
        if cfg.LOCAL_RANK == 0:
            for key, val in tb_dict.items():
                tb_log.add_scalar(key, val, cur_epoch_id)
        # mark this epoch as evaluated
        with open(ckpt_record_file, 'a') as f:
            print('%s' % cur_epoch_id, file=f)
        logger.info('Epoch %s has been evaluated' % cur_epoch_id)
def main():
    """Entry point: build dataloader/model from config and run evaluation.

    Evaluates either a single checkpoint (``--ckpt``) or keeps polling the
    checkpoint directory for new ones (``--eval_all``).
    """
    args, cfg = parse_config()
    if args.launcher == 'none':
        dist_test = False
    else:
        # dispatch to init_dist_pytorch / init_dist_slurm
        args.batch_size, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
            args.batch_size, args.tcp_port, args.local_rank, backend='nccl'
        )
        dist_test = True
    output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
    output_dir.mkdir(parents=True, exist_ok=True)
    eval_output_dir = output_dir / 'eval'
    if not args.eval_all:
        # take the last run of digits in the checkpoint name as the epoch id
        num_list = re.findall(r'\d+', args.ckpt) if args.ckpt is not None else []
        epoch_id = num_list[-1] if num_list.__len__() > 0 else 'no_number'
        eval_output_dir = eval_output_dir / ('epoch_%s' % epoch_id) / cfg.DATA_CONFIG.DATA_SPLIT['test']
    else:
        eval_output_dir = eval_output_dir / 'eval_all_default'
    if args.eval_tag is not None:
        eval_output_dir = eval_output_dir / args.eval_tag
    eval_output_dir.mkdir(parents=True, exist_ok=True)
    log_file = eval_output_dir / ('log_eval_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
    logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
    # log environment and arguments to file
    logger.info('**********************Start logging**********************')
    gpu_list = os.environ['CUDA_VISIBLE_DEVICES'] if 'CUDA_VISIBLE_DEVICES' in os.environ.keys() else 'ALL'
    logger.info('CUDA_VISIBLE_DEVICES=%s' % gpu_list)
    if dist_test:
        total_gpus = dist.get_world_size()
        logger.info('total_batch_size: %d' % (total_gpus * args.batch_size))
    for key, val in vars(args).items():
        logger.info('{:16} {}'.format(key, val))
    log_config_to_file(cfg, logger=logger)
    ckpt_dir = args.ckpt_dir if args.ckpt_dir is not None else output_dir / 'ckpt'
    test_set, test_loader, sampler = build_dataloader(
        dataset_cfg=cfg.DATA_CONFIG,
        class_names=cfg.CLASS_NAMES,
        batch_size=args.batch_size,
        dist=dist_test, workers=args.workers, logger=logger, training=False
    )
    model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=test_set)
    with torch.no_grad():
        if args.eval_all:
            repeat_eval_ckpt(model, test_loader, args, eval_output_dir, logger, ckpt_dir, dist_test=dist_test)
        else:
            eval_single_ckpt(model, test_loader, args, eval_output_dir, logger, epoch_id, dist_test=dist_test)
# script entry point
if __name__ == '__main__':
    main()
| true
| true
|
f708766a0136ddf59a2750659fdfb2ffb6f801b9
| 100
|
py
|
Python
|
scripts/factory_methods.py
|
dkorenci/ner_cro
|
86b8040e1f5e92ff89f53f6ca5825b944afa210b
|
[
"Apache-2.0"
] | null | null | null |
scripts/factory_methods.py
|
dkorenci/ner_cro
|
86b8040e1f5e92ff89f53f6ca5825b944afa210b
|
[
"Apache-2.0"
] | null | null | null |
scripts/factory_methods.py
|
dkorenci/ner_cro
|
86b8040e1f5e92ff89f53f6ca5825b944afa210b
|
[
"Apache-2.0"
] | null | null | null |
from scripts.bilstm_tagger import bilstm_tagger
from scripts.bilstm_tagger_model import build_model
| 33.333333
| 51
| 0.9
|
from scripts.bilstm_tagger import bilstm_tagger
from scripts.bilstm_tagger_model import build_model
| true
| true
|
f708774dd84ff1e94a2e5b0a67504f9c44ba42f3
| 55,175
|
py
|
Python
|
env/lib/python3.6/site-packages/pandas/core/panel.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 4
|
2018-11-27T01:35:30.000Z
|
2022-01-27T01:17:11.000Z
|
env/lib/python3.6/site-packages/pandas/core/panel.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 3
|
2020-03-24T15:38:23.000Z
|
2021-02-02T21:44:18.000Z
|
env/lib/python3.6/site-packages/pandas/core/panel.py
|
anthowen/duplify
|
846d01c1b21230937fdf0281b0cf8c0b08a8c24e
|
[
"MIT"
] | 3
|
2019-12-24T18:46:58.000Z
|
2021-09-04T11:57:13.000Z
|
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import warnings
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
_possibly_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.types.missing import notnull
import pandas.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import PandasError, _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.tools.util import cartesian_product
from pandas.util.decorators import (deprecate, Appender)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
"of\n%s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
    """
    Return a MultiIndex suitable for a panel-like DataFrame.

    Parameters
    ----------
    time : array-like
        Time labels; do not have to repeat.
    panels : array-like
        Panel labels; do not have to repeat.
    names : list, optional
        Names for the two index levels; defaults to ``['time', 'panel']``.

    Returns
    -------
    multi_index : MultiIndex
        Time labels form the first level, panel labels the second.

    Examples
    --------
    >>> years = range(1960, 1963)
    >>> panels = ['A', 'B', 'C']
    >>> panel_idx = panel_index(years, panels)
    >>> panel_idx
    MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
                (1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
                (1962, 'C')], dtype=object)
    """
    level_names = ['time', 'panel'] if names is None else names
    time, panels = _ensure_like_indices(time, panels)
    return MultiIndex.from_arrays([time, panels], sortorder=None,
                                  names=level_names)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
    @property
    def _constructor(self):
        """Constructor used to build results of the same (sub)class."""
        return type(self)
_constructor_sliced = DataFrame
    def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
                 copy=False, dtype=None):
        # Delegate to the generic N-dim initializer; axes are passed by
        # keyword so _init_data can pop them via self._AXIS_ORDERS.
        self._init_data(data=data, items=items, major_axis=major_axis,
                        minor_axis=minor_axis, copy=copy, dtype=dtype)
    def _init_data(self, data, copy, dtype, **kwargs):
        """
        Generate ND initialization; axes are passed
        as required objects to __init__.

        Dispatches on the type of ``data`` (BlockManager, dict,
        ndarray/list, or scalar) and builds the internal BlockManager.
        """
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)

        # pop one axis per axis name; anything left in kwargs is an error
        passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
        if kwargs:
            raise TypeError('_init_data() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))

        axes = None
        if isinstance(data, BlockManager):
            # already-managed data: only override axes that were passed
            if any(x is not None for x in passed_axes):
                axes = [x if x is not None else y
                        for x, y in zip(passed_axes, data.axes)]
            mgr = data
        elif isinstance(data, dict):
            mgr = self._init_dict(data, passed_axes, dtype=dtype)
            copy = False
            dtype = None
        elif isinstance(data, (np.ndarray, list)):
            mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
            copy = False
            dtype = None
        elif is_scalar(data) and all(x is not None for x in passed_axes):
            # scalar broadcast: fill an array shaped by the passed axes
            if dtype is None:
                dtype, data = _infer_dtype_from_scalar(data)
            values = np.empty([len(x) for x in passed_axes], dtype=dtype)
            values.fill(data)
            mgr = self._init_matrix(values, passed_axes, dtype=dtype,
                                    copy=False)
            copy = False
        else:  # pragma: no cover
            raise PandasError('Panel constructor not properly called!')

        NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
    def _init_dict(self, data, axes, dtype=None):
        """Build a BlockManager from a dict of DataFrame-convertible values."""
        haxis = axes.pop(self._info_axis_number)

        # prefilter if haxis passed
        if haxis is not None:
            haxis = _ensure_index(haxis)
            data = OrderedDict((k, v)
                               for k, v in compat.iteritems(data)
                               if k in haxis)
        else:
            ks = list(data.keys())
            if not isinstance(data, OrderedDict):
                ks = _try_sort(ks)
            haxis = Index(ks)

        # promote nested dicts to the sliced type (DataFrame)
        for k, v in compat.iteritems(data):
            if isinstance(v, dict):
                data[k] = self._constructor_sliced(v)

        # extract axis for remaining axes & create the slicemap
        raxes = [self._extract_axis(self, data, axis=i) if a is None else a
                 for i, a in enumerate(axes)]
        raxes_sm = self._extract_axes_for_slice(self, raxes)

        # shallow copy
        arrays = []
        haxis_shape = [len(a) for a in raxes]
        for h in haxis:
            v = values = data.get(h)
            if v is None:
                # missing item: fill a NaN frame of the right shape
                values = np.empty(haxis_shape, dtype=dtype)
                values.fill(np.nan)
            elif isinstance(v, self._constructor_sliced):
                d = raxes_sm.copy()
                d['copy'] = False
                v = v.reindex(**d)
                if dtype is not None:
                    v = v.astype(dtype)
                values = v.values
            arrays.append(values)

        return self._init_arrays(arrays, haxis, [haxis] + raxes)
    def _init_arrays(self, arrays, arr_names, axes):
        # thin wrapper over the internals helper; one array per item
        return create_block_manager_from_arrays(arrays, arr_names, axes)
    @classmethod
    def from_dict(cls, data, intersect=False, orient='items', dtype=None):
        """
        Construct Panel from dict of DataFrame objects

        Parameters
        ----------
        data : dict
            {field : DataFrame}
        intersect : boolean
            Intersect indexes of input DataFrames
        orient : {'items', 'minor'}, default 'items'
            The "orientation" of the data. If the keys of the passed dict
            should be the items of the result panel, pass 'items'
            (default). Otherwise if the columns of the values of the passed
            DataFrame objects should be the items (which in the case of
            mixed-dtype data you should do), instead pass 'minor'
        dtype : dtype, default None
            Data type to force, otherwise infer

        Returns
        -------
        Panel
        """
        orient = orient.lower()
        if orient == 'minor':
            # transpose the dict-of-dicts: columns become the outer keys
            new_data = OrderedDefaultdict(dict)
            for col, df in compat.iteritems(data):
                for item, s in compat.iteritems(df):
                    new_data[item][col] = s
            data = new_data
        elif orient != 'items':  # pragma: no cover
            raise ValueError('Orientation must be one of {items, minor}.')

        d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
        ks = list(d['data'].keys())
        if not isinstance(d['data'], OrderedDict):
            ks = list(sorted(ks))
        d[cls._info_axis_name] = Index(ks)
        return cls(**d)
    def __getitem__(self, key):
        # allow callables as keys (evaluated against self)
        key = com._apply_if_callable(key, self)

        if isinstance(self._info_axis, MultiIndex):
            return self._getitem_multilevel(key)
        if not (is_list_like(key) or isinstance(key, slice)):
            # scalar label: defer to NDFrame item lookup
            return super(Panel, self).__getitem__(key)
        return self.ix[key]
    def _getitem_multilevel(self, key):
        """Select from a MultiIndex info axis, dropping matched levels."""
        info = self._info_axis
        loc = info.get_loc(key)
        if isinstance(loc, (slice, np.ndarray)):
            # partial match: key selected several items -> still a Panel
            new_index = info[loc]
            result_index = maybe_droplevels(new_index, key)
            slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
            new_values = self.values[slices]

            d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
            d[self._info_axis_name] = result_index
            result = self._constructor(new_values, **d)
            return result
        else:
            # exact single-item match
            return self._get_item_cache(key)
    def _init_matrix(self, data, axes, dtype=None, copy=False):
        """Build a BlockManager from ndarray/list data, defaulting axes."""
        values = self._prep_ndarray(self, data, copy=copy)

        if dtype is not None:
            try:
                values = values.astype(dtype)
            except Exception:
                raise ValueError('failed to cast to %s' % dtype)

        shape = values.shape
        fixed_axes = []
        for i, ax in enumerate(axes):
            if ax is None:
                # no axis passed: use a default RangeIndex-like axis
                ax = _default_index(shape[i])
            else:
                ax = _ensure_index(ax)
            fixed_axes.append(ax)

        return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
    def _compare_constructor(self, other, func):
        """Apply a comparison func item-by-item against an aligned Panel."""
        if not self._indexed_same(other):
            raise Exception('Can only compare identically-labeled '
                            'same type objects')

        new_data = {}
        for col in self._info_axis:
            new_data[col] = func(self[col], other[col])

        d = self._construct_axes_dict(copy=False)
        return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
    def __unicode__(self):
        """
        Return a string representation for a particular Panel

        Invoked by unicode(df) in py2 only.
        Yields a Unicode String in both py2/py3.
        """
        class_name = str(self.__class__)

        shape = self.shape
        dims = u('Dimensions: %s') % ' x '.join(
            ["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])

        def axis_pretty(a):
            # one line per axis: "<Axis> axis: <first> to <last>" or None
            v = getattr(self, a)
            if len(v) > 0:
                return u('%s axis: %s to %s') % (a.capitalize(),
                                                 pprint_thing(v[0]),
                                                 pprint_thing(v[-1]))
            else:
                return u('%s axis: None') % a.capitalize()

        output = '\n'.join(
            [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
        return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes indexes
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
return [self._get_axis(axi)
for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
    def to_sparse(self, *args, **kwargs):
        """
        NOT IMPLEMENTED: do not call this method, as sparsifying is not
        supported for Panel objects and will raise an error.

        Convert to SparsePanel
        """
        raise NotImplementedError("sparsifying is not supported "
                                  "for Panel objects")
    def to_excel(self, path, na_rep='', engine=None, **kwargs):
        """
        Write each DataFrame in Panel to a separate excel sheet

        Parameters
        ----------
        path : string or ExcelWriter object
            File path or existing ExcelWriter
        na_rep : string, default ''
            Missing data representation
        engine : string, default None
            write engine to use - you can also set this via the options
            ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
            ``io.excel.xlsm.writer``.

        Other Parameters
        ----------------
        float_format : string, default None
            Format string for floating point numbers
        cols : sequence, optional
            Columns to write
        header : boolean or list of string, default True
            Write out column names. If a list of string is given it is
            assumed to be aliases for the column names
        index : boolean, default True
            Write row names (index)
        index_label : string or sequence, default None
            Column label for index column(s) if desired. If None is given, and
            `header` and `index` are True, then the index names are used. A
            sequence should be given if the DataFrame uses MultiIndex.
        startrow : upper left cell row to dump data frame
        startcol : upper left cell column to dump data frame

        Notes
        -----
        Keyword arguments (and na_rep) are passed to the ``to_excel`` method
        for each DataFrame written.
        """
        from pandas.io.excel import ExcelWriter

        if isinstance(path, compat.string_types):
            writer = ExcelWriter(path, engine=engine)
        else:
            # caller supplied an open ExcelWriter; we still call save() below
            writer = path

        kwargs['na_rep'] = na_rep

        # one sheet per item, named after the item label
        for item, df in self.iteritems():
            name = str(item)
            df.to_excel(writer, name, **kwargs)
        writer.save()
    def as_matrix(self):
        """Return the underlying data as a 3-D ndarray."""
        self._consolidate_inplace()
        return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
    def get_value(self, *args, **kwargs):
        """
        Quickly retrieve single value at (item, major, minor) location

        Parameters
        ----------
        item : item label (panel item)
        major : major axis label (panel item row)
        minor : minor axis label (panel item column)
        takeable : interpret the passed labels as indexers, default False

        Returns
        -------
        value : scalar value
        """
        nargs = len(args)
        nreq = self._AXIS_LEN

        # require an arg for each axis
        if nargs != nreq:
            raise TypeError('There must be an argument for each axis, you gave'
                            ' {0} args, but {1} are required'.format(nargs,
                                                                     nreq))
        takeable = kwargs.pop('takeable', None)

        if kwargs:
            raise TypeError('get_value() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))

        # resolve the item (first axis) and delegate the rest to the slice
        if takeable is True:
            lower = self._iget_item_cache(args[0])
        else:
            lower = self._get_item_cache(args[0])

        return lower.get_value(*args[1:], takeable=takeable)
    def set_value(self, *args, **kwargs):
        """
        Quickly set single value at (item, major, minor) location

        Parameters
        ----------
        item : item label (panel item)
        major : major axis label (panel item row)
        minor : minor axis label (panel item column)
        value : scalar
        takeable : interpret the passed labels as indexers, default False

        Returns
        -------
        panel : Panel
            If label combo is contained, will be reference to calling Panel,
            otherwise a new object
        """
        # require an arg for each axis and the value
        nargs = len(args)
        nreq = self._AXIS_LEN + 1

        if nargs != nreq:
            raise TypeError('There must be an argument for each axis plus the '
                            'value provided, you gave {0} args, but {1} are '
                            'required'.format(nargs, nreq))
        takeable = kwargs.pop('takeable', None)

        if kwargs:
            raise TypeError('set_value() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))

        try:
            if takeable is True:
                lower = self._iget_item_cache(args[0])
            else:
                lower = self._get_item_cache(args[0])

            lower.set_value(*args[1:], takeable=takeable)
            return self
        except KeyError:
            # label combo not present: grow the panel to include it
            axes = self._expand_axes(args)
            d = self._construct_axes_dict_from(self, axes, copy=False)
            result = self.reindex(**d)
            args = list(args)
            likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
            made_bigger = not np.array_equal(axes[0], self._info_axis)

            # how to make this logic simpler?
            if made_bigger:
                _possibly_cast_item(result, args[0], likely_dtype)

            return result.set_value(*args)
    def _box_item_values(self, key, values):
        """Wrap raw item values in the appropriate pandas container."""
        if self.ndim == values.ndim:
            result = self._constructor(values)

            # a dup selection will yield a full ndim
            if result._get_axis(0).is_unique:
                result = result[key]

            return result

        # one dimension lower -> sliced type (DataFrame)
        d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
        return self._constructor_sliced(values, **d)
    def __setitem__(self, key, value):
        key = com._apply_if_callable(key, self)
        shape = tuple(self.shape)

        if isinstance(value, self._constructor_sliced):
            # align an incoming DataFrame to our non-item axes
            value = value.reindex(
                **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
            mat = value.values
        elif isinstance(value, np.ndarray):
            if value.shape != shape[1:]:
                raise ValueError('shape of value must be {0}, shape of given '
                                 'object was {1}'.format(
                                     shape[1:], tuple(map(int, value.shape))))
            mat = np.asarray(value)
        elif is_scalar(value):
            # broadcast the scalar across one item slice
            dtype, value = _infer_dtype_from_scalar(value)
            mat = np.empty(shape[1:], dtype=dtype)
            mat.fill(value)
        else:
            raise TypeError('Cannot set item of type: %s' % str(type(value)))

        # add a leading length-1 item axis before storing
        mat = mat.reshape(tuple([1]) + shape[1:])
        NDFrame._set_item(self, key, mat)
    def _unpickle_panel_compat(self, state):  # pragma: no cover
        "Unpickle the panel (legacy pickle format compatibility)."
        _unpickle = com._unpickle_array
        vals, items, major, minor = state

        items = _unpickle(items)
        major = _unpickle(major)
        minor = _unpickle(minor)
        values = _unpickle(vals)
        # rebuild through the normal constructor and steal its BlockManager
        wp = Panel(values, items, major, minor)
        self._data = wp._data
    def conform(self, frame, axis='items'):
        """
        Conform input DataFrame to align with chosen axis pair.

        Parameters
        ----------
        frame : DataFrame
        axis : {'items', 'major', 'minor'}

            Axis the input corresponds to. E.g., if axis='major', then
            the frame's columns would be items, and the index would be
            values of the minor axis

        Returns
        -------
        DataFrame
        """
        axes = self._get_plane_axes(axis)
        return frame.reindex(**self._extract_axes_for_slice(self, axes))
    def head(self, n=5):
        # not meaningful for a 3-D structure
        raise NotImplementedError
    def tail(self, n=5):
        # not meaningful for a 3-D structure
        raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
"""
Round each value in Panel to a specified number of decimal places.
.. versionadded:: 0.18.0
Parameters
----------
decimals : int
Number of decimal places to round to (default: 0).
If decimals is negative, it specifies the number of
positions to the left of the decimal point.
Returns
-------
Panel object
See Also
--------
numpy.around
"""
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
    def _needs_reindex_multi(self, axes, method, level):
        """ don't allow a multi reindex on Panel or above ndim """
        # always single-axis reindexing for >= 3 dimensions
        return False
    def align(self, other, **kwargs):
        # alignment is not supported for 3-D structures
        raise NotImplementedError
    def dropna(self, axis=0, how='any', inplace=False):
        """
        Drop 2D from panel, holding passed axis constant

        Parameters
        ----------
        axis : int, default 0
            Axis to hold constant. E.g. axis=1 will drop major_axis entries
            having a certain amount of NA data
        how : {'all', 'any'}, default 'any'
            'any': one or more values are NA in the DataFrame along the
            axis. For 'all' they all must be.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        dropped : Panel
        """
        axis = self._get_axis_number(axis)

        values = self.values
        mask = notnull(values)

        # count non-NA cells per entry of `axis` by summing over the others
        for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
            mask = mask.sum(ax)

        per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])

        if how == 'all':
            # keep slices with at least one non-NA value
            cond = mask > 0
        else:
            # keep only fully populated slices
            cond = mask == per_slice

        new_ax = self._get_axis(axis)[cond]
        result = self.reindex_axis(new_ax, axis=axis)
        if inplace:
            self._update_inplace(result)
        else:
            return result
    def _combine(self, other, func, axis=0):
        """Dispatch a binary op to the panel/frame/scalar implementation."""
        if isinstance(other, Panel):
            return self._combine_panel(other, func)
        elif isinstance(other, DataFrame):
            return self._combine_frame(other, func, axis=axis)
        elif is_scalar(other):
            return self._combine_const(other, func)
        else:
            raise NotImplementedError("%s is not supported in combine "
                                      "operation with %s" %
                                      (str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
    def _combine_frame(self, other, func, axis=0):
        """Broadcast a DataFrame against the panel along ``axis``."""
        index, columns = self._get_plane_axes(axis)
        axis = self._get_axis_number(axis)

        other = other.reindex(index=index, columns=columns)

        with np.errstate(all='ignore'):
            if axis == 0:
                new_values = func(self.values, other.values)
            elif axis == 1:
                # move the broadcast axis to the front, apply, then restore
                new_values = func(self.values.swapaxes(0, 1), other.values.T)
                new_values = new_values.swapaxes(0, 1)
            elif axis == 2:
                new_values = func(self.values.swapaxes(0, 2), other.values)
                new_values = new_values.swapaxes(0, 2)

        return self._constructor(new_values, self.items, self.major_axis,
                                 self.minor_axis)
    def _combine_panel(self, other, func):
        """Apply ``func`` element-wise over the union-aligned panels."""
        items = self.items.union(other.items)
        major = self.major_axis.union(other.major_axis)
        minor = self.minor_axis.union(other.minor_axis)

        # could check that everything's the same size, but forget it
        this = self.reindex(items=items, major=major, minor=minor)
        other = other.reindex(items=items, major=major, minor=minor)

        with np.errstate(all='ignore'):
            result_values = func(this.values, other.values)

        return self._constructor(result_values, items, major, minor)
    def major_xs(self, key):
        """
        Return slice of panel along major axis

        Parameters
        ----------
        key : object
            Major axis label

        Returns
        -------
        y : DataFrame
            index -> minor axis, columns -> items

        Notes
        -----
        major_xs is only for getting, not setting values.

        MultiIndex Slicers is a generic way to get/set values on any level or
        levels and  is a superset of major_xs functionality, see
        :ref:`MultiIndex Slicers <advanced.mi_slicers>`
        """
        # major axis is the second-to-last axis
        return self.xs(key, axis=self._AXIS_LEN - 2)
    def minor_xs(self, key):
        """
        Return slice of panel along minor axis

        Parameters
        ----------
        key : object
            Minor axis label

        Returns
        -------
        y : DataFrame
            index -> major axis, columns -> items

        Notes
        -----
        minor_xs is only for getting, not setting values.

        MultiIndex Slicers is a generic way to get/set values on any level or
        levels and  is a superset of minor_xs functionality, see
        :ref:`MultiIndex Slicers <advanced.mi_slicers>`
        """
        # minor axis is the last axis
        return self.xs(key, axis=self._AXIS_LEN - 1)
    def xs(self, key, axis=1):
        """
        Return slice of panel along selected axis

        Parameters
        ----------
        key : object
            Label
        axis : {'items', 'major', 'minor}, default 1/'major'

        Returns
        -------
        y : ndim(self)-1

        Notes
        -----
        xs is only for getting, not setting values.

        MultiIndex Slicers is a generic way to get/set values on any level or
        levels and  is a superset of xs functionality, see
        :ref:`MultiIndex Slicers <advanced.mi_slicers>`
        """
        axis = self._get_axis_number(axis)
        if axis == 0:
            # item axis: plain __getitem__ handles it
            return self[key]

        self._consolidate_inplace()
        axis_number = self._get_axis_number(axis)
        new_data = self._data.xs(key, axis=axis_number, copy=False)
        result = self._construct_return_type(new_data)
        # mixed-dtype cross-sections are copies; propagate copy status
        copy = new_data.is_mixed_type
        result._set_is_copy(self, copy=copy)
        return result
_xs = xs
    def _ixs(self, i, axis=0):
        """
        Positional indexing along one axis (backs ``.iloc``).

        Parameters
        ----------
        i : int, slice, or sequence of integers
        axis : int

        Returns
        -------
        The selected item DataFrame, a reindexed Panel (list-like ``i``),
        or a lower-dimensional cross-section.
        """
        ax = self._get_axis(axis)
        key = ax[i]
        # xs cannot handle a non-scalar key, so just reindex here
        # if we have a multi-index and a single tuple, then its a reduction
        # (GH 7516)
        if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
            if is_list_like(key):
                indexer = {self._get_axis_name(axis): key}
                return self.reindex(**indexer)
        # a reduction
        if axis == 0:
            values = self._data.iget(i)
            return self._box_item_values(key, values)
        # xs by position
        self._consolidate_inplace()
        new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
        return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen access
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
    def to_frame(self, filter_observations=True):
        """
        Transform wide format into long (stacked) format as DataFrame whose
        columns are the Panel's items and whose index is a MultiIndex formed
        of the Panel's major and minor axes.

        Parameters
        ----------
        filter_observations : boolean, default True
            Drop (major, minor) pairs without a complete set of observations
            across all the items

        Returns
        -------
        y : DataFrame
        """
        # N = len(major_axis), K = len(minor_axis); each item flattens to N*K
        _, N, K = self.shape
        if filter_observations:
            # shaped like the return DataFrame
            mask = notnull(self.values).all(axis=0)
            # size = mask.sum()
            selector = mask.ravel()
        else:
            # size = N * K
            selector = slice(None, None)
        data = {}
        for item in self.items:
            data[item] = self[item].values.ravel()[selector]
        # helper: expand an already-hierarchical axis into stacked labels
        def construct_multi_parts(idx, n_repeat, n_shuffle=1):
            axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
            labels = [x[selector] for x in axis_idx.labels]
            levels = axis_idx.levels
            names = axis_idx.names
            return labels, levels, names
        # helper: expand a flat axis; major labels repeat, minor labels tile
        def construct_index_parts(idx, major=True):
            levels = [idx]
            if major:
                labels = [np.arange(N).repeat(K)[selector]]
                names = idx.name or 'major'
            else:
                labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
                labels = [labels.ravel()[selector]]
                names = idx.name or 'minor'
            names = [names]
            return labels, levels, names
        if isinstance(self.major_axis, MultiIndex):
            major_labels, major_levels, major_names = construct_multi_parts(
                self.major_axis, n_repeat=K)
        else:
            major_labels, major_levels, major_names = construct_index_parts(
                self.major_axis)
        if isinstance(self.minor_axis, MultiIndex):
            minor_labels, minor_levels, minor_names = construct_multi_parts(
                self.minor_axis, n_repeat=N, n_shuffle=K)
        else:
            minor_labels, minor_levels, minor_names = construct_index_parts(
                self.minor_axis, major=False)
        levels = major_levels + minor_levels
        labels = major_labels + minor_labels
        names = major_names + minor_names
        index = MultiIndex(levels=levels, labels=labels, names=names,
                           verify_integrity=False)
        return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
    def apply(self, func, axis='major', **kwargs):
        """
        Applies function along axis (or axes) of the Panel

        Parameters
        ----------
        func : function
            Function to apply to each combination of 'other' axes
            e.g. if axis = 'items', the combination of major_axis/minor_axis
            will each be passed as a Series; if axis = ('items', 'major'),
            DataFrames of items & major axis will be passed
        axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two
            axes
        Additional keyword arguments will be passed as keywords to the function

        Examples
        --------
        Returns a Panel with the square root of each element

        >>> p = pd.Panel(np.random.rand(4,3,2))
        >>> p.apply(np.sqrt)

        Equivalent to p.sum(1), returning a DataFrame

        >>> p.apply(lambda x: x.sum(), axis=1)

        Equivalent to previous:

        >>> p.apply(lambda x: x.sum(), axis='minor')

        Return the shapes of each DataFrame over axis 2 (i.e the shapes of
        items x major), as a Series

        >>> p.apply(lambda x: x.shape, axis=(0,1))

        Returns
        -------
        result : Panel, DataFrame, or Series
        """
        # NOTE(review): for a ufunc, kwargs are silently dropped here —
        # ``f`` stays bound to the raw ``func``
        if kwargs and not isinstance(func, np.ufunc):
            f = lambda x: func(x, **kwargs)
        else:
            f = func
        # 2d-slabs
        if isinstance(axis, (tuple, list)) and len(axis) == 2:
            return self._apply_2d(f, axis=axis)
        axis = self._get_axis_number(axis)
        # try ufunc like
        if isinstance(f, np.ufunc):
            try:
                with np.errstate(all='ignore'):
                    result = np.apply_along_axis(func, axis, self.values)
                return self._wrap_result(result, axis=axis)
            except (AttributeError):
                pass
        # 1d
        return self._apply_1d(f, axis=axis)
    def _apply_1d(self, func, axis):
        """
        Apply ``func`` to every 1-d slice taken along ``axis``, then
        reassemble the results into an object of the appropriate dimension.
        """
        axis_name = self._get_axis_name(axis)
        ndim = self.ndim
        values = self.values
        # iter thru the axes
        slice_axis = self._get_axis(axis)
        slice_indexer = [0] * (ndim - 1)
        indexer = np.zeros(ndim, 'O')
        indlist = list(range(ndim))
        indlist.remove(axis)
        indexer[axis] = slice(None, None)
        indexer.put(indlist, slice_indexer)
        planes = [self._get_axis(axi) for axi in indlist]
        shape = np.array(self.shape).take(indlist)
        # all the iteration points
        points = cartesian_product(planes)
        results = []
        for i in range(np.prod(shape)):
            # construct the object
            pts = tuple([p[i] for p in points])
            indexer.put(indlist, slice_indexer)
            obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
            result = func(obj)
            results.append(result)
            # increment the indexer (odometer-style carry over the
            # remaining axes)
            slice_indexer[-1] += 1
            n = -1
            while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
                slice_indexer[n - 1] += 1
                slice_indexer[n] = 0
                n -= 1
        # empty object
        if not len(results):
            return self._constructor(**self._construct_axes_dict())
        # same ndim as current
        if isinstance(results[0], Series):
            arr = np.vstack([r.values for r in results])
            arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
            tranp = np.array([axis] + indlist).argsort()
            arr = arr.transpose(tuple(list(tranp)))
            return self._constructor(arr, **self._construct_axes_dict())
        # ndim-1 shape
        results = np.array(results).reshape(shape)
        if results.ndim == 2 and axis_name != self._info_axis_name:
            results = results.T
            planes = planes[::-1]
        return self._construct_return_type(results, planes)
    def _apply_2d(self, func, axis):
        """ handle 2-d slices, equiv to iterating over the other axis """
        ndim = self.ndim
        axis = [self._get_axis_number(a) for a in axis]
        # construct slabs, in 2-d this is a DataFrame result
        indexer_axis = list(range(ndim))
        for a in axis:
            indexer_axis.remove(a)
        # exactly one axis remains after removing the two slab axes
        indexer_axis = indexer_axis[0]
        slicer = [slice(None, None)] * ndim
        ax = self._get_axis(indexer_axis)
        results = []
        for i, e in enumerate(ax):
            slicer[indexer_axis] = i
            sliced = self.iloc[tuple(slicer)]
            obj = func(sliced)
            results.append((e, obj))
        return self._construct_return_type(dict(results))
    def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
                filter_type=None, **kwds):
        """
        Apply the reduction ``op`` over ``axis`` and wrap the result as an
        object of one lower dimension.  ``numeric_only`` is not supported
        for Panels and raises NotImplementedError.
        """
        if numeric_only:
            raise NotImplementedError('Panel.{0} does not implement '
                                      'numeric_only.'.format(name))
        axis_name = self._get_axis_name(axis)
        axis_number = self._get_axis_number(axis_name)
        f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
        with np.errstate(all='ignore'):
            result = f(self.values)
        axes = self._get_plane_axes(axis_name)
        # 2-d results come back items-major; transpose unless we reduced
        # over the info axis
        if result.ndim == 2 and axis_name != self._info_axis_name:
            result = result.T
        return self._construct_return_type(result, axes)
    def _construct_return_type(self, result, axes=None):
        """ return the type for the ndim of the result """
        ndim = getattr(result, 'ndim', None)
        # need to assume they are the same
        if ndim is None:
            if isinstance(result, dict):
                ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
                # have a dict, so top-level is +1 dim
                if ndim != 0:
                    ndim += 1
        # scalar
        if ndim == 0:
            return Series(result)
        # same as self
        elif self.ndim == ndim:
            # return the construction dictionary for these axes
            if axes is None:
                return self._constructor(result)
            return self._constructor(result, **self._construct_axes_dict())
        # sliced
        elif self.ndim == ndim + 1:
            if axes is None:
                return self._constructor_sliced(result)
            return self._constructor_sliced(
                result, **self._extract_axes_for_slice(self, axes))
        raise PandasError('invalid _construct_return_type [self->%s] '
                          '[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
    @Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
    def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
        # accept the short 'major'/'minor' aliases for the axis keywords
        major_axis = (major_axis if major_axis is not None else
                      kwargs.pop('major', None))
        minor_axis = (minor_axis if minor_axis is not None else
                      kwargs.pop('minor', None))
        return super(Panel, self).reindex(items=items, major_axis=major_axis,
                                          minor_axis=minor_axis, **kwargs)
    @Appender(_shared_docs['rename'] % _shared_doc_kwargs)
    def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
        # accept the short 'major'/'minor' aliases for the axis keywords
        major_axis = (major_axis if major_axis is not None else
                      kwargs.pop('major', None))
        minor_axis = (minor_axis if minor_axis is not None else
                      kwargs.pop('minor', None))
        return super(Panel, self).rename(items=items, major_axis=major_axis,
                                         minor_axis=minor_axis, **kwargs)
    @Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
    def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
                     limit=None, fill_value=np.nan):
        # thin passthrough to the NDFrame implementation; the shared
        # docstring is appended by the decorator
        return super(Panel, self).reindex_axis(labels=labels, axis=axis,
                                               method=method, level=level,
                                               copy=copy, limit=limit,
                                               fill_value=fill_value)
    @Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
    def transpose(self, *args, **kwargs):
        # check if a list of axes was passed in instead as a
        # single *args element
        if (len(args) == 1 and hasattr(args[0], '__iter__') and
                not is_string_like(args[0])):
            axes = args[0]
        else:
            axes = args
        # giving axes both positionally and by keyword is ambiguous
        if 'axes' in kwargs and axes:
            raise TypeError("transpose() got multiple values for "
                            "keyword argument 'axes'")
        elif not axes:
            axes = kwargs.pop('axes', ())
        return super(Panel, self).transpose(*axes, **kwargs)
    @Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
    def fillna(self, value=None, method=None, axis=None, inplace=False,
               limit=None, downcast=None, **kwargs):
        # thin passthrough; the shared docstring is appended by the decorator
        return super(Panel, self).fillna(value=value, method=method, axis=axis,
                                         inplace=inplace, limit=limit,
                                         downcast=downcast, **kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
    def shift(self, periods=1, freq=None, axis='major'):
        """
        Shift index by desired number of periods with an optional time freq.
        The shifted data will not include the dropped periods and the
        shifted axis will be smaller than the original. This is different
        from the behavior of DataFrame.shift()

        Parameters
        ----------
        periods : int
            Number of periods to move, can be positive or negative
        freq : DateOffset, timedelta, or time rule string, optional
        axis : {'items', 'major', 'minor'} or {0, 1, 2}

        Returns
        -------
        shifted : Panel
        """
        if freq:
            # frequency-based shift keeps the axis length
            return self.tshift(periods, freq, axis=axis)
        return super(Panel, self).slice_shift(periods, axis=axis)
    def tshift(self, periods=1, freq=None, axis='major'):
        """Shift the time index of ``axis`` by ``periods`` increments of
        ``freq`` (delegates to NDFrame.tshift)."""
        return super(Panel, self).tshift(periods, freq, axis)
    def join(self, other, how='left', lsuffix='', rsuffix=''):
        """
        Join items with other Panel either on major and minor axes column

        Parameters
        ----------
        other : Panel or list of Panels
            Index should be similar to one of the columns in this one
        how : {'left', 'right', 'outer', 'inner'}
            How to handle indexes of the two objects. Default: 'left'
            for joining on index, None otherwise
            * left: use calling frame's index
            * right: use input frame's index
            * outer: form union of indexes
            * inner: use intersection of indexes
        lsuffix : string
            Suffix to use from left frame's overlapping columns
        rsuffix : string
            Suffix to use from right frame's overlapping columns

        Returns
        -------
        joined : Panel
        """
        from pandas.tools.merge import concat
        if isinstance(other, Panel):
            # single panel: align axes and merge the block managers
            join_major, join_minor = self._get_join_index(other, how)
            this = self.reindex(major=join_major, minor=join_minor)
            other = other.reindex(major=join_major, minor=join_minor)
            merged_data = this._data.merge(other._data, lsuffix, rsuffix)
            return self._constructor(merged_data)
        else:
            # multiple panels: delegate to concat along the items axis
            if lsuffix or rsuffix:
                raise ValueError('Suffixes not supported when passing '
                                 'multiple panels')
            if how == 'left':
                how = 'outer'
                join_axes = [self.major_axis, self.minor_axis]
            elif how == 'right':
                raise ValueError('Right join not supported with multiple '
                                 'panels')
            else:
                join_axes = None
            return concat([self] + list(other), axis=0, join=how,
                          join_axes=join_axes, verify_integrity=True)
    def update(self, other, join='left', overwrite=True, filter_func=None,
               raise_conflict=False):
        """
        Modify Panel in place using non-NA values from passed
        Panel, or object coercible to Panel. Aligns on items

        Parameters
        ----------
        other : Panel, or object coercible to Panel
        join : How to join individual DataFrames
            {'left', 'right', 'outer', 'inner'}, default 'left'
        overwrite : boolean, default True
            If True then overwrite values for common keys in the calling panel
        filter_func : callable(1d-array) -> 1d-array<boolean>, default None
            Can choose to replace values other than NA. Return True for values
            that should be updated
        raise_conflict : bool
            If True, will raise an error if a DataFrame and other both
            contain data in the same place.
        """
        if not isinstance(other, self._constructor):
            other = self._constructor(other)
        axis_name = self._info_axis_name
        axis_values = self._info_axis
        other = other.reindex(**{axis_name: axis_values})
        # update each item DataFrame in place
        for frame in axis_values:
            self[frame].update(other[frame], join, overwrite, filter_func,
                               raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
    @staticmethod
    def _extract_axes(self, data, axes, **kwargs):
        """ return a list of the axis indicies """
        # note: staticmethod that receives the class/instance explicitly
        return [self._extract_axis(self, data, axis=i, **kwargs)
                for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
    @staticmethod
    def _prep_ndarray(self, values, copy=True):
        """
        Coerce ``values`` to an ndarray of the correct dimensionality,
        copying when requested and forcing string data to object dtype.
        """
        if not isinstance(values, np.ndarray):
            values = np.asarray(values)
            # NumPy strings are a pain, convert to object
            if issubclass(values.dtype.type, compat.string_types):
                values = np.array(values, dtype=object, copy=True)
        else:
            if copy:
                values = values.copy()
        if values.ndim != self._AXIS_LEN:
            raise ValueError("The number of dimensions required is {0}, "
                             "but the number of dimensions of the "
                             "ndarray given was {1}".format(self._AXIS_LEN,
                                                            values.ndim))
        return values
    @staticmethod
    def _homogenize_dict(self, frames, intersect=True, dtype=None):
        """
        Conform set of _constructor_sliced-like objects to either
        an intersection of indices / columns or a union.

        Parameters
        ----------
        frames : dict
        intersect : boolean, default True

        Returns
        -------
        dict of aligned results & indicies
        """
        result = dict()
        # caller differs dict/ODict, preserved type
        if isinstance(frames, OrderedDict):
            result = OrderedDict()
        adj_frames = OrderedDict()
        for k, v in compat.iteritems(frames):
            if isinstance(v, dict):
                adj_frames[k] = self._constructor_sliced(v)
            else:
                adj_frames[k] = v
        # the non-info axes, aligned across all frames
        axes = self._AXIS_ORDERS[1:]
        axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
            self, adj_frames, axes, intersect=intersect))])
        reindex_dict = dict(
            [(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
        reindex_dict['copy'] = False
        for key, frame in compat.iteritems(adj_frames):
            if frame is not None:
                result[key] = frame.reindex(**reindex_dict)
            else:
                result[key] = None
        axes_dict['data'] = result
        axes_dict['dtype'] = dtype
        return axes_dict
    @staticmethod
    def _extract_axis(self, data, axis=0, intersect=False):
        """
        Infer a single axis Index for ``axis`` from a dict of frames
        and/or raw arrays, validating that raw arrays agree on length.
        """
        index = None
        if len(data) == 0:
            index = Index([])
        elif len(data) > 0:
            raw_lengths = []
            indexes = []
            have_raw_arrays = False
            have_frames = False
            for v in data.values():
                if isinstance(v, self._constructor_sliced):
                    have_frames = True
                    indexes.append(v._get_axis(axis))
                elif v is not None:
                    have_raw_arrays = True
                    raw_lengths.append(v.shape[axis])
            if have_frames:
                index = _get_combined_index(indexes, intersect=intersect)
            if have_raw_arrays:
                # all raw arrays must share one shape on this axis
                lengths = list(set(raw_lengths))
                if len(lengths) > 1:
                    raise ValueError('ndarrays must match shape on axis %d' % axis)
                if have_frames:
                    if lengths[0] != len(index):
                        raise AssertionError('Length of data and index must match')
                else:
                    index = Index(np.arange(lengths[0]))
        if index is None:
            index = Index([])
        return _ensure_index(index)
    @classmethod
    def _add_aggregate_operations(cls, use_numexpr=True):
        """ add the operations to the cls; evaluate the doc strings again """
        # doc strings substitors
        _agg_doc = """
Wrapper method for %%s

Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
    Axis to broadcast over

Returns
-------
""" + cls.__name__ + "\n"

        # factory producing one flexible arithmetic method (add, mul, ...)
        def _panel_arith_method(op, name, str_rep=None, default_axis=None,
                                fill_zeros=None, **eval_kwargs):
            def na_op(x, y):
                try:
                    # numexpr-backed fast path where available
                    result = expressions.evaluate(op, str_rep, x, y,
                                                  raise_on_error=True,
                                                  **eval_kwargs)
                except TypeError:
                    result = op(x, y)

                # handles discrepancy between numpy and numexpr on division/mod
                # by 0 though, given that these are generally (always?)
                # non-scalars, I'm not sure whether it's worth it at the moment
                result = missing.fill_zeros(result, x, y, name, fill_zeros)
                return result

            if name in _op_descriptions:
                op_name = name.replace('__', '')
                op_desc = _op_descriptions[op_name]
                if op_desc['reversed']:
                    equiv = 'other ' + op_desc['op'] + ' panel'
                else:
                    equiv = 'panel ' + op_desc['op'] + ' other'

                _op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.

Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
                       cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
    Axis to broadcast over

Returns
-------
""" + cls.__name__ + """

See also
--------
""" + cls.__name__ + ".%s\n"
                doc = _op_doc % (op_desc['desc'], op_name, equiv,
                                 op_desc['reverse'])
            else:
                doc = _agg_doc % name

            @Appender(doc)
            def f(self, other, axis=0):
                return self._combine(other, na_op, axis=axis)

            f.__name__ = name
            return f

        # add `div`, `mul`, `pow`, etc..
        ops.add_flex_arithmetic_methods(
            cls, _panel_arith_method, use_numexpr=use_numexpr,
            flex_comp_method=ops._comp_method_PANEL)
# configure the Panel axes: 'items' is the info (catalog) axis,
# 'major_axis' the stat axis; 'major'/'minor' are accepted aliases
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
                  stat_axis=1, aliases={'major': 'major_axis',
                                        'minor': 'minor_axis'},
                  slicers={'major_axis': 'index',
                           'minor_axis': 'columns'})
# attach arithmetic / comparison / numeric reduction methods to the class
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
    """Deprecated alias of :class:`Panel` (GH #10892)."""
    def __init__(self, *args, **kwargs):
        # deprecation, #10892
        warnings.warn("WidePanel is deprecated. Please use Panel",
                      FutureWarning, stacklevel=2)
        super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
    """Deprecated alias of :class:`DataFrame` (GH #10892)."""
    def __init__(self, *args, **kwargs):
        # deprecation, #10892
        warnings.warn("LongPanel is deprecated. Please use DataFrame",
                      FutureWarning, stacklevel=2)
        super(LongPanel, self).__init__(*args, **kwargs)
| 34.965146
| 79
| 0.557698
|
from __future__ import division
import warnings
import numpy as np
from pandas.types.cast import (_infer_dtype_from_scalar,
_possibly_cast_item)
from pandas.types.common import (is_integer, is_list_like,
is_string_like, is_scalar)
from pandas.types.missing import notnull
import pandas.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.core.missing as missing
from pandas import compat
from pandas.compat import (map, zip, range, u, OrderedDict, OrderedDefaultdict)
from pandas.compat.numpy import function as nv
from pandas.core.common import PandasError, _try_sort, _default_index
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.formats.printing import pprint_thing
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.tools.util import cartesian_product
from pandas.util.decorators import (deprecate, Appender)
# substitution values for docstrings shared with NDFrame
_shared_doc_kwargs = dict(
    axes='items, major_axis, minor_axis',
    klass="Panel",
    axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one"
                                        "of\n%s" %
                                        _shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels)
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=None):
    """
    Return a MultiIndex suitable for a panel-like DataFrame.

    Parameters
    ----------
    time : array-like
        Time index; does not have to repeat
    panels : array-like
        Panel index; does not have to repeat
    names : list, optional
        List containing the names of the indices; defaults to
        ['time', 'panel']

    Returns
    -------
    multi_index : MultiIndex
        Time index is the first level, the panels are the second level.
    """
    if names is None:
        names = ['time', 'panel']
    # make the two label arrays conformable before pairing them
    time, panels = _ensure_like_indices(time, panels)
    return MultiIndex.from_arrays([time, panels], sortorder=None, names=names)
class Panel(NDFrame):
    @property
    def _constructor(self):
        """Constructor used internally to rebuild an object of this type."""
        return type(self)
    # 2-d slices of a Panel are DataFrames
    _constructor_sliced = DataFrame
    def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
                 copy=False, dtype=None):
        # all construction work is delegated to the shared _init_data path
        self._init_data(data=data, items=items, major_axis=major_axis,
                        minor_axis=minor_axis, copy=copy, dtype=dtype)
    def _init_data(self, data, copy, dtype, **kwargs):
        """
        Generate ND initialization; dispatch on the type of ``data``
        (BlockManager, dict, ndarray/list, or scalar) and finish by
        initializing the NDFrame base.  Axes are passed via **kwargs
        keyed by this class's _AXIS_ORDERS.
        """
        if data is None:
            data = {}
        if dtype is not None:
            dtype = self._validate_dtype(dtype)
        passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
        if kwargs:
            raise TypeError('_init_data() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))
        axes = None
        if isinstance(data, BlockManager):
            # keep manager axes unless explicit axes were passed
            if any(x is not None for x in passed_axes):
                axes = [x if x is not None else y
                        for x, y in zip(passed_axes, data.axes)]
            mgr = data
        elif isinstance(data, dict):
            mgr = self._init_dict(data, passed_axes, dtype=dtype)
            copy = False
            dtype = None
        elif isinstance(data, (np.ndarray, list)):
            mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
            copy = False
            dtype = None
        elif is_scalar(data) and all(x is not None for x in passed_axes):
            # broadcast a scalar over fully specified axes
            if dtype is None:
                dtype, data = _infer_dtype_from_scalar(data)
            values = np.empty([len(x) for x in passed_axes], dtype=dtype)
            values.fill(data)
            mgr = self._init_matrix(values, passed_axes, dtype=dtype,
                                    copy=False)
            copy = False
        else:
            raise PandasError('Panel constructor not properly called!')
        NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
    def _init_dict(self, data, axes, dtype=None):
        """
        Build a BlockManager from a dict of DataFrame-like values keyed by
        item; missing items become all-NaN slabs, present items are
        reindexed onto the common non-info axes.
        """
        haxis = axes.pop(self._info_axis_number)
        # prefilter if haxis passed
        if haxis is not None:
            haxis = _ensure_index(haxis)
            data = OrderedDict((k, v)
                               for k, v in compat.iteritems(data)
                               if k in haxis)
        else:
            ks = list(data.keys())
            if not isinstance(data, OrderedDict):
                ks = _try_sort(ks)
            haxis = Index(ks)
        for k, v in compat.iteritems(data):
            if isinstance(v, dict):
                data[k] = self._constructor_sliced(v)
        # extract axis for remaining axes & create the slicemap
        raxes = [self._extract_axis(self, data, axis=i) if a is None else a
                 for i, a in enumerate(axes)]
        raxes_sm = self._extract_axes_for_slice(self, raxes)
        # shape of the homogenized data
        arrays = []
        haxis_shape = [len(a) for a in raxes]
        for h in haxis:
            v = values = data.get(h)
            if v is None:
                # item not supplied: fill with NaN
                values = np.empty(haxis_shape, dtype=dtype)
                values.fill(np.nan)
            elif isinstance(v, self._constructor_sliced):
                d = raxes_sm.copy()
                d['copy'] = False
                v = v.reindex(**d)
                if dtype is not None:
                    v = v.astype(dtype)
                values = v.values
            arrays.append(values)
        return self._init_arrays(arrays, haxis, [haxis] + raxes)
    def _init_arrays(self, arrays, arr_names, axes):
        """Create the BlockManager from per-item arrays and their axes."""
        return create_block_manager_from_arrays(arrays, arr_names, axes)
    @classmethod
    def from_dict(cls, data, intersect=False, orient='items', dtype=None):
        """
        Construct Panel from dict of DataFrame objects.

        Parameters
        ----------
        data : dict
            {field : DataFrame}
        intersect : boolean
            Intersect indexes of input DataFrames
        orient : {'items', 'minor'}, default 'items'
            The "orientation" of the data. If the keys of the passed dict
            should be the items of the result panel, pass 'items'
            (default). Otherwise if the columns of the values of the passed
            DataFrame objects should be the items, pass 'minor'
        dtype : dtype, default None
            Data type to force, otherwise infer

        Returns
        -------
        Panel
        """
        orient = orient.lower()
        if orient == 'minor':
            # transpose the nesting: outer keys become columns of each item
            new_data = OrderedDefaultdict(dict)
            for col, df in compat.iteritems(data):
                for item, s in compat.iteritems(df):
                    new_data[item][col] = s
            data = new_data
        elif orient != 'items':
            raise ValueError('Orientation must be one of {items, minor}.')
        d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
        ks = list(d['data'].keys())
        if not isinstance(d['data'], OrderedDict):
            ks = list(sorted(ks))
        d[cls._info_axis_name] = Index(ks)
        return cls(**d)
    def __getitem__(self, key):
        """Select item(s) along the info axis."""
        key = com._apply_if_callable(key, self)
        if isinstance(self._info_axis, MultiIndex):
            return self._getitem_multilevel(key)
        if not (is_list_like(key) or isinstance(key, slice)):
            return super(Panel, self).__getitem__(key)
        # list-like / slice keys go through label-based indexing
        return self.ix[key]
    def _getitem_multilevel(self, key):
        """Item lookup when the info axis is a MultiIndex; a partial key
        returns a reduced Panel, a full key hits the item cache."""
        info = self._info_axis
        loc = info.get_loc(key)
        if isinstance(loc, (slice, np.ndarray)):
            # partial key: slice the values and drop the matched level(s)
            new_index = info[loc]
            result_index = maybe_droplevels(new_index, key)
            slices = [loc] + [slice(None) for x in range(self._AXIS_LEN - 1)]
            new_values = self.values[slices]
            d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
            d[self._info_axis_name] = result_index
            result = self._constructor(new_values, **d)
            return result
        else:
            return self._get_item_cache(key)
    def _init_matrix(self, data, axes, dtype=None, copy=False):
        """Build a BlockManager from ndarray-like data, defaulting any
        missing axis to a RangeIndex-style default index."""
        values = self._prep_ndarray(self, data, copy=copy)
        if dtype is not None:
            try:
                values = values.astype(dtype)
            except Exception:
                raise ValueError('failed to cast to %s' % dtype)
        shape = values.shape
        fixed_axes = []
        for i, ax in enumerate(axes):
            if ax is None:
                ax = _default_index(shape[i])
            else:
                ax = _ensure_index(ax)
            fixed_axes.append(ax)
        return create_block_manager_from_blocks([values], fixed_axes)
    def _compare_constructor(self, other, func):
        """Apply comparison ``func`` item-by-item against another
        identically-labeled object of the same type."""
        if not self._indexed_same(other):
            raise Exception('Can only compare identically-labeled '
                            'same type objects')
        new_data = {}
        for col in self._info_axis:
            new_data[col] = func(self[col], other[col])
        d = self._construct_axes_dict(copy=False)
        return self._constructor(data=new_data, **d)
    def __unicode__(self):
        """
        Return a string representation for a particular Panel: class name,
        dimensions, and the first/last label of each axis.
        """
        class_name = str(self.__class__)
        shape = self.shape
        dims = u('Dimensions: %s') % ' x '.join(
            ["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
        # show only the endpoints of each axis
        def axis_pretty(a):
            v = getattr(self, a)
            if len(v) > 0:
                return u('%s axis: %s to %s') % (a.capitalize(),
                                                 pprint_thing(v[0]),
                                                 pprint_thing(v[-1]))
            else:
                return u('%s axis: None') % a.capitalize()
        output = '\n'.join(
            [class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
        return output
def _get_plane_axes_index(self, axis):
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
    def _get_plane_axes(self, axis):
        """Return the actual Index objects of the plane for ``axis``."""
        return [self._get_axis(axi)
                for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
    def to_sparse(self, *args, **kwargs):
        """Sparse conversion is unsupported for Panel; always raises."""
        raise NotImplementedError("sparsifying is not supported "
                                  "for Panel objects")
    def to_excel(self, path, na_rep='', engine=None, **kwargs):
        """
        Write each item DataFrame to a separate sheet of an Excel workbook.

        Parameters
        ----------
        path : string or ExcelWriter object
        na_rep : string, default ''
            Missing data representation
        engine : string, optional
            Write engine to use; passed to ExcelWriter
        Remaining keywords are forwarded to each DataFrame.to_excel call.
        """
        from pandas.io.excel import ExcelWriter
        if isinstance(path, compat.string_types):
            writer = ExcelWriter(path, engine=engine)
        else:
            writer = path
        kwargs['na_rep'] = na_rep
        # one sheet per item, named after the item label
        for item, df in self.iteritems():
            name = str(item)
            df.to_excel(writer, name, **kwargs)
        writer.save()
    def as_matrix(self):
        """Return the underlying data as a 3-d ndarray."""
        self._consolidate_inplace()
        return self._data.as_matrix()
    def get_value(self, *args, **kwargs):
        """
        Quickly retrieve a single value at the given coordinates, one
        positional argument per axis; ``takeable=True`` treats the
        arguments as positional indexers.
        """
        nargs = len(args)
        nreq = self._AXIS_LEN
        # require exactly one label (or position) per axis
        if nargs != nreq:
            raise TypeError('There must be an argument for each axis, you gave'
                            ' {0} args, but {1} are required'.format(nargs,
                                                                     nreq))
        takeable = kwargs.pop('takeable', None)
        if kwargs:
            raise TypeError('get_value() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))
        if takeable is True:
            lower = self._iget_item_cache(args[0])
        else:
            lower = self._get_item_cache(args[0])
        # delegate the remaining coordinates to the lower-dim object
        return lower.get_value(*args[1:], takeable=takeable)
    def set_value(self, *args, **kwargs):
        """
        Quickly set a single value at the given coordinates (one positional
        argument per axis, plus the value last).  Returns self, or an
        enlarged copy when the coordinates fall outside the current axes.
        """
        # require one label per axis plus the value itself
        nargs = len(args)
        nreq = self._AXIS_LEN + 1
        if nargs != nreq:
            raise TypeError('There must be an argument for each axis plus the '
                            'value provided, you gave {0} args, but {1} are '
                            'required'.format(nargs, nreq))
        takeable = kwargs.pop('takeable', None)
        if kwargs:
            raise TypeError('set_value() got an unexpected keyword '
                            'argument "{0}"'.format(list(kwargs.keys())[0]))
        try:
            if takeable is True:
                lower = self._iget_item_cache(args[0])
            else:
                lower = self._get_item_cache(args[0])
            lower.set_value(*args[1:], takeable=takeable)
            return self
        except KeyError:
            # enlarge: reindex onto expanded axes, then retry on the copy
            axes = self._expand_axes(args)
            d = self._construct_axes_dict_from(self, axes, copy=False)
            result = self.reindex(**d)
            args = list(args)
            likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
            made_bigger = not np.array_equal(axes[0], self._info_axis)
            # how to make this logic simpler?
            if made_bigger:
                _possibly_cast_item(result, args[0], likely_dtype)
            return result.set_value(*args)
    def _box_item_values(self, key, values):
        """Wrap raw item ``values`` in the sliced (lower-dim) type, or in
        self's own type when the values did not lose a dimension."""
        if self.ndim == values.ndim:
            result = self._constructor(values)
            # a dup selection will yield a full ndim
            if result._get_axis(0).is_unique:
                result = result[key]
            return result
        d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
        return self._constructor_sliced(values, **d)
    def __setitem__(self, key, value):
        """Set an item slab from a DataFrame-like, an ndarray of matching
        shape, or a scalar (broadcast over the slab)."""
        key = com._apply_if_callable(key, self)
        shape = tuple(self.shape)
        if isinstance(value, self._constructor_sliced):
            # align the incoming frame onto this object's non-info axes
            value = value.reindex(
                **self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
            mat = value.values
        elif isinstance(value, np.ndarray):
            if value.shape != shape[1:]:
                raise ValueError('shape of value must be {0}, shape of given '
                                 'object was {1}'.format(
                                     shape[1:], tuple(map(int, value.shape))))
            mat = np.asarray(value)
        elif is_scalar(value):
            dtype, value = _infer_dtype_from_scalar(value)
            mat = np.empty(shape[1:], dtype=dtype)
            mat.fill(value)
        else:
            raise TypeError('Cannot set item of type: %s' % str(type(value)))
        # add a leading length-1 axis so the slab stacks on the info axis
        mat = mat.reshape(tuple([1]) + shape[1:])
        NDFrame._set_item(self, key, mat)
    def _unpickle_panel_compat(self, state):
        """Unpickle the panel from a legacy (pre-BlockManager) state tuple
        of (values, items, major, minor)."""
        _unpickle = com._unpickle_array
        vals, items, major, minor = state
        items = _unpickle(items)
        major = _unpickle(major)
        minor = _unpickle(minor)
        values = _unpickle(vals)
        wp = Panel(values, items, major, minor)
        self._data = wp._data
    def conform(self, frame, axis='items'):
        """Conform the input DataFrame to the plane axes obtained by
        slicing this object along ``axis``."""
        axes = self._get_plane_axes(axis)
        return frame.reindex(**self._extract_axes_for_slice(self, axes))
    def head(self, n=5):
        """Not implemented for Panel; always raises."""
        raise NotImplementedError
    def tail(self, n=5):
        """Not implemented for Panel; always raises."""
        raise NotImplementedError
def round(self, decimals=0, *args, **kwargs):
nv.validate_round(args, kwargs)
if is_integer(decimals):
result = np.apply_along_axis(np.round, 0, self.values)
return self._wrap_result(result, axis=0)
raise TypeError("decimals must be an integer")
    def _needs_reindex_multi(self, axes, method, level):
        """ don't allow a multi reindex on Panel or above ndim """
        return False
    def align(self, other, **kwargs):
        """Not implemented for Panel; always raises."""
        raise NotImplementedError
    def dropna(self, axis=0, how='any', inplace=False):
        """
        Drop 2D from panel, holding passed axis constant.

        Parameters
        ----------
        axis : int, default 0
            Axis to hold constant. E.g. axis=1 will drop major_axis entries
            having a certain amount of NA data
        how : {'all', 'any'}, default 'any'
            'any': one or more values are NA in the DataFrame along the
            axis. For 'all' they all must be.
        inplace : bool, default False
            If True, do operation inplace and return None.

        Returns
        -------
        dropped : Panel
        """
        axis = self._get_axis_number(axis)
        values = self.values
        mask = notnull(values)
        # collapse the mask over every axis except the one held constant
        for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
            mask = mask.sum(ax)
        per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
        if how == 'all':
            cond = mask > 0
        else:
            cond = mask == per_slice
        new_ax = self._get_axis(axis)[cond]
        result = self.reindex_axis(new_ax, axis=axis)
        if inplace:
            self._update_inplace(result)
        else:
            return result
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif is_scalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError("%s is not supported in combine "
"operation with %s" %
(str(type(other)), str(type(self))))
def _combine_const(self, other, func):
with np.errstate(all='ignore'):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
with np.errstate(all='ignore'):
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
with np.errstate(all='ignore'):
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key):
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key):
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1):
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
def _ixs(self, i, axis=0):
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
# if we have a multi-index and a single tuple, then its a reduction
# (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def apply(self, func, axis='major', **kwargs):
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
with np.errstate(all='ignore'):
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError('Panel.{0} does not implement '
'numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
with np.errstate(all='ignore'):
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
# return the construction dictionary for these axes
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None else
kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None else
kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# check if a list of axes was passed in instead as a
# single *args element
if (len(args) == 1 and hasattr(args[0], '__iter__') and
not is_string_like(args[0])):
axes = args[0]
else:
axes = args
if 'axes' in kwargs and axes:
raise TypeError("transpose() got multiple values for "
"keyword argument 'axes'")
elif not axes:
axes = kwargs.pop('axes', ())
return super(Panel, self).transpose(*axes, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast, **kwargs)
def count(self, axis='major'):
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
def shift(self, periods=1, freq=None, axis='major'):
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
return [self._extract_axis(self, data, axis=i, **kwargs)
for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(
self._AXIS_ORDERS[self._AXIS_LEN - len(axes):],
axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
result = dict()
# caller differs dict/ODict, presered type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
# doc strings substitors
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__,
cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'], info_axis=0,
stat_axis=1, aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
| true
| true
|
f7087785207517cbb4605c1c30cb4178a14909ce
| 888
|
py
|
Python
|
main/migrations/0002_auto_20200225_1930.py
|
MexsonFernandes/CustomYoloV3
|
0acde7613d3b202859b8bab21b9c3ee5432a61bf
|
[
"MIT"
] | null | null | null |
main/migrations/0002_auto_20200225_1930.py
|
MexsonFernandes/CustomYoloV3
|
0acde7613d3b202859b8bab21b9c3ee5432a61bf
|
[
"MIT"
] | 4
|
2021-06-04T23:17:43.000Z
|
2021-09-22T19:06:48.000Z
|
main/migrations/0002_auto_20200225_1930.py
|
MexsonFernandes/DjanYolo
|
ccdaa3ee45c55b8cc9ff00342d9d44f293e8500c
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.3 on 2020-02-25 19:30
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='annotationclass',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='objectclassmodel',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| 30.62069
| 125
| 0.650901
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='annotationclass',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.DO_NOTHING, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='objectclassmodel',
name='user',
field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
| true
| true
|
f70877d517388341ba4068c28d5fd24a0f6420ac
| 3,440
|
py
|
Python
|
h5nastran/f06_reader.py
|
EmanueleCannizzaro/h5nastran
|
a4ac2e8e0600332a553048a79393f96bd090b2ea
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2019-09-18T06:37:13.000Z
|
2020-05-26T11:58:03.000Z
|
h5nastran/f06_reader.py
|
EmanueleCannizzaro/h5nastran
|
a4ac2e8e0600332a553048a79393f96bd090b2ea
|
[
"MIT",
"BSD-3-Clause"
] | null | null | null |
h5nastran/f06_reader.py
|
EmanueleCannizzaro/h5nastran
|
a4ac2e8e0600332a553048a79393f96bd090b2ea
|
[
"MIT",
"BSD-3-Clause"
] | 2
|
2018-08-11T16:46:37.000Z
|
2022-03-06T18:19:33.000Z
|
from __future__ import print_function, absolute_import
from six import iteritems, iterkeys, itervalues
from six.moves import range
from _file_reader import FileReader
from f06_table import F06Table
class _DummyTable(object):
def __init__(self):
self.header = []
self.data = []
self.line_number = -1
self.table_format = None
class TableFormat(object):
def __init__(self):
self.header_check = b'D I S P L A C E M E N T V E C T O R'
self.header_check_line = 2
self.header_lines = 5
class F06Reader(object):
def __init__(self, filename):
self.file = FileReader(filename)
self._done_reading = False
self._table_formats = [TableFormat()]
self._current_table = None
self._callback = None
def register_callback(self, callback):
assert callable(callback)
self._callback = callback
def read(self):
while not self._done_reading:
table_lines, line_number = self._read_table()
if self._done_reading:
break
table_format = F06Table.find_table(table_lines)
if table_format is None:
self._process_table(self._current_table)
self._current_table = None
continue
table = table_format()
table.set_data(table_lines)
table.line_number = line_number
for i in range(len(table.header)):
table.header[i] = table.header[i].strip()
if self._current_table is None:
self._current_table = table
else:
if self._current_table.header == table.header:
self._current_table.data.extend(table.data)
else:
self._process_table(self._current_table)
self._current_table = table
if self._current_table is not None:
self._process_table(self._current_table)
self._current_table = None
def _process_table(self, table):
if table is None:
return
pch_table = table.to_punch()
if isinstance(pch_table, (list, tuple)):
for table in pch_table:
self._callback(table)
else:
self._callback(pch_table)
def _read_table(self):
table_lines = []
first_line = self._find_next_table()
if self._done_reading:
return None, None
line_number = self.file.line_number()
while True:
if first_line is not None:
line = first_line
first_line = None
else:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
# print(line)
if line.startswith(b'1'):
break
table_lines.append(line)
return table_lines, line_number
def _find_next_table(self):
while True:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
if line.startswith(b'0') and b'SUBCASE' in line:
return line
return None
def _check_done_reading(self, line):
if line is None or b'END OF JOB' in line:
self._done_reading = True
| 26.259542
| 68
| 0.571512
|
from __future__ import print_function, absolute_import
from six import iteritems, iterkeys, itervalues
from six.moves import range
from _file_reader import FileReader
from f06_table import F06Table
class _DummyTable(object):
def __init__(self):
self.header = []
self.data = []
self.line_number = -1
self.table_format = None
class TableFormat(object):
def __init__(self):
self.header_check = b'D I S P L A C E M E N T V E C T O R'
self.header_check_line = 2
self.header_lines = 5
class F06Reader(object):
def __init__(self, filename):
self.file = FileReader(filename)
self._done_reading = False
self._table_formats = [TableFormat()]
self._current_table = None
self._callback = None
def register_callback(self, callback):
assert callable(callback)
self._callback = callback
def read(self):
while not self._done_reading:
table_lines, line_number = self._read_table()
if self._done_reading:
break
table_format = F06Table.find_table(table_lines)
if table_format is None:
self._process_table(self._current_table)
self._current_table = None
continue
table = table_format()
table.set_data(table_lines)
table.line_number = line_number
for i in range(len(table.header)):
table.header[i] = table.header[i].strip()
if self._current_table is None:
self._current_table = table
else:
if self._current_table.header == table.header:
self._current_table.data.extend(table.data)
else:
self._process_table(self._current_table)
self._current_table = table
if self._current_table is not None:
self._process_table(self._current_table)
self._current_table = None
def _process_table(self, table):
if table is None:
return
pch_table = table.to_punch()
if isinstance(pch_table, (list, tuple)):
for table in pch_table:
self._callback(table)
else:
self._callback(pch_table)
def _read_table(self):
table_lines = []
first_line = self._find_next_table()
if self._done_reading:
return None, None
line_number = self.file.line_number()
while True:
if first_line is not None:
line = first_line
first_line = None
else:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
if line.startswith(b'1'):
break
table_lines.append(line)
return table_lines, line_number
def _find_next_table(self):
while True:
line = self.file.next_line()
self._check_done_reading(line)
if self._done_reading:
break
if line.startswith(b'0') and b'SUBCASE' in line:
return line
return None
def _check_done_reading(self, line):
if line is None or b'END OF JOB' in line:
self._done_reading = True
| true
| true
|
f70878ad9644433e31d9c6e041bb1c6ce2f75b22
| 33,195
|
py
|
Python
|
fastlmm/inference/fastlmm_predictor.py
|
eric-czech/FaST-LMM
|
497ac732f0cb25e328282cff42045afb70a99076
|
[
"Apache-2.0"
] | null | null | null |
fastlmm/inference/fastlmm_predictor.py
|
eric-czech/FaST-LMM
|
497ac732f0cb25e328282cff42045afb70a99076
|
[
"Apache-2.0"
] | null | null | null |
fastlmm/inference/fastlmm_predictor.py
|
eric-czech/FaST-LMM
|
497ac732f0cb25e328282cff42045afb70a99076
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function #Python 2 & 3 compatibility
from __future__ import absolute_import
import numpy as np
import logging
import unittest
import os
import scipy.linalg as LA
import time
from pysnptools.snpreader import Bed,Pheno
from pysnptools.snpreader import SnpData,SnpReader
from pysnptools.kernelreader import KernelNpz
from pysnptools.kernelreader import SnpKernel
from pysnptools.kernelreader import KernelReader
from pysnptools.kernelreader import Identity as KernelIdentity
import pysnptools.util as pstutil
from pysnptools.standardizer import DiagKtoN,UnitTrained
from pysnptools.standardizer import Unit
from pysnptools.util import intersect_apply
from pysnptools.standardizer import Standardizer
from fastlmm.inference.lmm import LMM
from pysnptools.standardizer import Identity as StandardizerIdentity
from scipy.stats import multivariate_normal
from fastlmm.util.pickle_io import load, save
from pysnptools.pstreader import PstReader
from six.moves import range
class _SnpWholeTest(KernelReader):
'''
Warning: Assumes that if train and test contains the same iid, they have the same value.
'''
def __init__(self,train,test,standardizer,block_size,iid0=None):
self.train = train
self.test = test
self.standardizer = standardizer
assert standardizer.is_constant, "Expect standardizer to be constant"
self.block_size = block_size
if iid0 is not None:
_row = iid0
@property
def row(self):
if not hasattr(self,'_row'):
assert np.array_equal(self.train.sid,self.test.sid), "Expect train and test to have same sid in same order"
train_set = set(tuple(item) for item in self.train.iid)
test_unique = [item2 for item2 in (tuple(item) for item in self.test.iid) if item2 not in train_set]
self._row = np.r_[self.train.iid,np.array(test_unique,dtype='str').reshape(-1,2)]
return self._row
@property
def col(self):
return self.test.iid
def __getitem__(self, iid_indexer_and_snp_indexer):
if isinstance(iid_indexer_and_snp_indexer,tuple):
iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer
else:
iid0_indexer = iid_indexer_and_snp_indexer
iid1_indexer = iid0_indexer
row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, iid0_indexer)
col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, iid1_indexer)
if row_index_or_none is None:
row_index_or_none = list(range(self.row_count))
assert not isinstance(row_index_or_none,str), "row_index_or_none should not be a string"
iid = self.row[row_index_or_none]
if col_index_or_none is None or np.array_equal(col_index_or_none,list(range(self.col_count))):
test = self.test
else:
test = self.test[col_index_or_none]
try: #case 1: asking for train x test
train = self.train[self.train.iid_to_index(iid),:]
is_ok = True
except:
is_ok = False
if is_ok:
return _SnpTrainTest(train=train,test=test,standardizer=self.standardizer,block_size=self.block_size)
#case 2: asking for train x test
if np.array_equal(test.iid,iid):
return SnpKernel(test,standardizer=self.standardizer,block_size=self.block_size)
#case 3: Just re-reordering the iids
if len(row_index_or_none) == self.row_count and (col_index_or_none is None or len(col_index_or_none) == self.col_count):
result = _SnpWholeTest(train=self.train,test=test,standardizer=self.standardizer,block_size=self.block_size,iid0=iid)
return result
raise Exception("When reading from a _SnpWholeTest, can only ask to reorder iids or to access from train x test or test x test")
#!!! does it make sense to read from disk in to parts?
def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
result = self[row_index_or_none,col_index_or_none]._read(row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok)
return result
def __repr__(self):
s = "_SnpWholeTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
if self.block_size is not None:
s += ",block_size={0}".format(self.block_size)
s += ")"
return s
def copyinputs(self, copier):
#Doesn't need run_once
copier.input(self.train)
copier.input(self.test)
copier.input(self.standardizer)
class _SnpTrainTest(KernelReader):
    """Implicit (train iid x test iid) kernel backed by SNP data.

    Values are the dot products of standardized SNP values, computed lazily
    and (optionally) in blocks of `block_size` SNPs to bound memory use.
    """
    def __init__(self,train,test,standardizer,block_size):
        self.train = train
        self.test = test
        self.standardizer = standardizer
        assert standardizer.is_constant, "Expect standardizer to be constant"
        self.block_size = block_size
        # When train and test share the same iids, reuse the train array object.
        if np.array_equal(train.iid,test.iid):
            self._col = train.iid
        else:
            self._col = test.iid
    @property
    def row(self):
        # Kernel rows are the training iids.
        return self.train.iid
    @property
    def col(self):
        # Kernel columns are the test iids (or the shared iids; see __init__).
        return self._col
    def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
        assert self.train.sid_count == self.test.sid_count, "real assert"
        #case 1: asking for all of train x test
        # FIX: parentheses added. 'and' binds tighter than 'or', so the original
        # condition parsed as A or (B and C) or D instead of the intended
        # (A or B) and (C or D); a request for a column subset could silently
        # fall into case 1 and return a full-width (wrong-shaped) kernel.
        if ((row_index_or_none is None or np.array_equal(row_index_or_none,np.arange(self.row_count)))
            and (col_index_or_none is None or np.array_equal(col_index_or_none,np.arange(self.col_count)))):
            #Do all-at-once (not in blocks) if 1. No block size is given or 2. The #ofSNPs < Min(block_size,iid_count) #similar code elsewhere
            if self.block_size is None or (self.train.sid_count <= self.block_size or self.train.sid_count <= self.train.iid_count+self.test.iid_count):
                train_snps = self.train.read(dtype=dtype).standardize(self.standardizer)
                test_snps = self.test.read(dtype=dtype).standardize(self.standardizer)
                if order == 'F': #numpy's 'dot' always returns 'C' order
                    k_val = test_snps.val.dot(train_snps.val.T).T
                else:
                    k_val = train_snps.val.dot(test_snps.val.T)
                return k_val
            else: #Do in blocks
                #Set the default order to 'C' because with kernels any order is fine and the Python .dot method likes 'C' best.
                if order=='A':
                    order = 'C'
                k_val = np.zeros([self.train.iid_count,self.test.iid_count],dtype=dtype,order=order)
                ct = 0
                ts = time.time()
                # Accumulate the kernel one block of SNPs at a time.
                for start in range(0, self.train.sid_count, self.block_size):
                    ct += self.block_size
                    train_snps = self.train[:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
                    test_snps = self.test [:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
                    if order == 'F': #numpy's 'dot' always returns 'C' order
                        k_val += test_snps.val.dot(train_snps.val.T).T
                    else:
                        k_val += train_snps.val.dot(test_snps.val.T)
                    if ct % self.block_size==0:
                        diff = time.time()-ts
                        if diff > 1: logging.info("read %s SNPs in %.2f seconds" % (ct, diff))
                return k_val
        else:
            raise Exception("_SnpTrainTest currently only has code for reading all of train x test")
    def __repr__(self):
        s = "_SnpTrainTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
        if self.block_size is not None:
            s += ",block_size={0}".format(self.block_size)
        s += ")"
        return s
    def copyinputs(self, copier):
        #Doesn't need run_once
        copier.input(self.train)
        copier.input(self.test)
        copier.input(self.standardizer)
def _snps_fixup(snp_input, iid_if_none=None,count_A1=None):
    """Normalize a SNP input (reader or filename) via PySnpTools' fixup helper."""
    from pysnptools.snpreader import _snps_fixup as _pst_fixup
    return _pst_fixup(snp_input, iid_if_none, count_A1)
def _pheno_fixup(pheno_input, iid_if_none=None, missing='NaN', count_A1=None):
    """Coerce a phenotype input into a reader.

    Tries to interpret *pheno_input* as a PLINK phenotype file/spec; if that
    fails for any reason, falls back to the generic SNP-input fixup.
    """
    try:
        ret = Pheno(pheno_input, iid_if_none, missing=missing)
        ret.iid #doing this just to force file load
        return ret
    # FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the deliberate best-effort fallback.
    except Exception:
        return _snps_fixup(pheno_input, iid_if_none=iid_if_none, count_A1=count_A1)
def _kernel_fixup(input, iid_if_none, standardizer, test=None, test_iid_if_none=None, block_size=None, train_snps=None, count_A1=None):
    """Coerce *input* (filename, SnpReader, KernelReader, or None) into a kernel reader."""
    # A test-only call means the test data plays the role of the main input.
    if input is None and test is not None:
        input, test = test, None
    if isinstance(input, str):
        if input.endswith(".npz"):
            return KernelNpz(input)
        input = Bed(input, count_A1=count_A1)  # no return: processing continues below
    if isinstance(test, str):
        test = Bed(test, count_A1=count_A1)  # no return: processing continues below
    if isinstance(input, SnpReader):
        if test is None:
            return SnpKernel(input, standardizer=standardizer, block_size=block_size)
        return _SnpWholeTest(train=train_snps, test=test, standardizer=standardizer, block_size=block_size)
    if input is None:
        return KernelIdentity(iid=iid_if_none, test=test_iid_if_none)
    return input
class FastLMM(object):
    '''
    A predictor, somewhat in the style of scikit-learn, for learning and predicting with linear mixed models.
    **Constructor:**
    :Parameters: * **GB_goal** (int) -- gigabytes of memory the run should use, optional. If not given, will read the test_snps in blocks the same size as the kernel, which is memory efficient with little overhead on computation time.
                 * **force_full_rank** (bool) -- Even if kernels are defined with fewer SNPs than IIDs, create an explicit iid_count x iid_count kernel. Cannot be True if force_low_rank is True.
                 * **force_low_rank** (bool) -- Even if kernels are defined with fewer IIDs than SNPs, create a low-rank iid_count x sid_count kernel. Cannot be True if force_full_rank is True.
                 * **snp_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to SNP data. Choices include :class:`Standardizer.Unit` (Default. Makes values for each SNP have mean zero and standard deviation 1.0, then fills missing with zero) and :class:`Standardizer.Identity` (Do nothing)
                 * **covariate_standardizer** (:class:`Standardizer`) -- The PySnpTools standardizer to be apply to X, the covariate data. Some choices include :class:`Standardizer.Unit` (Default. Fills missing with zero) and :class:`Standardizer.Identity` (do nothing)
                 * **kernel_standardizer** (:class:`KernelStandardizer`) -- The PySnpTools kernel standardizer to be apply to the kernels. Some choices include :class:`KernelStandardizer.DiagKToN` (Default. Make the diagonal sum to iid_count) and :class:`KernelStandardizer.Identity` (Do nothing)
    :Example:
    >>> from __future__ import print_function #Python 2 & 3 compatibility
    >>> import numpy as np
    >>> import logging
    >>> from pysnptools.snpreader import Bed, Pheno
    >>> from fastlmm.inference import FastLMM
    >>> logging.basicConfig(level=logging.INFO)
    >>> snpreader = Bed('../feature_selection/examples/toydata.bed',count_A1=False)
    >>> cov_fn = "../feature_selection/examples/toydata.cov"
    >>> pheno_fn = "../feature_selection/examples/toydata.phe"
    >>> train_idx = np.r_[10:snpreader.iid_count] # iids 10 and on
    >>> test_idx  = np.r_[0:10] # the first 10 iids
    >>> fastlmm = FastLMM(GB_goal=2)
    >>> #We give it phenotype and covariate information for extra examples, but it reorders and intersects the examples, so only training examples are used.
    >>> _ = fastlmm.fit(K0_train=snpreader[train_idx,:],X=cov_fn,y=pheno_fn)
    >>> mean, covariance = fastlmm.predict(K0_whole_test=snpreader[test_idx,:],X=cov_fn,count_A1=False)
    >>> print(list(mean.iid[0]), round(mean.val[0,0],7), round(covariance.val[0,0],7))
    ['per0', 'per0'] 0.1791958 0.8995209
    >>> nll = fastlmm.score(K0_whole_test=snpreader[test_idx,:],X=cov_fn,y=pheno_fn,count_A1=False)
    >>> print(round(nll,7))
    13.4623234
    '''
    def __init__(self, GB_goal=None, force_full_rank=False, force_low_rank=False, snp_standardizer=Unit(), covariate_standardizer=Unit(), kernel_standardizer=DiagKtoN()):
        # Configuration only; all fitted model state is attached in fit().
        self.GB_goal = GB_goal
        self.force_full_rank = force_full_rank
        self.force_low_rank = force_low_rank
        self.snp_standardizer = snp_standardizer
        self.covariate_standardizer = covariate_standardizer
        self.kernel_standardizer = kernel_standardizer
        self.is_fitted = False
    #!!!update doc to explain h2raw w.r.t h2
    def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2raw=None, mixing=None,count_A1=None):#!!!is this h2 or h2corr????
        """
        Method for training a :class:`FastLMM` predictor. If the examples in X, y, K0_train, K1_train are not the same, they will be reordered and intersected.
        :param X: training covariate information, optional:
          If you give a string, it should be the file name of a PLINK phenotype-formatted file.
        :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__
          (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
        :param y: training phenotype:
          If you give a string, it should be the file name of a PLINK phenotype-formatted file.
        :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__
          (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
        :param K0_train: A similarity matrix or SNPs from which to construct such a similarity matrix.
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__.
          If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.
          If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K0_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or
          `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param K1_train: A second similarity matrix or SNPs from which to construct such a second similarity matrix. (Also, see 'mixing').
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__.
          If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K1_train: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or
          `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param h2raw: A parameter to LMM learning that tells how much weight to give the K's vs. the identity matrix, optional
          If not given will search for best value.
          If mixing is unspecified, then h2 must also be unspecified.
        :type h2raw: number
        :param mixing: Weight between 0.0 (inclusive, default) and 1.0 (inclusive) given to K1_train relative to K0_train.
          If you give no mixing number and a K1_train is given, the best weight will be learned.
        :type mixing: number
        :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
          alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
        :type count_A1: bool
        :rtype: self, the fitted FastLMM predictor
        """
        self.is_fitted = True
        # should this have a cache file like 'single_snp'?
        #!!!later what happens if missing values in pheno_train?
        #!!!later add code so that X, y, etc can be array-like objects without iid information. In that case, make up iid info
        assert y is not None, "y must be given"
        y = _pheno_fixup(y,count_A1=count_A1)
        assert y.sid_count == 1, "Expect y to be just one variable"
        X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)
        K0_train = _kernel_fixup(K0_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
        K1_train = _kernel_fixup(K1_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
        K0_train, K1_train, X, y = intersect_apply([K0_train, K1_train, X, y],intersect_before_standardize=True) #!!! test this on both K's as None
        from fastlmm.association.single_snp import _set_block_size
        K0_train, K1_train, block_size = _set_block_size(K0_train, K1_train, mixing, self.GB_goal, self.force_full_rank, self.force_low_rank)
        X = X.read()
        # If possible, unit standardize train and test together. If that is not possible, unit standardize only train and later apply
        # the same linear transformation to test. Unit standardization is necessary for FastLMM to work correctly.
        #!!!later is the calculation of the training data's stats done twice???
        X, covar_unit_trained = X.standardize(self.covariate_standardizer,block_size=block_size,return_trained=True) #This also fills missing with the mean
        # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
        X = SnpData(iid=X.iid,
                    sid=self._new_snp_name(X),
                    val=np.c_[X.val,np.ones((X.iid_count,1))],
                    name ="covariate_train w/ 1's")
        y0 = y.read().val #!!!later would view_ok=True,order='A' be ok because this code already did a fresh read to look for any missing values
        from fastlmm.association.single_snp import _Mixer #!!!move _combine_the_best_way to another file (e.g. this one)
        K_train, h2raw, mixer = _Mixer.combine_the_best_way(K0_train,K1_train,X.val,y0,mixing,h2raw,force_full_rank=self.force_full_rank,force_low_rank=self.force_low_rank,kernel_standardizer=self.kernel_standardizer,block_size=block_size)
        # do final prediction using lmm.py
        lmm = LMM()
        #Special case: The K kernel is defined implicitly with SNP data
        if mixer.do_g:
            assert isinstance(K_train.standardizer,StandardizerIdentity), "Expect Identity standardizer"
            G_train = K_train.snpreader
            lmm.setG(G0=K_train.snpreader.val)
        else:
            lmm.setK(K0=K_train.val)
        lmm.setX(X.val)
        lmm.sety(y0[:,0])
        # Find the best h2 and also on covariates (not given from new model)
        if h2raw is None:
            res = lmm.findH2() #!!!why is REML true in the return???
        else:
            res = lmm.nLLeval(h2=h2raw)
        #We compute sigma2 instead of using res['sigma2'] because res['sigma2'] is only the pure noise.
        full_sigma2 = float(sum((np.dot(X.val,res['beta']).reshape(-1,1)-y0)**2))/y.iid_count #!!! this is non REML. Is that right?
        ###### all references to 'fastlmm_model' should be here so that we don't forget any
        self.block_size = block_size
        self.beta = res['beta']
        self.h2raw = res['h2']
        self.sigma2 = full_sigma2
        self.U = lmm.U
        self.S = lmm.S
        self.K = lmm.K
        self.G = lmm.G
        self.y = lmm.y
        self.Uy = lmm.Uy
        self.X = lmm.X
        self.UX = lmm.UX
        self.mixer = mixer
        self.covar_unit_trained = covar_unit_trained
        self.K_train_iid = K_train.iid
        self.covar_sid = X.sid
        self.pheno_sid = y.sid
        self.G0_train = K0_train.snpreader if isinstance(K0_train,SnpKernel) else None #!!!later expensive?
        self.G1_train = K1_train.snpreader if isinstance(K1_train,SnpKernel) else None #!!!later expensive?
        return self
    @staticmethod
    def _new_snp_name(snpreader):
        """Return snpreader.sid with one new, collision-free sid appended (for the constant column)."""
        new_snp = "always1"
        while True:
            if not new_snp in snpreader.sid:
                return np.r_[snpreader.sid,[new_snp]]
            new_snp += "_"
    def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, return_per_iid=False, count_A1=None):
        """
        Method for calculating the negative log likelihood of testing examples.
        If the examples in X,y, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
        :param X: testing covariate information, optional:
          If you give a string, it should be the file name of a PLINK phenotype-formatted file.
        :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
        :param y: testing phenotype:
          If you give a string, it should be the file name of a PLINK phenotype-formatted file.
        :type y: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
        :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,
          the test SNPs needed to construct such a similarity matrix.
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,
          the test SNPs needed to construct such a similarity matrix.
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
        :type iid_if_none: an ndarray of two strings
        :param return_mse_too: If true, will also return the mean squared error.
        :type return_mse_too: bool
        :param count_A1: If it needs to read SNP data from a BED-formatted file, tells if it should count the number of A1
          alleles (the PLINK standard) or the number of A2 alleles. False is the current default, but in the future the default will change to True.
        :type count_A1: bool
        :rtype: a float of the negative log likelihood and, optionally, a float of the mean squared error.
        """
        mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)
        y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)
        mean, covar, y = intersect_apply([mean0, covar0, y])
        mean = mean.read(order='A',view_ok=True).val
        covar = covar.read(order='A',view_ok=True).val
        y_actual = y.read().val
        if not return_per_iid:
            # Joint score: one multivariate Gaussian over all test examples.
            var = multivariate_normal(mean=mean.reshape(-1), cov=covar)
            nll = -np.log(var.pdf(y_actual.reshape(-1)))
            if not return_mse_too:
                return nll
            else:
                mse = ((y_actual-mean)**2).sum()
                return nll, mse
        else:
            if not return_mse_too:
                # Per-example score: each iid evaluated under its marginal Gaussian.
                result = SnpData(iid=y.iid,sid=['nLL'],val=np.empty((y.iid_count,1)),name="nLL")
                for iid_index in range(y.iid_count):
                    var = multivariate_normal(mean=mean[iid_index], cov=covar[iid_index,iid_index])
                    nll = -np.log(var.pdf(y_actual[iid_index]))
                    result.val[iid_index,0] = nll
                return result
            else:
                raise Exception("need code for mse_too")
    # NOTE(review): _extract_fixup appears unused; it takes 'kernel' rather than
    # 'self' and only asserts an invariant, returning None -- confirm before use.
    def _extract_fixup(kernel):
        assert kernel.iid0_count >= kernel.iid1_count, "Expect iid0 to be at least as long as iid1"
    def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None, count_A1=None):
        """
        Method for predicting from a fitted :class:`FastLMM` predictor.
        If the examples in X, K0_whole_test, K1_whole_test are not the same, they will be reordered and intersected.
        :param X: testing covariate information, optional:
          If you give a string, it should be the file name of a PLINK phenotype-formatted file.
        :type X: a PySnpTools `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ (such as `Pheno <http://fastlmm.github.io/PySnpTools/#snpreader-pheno>`__ or `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__) or string.
        :param K0_whole_test: A similarity matrix from all the examples to the test examples. Alternatively,
          the test SNPs needed to construct such a similarity matrix.
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K0_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param K1_whole_test: A second similarity matrix from all the examples to the test examples. Alternatively,
          the test SNPs needed to construct such a similarity matrix.
          Can be any `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__. If you give a string, can be the name of a PLINK-formated Bed file.
          Can be PySnpTools `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__. If you give a string it can be the name of a `KernelNpz <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelnpz>`__ file.
        :type K1_whole_test: `SnpReader <http://fastlmm.github.io/PySnpTools/#snpreader-snpreader>`__ or a string or `KernelReader <http://fastlmm.github.io/PySnpTools/#kernelreader-kernelreader>`__
        :param iid_if_none: Examples to predict for if no X, K0_whole_test, K1_whole_test is provided.
        :type iid_if_none: an ndarray of two strings
        :rtype: A `SnpData <http://fastlmm.github.io/PySnpTools/#snpreader-snpdata>`__ of the means and a :class:`KernelData` of the covariance
        """
        assert self.is_fitted, "Can only predict after predictor has been fitted"
        #assert K0_whole_test is not None, "K0_whole_test must be given"
        #!!!later is it too wasteful to keep both G0_train, G1_train, and lmm.G when storing to disk?
        #!!!later all _kernel_fixup's should use block_size input
        K0_whole_test_b = _kernel_fixup(K0_whole_test, train_snps=self.G0_train, iid_if_none=iid_if_none, standardizer=self.mixer.snp_trained0, test=K0_whole_test, test_iid_if_none=None, block_size=self.block_size,count_A1=count_A1)
        K1_whole_test = _kernel_fixup(K1_whole_test, train_snps=self.G1_train, iid_if_none=K0_whole_test_b.iid0, standardizer=self.mixer.snp_trained1, test=K1_whole_test, test_iid_if_none=K0_whole_test_b.iid1, block_size=self.block_size,count_A1=count_A1)
        X = _pheno_fixup(X,iid_if_none=K0_whole_test_b.iid1,count_A1=count_A1)
        K0_whole_test_c, K1_whole_test, X = intersect_apply([K0_whole_test_b, K1_whole_test, X],intersect_before_standardize=True,is_test=True)
        X = X.read().standardize(self.covar_unit_trained)
        # add a column of 1's to cov to increase DOF of model (and accuracy) by allowing a constant offset
        X = SnpData(iid=X.iid,
                    sid=self._new_snp_name(X),
                    val=np.c_[X.read().val,np.ones((X.iid_count,1))])
        assert np.array_equal(X.sid,self.covar_sid), "Expect covar sids to be the same in train and test."
        # Split each whole-x-test kernel into its train-x-test and test-x-test parts.
        train_idx0 = K0_whole_test_c.iid0_to_index(self.K_train_iid)
        K0_train_test = K0_whole_test_c[train_idx0,:]
        train_idx1 = K1_whole_test.iid0_to_index(self.K_train_iid)
        K1_train_test = K1_whole_test[train_idx1,:]
        test_idx0 = K0_whole_test_c.iid0_to_index(K0_whole_test_c.iid1)
        K0_test_test = K0_whole_test_c[test_idx0,:]
        if K0_test_test.iid0 is not K0_test_test.iid1:
            raise Exception("real assert")
        test_idx1 = K1_whole_test.iid0_to_index(K0_whole_test_c.iid1)
        K1_test_test = K1_whole_test[test_idx1,:]
        if self.mixer.do_g:
            ###################################################
            # low rank from Rasmussen  eq 2.9 + noise term added to covar
            ###################################################
            Gstar = self.mixer.g_mix(K0_train_test,K1_train_test)
            varg = self.h2raw * self.sigma2
            vare = (1.-self.h2raw) * self.sigma2
            Ainv = LA.inv((1./vare) * np.dot(self.G.T,self.G) + (1./varg)*np.eye(self.G.shape[1]))
            testAinv = np.dot(Gstar.test.val, Ainv)
            pheno_predicted = np.dot(X.val,self.beta) + (1./vare) * np.dot(np.dot(testAinv,self.G.T),self.y-np.dot(self.X,self.beta))
            pheno_predicted = pheno_predicted.reshape(-1,1)
            covar  = np.dot(testAinv,Gstar.test.val.T) + vare * np.eye(Gstar.test.val.shape[0])
        else:
            lmm = LMM()
            lmm.U = self.U
            lmm.S = self.S
            lmm.G = self.G
            lmm.y = self.y
            lmm.Uy = self.Uy
            lmm.X = self.X
            lmm.UX = self.UX
            Kstar = self.mixer.k_mix(K0_train_test,K1_train_test) #!!!later do we need/want reads here? how about view_OK?
            lmm.setTestData(Xstar=X.val, K0star=Kstar.val.T)
            Kstar_star = self.mixer.k_mix(K0_test_test,K1_test_test) #!!!later do we need/want reads here?how about view_OK?
            pheno_predicted, covar = lmm.predict_mean_and_variance(beta=self.beta, h2=self.h2raw,sigma2=self.sigma2, Kstar_star=Kstar_star.val)
            #pheno_predicted = lmm.predictMean(beta=self.beta, h2=self.h2,scale=self.sigma2).reshape(-1,1)
        ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name="lmm Prediction")
        from pysnptools.kernelreader import KernelData
        ret1 = KernelData(iid=K0_test_test.iid,val=covar)
        return ret0, ret1
if __name__ == "__main__":
    # Smoke-test entry point: run the doctests embedded in this module's docstrings.
    logging.basicConfig(level=logging.INFO)
    import doctest
    doctest.testmod()
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import logging
import unittest
import os
import scipy.linalg as LA
import time
from pysnptools.snpreader import Bed,Pheno
from pysnptools.snpreader import SnpData,SnpReader
from pysnptools.kernelreader import KernelNpz
from pysnptools.kernelreader import SnpKernel
from pysnptools.kernelreader import KernelReader
from pysnptools.kernelreader import Identity as KernelIdentity
import pysnptools.util as pstutil
from pysnptools.standardizer import DiagKtoN,UnitTrained
from pysnptools.standardizer import Unit
from pysnptools.util import intersect_apply
from pysnptools.standardizer import Standardizer
from fastlmm.inference.lmm import LMM
from pysnptools.standardizer import Identity as StandardizerIdentity
from scipy.stats import multivariate_normal
from fastlmm.util.pickle_io import load, save
from pysnptools.pstreader import PstReader
from six.moves import range
class _SnpWholeTest(KernelReader):
    """Implicit kernel from (train+test) iids to test iids, backed by SNP data.

    Rows are the training iids followed by any test iids not already in train;
    columns are the test iids. Values are computed lazily from standardized SNPs.
    """
    def __init__(self,train,test,standardizer,block_size,iid0=None):
        self.train = train
        self.test = test
        self.standardizer = standardizer
        assert standardizer.is_constant, "Expect standardizer to be constant"
        self.block_size = block_size
        if iid0 is not None:
            # FIX: was '_row = iid0', a no-op assignment to a local. The 'row'
            # property caches into self._row, so the precomputed iid ordering
            # passed by __getitem__ was silently discarded and recomputed.
            self._row = iid0
    @property
    def row(self):
        # Lazily build (and cache) train iids + test-only iids.
        if not hasattr(self,'_row'):
            assert np.array_equal(self.train.sid,self.test.sid), "Expect train and test to have same sid in same order"
            train_set = set(tuple(item) for item in self.train.iid)
            test_unique = [item2 for item2 in (tuple(item) for item in self.test.iid) if item2 not in train_set]
            self._row = np.r_[self.train.iid,np.array(test_unique,dtype='str').reshape(-1,2)]
        return self._row
    @property
    def col(self):
        return self.test.iid
    def __getitem__(self, iid_indexer_and_snp_indexer):
        # Accept either (row_indexer, col_indexer) or a single indexer for both.
        if isinstance(iid_indexer_and_snp_indexer,tuple):
            iid0_indexer, iid1_indexer = iid_indexer_and_snp_indexer
        else:
            iid0_indexer = iid_indexer_and_snp_indexer
            iid1_indexer = iid0_indexer
        row_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.row_count, iid0_indexer)
        col_index_or_none = PstReader._make_sparray_from_sparray_or_slice(self.col_count, iid1_indexer)
        if row_index_or_none is None:
            row_index_or_none = list(range(self.row_count))
        assert not isinstance(row_index_or_none,str), "row_index_or_none should not be a string"
        iid = self.row[row_index_or_none]
        if col_index_or_none is None or np.array_equal(col_index_or_none,list(range(self.col_count))):
            test = self.test
        else:
            test = self.test[col_index_or_none]
        # If the requested rows are all training iids, return a train-x-test kernel.
        try:
            train = self.train[self.train.iid_to_index(iid),:]
            is_ok = True
        except:
            is_ok = False
        if is_ok:
            return _SnpTrainTest(train=train,test=test,standardizer=self.standardizer,block_size=self.block_size)
        # If the requested rows are exactly the test iids, return a test-x-test kernel.
        if np.array_equal(test.iid,iid):
            return SnpKernel(test,standardizer=self.standardizer,block_size=self.block_size)
        # Otherwise only a reordering/subsetting of the whole kernel is supported.
        if len(row_index_or_none) == self.row_count and (col_index_or_none is None or len(col_index_or_none) == self.col_count):
            result = _SnpWholeTest(train=self.train,test=test,standardizer=self.standardizer,block_size=self.block_size,iid0=iid)
            return result
        raise Exception("When reading from a _SnpWholeTest, can only ask to reorder iids or to access from train x test or test x test")
    def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
        result = self[row_index_or_none,col_index_or_none]._read(row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok)
        return result
    def __repr__(self):
        s = "_SnpWholeTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
        if self.block_size is not None:
            s += ",block_size={0}".format(self.block_size)
        s += ")"
        return s
    def copyinputs(self, copier):
        copier.input(self.train)
        copier.input(self.test)
        copier.input(self.standardizer)
class _SnpTrainTest(KernelReader):
    """Implicit (train iid x test iid) kernel backed by SNP data.

    Values are the dot products of standardized SNP values, computed lazily
    and (optionally) in blocks of `block_size` SNPs to bound memory use.
    """
    def __init__(self,train,test,standardizer,block_size):
        self.train = train
        self.test = test
        self.standardizer = standardizer
        assert standardizer.is_constant, "Expect standardizer to be constant"
        self.block_size = block_size
        # When train and test share the same iids, reuse the train array object.
        if np.array_equal(train.iid,test.iid):
            self._col = train.iid
        else:
            self._col = test.iid
    @property
    def row(self):
        # Kernel rows are the training iids.
        return self.train.iid
    @property
    def col(self):
        # Kernel columns are the test iids (or the shared iids; see __init__).
        return self._col
    def _read(self, row_index_or_none, col_index_or_none, order, dtype, force_python_only, view_ok):
        assert self.train.sid_count == self.test.sid_count, "real assert"
        # case 1: asking for all of train x test
        # FIX: parentheses added. 'and' binds tighter than 'or', so the original
        # condition parsed as A or (B and C) or D instead of the intended
        # (A or B) and (C or D); a request for a column subset could silently
        # fall into case 1 and return a full-width (wrong-shaped) kernel.
        if ((row_index_or_none is None or np.array_equal(row_index_or_none,np.arange(self.row_count)))
            and (col_index_or_none is None or np.array_equal(col_index_or_none,np.arange(self.col_count)))):
            # Do all-at-once (not in blocks) if no block size is given or the SNP count is small.
            if self.block_size is None or (self.train.sid_count <= self.block_size or self.train.sid_count <= self.train.iid_count+self.test.iid_count):
                train_snps = self.train.read(dtype=dtype).standardize(self.standardizer)
                test_snps = self.test.read(dtype=dtype).standardize(self.standardizer)
                if order == 'F': # numpy's 'dot' always returns 'C' order
                    k_val = test_snps.val.dot(train_snps.val.T).T
                else:
                    k_val = train_snps.val.dot(test_snps.val.T)
                return k_val
            else:
                # Blocked accumulation. Default order 'C': any order is fine for
                # kernels and .dot prefers 'C'.
                if order=='A':
                    order = 'C'
                k_val = np.zeros([self.train.iid_count,self.test.iid_count],dtype=dtype,order=order)
                ct = 0
                ts = time.time()
                for start in range(0, self.train.sid_count, self.block_size):
                    ct += self.block_size
                    train_snps = self.train[:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
                    test_snps = self.test [:,start:start+self.block_size].read(dtype=dtype).standardize(self.standardizer)
                    if order == 'F': # numpy's 'dot' always returns 'C' order
                        k_val += test_snps.val.dot(train_snps.val.T).T
                    else:
                        k_val += train_snps.val.dot(test_snps.val.T)
                    if ct % self.block_size==0:
                        diff = time.time()-ts
                        if diff > 1: logging.info("read %s SNPs in %.2f seconds" % (ct, diff))
                return k_val
        else:
            raise Exception("_SnpTrainTest currently only has code for reading all of train x test")
    def __repr__(self):
        s = "_SnpTrainTest(train={0},test={1},standardizer={2}".format(self.train,self.test,self.standardizer)
        if self.block_size is not None:
            s += ",block_size={0}".format(self.block_size)
        s += ")"
        return s
    def copyinputs(self, copier):
        # Doesn't need run_once
        copier.input(self.train)
        copier.input(self.test)
        copier.input(self.standardizer)
def _snps_fixup(snp_input, iid_if_none=None,count_A1=None):
    """Normalize a SNP input (reader or filename) via PySnpTools' fixup helper."""
    from pysnptools.snpreader import _snps_fixup as _pst_fixup
    return _pst_fixup(snp_input, iid_if_none, count_A1)
def _pheno_fixup(pheno_input, iid_if_none=None, missing='NaN', count_A1=None):
    """Coerce a phenotype input into a reader.

    Tries to interpret *pheno_input* as a PLINK phenotype file/spec; if that
    fails for any reason, falls back to the generic SNP-input fixup.
    """
    try:
        ret = Pheno(pheno_input, iid_if_none, missing=missing)
        ret.iid  # force the file to load so parse errors surface here
        return ret
    # FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the deliberate best-effort fallback.
    except Exception:
        return _snps_fixup(pheno_input, iid_if_none=iid_if_none, count_A1=count_A1)
def _kernel_fixup(input, iid_if_none, standardizer, test=None, test_iid_if_none=None, block_size=None, train_snps=None, count_A1=None):
    """Coerce *input* (filename, SnpReader, KernelReader, or None) into a kernel reader."""
    # A test-only call means the test data plays the role of the main input.
    if input is None and test is not None:
        input, test = test, None
    if isinstance(input, str):
        if input.endswith(".npz"):
            return KernelNpz(input)
        input = Bed(input, count_A1=count_A1)  # no return: processing continues below
    if isinstance(test, str):
        test = Bed(test, count_A1=count_A1)  # no return: processing continues below
    if isinstance(input, SnpReader):
        if test is None:
            return SnpKernel(input, standardizer=standardizer, block_size=block_size)
        return _SnpWholeTest(train=train_snps, test=test, standardizer=standardizer, block_size=block_size)
    if input is None:
        return KernelIdentity(iid=iid_if_none, test=test_iid_if_none)
    return input
class FastLMM(object):
    def __init__(self, GB_goal=None, force_full_rank=False, force_low_rank=False, snp_standardizer=Unit(), covariate_standardizer=Unit(), kernel_standardizer=DiagKtoN()):
        """Store configuration; no computation happens until fit() is called."""
        self.GB_goal = GB_goal                               # memory budget in GB (None = block reads sized like the kernel)
        self.force_full_rank = force_full_rank               # force explicit iid_count x iid_count kernel
        self.force_low_rank = force_low_rank                 # force low-rank iid_count x sid_count kernel
        self.snp_standardizer = snp_standardizer             # applied to SNP data
        self.covariate_standardizer = covariate_standardizer # applied to covariates X
        self.kernel_standardizer = kernel_standardizer       # applied to the kernel(s)
        self.is_fitted = False
def fit(self, X=None, y=None, K0_train=None, K1_train=None, h2raw=None, mixing=None,count_A1=None):
self.is_fitted = True
assert y is not None, "y must be given"
y = _pheno_fixup(y,count_A1=count_A1)
assert y.sid_count == 1, "Expect y to be just one variable"
X = _pheno_fixup(X, iid_if_none=y.iid,count_A1=count_A1)
K0_train = _kernel_fixup(K0_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
K1_train = _kernel_fixup(K1_train, iid_if_none=y.iid, standardizer=self.snp_standardizer,count_A1=count_A1)
K0_train, K1_train, X, y = intersect_apply([K0_train, K1_train, X, y],intersect_before_standardize=True)
from fastlmm.association.single_snp import _set_block_size
K0_train, K1_train, block_size = _set_block_size(K0_train, K1_train, mixing, self.GB_goal, self.force_full_rank, self.force_low_rank)
X = X.read()
# If possible, unit standardize train and test together. If that is not possible, unit standardize only train and later apply
# the same linear transformation to test. Unit standardization is necessary for FastLMM to work correctly.
#!!!later is the calculation of the training data's stats done twice???
X, covar_unit_trained = X.standardize(self.covariate_standardizer,block_size=block_size,return_trained=True)
X = SnpData(iid=X.iid,
sid=self._new_snp_name(X),
val=np.c_[X.val,np.ones((X.iid_count,1))],
name ="covariate_train w/ 1's")
y0 = y.read().val
from fastlmm.association.single_snp import _Mixer
K_train, h2raw, mixer = _Mixer.combine_the_best_way(K0_train,K1_train,X.val,y0,mixing,h2raw,force_full_rank=self.force_full_rank,force_low_rank=self.force_low_rank,kernel_standardizer=self.kernel_standardizer,block_size=block_size)
lmm = LMM()
if mixer.do_g:
assert isinstance(K_train.standardizer,StandardizerIdentity), "Expect Identity standardizer"
G_train = K_train.snpreader
lmm.setG(G0=K_train.snpreader.val)
else:
lmm.setK(K0=K_train.val)
lmm.setX(X.val)
lmm.sety(y0[:,0])
if h2raw is None:
res = lmm.findH2()
else:
res = lmm.nLLeval(h2=h2raw)
full_sigma2 = float(sum((np.dot(X.val,res['beta']).reshape(-1,1)-y0)**2))/y.iid_count
ed
self.K_train_iid = K_train.iid
self.covar_sid = X.sid
self.pheno_sid = y.sid
self.G0_train = K0_train.snpreader if isinstance(K0_train,SnpKernel) else None #!!!later expensive?
self.G1_train = K1_train.snpreader if isinstance(K1_train,SnpKernel) else None #!!!later expensive?
return self
@staticmethod
def _new_snp_name(snpreader):
new_snp = "always1"
while True:
if not new_snp in snpreader.sid:
return np.r_[snpreader.sid,[new_snp]]
new_snp += "_"
def score(self, X=None, y=None, K0_whole_test=None, K1_whole_test=None, iid_if_none=None, return_mse_too=False, return_per_iid=False, count_A1=None):
mean0, covar0 = self.predict(K0_whole_test=K0_whole_test,K1_whole_test=K1_whole_test,X=X,iid_if_none=iid_if_none,count_A1=count_A1)
y = _pheno_fixup(y, iid_if_none=covar0.iid,count_A1=count_A1)
mean, covar, y = intersect_apply([mean0, covar0, y])
mean = mean.read(order='A',view_ok=True).val
covar = covar.read(order='A',view_ok=True).val
y_actual = y.read().val
if not return_per_iid:
var = multivariate_normal(mean=mean.reshape(-1), cov=covar)
nll = -np.log(var.pdf(y_actual.reshape(-1)))
if not return_mse_too:
return nll
else:
mse = ((y_actual-mean)**2).sum()
return nll, mse
else:
if not return_mse_too:
result = SnpData(iid=y.iid,sid=['nLL'],val=np.empty((y.iid_count,1)),name="nLL")
for iid_index in range(y.iid_count):
var = multivariate_normal(mean=mean[iid_index], cov=covar[iid_index,iid_index])
nll = -np.log(var.pdf(y_actual[iid_index]))
result.val[iid_index,0] = nll
return result
else:
raise Exception("need code for mse_too")
def _extract_fixup(kernel):
assert kernel.iid0_count >= kernel.iid1_count, "Expect iid0 to be at least as long as iid1"
def predict(self,X=None,K0_whole_test=None,K1_whole_test=None,iid_if_none=None, count_A1=None):
assert self.is_fitted, "Can only predict after predictor has been fitted"
#assert K0_whole_test is not None, "K0_whole_test must be given"
#!!!later is it too wasteful to keep both G0_train, G1_train, and lmm.G when storing to disk?
#!!!later all _kernel_fixup's should use block_size input
K0_whole_test_b = _kernel_fixup(K0_whole_test, train_snps=self.G0_train, iid_if_none=iid_if_none, standardizer=self.mixer.snp_trained0, test=K0_whole_test, test_iid_if_none=None, block_size=self.block_size,count_A1=count_A1)
K1_whole_test = _kernel_fixup(K1_whole_test, train_snps=self.G1_train, iid_if_none=K0_whole_test_b.iid0, standardizer=self.mixer.snp_trained1, test=K1_whole_test, test_iid_if_none=K0_whole_test_b.iid1, block_size=self.block_size,count_A1=count_A1)
X = _pheno_fixup(X,iid_if_none=K0_whole_test_b.iid1,count_A1=count_A1)
K0_whole_test_c, K1_whole_test, X = intersect_apply([K0_whole_test_b, K1_whole_test, X],intersect_before_standardize=True,is_test=True)
X = X.read().standardize(self.covar_unit_trained)
X = SnpData(iid=X.iid,
sid=self._new_snp_name(X),
val=np.c_[X.read().val,np.ones((X.iid_count,1))])
assert np.array_equal(X.sid,self.covar_sid), "Expect covar sids to be the same in train and test."
train_idx0 = K0_whole_test_c.iid0_to_index(self.K_train_iid)
K0_train_test = K0_whole_test_c[train_idx0,:]
train_idx1 = K1_whole_test.iid0_to_index(self.K_train_iid)
K1_train_test = K1_whole_test[train_idx1,:]
test_idx0 = K0_whole_test_c.iid0_to_index(K0_whole_test_c.iid1)
K0_test_test = K0_whole_test_c[test_idx0,:]
if K0_test_test.iid0 is not K0_test_test.iid1:
raise Exception("real assert")
test_idx1 = K1_whole_test.iid0_to_index(K0_whole_test_c.iid1)
K1_test_test = K1_whole_test[test_idx1,:]
if self.mixer.do_g:
###################################################
# low rank from Rasmussen eq 2.9 + noise term added to covar
###################################################
Gstar = self.mixer.g_mix(K0_train_test,K1_train_test)
varg = self.h2raw * self.sigma2
vare = (1.-self.h2raw) * self.sigma2
Ainv = LA.inv((1./vare) * np.dot(self.G.T,self.G) + (1./varg)*np.eye(self.G.shape[1]))
testAinv = np.dot(Gstar.test.val, Ainv)
pheno_predicted = np.dot(X.val,self.beta) + (1./vare) * np.dot(np.dot(testAinv,self.G.T),self.y-np.dot(self.X,self.beta))
pheno_predicted = pheno_predicted.reshape(-1,1)
covar = np.dot(testAinv,Gstar.test.val.T) + vare * np.eye(Gstar.test.val.shape[0])
else:
lmm = LMM()
lmm.U = self.U
lmm.S = self.S
lmm.G = self.G
lmm.y = self.y
lmm.Uy = self.Uy
lmm.X = self.X
lmm.UX = self.UX
Kstar = self.mixer.k_mix(K0_train_test,K1_train_test) #!!!later do we need/want reads here? how about view_OK?
lmm.setTestData(Xstar=X.val, K0star=Kstar.val.T)
Kstar_star = self.mixer.k_mix(K0_test_test,K1_test_test) #!!!later do we need/want reads here?how about view_OK?
pheno_predicted, covar = lmm.predict_mean_and_variance(beta=self.beta, h2=self.h2raw,sigma2=self.sigma2, Kstar_star=Kstar_star.val)
#pheno_predicted = lmm.predictMean(beta=self.beta, h2=self.h2,scale=self.sigma2).reshape(-1,1)
ret0 = SnpData(iid = X.iid, sid=self.pheno_sid,val=pheno_predicted,pos=np.array([[np.nan,np.nan,np.nan]]),name="lmm Prediction")
from pysnptools.kernelreader import KernelData
ret1 = KernelData(iid=K0_test_test.iid,val=covar)
return ret0, ret1
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    logging.basicConfig(level=logging.INFO)
    import doctest
    doctest.testmod()
| true
| true
|
f70878f0501f62bdd0dcc9249e96933340f8f1f2
| 1,680
|
py
|
Python
|
practice/bst.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
practice/bst.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
practice/bst.py
|
haandol/dojo
|
c29dc54614bdfaf79eb4862ed9fa25974a0f5654
|
[
"MIT"
] | null | null | null |
class Node:
    """A binary-search-tree node: a value plus left/right child links."""

    def __init__(self, val):
        self.val = val
        self.left = None   # subtree holding values <= val
        self.right = None  # subtree holding values > val
        self.height = 1    # NOTE(review): never read by the functions below; looks like an AVL leftover
def insert(node, val):
    """Insert ``val`` into the subtree rooted at ``node``.

    Duplicates go to the left subtree.  Returns the subtree root, which is
    a fresh leaf when ``node`` is empty.
    """
    if node is None:
        return Node(val)
    side = 'left' if val <= node.val else 'right'
    setattr(node, side, insert(getattr(node, side), val))
    return node
def search(node, val):
    """Return True iff ``val`` occurs in the BST rooted at ``node``."""
    current = node
    while current is not None:
        if val == current.val:
            return True
        current = current.left if val < current.val else current.right
    return False
def delete(node, val):
    """Delete one occurrence of ``val`` from the subtree rooted at ``node``.

    Returns the new subtree root (None when the subtree becomes empty or
    when deleting from an empty subtree).
    """
    if node is None:
        return None
    if val < node.val:
        node.left = delete(node.left, val)
        return node
    if node.val < val:
        node.right = delete(node.right, val)
        return node
    # Found the node to remove.
    if node.left is None:
        return node.right
    if node.right is None:
        return node.left
    # Two children: copy in the in-order successor (leftmost node of the
    # right subtree), then delete that successor from the right subtree.
    succ = node.right
    while succ.left is not None:
        succ = succ.left
    node.val = succ.val
    node.right = delete(node.right, succ.val)
    return node
def get_successor(node):
    """Return the leftmost (minimum-valued) node of the subtree at ``node``."""
    return get_successor(node.left) if node.left else node
def inorder(node):
    """Print the tree's values in ascending (in-order) order, one per line."""
    stack = []
    current = node
    while stack or current is not None:
        # Walk to the leftmost unvisited node, stacking ancestors.
        while current is not None:
            stack.append(current)
            current = current.left
        current = stack.pop()
        print(current.val)
        current = current.right
if __name__ == '__main__':
    # Smoke test: build a small tree, then exercise search and delete.
    root = Node(50)
    insert(root, 30)
    insert(root, 20)
    insert(root, 40)
    insert(root, 70)
    insert(root, 60)
    insert(root, 80)
    inorder(root)
    print()

    assert search(root, 40)
    assert not search(root, 45)

    # Deleting an inner node keeps the original root.
    root = delete(root, 30)
    assert root.val == 50
    inorder(root)
    print()

    # Deleting the root promotes its in-order successor (60).
    root = delete(root, 50)
    assert root.val == 60
    inorder(root)
    print()
| 18.26087
| 54
| 0.567262
|
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
def insert(node, val):
if not node:
return Node(val)
if val <= node.val:
node.left = insert(node.left, val)
else:
node.right = insert(node.right, val)
return node
def search(node, val):
if not node:
return False
if val == node.val:
return True
elif val < node.val:
return search(node.left, val)
else:
return search(node.right, val)
def delete(node, val):
if not node:
return
if val < node.val:
node.left = delete(node.left, val)
elif node.val < val:
node.right = delete(node.right, val)
else:
if not node.left:
return node.right
if not node.right:
return node.left
successor = get_successor(node.right)
node.val = successor.val
node.right = delete(node.right, successor.val)
return node
def get_successor(node):
while node.left:
node = node.left
return node
def inorder(node):
if not node:
return
inorder(node.left)
print(node.val)
inorder(node.right)
if __name__ == '__main__':
root = Node(50)
insert(root, 30)
insert(root, 20)
insert(root, 40)
insert(root, 70)
insert(root, 60)
insert(root, 80)
inorder(root)
print()
assert search(root, 40)
assert not search(root, 45)
root = delete(root, 30)
assert root.val == 50
inorder(root)
print()
root = delete(root, 50)
assert root.val == 60
inorder(root)
print()
| true
| true
|
f708790a5ceb4681c6c0ae68240047ad1efa7ac7
| 3,205
|
py
|
Python
|
homeassistant/components/broadlink/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 2
|
2021-01-29T02:52:01.000Z
|
2021-05-15T04:23:18.000Z
|
homeassistant/components/broadlink/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 68
|
2020-07-23T07:13:53.000Z
|
2022-03-31T06:01:48.000Z
|
homeassistant/components/broadlink/sensor.py
|
switschel/core
|
0ecca246bdc3028c30bf8ccbf2b4c7f2a8b3f9aa
|
[
"Apache-2.0"
] | 7
|
2021-03-20T12:34:01.000Z
|
2021-12-02T10:13:52.000Z
|
"""Support for Broadlink sensors."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import CONF_HOST, PERCENTAGE, POWER_WATT, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
from .const import DOMAIN
from .entity import BroadlinkEntity
from .helpers import import_device
_LOGGER = logging.getLogger(__name__)
# Map: sensor key (as reported by the device's update coordinator) ->
#   (name suffix, native unit, device class, state class).
SENSOR_TYPES = {
    "temperature": (
        "Temperature",
        TEMP_CELSIUS,
        DEVICE_CLASS_TEMPERATURE,
        STATE_CLASS_MEASUREMENT,
    ),
    "air_quality": ("Air Quality", None, None, None),
    "humidity": (
        "Humidity",
        PERCENTAGE,
        DEVICE_CLASS_HUMIDITY,
        STATE_CLASS_MEASUREMENT,
    ),
    "light": ("Light", None, DEVICE_CLASS_ILLUMINANCE, None),
    "noise": ("Noise", None, None, None),
    "power": (
        "Current power",
        POWER_WATT,
        DEVICE_CLASS_POWER,
        STATE_CLASS_MEASUREMENT,
    ),
}

# Legacy YAML schema; the YAML platform itself is deprecated (it only
# triggers a config-entry import and logs a warning).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_HOST): cv.string}, extra=vol.ALLOW_EXTRA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Import the YAML-configured device and warn about the deprecation.

    Kept only for backward compatibility; do not use.
    """
    host = config[CONF_HOST]
    import_device(hass, host)
    _LOGGER.warning(
        "The sensor platform is deprecated, please remove it from your configuration"
    )
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Broadlink sensor entities from a config entry."""
    device = hass.data[DOMAIN].devices[config_entry.entry_id]
    coordinator_data = device.update_manager.coordinator.data
    is_rm4 = device.api.type in {"RM4PRO", "RM4MINI"}

    entities = []
    for condition in coordinator_data:
        if condition not in SENSOR_TYPES:
            continue
        # RM4 devices have optional sensors that report 0 when absent;
        # skip creating entities for those.
        if is_rm4 and coordinator_data[condition] == 0:
            continue
        entities.append(BroadlinkSensor(device, condition))
    async_add_entities(entities)
class BroadlinkSensor(BroadlinkEntity, SensorEntity):
    """Representation of a Broadlink sensor."""

    def __init__(self, device, monitored_condition):
        """Initialize the sensor from its SENSOR_TYPES metadata."""
        super().__init__(device)
        name_suffix, unit, device_class, state_class = SENSOR_TYPES[monitored_condition]
        self._monitored_condition = monitored_condition
        self._attr_name = f"{device.name} {name_suffix}"
        self._attr_unique_id = f"{device.unique_id}-{monitored_condition}"
        self._attr_device_class = device_class
        self._attr_state_class = state_class
        self._attr_native_unit_of_measurement = unit
        self._attr_native_value = self._coordinator.data[monitored_condition]

    def _update_state(self, data):
        """Refresh the native value from new coordinator data."""
        self._attr_native_value = data[self._monitored_condition]
| 31.732673
| 86
| 0.699844
|
import logging
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
PLATFORM_SCHEMA,
STATE_CLASS_MEASUREMENT,
SensorEntity,
)
from homeassistant.const import CONF_HOST, PERCENTAGE, POWER_WATT, TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv
from .const import DOMAIN
from .entity import BroadlinkEntity
from .helpers import import_device
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"temperature": (
"Temperature",
TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE,
STATE_CLASS_MEASUREMENT,
),
"air_quality": ("Air Quality", None, None, None),
"humidity": (
"Humidity",
PERCENTAGE,
DEVICE_CLASS_HUMIDITY,
STATE_CLASS_MEASUREMENT,
),
"light": ("Light", None, DEVICE_CLASS_ILLUMINANCE, None),
"noise": ("Noise", None, None, None),
"power": (
"Current power",
POWER_WATT,
DEVICE_CLASS_POWER,
STATE_CLASS_MEASUREMENT,
),
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string}, extra=vol.ALLOW_EXTRA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
import_device(hass, config[CONF_HOST])
_LOGGER.warning(
"The sensor platform is deprecated, please remove it from your configuration"
)
async def async_setup_entry(hass, config_entry, async_add_entities):
device = hass.data[DOMAIN].devices[config_entry.entry_id]
sensor_data = device.update_manager.coordinator.data
sensors = [
BroadlinkSensor(device, monitored_condition)
for monitored_condition in sensor_data
if monitored_condition in SENSOR_TYPES
and (
sensor_data[monitored_condition] != 0
or device.api.type not in {"RM4PRO", "RM4MINI"}
)
]
async_add_entities(sensors)
class BroadlinkSensor(BroadlinkEntity, SensorEntity):
def __init__(self, device, monitored_condition):
super().__init__(device)
self._monitored_condition = monitored_condition
self._attr_device_class = SENSOR_TYPES[monitored_condition][2]
self._attr_name = f"{device.name} {SENSOR_TYPES[monitored_condition][0]}"
self._attr_state_class = SENSOR_TYPES[monitored_condition][3]
self._attr_native_value = self._coordinator.data[monitored_condition]
self._attr_unique_id = f"{device.unique_id}-{monitored_condition}"
self._attr_native_unit_of_measurement = SENSOR_TYPES[monitored_condition][1]
def _update_state(self, data):
self._attr_native_value = data[self._monitored_condition]
| true
| true
|
f7087a5a75b94c165bd14d3b048d7d86623fcbe8
| 2,745
|
py
|
Python
|
blackmamba/script/toggle_comments.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | null | null | null |
blackmamba/script/toggle_comments.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | null | null | null |
blackmamba/script/toggle_comments.py
|
oz90210/blackmamba
|
65c82c8e99028d6fbb57098ce82d0a394df215a0
|
[
"MIT"
] | null | null | null |
#!python3
import re
from blackmamba.system import Pythonista
def _comment_line(line, hash_prefix=''):
stripped = line.strip()
if stripped.startswith('#'):
return line
if not stripped:
return hash_prefix + '# \n'
return hash_prefix + '# ' + line[len(hash_prefix):]
_UNCOMMENT_RE = re.compile('\A(\s*)#( ?)(.*)\Z', re.DOTALL)
def _uncomment_line(line):
if line.find('#') == -1:
return line
match = _UNCOMMENT_RE.search(line)
if match:
result = match.group(1) + match.group(3)
else:
result = line
if not result.strip():
result = '\n'
return result
_HASH_INDEX_RE = re.compile('\A(\s*)')
def _hash_prefix(lines):
prefix = None
for line in lines:
if not line.strip():
continue
match = _HASH_INDEX_RE.search(line)
if not match:
continue
if prefix is None or len(match.group(1)) < len(prefix):
prefix = match.group(1)
if prefix is None:
prefix = ''
return prefix
def _toggle_lines(lines):
    """Toggle comments on ``lines``: uncomment all of them when the first
    line is already a comment, otherwise comment all of them at the
    selection's common indentation.  Returns the transformed lines."""
    if not lines:
        return lines
    if lines[0].strip().startswith('#'):
        return [_uncomment_line(line) for line in lines]
    prefix = _hash_prefix(lines)
    return [_comment_line(line, prefix) for line in lines]
@Pythonista()
def main():
    """Toggle '#' comments on the lines selected in the Pythonista editor."""
    import editor

    selection_range = editor.get_selection()
    if not selection_range:
        # No file opened in the editor
        return

    text = editor.get_text()
    selected_lines_range = editor.get_line_selection()
    selected_lines_text = text[selected_lines_range[0]:selected_lines_range[1]]
    selected_lines = selected_lines_text.splitlines(True)

    last_line_deleted = False
    if len(selected_lines) > 1:
        # Ignore the last line selection if there's just cursor at the beginning of
        # this line and nothing is selected
        last_line = selected_lines[-1]
        if selected_lines_range[1] - len(last_line) == selection_range[1]:
            last_line_deleted = True
            del selected_lines[-1]
            selected_lines_range = (selected_lines_range[0], selected_lines_range[1] - len(last_line) - 1)

    replacement = ''.join(_toggle_lines(selected_lines))

    if last_line_deleted:
        # Drop the trailing newline so the excluded last line stays in place.
        replacement = replacement[:-1]

    editor.replace_text(selected_lines_range[0], selected_lines_range[1], replacement)
    editor.set_selection(selected_lines_range[0], selected_lines_range[0] + len(replacement))


if __name__ == '__main__':
    main()
| 22.5
| 106
| 0.63133
|
import re
from blackmamba.system import Pythonista
def _comment_line(line, hash_prefix=''):
stripped = line.strip()
if stripped.startswith('#'):
return line
if not stripped:
return hash_prefix + '# \n'
return hash_prefix + '# ' + line[len(hash_prefix):]
_UNCOMMENT_RE = re.compile('\A(\s*)#( ?)(.*)\Z', re.DOTALL)
def _uncomment_line(line):
if line.find('#') == -1:
return line
match = _UNCOMMENT_RE.search(line)
if match:
result = match.group(1) + match.group(3)
else:
result = line
if not result.strip():
result = '\n'
return result
_HASH_INDEX_RE = re.compile('\A(\s*)')
def _hash_prefix(lines):
prefix = None
for line in lines:
if not line.strip():
continue
match = _HASH_INDEX_RE.search(line)
if not match:
continue
if prefix is None or len(match.group(1)) < len(prefix):
prefix = match.group(1)
if prefix is None:
prefix = ''
return prefix
def _toggle_lines(lines):
if not lines:
return lines
if lines[0].strip().startswith('#'):
comment = False
hash_prefix = ''
else:
comment = True
hash_prefix = _hash_prefix(lines)
replacement = []
for line in lines:
if comment:
replacement.append(_comment_line(line, hash_prefix))
else:
replacement.append(_uncomment_line(line))
return replacement
@Pythonista()
def main():
import editor
selection_range = editor.get_selection()
if not selection_range:
return
text = editor.get_text()
selected_lines_range = editor.get_line_selection()
selected_lines_text = text[selected_lines_range[0]:selected_lines_range[1]]
selected_lines = selected_lines_text.splitlines(True)
last_line_deleted = False
if len(selected_lines) > 1:
# this line and nothing is selected
last_line = selected_lines[-1]
if selected_lines_range[1] - len(last_line) == selection_range[1]:
last_line_deleted = True
del selected_lines[-1]
selected_lines_range = (selected_lines_range[0], selected_lines_range[1] - len(last_line) - 1)
replacement = ''.join(_toggle_lines(selected_lines))
if last_line_deleted:
replacement = replacement[:-1]
editor.replace_text(selected_lines_range[0], selected_lines_range[1], replacement)
editor.set_selection(selected_lines_range[0], selected_lines_range[0] + len(replacement))
if __name__ == '__main__':
main()
| true
| true
|
f7087a9747ae148c97e1d86f50ba57ad6830aec3
| 33,897
|
py
|
Python
|
tests/test_primary.py
|
kerwindong/uptane
|
113be3c8a1f05a021625e8b73316696063bbae7e
|
[
"MIT"
] | 135
|
2016-11-22T17:54:01.000Z
|
2022-03-20T09:34:16.000Z
|
tests/test_primary.py
|
kerwindong/uptane
|
113be3c8a1f05a021625e8b73316696063bbae7e
|
[
"MIT"
] | 182
|
2016-11-28T16:34:09.000Z
|
2020-11-24T15:05:34.000Z
|
tests/test_primary.py
|
kerwindong/uptane
|
113be3c8a1f05a021625e8b73316696063bbae7e
|
[
"MIT"
] | 52
|
2016-11-23T02:26:57.000Z
|
2022-01-22T14:33:55.000Z
|
"""
<Program Name>
test_primary.py
<Purpose>
Unit testing for uptane/clients/primary.py
<Copyright>
See LICENSE for licensing information.
"""
from __future__ import unicode_literals
import uptane # Import before TUF modules; may change tuf.conf values.
import unittest
import os.path
import time
import copy
import shutil
import hashlib
import iso8601
from six.moves.urllib.error import URLError
import tuf
import tuf.formats
import tuf.conf
import tuf.client.updater # to test one of the fields in the Primary object
import uptane.formats
import uptane.clients.primary as primary
import uptane.common # verify sigs, create client dir structure, convert key
import uptane.encoding.asn1_codec as asn1_codec
from uptane.encoding.asn1_codec import DATATYPE_TIME_ATTESTATION
from uptane.encoding.asn1_codec import DATATYPE_ECU_MANIFEST
from uptane.encoding.asn1_codec import DATATYPE_VEHICLE_MANIFEST
# For temporary convenience:
import demo # for generate_key, import_public_key, import_private_key
import json
# Locations of sample data shipped with uptane and of this suite's fixtures.
SAMPLE_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'samples')
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEST_DIRECTOR_METADATA_DIR = os.path.join(TEST_DATA_DIR, 'director_metadata')
TEST_IMAGE_REPO_METADATA_DIR = os.path.join(
    TEST_DATA_DIR, 'image_repo_metadata')
# Root metadata filenames depend on the configured format ('json' or 'der').
TEST_DIRECTOR_ROOT_FNAME = os.path.join(
    TEST_DIRECTOR_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_IMAGE_REPO_ROOT_FNAME = os.path.join(
    TEST_IMAGE_REPO_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_PINNING_FNAME = os.path.join(TEST_DATA_DIR, 'pinned.json')
# Scratch directory created and destroyed around the test run.
TEMP_CLIENT_DIR = os.path.join(TEST_DATA_DIR, 'temp_test_primary')

# Sample metadata and targets that will be copied to TEMP_CLIENT_DIR to use
# as a local repository for testing.
SAMPLE_METADATA = os.path.join(
    uptane.WORKING_DIR, 'samples', 'metadata_samples_long_expiry',
    'update_to_one_ecu', 'full_metadata_archive')
SAMPLE_TARGETS = os.path.join(uptane.WORKING_DIR, 'demo', 'images')

# Changing some of these values would require producing new signed sample data
# from the Timeserver or a Secondary.
NONCE = 5
VIN = 'democar'
PRIMARY_ECU_SERIAL = '00000'
def destroy_temp_dir():
    """Delete the temporary client directory used by these tests, if present."""
    # Clean up anything that may currently exist in the temp test directory.
    if os.path.exists(TEMP_CLIENT_DIR):
        shutil.rmtree(TEMP_CLIENT_DIR)
class TestPrimary(unittest.TestCase):
"""
"unittest"-style test class for the Primary module in the reference
implementation
Please note that these tests are NOT entirely independent of each other.
Several of them build on the results of previous tests. This is an unusual
pattern but saves code and works at least for now.
"""
# Class variables
ecu_key = None
key_timeserver_pub = None
key_timeserver_pri = None
initial_time = None
# I'll initialize instance in the first test, and use it for later tests so
# as to avoid repeated initialization.
instance = None
@classmethod
def setUpClass(cls):
"""
This is run once for the class, before all tests. Since there is only one
class, this runs once. It prepares some variables and stores them in the
class.
"""
destroy_temp_dir()
# Load the private key for this Primary ECU.
cls.ecu_key = uptane.common.canonical_key_from_pub_and_pri(
demo.import_public_key('primary'),
demo.import_private_key('primary'))
# Load the public timeserver key.
cls.key_timeserver_pub = demo.import_public_key('timeserver')
cls.key_timeserver_pri = demo.import_private_key('timeserver')
# Generate a trusted initial time for the Primary.
cls.initial_time = tuf.formats.unix_timestamp_to_datetime(
int(time.time())).isoformat() + 'Z'
tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(cls.initial_time)
@classmethod
def tearDownClass(cls):
"""
This is run once for the class, after all tests. Since there is only one
class, this runs once.
"""
destroy_temp_dir()
def test_01_init(self):
"""
Note that this doesn't test the root files provided, as those aren't used
at all in the initialization; for that, we'll have to test the update cycle.
"""
# Set up a client directory first.
uptane.common.create_directory_structure_for_client(
TEMP_CLIENT_DIR,
TEST_PINNING_FNAME,
{'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,
'director': TEST_DIRECTOR_ROOT_FNAME})
# Create repository directories that will be accessed locally (using
# file:// URLs) from which to "download" test metadata and targets.
for repository in ["director", "imagerepo"]:
shutil.copytree(
os.path.join(SAMPLE_METADATA, repository),
os.path.join(TEMP_CLIENT_DIR, repository))
# Note that there may be extra targets available here.
shutil.copytree(
SAMPLE_TARGETS, os.path.join(TEMP_CLIENT_DIR, 'imagerepo', 'targets'))
# TODO: Test with invalid pinning file
# TODO: Test with pinning file lacking a Director repo.
# Now try creating a Primary with a series of bad arguments, expecting
# errors.
# TODO: Add test for my_secondaries argument.
# Invalid VIN:
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=5, # INVALID
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid ECU Serial
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=500, # INVALID
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid ECU Key
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key={''}, # INVALID
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid time:
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time='invalid because this is not a time', # INVALID
timeserver_public_key=TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid timeserver key
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.initial_time, # INVALID
my_secondaries=[])
# Invalid format for Director Repository name
with self.assertRaises(tuf.FormatError):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=5, #INVALID
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key = TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Invalid name for Director repository
with self.assertRaises(uptane.Error):
primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name= "invalid", #INVALID
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
timeserver_public_key = TestPrimary.key_timeserver_pub,
my_secondaries=[])
# Try creating a Primary, expecting it to work.
# Initializes a Primary ECU, making a client directory and copying the root
# file from the repositories.
# Save the result for future tests, to save time and code.
TestPrimary.instance = primary.Primary(
full_client_dir=TEMP_CLIENT_DIR,
director_repo_name=demo.DIRECTOR_REPO_NAME,
vin=VIN,
ecu_serial=PRIMARY_ECU_SERIAL,
primary_key=TestPrimary.ecu_key,
time=TestPrimary.initial_time,
timeserver_public_key=TestPrimary.key_timeserver_pub)
# Check the fields initialized in the instance to make sure they're correct.
self.assertEqual([], TestPrimary.instance.nonces_to_send)
self.assertEqual([], TestPrimary.instance.nonces_sent)
self.assertEqual(VIN, TestPrimary.instance.vin)
self.assertEqual(PRIMARY_ECU_SERIAL, TestPrimary.instance.ecu_serial)
self.assertEqual(TestPrimary.ecu_key, TestPrimary.instance.primary_key)
self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)
self.assertEqual(
TestPrimary.instance.full_client_dir, TEMP_CLIENT_DIR)
self.assertIsInstance(
TestPrimary.instance.updater, tuf.client.updater.Updater)
tuf.formats.ANYKEY_SCHEMA.check_match(
TestPrimary.instance.timeserver_public_key)
self.assertEqual([], TestPrimary.instance.my_secondaries)
# Now, fix the updater's pinned metadata to point it to the appropriate
# local directory, since the pinned metadata we fed in was actually for the
# live demo, connecting to localhost:30401. We instead want to use a
# local directory via file://.
# TODO: Determine if this code should be adjusted to use os.path.join(),
# or if that's not appropriate for file:// links.
image_repo_mirror = ['file://' + TEMP_CLIENT_DIR + '/imagerepo']
director_mirror = ['file://' + TEMP_CLIENT_DIR + '/director']
repository_urls = TestPrimary.instance.updater.pinned_metadata['repositories']
repository_urls['imagerepo']['mirrors'] = image_repo_mirror
repository_urls['director']['mirrors'] = director_mirror
# Also fix the copied pinned metadata in the individual repo updaters
# in the updater.
TestPrimary.instance.updater.repositories['imagerepo'].mirrors = image_repo_mirror
TestPrimary.instance.updater.repositories['director'].mirrors = director_mirror
def test_05_register_new_secondary(self):
self.assertEqual([], TestPrimary.instance.my_secondaries)
TestPrimary.instance.register_new_secondary('1352')
self.assertIn('1352', TestPrimary.instance.my_secondaries)
  def test_10_register_ecu_manifest(self):
    """
    Exercise Primary.register_ecu_manifest: argument validation, correct
    registrations, and the handling of several flawed sample ECU Manifests.
    """
    # Throughout this function, I'll use a different nonces in each call to
    # register_ecu_manifest, and check that the ones in calls expected to
    # succeed have been noted and that the ones in calls expected to fail have
    # not been noted.

    # Starting with an empty ecu manifest dictionary.
    self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)

    # Make sure we're starting with no nonces sent or to send.
    self.assertEqual([], TestPrimary.instance.nonces_to_send)
    self.assertEqual([], TestPrimary.instance.nonces_sent)

    # Load the manifests we'll use in these tests.
    # Note that the .json and .der manifest samples aren't identical; they're
    # signed over different data, so to get the JSON version of the DER
    # manifests, we'll convert them.
    # We'll always need the JSON encodings for testing, and we'll load the
    # ASN.1/DER manifests only if we're in DER mode.
    # 1: Correctly signed ECU manifest from ECU TCUdemocar (good sample)
    # 2: Correctly signed ECU manifest from ECU unknown_ecu
    # 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
    #    (demo's Image Repo timestamp key in particular, instead of demo's
    #    Secondary key)
    # 4: Correctly signed ECU manifest from TCUdemocar w/ attack report
    if tuf.conf.METADATA_FORMAT == 'json':
      manifest1 = manifest1_json = json.load(open(os.path.join(SAMPLE_DATA_DIR,
          'sample_ecu_manifest_TCUdemocar.json')))
      manifest2 = manifest2_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em2_unknown_ecu_manifest.json')))
      manifest3 = manifest3_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em3_ecu_manifest_signed_with_wrong_key.json')))
      manifest4 = manifest4_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em4_attack_detected_in_ecu_manifest.json')))

    else:
      assert tuf.conf.METADATA_FORMAT == 'der', 'Test code is flawed.'
      manifest1 = open(os.path.join(SAMPLE_DATA_DIR,
          'sample_ecu_manifest_TCUdemocar.der'), 'rb').read()
      manifest1_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest1, DATATYPE_ECU_MANIFEST)
      manifest2 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em2_unknown_ecu_manifest.der'), 'rb').read()
      manifest2_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest2, DATATYPE_ECU_MANIFEST)
      manifest3 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em3_ecu_manifest_signed_with_wrong_key.der'), 'rb').read()
      manifest3_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest3, DATATYPE_ECU_MANIFEST)
      manifest4 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em4_attack_detected_in_ecu_manifest.der'), 'rb').read()
      manifest4_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest4, DATATYPE_ECU_MANIFEST)

    # Register two Secondaries with the Primary.
    TestPrimary.instance.register_new_secondary('TCUdemocar')
    TestPrimary.instance.register_new_secondary('ecu11111')

    # Start with a sequence of tests with bad arguments but an otherwise
    # correct ECU Manifest, manifest1.

    # Try using a VIN that is not the Primary's VIN (ECU Manifest apparently
    # from another car!)
    with self.assertRaises(uptane.UnknownVehicle):
      TestPrimary.instance.register_ecu_manifest(
          vin='13105941', # unexpected VIN
          ecu_serial='TCUdemocar', nonce=1,
          signed_ecu_manifest=manifest1)

    # Try using the wrong ECU Serial - one that is registered, but which does
    # not match the ECU Serial listed in the ECU Manifest itself.
    with self.assertRaises(uptane.Spoofing):
      TestPrimary.instance.register_ecu_manifest(
          vin=VIN,
          ecu_serial='ecu11111', # not the same ECU Serial in the manifest
          nonce=2, signed_ecu_manifest=manifest1)

    # Try using an ECU Serial that the Primary is not aware of.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          vin=VIN, # the Primary's own VIN -- valid this time
          ecu_serial='an unknown secondary ecu serial', # unexpected ECU Serial
          nonce=3,
          signed_ecu_manifest=manifest1)

    # Register the ECU Manifest correctly this time.
    TestPrimary.instance.register_ecu_manifest(
        vin=VIN, ecu_serial='TCUdemocar', nonce=10,
        signed_ecu_manifest=manifest1)

    # Make sure the provided manifest is now in the Primary's ecu manifests
    # dictionary. Note that the Primary holds manifests as JSON-compatible
    # Python dictionaries regardless of the format it receives them in.
    self.assertIn('TCUdemocar', TestPrimary.instance.ecu_manifests)
    self.assertIn(
        manifest1_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Make sure the nonce provided was noted in the right place.
    self.assertIn(10, TestPrimary.instance.nonces_to_send)
    self.assertEqual([], TestPrimary.instance.nonces_sent)

    # Though this is not required functionality, test register_ecu_manifest
    # with JSON manifests as well, even if we're running in DER mode.
    # And make sure force_pydict=True doesn't break if we're already in JSON
    # mode, either.
    TestPrimary.instance.register_ecu_manifest(
        VIN, 'TCUdemocar', nonce=11, signed_ecu_manifest=manifest1_json,
        force_pydict=True)

    # The next tests use ECU Manifests that contain problematic values.
    # (We're now testing things beyond just the arguments provided.
    # If we're running in DER mode, we'll try both DER and JSON manifests.
    # If we're running in JSON mode, we'll only try JSON manifests
    # (though in JSON mode, we'll run twice, once with force_pydict on
    # to make sure that run doesn't break despite the redundant argument).
    # The list again is:
    # 2: Correctly signed ECU manifest from ECU unknown_ecu
    # 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
    # 4: Correctly signed ECU manifest from TCUdemocar w/ attack report

    # Case 2: We won't save the ECU Manifest from an unknown ECU Serial.
    self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
    self.assertNotIn(
        manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          'democar', 'unknown_ecu', nonce=4, signed_ecu_manifest=manifest2)
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          'democar', 'unknown_ecu', nonce=5,
          signed_ecu_manifest=manifest2_json, force_pydict=True)
    self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
    self.assertNotIn( # Make sure it's not in the wrong list of ECU Manifests
        manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Case 3: ECU Manifest signed with the wrong key: we save it anyway and
    #         send it on to the Director like any other; Primaries don't check
    #         the signatures on ECU Manifests: they can't be expected to know
    #         the right public or symmetric keys.
    self.assertNotIn(
        manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=12, signed_ecu_manifest=manifest3)
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=13, signed_ecu_manifest=manifest3_json,
        force_pydict=True)
    self.assertIn(
        manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Case 4: ECU Manifest containing an attack report. Make sure it doesn't
    #         fail to be registered.
    self.assertNotIn(
        manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=14, signed_ecu_manifest=manifest4)
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=15, signed_ecu_manifest=manifest4_json,
        force_pydict=True)
    self.assertIn(
        manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Confirm that we've succeeded in registering the right nonces.
    for this_nonce in [1, 2, 3, 4, 5]:
      self.assertNotIn(this_nonce, TestPrimary.instance.nonces_to_send)
    for this_nonce in [10, 11, 12, 13, 14, 15]:
      self.assertIn(this_nonce, TestPrimary.instance.nonces_to_send)
def test_15_get_nonces_to_send_and_rotate(self):
# The Primary's list of nonces to send in the next request to the
# timeserver for a time attestation:
nonces_to_have_sent = TestPrimary.instance.nonces_to_send
# Double-check that one of the expected nonces from the previous test
# function is in the list of the Primary's nonces to send.
self.assertIn(10, nonces_to_have_sent)
# Cycle nonces: Request the list of nonces to send to the timeserver,
# triggering the rotation of nonces. Make sure the nonce list provided
# is as expected from the previous test, and then that the rotation has
# actually occurred (nonces_to_send emptied, contents moved to nonces_sent).
self.assertEqual(
sorted(nonces_to_have_sent),
sorted(TestPrimary.instance.get_nonces_to_send_and_rotate()))
self.assertEqual(nonces_to_have_sent, TestPrimary.instance.nonces_sent)
self.assertEqual([], TestPrimary.instance.nonces_to_send)
  def test_20_update_time(self):
    """
    Exercise Primary.update_time with a valid timeserver attestation, an
    attestation with a bad signature, and one listing the wrong nonce.
    """
    # First, confirm that we've never verified a timeserver attestation, and/or
    # that that results in get_last_timeserver_attestation returning None.
    self.assertIsNone(TestPrimary.instance.get_last_timeserver_attestation())

    # Try a good time attestation first, signed by an expected timeserver key,
    # with an expected nonce (previously "received" from a Secondary)
    original_time_attestation = time_attestation = {
        'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
        'signatures': [{
            'method': 'ed25519',
            'sig': 'aabffcebaa57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
            'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}

    if tuf.conf.METADATA_FORMAT == 'der':
      # Convert this time attestation to the expected ASN.1/DER format.
      time_attestation = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION,
          private_key=TestPrimary.key_timeserver_pri, resign=True)

    # Check expected base conditions before updating time:
    # The only timeserver times registered should be one added during
    # initialization. Because the clock override is a module variable in TUF,
    # its value (whether None or already set) depends on whether or not other
    # tests resulting in time attestation verification have occurred (e.g.
    # those for the Primary).
    self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
    initial_clock_override = tuf.conf.CLOCK_OVERRIDE

    # In the previous functions, we added a variety of nonces in the nonce
    # rotation. Verification of a time attestation confirms that the time
    # attestation contains the nonces we've most recently sent to the
    # timeserver. The sample attestation we have here does not have the nonces
    # we've indicated to the Primary that we've sent, so this verification
    # should fail:
    with self.assertRaises(uptane.BadTimeAttestation):
      TestPrimary.instance.update_time(time_attestation)

    # Check results. The bad attestation should change none of these.
    self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
    self.assertEqual(initial_clock_override, tuf.conf.CLOCK_OVERRIDE)

    # Now we adjust the Primary's notion of what nonces we sent to the
    # timeserver most recently, and then try the verification again, expecting
    # it to succeed.
    TestPrimary.instance.get_nonces_to_send_and_rotate()
    TestPrimary.instance.nonces_to_send = [NONCE]
    TestPrimary.instance.get_nonces_to_send_and_rotate()
    TestPrimary.instance.update_time(time_attestation)

    # Check results. Among other things, since the verification succeeded,
    # get_last_timeserver_attestation should return the attestation we just
    # provided.
    self.assertEqual(
        time_attestation,
        TestPrimary.instance.get_last_timeserver_attestation())
    self.assertEqual(2, len(TestPrimary.instance.all_valid_timeserver_times))
    self.assertEqual(
        int(tuf.formats.datetime_to_unix_timestamp(iso8601.parse_date(
        '2016-11-02T21:06:05Z'))), tuf.conf.CLOCK_OVERRIDE)

    # Prepare to try again with a bad signature.
    # This test we will conduct differently depending on TUF's current format:
    if tuf.conf.METADATA_FORMAT == 'der':
      # Fail to re-sign the DER, so that the signature is over JSON instead,
      # which results in a bad signature.
      time_attestation__badsig = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION, resign=False)

    else: # 'json' format
      # Rewrite the first 9 digits of the signature ('sig') to something
      # invalid.
      time_attestation__badsig = {
          'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
          'signatures': [{
              'method': 'ed25519',
              'sig': '987654321a57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
              'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}

    # Now actually perform the bad signature test.
    with self.assertRaises(tuf.BadSignatureError):
      TestPrimary.instance.update_time(time_attestation__badsig)

    # Next, try an attestation whose nonce the Primary never sent. Sanity
    # check first that the "wrong" nonce really differs from the good one.
    assert 500 not in original_time_attestation['signed']['nonces'], \
        'Programming error: bad and good test nonces are equal.'

    time_attestation__wrongnonce = {
        'signed': {'nonces': [500], 'time': '2016-11-02T21:15:00Z'},
        'signatures': [{
            'method': 'ed25519',
            'sig': '4d01df35ca829fd7ead1408c250950c444db8ac51fa929a7f0288578fbf81016f0e81ed35789689481aee6b7af28ab311306397ef38572732854fb6cf2072604',
            'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}

    if tuf.conf.METADATA_FORMAT == 'der':
      # Convert this time attestation to the expected ASN.1/DER format.
      time_attestation__wrongnonce = asn1_codec.convert_signed_metadata_to_der(
          time_attestation__wrongnonce, DATATYPE_TIME_ATTESTATION,
          private_key=TestPrimary.key_timeserver_pri, resign=True)

    with self.assertRaises(uptane.BadTimeAttestation):
      TestPrimary.instance.update_time(
          time_attestation__wrongnonce)

    # TODO: Consider other tests here.
def test_25_generate_signed_vehicle_manifest(self):
vehicle_manifest = TestPrimary.instance.generate_signed_vehicle_manifest()
# If the vehicle manifest is in DER format, check its format and then
# convert back to JSON so that we can inspect it further.
if tuf.conf.METADATA_FORMAT == 'der':
uptane.formats.DER_DATA_SCHEMA.check_match(vehicle_manifest)
vehicle_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
vehicle_manifest, DATATYPE_VEHICLE_MANIFEST)
# Now it's not in DER format, whether or not it started that way.
# Check its format and inspect it.
uptane.formats.SIGNABLE_VEHICLE_VERSION_MANIFEST_SCHEMA.check_match(
vehicle_manifest)
# Test contents of vehicle manifest.
# Make sure there is exactly one signature.
self.assertEqual(1, len(vehicle_manifest['signatures']))
# Make sure that the Secondary's ECU Manifest (from the register ECU
# ECU Manifest test above) is listed in the Vehicle Manifest.
self.assertIn(
'TCUdemocar', vehicle_manifest['signed']['ecu_version_manifests'])
# TODO: More testing of the contents of the vehicle manifest.
# Check the signature on the vehicle manifest.
self.assertTrue(uptane.common.verify_signature_over_metadata(
TestPrimary.ecu_key,
vehicle_manifest['signatures'][0], # TODO: Deal with 1-sig assumption?
vehicle_manifest['signed'],
DATATYPE_VEHICLE_MANIFEST))
def test_30_refresh_toplevel_metadata(self):
# Check that in the fresh temp directory for this test Primary client,
# there aren't any metadata files except root.json yet.
self.assertEqual(
['root.der', 'root.json'],
sorted(os.listdir(TEST_DIRECTOR_METADATA_DIR)))
self.assertEqual(
['root.der', 'root.json'],
sorted(os.listdir(TEST_IMAGE_REPO_METADATA_DIR)))
try:
TestPrimary.instance.refresh_toplevel_metadata()
except (URLError, tuf.NoWorkingMirrorError) as e:
pass
else:
# Check the resulting top-level metadata files in the client directory.
# Expect root, snapshot, targets, and timestamp for both director and
# image repo.
for repo in ['director', 'imagerepo']:
self.assertEqual(
['root.' + tuf.conf.METADATA_FORMAT,
'snapshot.' + tuf.conf.METADATA_FORMAT,
'targets.' + tuf.conf.METADATA_FORMAT,
'timestamp.' + tuf.conf.METADATA_FORMAT],
sorted(os.listdir(os.path.join(TEMP_CLIENT_DIR, 'metadata', repo,
'current'))))
  def test_35_get_target_list_from_director(self):
    """Placeholder: test fetching the Director's target assignments."""
    # TODO: Write this in a way that draws on saved sample Director metadata.
    #       Don't expect an actual server to be running.
    #       This will probably entail modification to the pinned.json file to
    #       point it to a local directory instead of a remote server.
    pass
  def test_40_get_validated_target_info(self):
    """Placeholder: test full target validation across both repositories."""
    # TODO: Write this in a way that draws on saved sample metadata from the
    #       Director and Image Repo. Don't expect an actual server to be
    #       running. This will probably entail modification to the pinned.json
    #       file to point it to a local directory instead of a remote server.
    pass
  def test_55_update_exists_for_ecu(self):
    """
    Run a full Primary update cycle against local file:// repositories and
    check update_exists_for_ecu for several classes of ECU Serial.
    """
    # The various ECU Serials of Secondary ECUs we'll test:
    # 1: Registered with the Primary but NOT listed in Director metadata
    #    (i.e. will not have any updates assigned)
    known_secondary_with_no_updates = "secondary_without_updates"
    # 2: NOT registered w/ the Primary and NOT listed in Director metadata
    unknown_secondary = "unknown_ecu_serial"
    # 3: Registered with the Primary and listed in Director metadata
    normal_secondary = "TCUdemocar"
    # 4: Invalid name for a Secondary (wrong format)
    invalid_name_secondary = 5

    # Register the Secondaries with the Primary and make sure registration
    # succeeded.
    TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)
    TestPrimary.instance.register_new_secondary(normal_secondary)
    self.assertIn(
        known_secondary_with_no_updates, TestPrimary.instance.my_secondaries)
    self.assertIn(normal_secondary, TestPrimary.instance.my_secondaries)

    # Try registering a Secondary that has already been registered with the
    # Primary. Expect success??? # TODO: Clarify.
    TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)

    # Try registering an invalid name.
    with self.assertRaises(tuf.FormatError):
      TestPrimary.instance.register_new_secondary(invalid_name_secondary)

    # Confirm that unknown_secondary has not been registered.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance._check_ecu_serial(unknown_secondary)

    # Run a primary update cycle so that the Primary fetches and validates
    # metadata and targets from the "repositories" (in this test, the
    # repositories sit in a local folder accessed by file://).
    # This also processes the data acquired to populate fields accessed by
    # Secondaries below.
    TestPrimary.instance.primary_update_cycle()

    # Try to find out if updates exist for an unknown ECU.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.update_exists_for_ecu(unknown_secondary)

    # Find out if updates exist for a known ECU that has no updates assigned to
    # it by the Director (expect empty list).
    self.assertFalse(TestPrimary.instance.update_exists_for_ecu(
        known_secondary_with_no_updates))

    # Confirm that updates exist for a known ECU to which we've assigned
    # updates (list is not empty).
    self.assertTrue(TestPrimary.instance.update_exists_for_ecu(
        normal_secondary))

    # Run the update cycle again to test file/archive replacement when an
    # update cycle has already occurred.
    TestPrimary.instance.primary_update_cycle()
def test_60_get_image_fname_for_ecu(self):
# TODO: More thorough tests.
with self.assertRaises(uptane.UnknownECU):
TestPrimary.instance.get_image_fname_for_ecu('unknown')
# Expect an image.
image_fname = TestPrimary.instance.get_image_fname_for_ecu('TCUdemocar')
self.assertTrue(image_fname)
tuf.formats.RELPATH_SCHEMA.check_match(image_fname)
# Fetch the image filename for an ECU that has had no update assigned it,
# expecting None.
self.assertIsNone(TestPrimary.instance.get_image_fname_for_ecu(
'secondary_without_updates'))
def test_61_get_full_metadata_archive_fname(self):
# TODO: More thorough tests.
archive_fname = TestPrimary.instance.get_full_metadata_archive_fname()
self.assertTrue(archive_fname)
tuf.formats.RELPATH_SCHEMA.check_match(archive_fname)
def test_62_get_partial_metadata_fname(self):
# TODO: More thorough tests.
fname = TestPrimary.instance.get_partial_metadata_fname()
self.assertTrue(fname)
tuf.formats.RELPATH_SCHEMA.check_match(fname)
  def test_65_get_metadata_for_ecu(self):
    """Placeholder: test distribution of metadata to Secondary ECUs."""
    # TODO: Implement tests for get_metadata_for_ecu.
    pass
def test_70_get_last_timeserver_attestation(self):
# get_last_timeserver_attestation is tested in more detail in a previous
# test, test_20_update_time.
attestation = TestPrimary.instance.get_last_timeserver_attestation()
# We expect to have verified an attestation in previous tests.
self.assertIsNotNone(attestation)
if tuf.conf.METADATA_FORMAT == 'der':
uptane.formats.DER_DATA_SCHEMA.check_match(attestation)
else:
assert tuf.conf.METADATA_FORMAT == 'json', 'Coding error in test.'
uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
attestation)
# Run the unit tests when this module is executed directly.
if __name__ == '__main__':
  unittest.main()
| 36.764642
| 150
| 0.728354
|
from __future__ import unicode_literals
import uptane
import unittest
import os.path
import time
import copy
import shutil
import hashlib
import iso8601
from six.moves.urllib.error import URLError
import tuf
import tuf.formats
import tuf.conf
import tuf.client.updater
import uptane.formats
import uptane.clients.primary as primary
import uptane.common
import uptane.encoding.asn1_codec as asn1_codec
from uptane.encoding.asn1_codec import DATATYPE_TIME_ATTESTATION
from uptane.encoding.asn1_codec import DATATYPE_ECU_MANIFEST
from uptane.encoding.asn1_codec import DATATYPE_VEHICLE_MANIFEST
import demo
import json
# Directory containing sample ECU Manifests and other sample data.
SAMPLE_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'samples')
# Test fixtures: flawed manifests, pinning file, starting metadata, etc.
TEST_DATA_DIR = os.path.join(uptane.WORKING_DIR, 'tests', 'test_data')
TEST_DIRECTOR_METADATA_DIR = os.path.join(TEST_DATA_DIR, 'director_metadata')
TEST_IMAGE_REPO_METADATA_DIR = os.path.join(
    TEST_DATA_DIR, 'image_repo_metadata')
# Initial root metadata files trusted by the test client, in whichever
# format (json or der) TUF is currently configured to use.
TEST_DIRECTOR_ROOT_FNAME = os.path.join(
    TEST_DIRECTOR_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_IMAGE_REPO_ROOT_FNAME = os.path.join(
    TEST_IMAGE_REPO_METADATA_DIR, 'root.' + tuf.conf.METADATA_FORMAT)
TEST_PINNING_FNAME = os.path.join(TEST_DATA_DIR, 'pinned.json')
# Scratch client directory created for these tests and destroyed afterwards.
TEMP_CLIENT_DIR = os.path.join(TEST_DATA_DIR, 'temp_test_primary')
# Sample repository metadata and target files, served locally via file://.
SAMPLE_METADATA = os.path.join(
    uptane.WORKING_DIR, 'samples', 'metadata_samples_long_expiry',
    'update_to_one_ecu', 'full_metadata_archive')
SAMPLE_TARGETS = os.path.join(uptane.WORKING_DIR, 'demo', 'images')

# Test constants: the nonce listed in the sample time attestation, plus the
# vehicle VIN and Primary ECU Serial matching the sample metadata.
NONCE = 5
VIN = 'democar'
PRIMARY_ECU_SERIAL = '00000'
def destroy_temp_dir():
  """Remove the temporary test client directory, if it exists."""
  if not os.path.exists(TEMP_CLIENT_DIR):
    return
  shutil.rmtree(TEMP_CLIENT_DIR)
class TestPrimary(unittest.TestCase):
ecu_key = None
key_timeserver_pub = None
key_timeserver_pri = None
initial_time = None
# as to avoid repeated initialization.
instance = None
  @classmethod
  def setUpClass(cls):
    """Load keys and set a trusted initial time, once for the whole class."""
    # Start from a clean slate: remove any leftover temp client directory.
    destroy_temp_dir()

    # Load the private key for this Primary ECU.
    cls.ecu_key = uptane.common.canonical_key_from_pub_and_pri(
        demo.import_public_key('primary'),
        demo.import_private_key('primary'))

    # Load the public timeserver key.
    cls.key_timeserver_pub = demo.import_public_key('timeserver')
    cls.key_timeserver_pri = demo.import_private_key('timeserver')

    # Generate a trusted initial time for the Primary.
    cls.initial_time = tuf.formats.unix_timestamp_to_datetime(
        int(time.time())).isoformat() + 'Z'
    tuf.formats.ISO8601_DATETIME_SCHEMA.check_match(cls.initial_time)
  @classmethod
  def tearDownClass(cls):
    """Remove the temporary client directory created during the tests."""
    destroy_temp_dir()
  def test_01_init(self):
    """
    Construct a Primary: first with a series of invalid arguments (each
    expected to raise), then correctly, saving the instance for later tests.
    """
    # Set up a client directory first.
    uptane.common.create_directory_structure_for_client(
        TEMP_CLIENT_DIR,
        TEST_PINNING_FNAME,
        {'imagerepo': TEST_IMAGE_REPO_ROOT_FNAME,
        'director': TEST_DIRECTOR_ROOT_FNAME})

    # Create repository directories that will be accessed locally (using
    # file:// URLs) from which to "download" test metadata and targets.
    for repository in ["director", "imagerepo"]:
      shutil.copytree(
          os.path.join(SAMPLE_METADATA, repository),
          os.path.join(TEMP_CLIENT_DIR, repository))
    # Note that there may be extra targets available here.
    shutil.copytree(
        SAMPLE_TARGETS, os.path.join(TEMP_CLIENT_DIR, 'imagerepo', 'targets'))

    # TODO: Test with invalid pinning file
    # TODO: Test with pinning file lacking a Director repo.

    # Now try creating a Primary with a series of bad arguments, expecting
    # errors.
    # TODO: Add test for my_secondaries argument.

    # Invalid VIN:
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=5, # INVALID
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key=TestPrimary.ecu_key,
          time=TestPrimary.initial_time,
          timeserver_public_key=TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Invalid ECU Serial
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=VIN,
          ecu_serial=500, # INVALID
          primary_key=TestPrimary.ecu_key,
          time=TestPrimary.initial_time,
          timeserver_public_key=TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Invalid ECU Key
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=VIN,
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key={''}, # INVALID
          time=TestPrimary.initial_time,
          timeserver_public_key=TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Invalid time:
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=VIN,
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key=TestPrimary.ecu_key,
          time='invalid because this is not a time', # INVALID
          timeserver_public_key=TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Invalid timeserver key
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=demo.DIRECTOR_REPO_NAME,
          vin=VIN,
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
          timeserver_public_key=TestPrimary.initial_time, # INVALID
          my_secondaries=[])

    # Invalid format for Director Repository name
    with self.assertRaises(tuf.FormatError):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name=5, #INVALID
          vin=VIN,
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
          timeserver_public_key = TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Invalid name for Director repository
    with self.assertRaises(uptane.Error):
      primary.Primary(
          full_client_dir=TEMP_CLIENT_DIR,
          director_repo_name= "invalid", #INVALID
          vin=VIN,
          ecu_serial=PRIMARY_ECU_SERIAL,
          primary_key=TestPrimary.ecu_key, time=TestPrimary.initial_time,
          timeserver_public_key = TestPrimary.key_timeserver_pub,
          my_secondaries=[])

    # Try creating a Primary, expecting it to work.
    # Initializes a Primary ECU, making a client directory and copying the root
    # file from the repositories.
    # Save the result for future tests, to save time and code.
    TestPrimary.instance = primary.Primary(
        full_client_dir=TEMP_CLIENT_DIR,
        director_repo_name=demo.DIRECTOR_REPO_NAME,
        vin=VIN,
        ecu_serial=PRIMARY_ECU_SERIAL,
        primary_key=TestPrimary.ecu_key,
        time=TestPrimary.initial_time,
        timeserver_public_key=TestPrimary.key_timeserver_pub)

    # Check the fields initialized in the instance to make sure they're correct.
    self.assertEqual([], TestPrimary.instance.nonces_to_send)
    self.assertEqual([], TestPrimary.instance.nonces_sent)
    self.assertEqual(VIN, TestPrimary.instance.vin)
    self.assertEqual(PRIMARY_ECU_SERIAL, TestPrimary.instance.ecu_serial)
    self.assertEqual(TestPrimary.ecu_key, TestPrimary.instance.primary_key)
    self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)
    self.assertEqual(
        TestPrimary.instance.full_client_dir, TEMP_CLIENT_DIR)
    self.assertIsInstance(
        TestPrimary.instance.updater, tuf.client.updater.Updater)
    tuf.formats.ANYKEY_SCHEMA.check_match(
        TestPrimary.instance.timeserver_public_key)
    self.assertEqual([], TestPrimary.instance.my_secondaries)

    # Now, fix the updater's pinned metadata to point it to the appropriate
    # local directory, since the pinned metadata we fed in was actually for the
    # live demo, connecting to localhost:30401. We instead want to use a
    # local directory via file://.
    # TODO: Determine if this code should be adjusted to use os.path.join(),
    #       or if that's not appropriate for file:// links.
    image_repo_mirror = ['file://' + TEMP_CLIENT_DIR + '/imagerepo']
    director_mirror = ['file://' + TEMP_CLIENT_DIR + '/director']
    repository_urls = TestPrimary.instance.updater.pinned_metadata['repositories']
    repository_urls['imagerepo']['mirrors'] = image_repo_mirror
    repository_urls['director']['mirrors'] = director_mirror

    # Also fix the copied pinned metadata in the individual repo updaters
    # in the updater.
    TestPrimary.instance.updater.repositories['imagerepo'].mirrors = image_repo_mirror
    TestPrimary.instance.updater.repositories['director'].mirrors = director_mirror
def test_05_register_new_secondary(self):
self.assertEqual([], TestPrimary.instance.my_secondaries)
TestPrimary.instance.register_new_secondary('1352')
self.assertIn('1352', TestPrimary.instance.my_secondaries)
  def test_10_register_ecu_manifest(self):
    """
    Exercise Primary.register_ecu_manifest: argument validation, correct
    registrations, and the handling of several flawed sample ECU Manifests.
    A distinct nonce is used per call; nonces from successful calls must be
    queued for the timeserver, nonces from failed calls must not be.
    """
    # register_ecu_manifest, and check that the ones in calls expected to
    # succeed have been noted and that the ones in calls expected to fail have
    # not been noted.
    # Starting with an empty ecu manifest dictionary.
    self.assertEqual(dict(), TestPrimary.instance.ecu_manifests)
    # Make sure we're starting with no nonces sent or to send.
    self.assertEqual([], TestPrimary.instance.nonces_to_send)
    self.assertEqual([], TestPrimary.instance.nonces_sent)

    # Load the sample manifests. Note that the .json and .der manifest
    # samples aren't identical; they're
    # signed over different data, so to get the JSON version of the DER
    # manifests, we'll convert them.
    # 1: Correctly signed ECU manifest from ECU TCUdemocar (good sample)
    # 2: Correctly signed ECU manifest from ECU unknown_ecu
    # 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
    #    (demo's Image Repo timestamp key in particular, instead of demo's
    #    Secondary key)
    # 4: Correctly signed ECU manifest from TCUdemocar w/ attack report
    if tuf.conf.METADATA_FORMAT == 'json':
      manifest1 = manifest1_json = json.load(open(os.path.join(SAMPLE_DATA_DIR,
          'sample_ecu_manifest_TCUdemocar.json')))
      manifest2 = manifest2_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em2_unknown_ecu_manifest.json')))
      manifest3 = manifest3_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em3_ecu_manifest_signed_with_wrong_key.json')))
      manifest4 = manifest4_json = json.load(open(os.path.join(TEST_DATA_DIR,
          'flawed_manifests', 'em4_attack_detected_in_ecu_manifest.json')))
    else:
      # In DER mode, load the DER manifests and also derive JSON-compatible
      # versions for the content inspections below.
      assert tuf.conf.METADATA_FORMAT == 'der', 'Test code is flawed.'
      manifest1 = open(os.path.join(SAMPLE_DATA_DIR,
          'sample_ecu_manifest_TCUdemocar.der'), 'rb').read()
      manifest1_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest1, DATATYPE_ECU_MANIFEST)
      manifest2 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em2_unknown_ecu_manifest.der'), 'rb').read()
      manifest2_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest2, DATATYPE_ECU_MANIFEST)
      manifest3 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em3_ecu_manifest_signed_with_wrong_key.der'), 'rb').read()
      manifest3_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest3, DATATYPE_ECU_MANIFEST)
      manifest4 = open(os.path.join(TEST_DATA_DIR, 'flawed_manifests',
          'em4_attack_detected_in_ecu_manifest.der'), 'rb').read()
      manifest4_json = asn1_codec.convert_signed_der_to_dersigned_json(
          manifest4, DATATYPE_ECU_MANIFEST)

    # Register two Secondaries with the Primary.
    TestPrimary.instance.register_new_secondary('TCUdemocar')
    TestPrimary.instance.register_new_secondary('ecu11111')

    # Start with a sequence of tests with bad arguments but an otherwise
    # correct ECU Manifest, manifest1.
    # Try using a VIN that is not the Primary's VIN (ECU Manifest apparently
    # from another car).
    with self.assertRaises(uptane.UnknownVehicle):
      TestPrimary.instance.register_ecu_manifest(
          vin='13105941',
          ecu_serial='TCUdemocar', nonce=1,
          signed_ecu_manifest=manifest1)
    # Try using an ECU Serial that is registered, but which does not match the
    # ECU Serial listed in the ECU Manifest itself.
    with self.assertRaises(uptane.Spoofing):
      TestPrimary.instance.register_ecu_manifest(
          vin=VIN,
          ecu_serial='ecu11111',
          nonce=2, signed_ecu_manifest=manifest1)
    # Try using an ECU Serial that the Primary is not aware of.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          vin=VIN,
          ecu_serial='an unknown secondary ecu serial',
          nonce=3,
          signed_ecu_manifest=manifest1)
    # Register the ECU Manifest correctly this time.
    TestPrimary.instance.register_ecu_manifest(
        vin=VIN, ecu_serial='TCUdemocar', nonce=10,
        signed_ecu_manifest=manifest1)
    # The manifest should now be in the Primary's ecu manifests
    # dictionary. Note that the Primary holds manifests as JSON-compatible
    # Python dictionaries regardless of the format it receives them in.
    self.assertIn('TCUdemocar', TestPrimary.instance.ecu_manifests)
    self.assertIn(
        manifest1_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    # Make sure the nonce provided was noted in the right place.
    self.assertIn(10, TestPrimary.instance.nonces_to_send)
    self.assertEqual([], TestPrimary.instance.nonces_sent)
    # Though this is not required functionality, test register_ecu_manifest
    # with JSON manifests as well, even if we're running in DER mode.
    TestPrimary.instance.register_ecu_manifest(
        VIN, 'TCUdemocar', nonce=11, signed_ecu_manifest=manifest1_json,
        force_pydict=True)

    # The next tests use ECU Manifests containing problematic values.
    # If we're running in DER mode, we'll try both DER and JSON manifests.
    # If we're running in JSON mode, we'll only try JSON manifests
    # (though in JSON mode, we'll run twice, once with force_pydict on).
    # The list again is:
    # 2: Correctly signed ECU manifest from ECU unknown_ecu
    # 3: ECU Manifest from ECU TCUdemocar signed by the wrong key
    # 4: Correctly signed ECU manifest from TCUdemocar w/ attack report

    # Case 2: We won't save the ECU Manifest from an unknown ECU Serial.
    self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
    self.assertNotIn(
        manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          'democar', 'unknown_ecu', nonce=4, signed_ecu_manifest=manifest2)
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.register_ecu_manifest(
          'democar', 'unknown_ecu', nonce=5,
          signed_ecu_manifest=manifest2_json, force_pydict=True)
    self.assertNotIn('unknown_ecu', TestPrimary.instance.ecu_manifests)
    self.assertNotIn(
        manifest2_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Case 3: ECU Manifest signed with the wrong key: we save it anyway and
    #         send it on to the Director like any other; Primaries don't check
    #         ECU Manifest signatures, since they can't be expected to know
    #         the right public or symmetric keys.
    self.assertNotIn(
        manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=12, signed_ecu_manifest=manifest3)
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=13, signed_ecu_manifest=manifest3_json,
        force_pydict=True)
    self.assertIn(
        manifest3_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Case 4: ECU Manifest containing an attack report. Make sure it doesn't
    #         fail to be registered.
    self.assertNotIn(
        manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=14, signed_ecu_manifest=manifest4)
    TestPrimary.instance.register_ecu_manifest(
        'democar', 'TCUdemocar', nonce=15, signed_ecu_manifest=manifest4_json,
        force_pydict=True)
    self.assertIn(
        manifest4_json, TestPrimary.instance.ecu_manifests['TCUdemocar'])

    # Confirm that exactly the nonces from successful calls were queued.
    for this_nonce in [1, 2, 3, 4, 5]:
      self.assertNotIn(this_nonce, TestPrimary.instance.nonces_to_send)
    for this_nonce in [10, 11, 12, 13, 14, 15]:
      self.assertIn(this_nonce, TestPrimary.instance.nonces_to_send)
  def test_15_get_nonces_to_send_and_rotate(self):
    """
    Verify that get_nonces_to_send_and_rotate() returns the pending nonces
    and moves them from nonces_to_send into nonces_sent.
    """
    # The Primary's list of nonces to send in the next request to the
    # timeserver should still contain the nonces accumulated by the earlier
    # register_ecu_manifest tests (e.g. nonce 10).
    nonces_to_have_sent = TestPrimary.instance.nonces_to_send
    self.assertIn(10, nonces_to_have_sent)
    # Cycle nonces: Request the list of nonces to send to the timeserver,
    # triggering the rotation of nonces. Make sure the nonce list provided
    # is as expected from the previous test, and then that the rotation has
    # actually occurred (nonces_to_send emptied, contents moved to nonces_sent).
    self.assertEqual(
        sorted(nonces_to_have_sent),
        sorted(TestPrimary.instance.get_nonces_to_send_and_rotate()))
    self.assertEqual(nonces_to_have_sent, TestPrimary.instance.nonces_sent)
    self.assertEqual([], TestPrimary.instance.nonces_to_send)
  def test_20_update_time(self):
    """
    Exercise update_time() with a valid attestation, a bad-nonce
    attestation, a bad-signature attestation, and a wrong-nonce attestation.
    """
    # First, confirm that we've never verified a timeserver attestation
    # (no attestation has been stored yet).
    self.assertIsNone(TestPrimary.instance.get_last_timeserver_attestation())
    original_time_attestation = time_attestation = {
        'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
        'signatures': [{
            'method': 'ed25519',
            'sig': 'aabffcebaa57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
            'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    # When running in DER mode, re-sign the sample attestation in DER format.
    if tuf.conf.METADATA_FORMAT == 'der':
      time_attestation = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION,
          private_key=TestPrimary.key_timeserver_pri, resign=True)
    self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
    initial_clock_override = tuf.conf.CLOCK_OVERRIDE
    # Try verifying an attestation whose nonces don't match what we told the
    # timeserver. The sample attestation we have here does not have the nonces
    # we've indicated to the Primary that we've sent, so this verification
    # should fail:
    with self.assertRaises(uptane.BadTimeAttestation):
      TestPrimary.instance.update_time(time_attestation)
    # Check results. The bad attestation should change none of these.
    self.assertEqual(1, len(TestPrimary.instance.all_valid_timeserver_times))
    self.assertEqual(initial_clock_override, tuf.conf.CLOCK_OVERRIDE)
    # Now we adjust the Primary's notion of what nonces we sent to the
    # timeserver so that the sample attestation's nonce list matches.
    TestPrimary.instance.get_nonces_to_send_and_rotate()
    TestPrimary.instance.nonces_to_send = [NONCE]
    TestPrimary.instance.get_nonces_to_send_and_rotate()
    # This verification should now succeed and be recorded.
    TestPrimary.instance.update_time(time_attestation)
    self.assertEqual(
        time_attestation,
        TestPrimary.instance.get_last_timeserver_attestation())
    self.assertEqual(2, len(TestPrimary.instance.all_valid_timeserver_times))
    self.assertEqual(
        int(tuf.formats.datetime_to_unix_timestamp(iso8601.parse_date(
        '2016-11-02T21:06:05Z'))), tuf.conf.CLOCK_OVERRIDE)
    if tuf.conf.METADATA_FORMAT == 'der':
      # Fail to re-sign the DER, so that the signature is over JSON instead,
      # which results in a bad signature.
      time_attestation__badsig = asn1_codec.convert_signed_metadata_to_der(
          original_time_attestation, DATATYPE_TIME_ATTESTATION, resign=False)
    else: # 'json' format
      # Rewrite the first 9 digits of the signature ('sig') to something
      # invalid.
      time_attestation__badsig = {
          'signed': {'nonces': [NONCE], 'time': '2016-11-02T21:06:05Z'},
          'signatures': [{
              'method': 'ed25519',
              'sig': '987654321a57f1d6397bdc5647764261fd23516d2996446c3c40b3f30efb2a4a8d80cd2c21a453e78bf99dafb9d0f5e56c4e072db365499fa5f2f304afec100e',
              'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    # Now actually perform the bad signature test.
    with self.assertRaises(tuf.BadSignatureError):
      TestPrimary.instance.update_time(time_attestation__badsig)
    # Sanity check before the wrong-nonce test below.
    assert 500 not in original_time_attestation['signed']['nonces'], \
        'Programming error: bad and good test nonces are equal.'
    time_attestation__wrongnonce = {
        'signed': {'nonces': [500], 'time': '2016-11-02T21:15:00Z'},
        'signatures': [{
            'method': 'ed25519',
            'sig': '4d01df35ca829fd7ead1408c250950c444db8ac51fa929a7f0288578fbf81016f0e81ed35789689481aee6b7af28ab311306397ef38572732854fb6cf2072604',
            'keyid': '79c796d7e87389d1ebad04edce49faef611d139ee41ea9fb1931732afbfaac2e'}]}
    if tuf.conf.METADATA_FORMAT == 'der':
      # Convert this time attestation to the expected ASN.1/DER format.
      time_attestation__wrongnonce = asn1_codec.convert_signed_metadata_to_der(
          time_attestation__wrongnonce, DATATYPE_TIME_ATTESTATION,
          private_key=TestPrimary.key_timeserver_pri, resign=True)
    # A correctly signed attestation with a nonce we never sent must fail.
    with self.assertRaises(uptane.BadTimeAttestation):
      TestPrimary.instance.update_time(
          time_attestation__wrongnonce)
    # TODO: Consider other tests here.
  def test_25_generate_signed_vehicle_manifest(self):
    """
    Generate a Vehicle Manifest, check its schema, its single signature, and
    that the previously-registered ECU Manifest appears inside it.
    """
    vehicle_manifest = TestPrimary.instance.generate_signed_vehicle_manifest()
    # If the vehicle manifest is in DER format, check its format and then
    # convert back to JSON so that we can inspect it further.
    if tuf.conf.METADATA_FORMAT == 'der':
      uptane.formats.DER_DATA_SCHEMA.check_match(vehicle_manifest)
      vehicle_manifest = asn1_codec.convert_signed_der_to_dersigned_json(
          vehicle_manifest, DATATYPE_VEHICLE_MANIFEST)
    # Now it's not in DER format, whether or not it started that way.
    uptane.formats.SIGNABLE_VEHICLE_VERSION_MANIFEST_SCHEMA.check_match(
        vehicle_manifest)
    self.assertEqual(1, len(vehicle_manifest['signatures']))
    # Confirm that the Secondary's ECU Manifest (registered in the
    # ECU Manifest test above) is listed in the Vehicle Manifest.
    self.assertIn(
        'TCUdemocar', vehicle_manifest['signed']['ecu_version_manifests'])
    # TODO: More testing of the contents of the vehicle manifest.
    # Check the signature on the vehicle manifest.
    self.assertTrue(uptane.common.verify_signature_over_metadata(
        TestPrimary.ecu_key,
        vehicle_manifest['signatures'][0], # TODO: Deal with 1-sig assumption?
        vehicle_manifest['signed'],
        DATATYPE_VEHICLE_MANIFEST))
  def test_30_refresh_toplevel_metadata(self):
    """
    Refresh top-level metadata from both repositories and check that the
    four top-level role files appear in the client's metadata directories.
    """
    # Check that in the fresh temp directory for this test Primary client,
    # there aren't any metadata files except root.json yet.
    self.assertEqual(
        ['root.der', 'root.json'],
        sorted(os.listdir(TEST_DIRECTOR_METADATA_DIR)))
    self.assertEqual(
        ['root.der', 'root.json'],
        sorted(os.listdir(TEST_IMAGE_REPO_METADATA_DIR)))
    try:
      TestPrimary.instance.refresh_toplevel_metadata()
    except (URLError, tuf.NoWorkingMirrorError) as e:
      # Best-effort: if the local "mirror" isn't reachable, skip the
      # post-conditions rather than failing the test run here.
      pass
    else:
      # After a successful refresh, all four top-level role files should be
      # present for both the Director and Image repositories.
      for repo in ['director', 'imagerepo']:
        self.assertEqual(
            ['root.' + tuf.conf.METADATA_FORMAT,
             'snapshot.' + tuf.conf.METADATA_FORMAT,
             'targets.' + tuf.conf.METADATA_FORMAT,
             'timestamp.' + tuf.conf.METADATA_FORMAT],
            sorted(os.listdir(os.path.join(TEMP_CLIENT_DIR, 'metadata', repo,
            'current'))))
  def test_35_get_target_list_from_director(self):
    """Placeholder: not yet implemented."""
    # This will probably entail modification to the pinned.json file to
    # point it to a local directory instead of a remote server.
    #directed_targets = TestPrimary.instance.test_35_get_target_list_from_director
    pass
  def test_40_get_validated_target_info(self):
    """Placeholder: not yet implemented."""
    # TODO: Write this in a way that draws on saved sample metadata from the
    #       Director and Image Repo. Don't expect an actual server to be
    #       running.
    pass
  def test_55_update_exists_for_ecu(self):
    """
    Test update_exists_for_ecu() for four classes of ECU Serial:
    known-without-updates, unknown, known-with-updates, and invalid format.
    """
    # 1: Registered with the Primary but NOT listed in Director metadata
    #    (i.e. will not have any updates assigned)
    known_secondary_with_no_updates = "secondary_without_updates"
    # 2: NOT registered w/ the Primary and NOT listed in Director metadata
    unknown_secondary = "unknown_ecu_serial"
    # 3: Registered with the Primary and listed in Director metadata
    normal_secondary = "TCUdemocar"
    # 4: Invalid name for a Secondary (wrong format)
    invalid_name_secondary = 5
    # Register the Secondaries with the Primary and make sure registration
    # succeeded.
    TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)
    TestPrimary.instance.register_new_secondary(normal_secondary)
    self.assertIn(
        known_secondary_with_no_updates, TestPrimary.instance.my_secondaries)
    self.assertIn(normal_secondary, TestPrimary.instance.my_secondaries)
    # Try registering a Secondary that has already been registered with the
    # Primary. Expect success??? # TODO: Clarify.
    TestPrimary.instance.register_new_secondary(known_secondary_with_no_updates)
    # Try registering an invalid name.
    with self.assertRaises(tuf.FormatError):
      TestPrimary.instance.register_new_secondary(invalid_name_secondary)
    # Confirm that unknown_secondary has not been registered.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance._check_ecu_serial(unknown_secondary)
    # Run a primary update cycle so that the Primary fetches and validates
    # metadata and targets from the "repositories" (in this test, the
    # repositories sit in a local folder accessed by file://).
    # This also processes the data acquired to populate fields accessed by
    # Secondaries below.
    TestPrimary.instance.primary_update_cycle()
    # Try to find out if updates exist for an unknown ECU.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.update_exists_for_ecu(unknown_secondary)
    # Find out if updates exist for a known ECU that has no updates assigned to
    # it by the Director (expect empty list).
    self.assertFalse(TestPrimary.instance.update_exists_for_ecu(
        known_secondary_with_no_updates))
    # Confirm that updates exist for a known ECU to which we've assigned
    # updates in the Director metadata.
    self.assertTrue(TestPrimary.instance.update_exists_for_ecu(
        normal_secondary))
    TestPrimary.instance.primary_update_cycle()
  def test_60_get_image_fname_for_ecu(self):
    """
    Check get_image_fname_for_ecu() for unknown, assigned, and unassigned
    ECU Serials.
    """
    # Unknown ECU Serials must raise.
    with self.assertRaises(uptane.UnknownECU):
      TestPrimary.instance.get_image_fname_for_ecu('unknown')
    # An ECU with an assigned update gets a valid relative path.
    image_fname = TestPrimary.instance.get_image_fname_for_ecu('TCUdemocar')
    self.assertTrue(image_fname)
    tuf.formats.RELPATH_SCHEMA.check_match(image_fname)
    # A known ECU with no assigned update gets None.
    self.assertIsNone(TestPrimary.instance.get_image_fname_for_ecu(
        'secondary_without_updates'))
  def test_61_get_full_metadata_archive_fname(self):
    """The full metadata archive filename is a non-empty relative path."""
    archive_fname = TestPrimary.instance.get_full_metadata_archive_fname()
    self.assertTrue(archive_fname)
    tuf.formats.RELPATH_SCHEMA.check_match(archive_fname)
  def test_62_get_partial_metadata_fname(self):
    """The partial metadata filename is a non-empty relative path."""
    fname = TestPrimary.instance.get_partial_metadata_fname()
    self.assertTrue(fname)
    tuf.formats.RELPATH_SCHEMA.check_match(fname)
  def test_65_get_metadata_for_ecu(self):
    """Placeholder: not yet implemented."""
    pass
  def test_70_get_last_timeserver_attestation(self):
    """
    The attestation verified in test_20 should be retrievable and match the
    schema of the configured metadata format (DER or JSON).
    """
    attestation = TestPrimary.instance.get_last_timeserver_attestation()
    self.assertIsNotNone(attestation)
    if tuf.conf.METADATA_FORMAT == 'der':
      uptane.formats.DER_DATA_SCHEMA.check_match(attestation)
    else:
      assert tuf.conf.METADATA_FORMAT == 'json', 'Coding error in test.'
      uptane.formats.SIGNABLE_TIMESERVER_ATTESTATION_SCHEMA.check_match(
          attestation)
# Run the test methods above in name order when executed as a script.
if __name__ == '__main__':
  unittest.main()
| true
| true
|
f7087addb93a393392d9800dc38093aa66036065
| 10,705
|
py
|
Python
|
reegis/mobility.py
|
jnettels/reegis
|
fe50c124aa041b9faa494611cba6b833675115e4
|
[
"MIT"
] | null | null | null |
reegis/mobility.py
|
jnettels/reegis
|
fe50c124aa041b9faa494611cba6b833675115e4
|
[
"MIT"
] | null | null | null |
reegis/mobility.py
|
jnettels/reegis
|
fe50c124aa041b9faa494611cba6b833675115e4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Calculate the mobility demand.
SPDX-FileCopyrightText: 2016-2019 Uwe Krien <krien@uni-bremen.de>
SPDX-License-Identifier: MIT
"""
__copyright__ = "Uwe Krien <krien@uni-bremen.de>"
__license__ = "MIT"
import os
import pandas as pd
from collections import namedtuple
from reegis import geometries, config as cfg, tools, energy_balance
def format_kba_table(filename, sheet):
    """
    Clean the layout of a KBA vehicle-statistics table.

    The tables are made for human readability and not for automatic
    processing. Lines with subtotals and format-strings of the column names
    are removed. A valid MultiIndex ("state", "region", "subregion") is
    created to make it easier to filter the table by the index.

    Parameters
    ----------
    filename : str
        Path and name of the excel file.
    sheet : str
        Name of the sheet of the excel table.

    Returns
    -------
    pandas.DataFrame
    """
    # Read table (data starts below a seven-line header; two header rows
    # form the column MultiIndex).
    df = pd.read_excel(filename, sheet, skiprows=7, header=[0, 1])

    # Drop empty column
    df = df.drop([("Unnamed: 0_level_0", "Unnamed: 0_level_1")], axis=1)

    idx1 = df.columns[0]
    idx2 = df.columns[1]
    idx3 = df.columns[2]

    # Remove lines with subtotal; give the "SONSTIGE" (other) rows valid
    # region/subregion entries so they survive the null-filter below.
    df.loc[(df[idx1] == "SONSTIGE"), idx2] = "SONSTIGE"
    df.loc[(df[idx1] == "SONSTIGE"), idx3] = "00000 SONSTIGE"
    df = df.drop(df.loc[df[idx3].isnull()].index)

    # Forward-fill the merged index cells. ``DataFrame.ffill`` replaces the
    # ``fillna(method="ffill")`` form, which is deprecated in pandas 2.x and
    # removed in pandas 3.0; the result is identical.
    df[df.columns[[0, 1, 2]]] = df[df.columns[[0, 1, 2]]].ffill()

    # Keep only the numeric key of the subregion (first five characters).
    df[df.columns[2]] = df[df.columns[2]].str[:5]

    # set MultiIndex
    df.set_index(list(df.columns[[0, 1, 2]]), inplace=True)
    df.index = df.index.set_names(["state", "region", "subregion"])

    # Remove format-strings (line breaks, hyphenation, colons) from the
    # column names on both header levels.
    level1 = (
        df.columns.get_level_values(1)
        .str.replace("\n", " ")
        .str.replace("- ", "")
        .str.replace(":", "")
    )
    level0 = (
        df.columns.get_level_values(0)
        .str.replace("\n", " ")
        .str.replace("- ", "")
        .str.replace(":", "")
    )
    df.columns = pd.MultiIndex.from_arrays([level0, level1])
    return df
def get_kba_table():
    """Fetch the KBA vehicle statistics file and return both sheets.

    The file is downloaded once and cached locally. Afterwards the "kfz"
    sheet (all vehicles) and the "pkw" sheet (passenger-car details) are
    cleaned with :py:func:`format_kba_table`.

    Returns
    -------
    namedtuple
        Fields ``kfz`` and ``pkw``, each a ``pandas.DataFrame``.
    """
    table_type = namedtuple("kba_table", "kfz pkw")
    local_file = os.path.join(
        cfg.get("paths", "general"), cfg.get("mobility", "table_kba")
    )
    # Hit the KBA server only if the file is not cached yet.
    if not os.path.isfile(local_file):
        tools.download_file(local_file, cfg.get("mobility", "url_kba"))
    kfz_sheet = format_kba_table(local_file, "Kfz_u_Kfz_Anh")
    pkw_sheet = format_kba_table(local_file, "Pkw")
    return table_type(kfz=kfz_sheet, pkw=pkw_sheet)
def get_mileage_table():
    """Return the local path of the KBA mileage table, downloading the
    file from the KBA (Kraftfahrtbundesamt) on first use.
    """
    source_url = (
        "https://www.kba.de/SharedDocs/Publikationen/DE/Statistik/"
        "Kraftverkehr/VK/2018/vk_2018_xlsx.xlsx?__blob=publicationFile&v=22"
    )
    target = os.path.join(
        cfg.get("paths", "general"), "mileage_table_kba.xlsx"
    )
    # Download only when the cached copy is missing.
    if not os.path.isfile(target):
        tools.download_file(target, source_url)
    return target
def get_sheet_from_mileage_table(sheet):
    """Load one sheet of the KBA mileage file and clean it up.

    The outermost index level is dropped and the "Insgesamt" (total) rows
    are removed, so only the individual categories remain.
    """
    table = pd.read_excel(
        get_mileage_table(), sheet, skiprows=7, index_col=[0, 1, 2],
        skipfooter=9
    )
    table.index = table.index.droplevel(0).set_names(["", ""])
    totals = table.loc[pd.IndexSlice[slice(None), "Insgesamt"], slice(None)]
    return table.drop(totals.index)
def get_mileage_by_type_and_fuel(year=2018):
    """
    Get mileage by type and fuel from mileage table and other sources.

    See mobility.ini file for more information.

    Parameters
    ----------
    year : int
        Year of the KBA mileage statistics (default: 2018).

    Returns
    -------
    pandas.DataFrame
        Annual mileage [km] per vehicle type (index) and fuel
        (columns "diesel", "petrol", "other").
    """
    # get km per year and type
    total = (
        get_sheet_from_mileage_table("VK 1.1")
        .loc["Jahresfahrleistung in 1.000 km", str(year)]
        .mul(1000)
    )
    passenger = (
        get_sheet_from_mileage_table("VK 1.7")
        .loc["Jahresfahrleistung in 1.000 km", str(year)]
        .mul(1000)
    )
    small_trucks = (
        get_sheet_from_mileage_table("VK 1.17")
        .loc["Jahresfahrleistung in 1.000 km", str(year)]
        .mul(1000)
    )
    medium_trucks = (
        get_sheet_from_mileage_table("VK 1.20")
        .loc["Jahresfahrleistung in 1.000 km", str(year)]
        .mul(1000)
    )
    # Sheet VK 1.23 only provides an aggregate for big diesel trucks.
    big_trucks_diesel = (
        get_sheet_from_mileage_table("VK 1.23")
        .loc["Jahresfahrleistung in 1.000 km", str(year)]
        .mul(1000)
        .sum()
    )
    df = pd.DataFrame(index=total.index, columns=["diesel", "petrol", "other"])

    # Translate the German vehicle-type and fuel names using the mappings
    # from the configuration (mobility.ini).
    vt_dict = cfg.get_dict("vehicle_types_dictionary")
    df.rename(vt_dict, axis=0, inplace=True)
    total.rename(vt_dict, axis=0, inplace=True)
    dc = cfg.get_dict("fuel_dictionary")

    # add km by fuel for passenger cars
    df.loc["passenger car"] = passenger.rename(dc, axis=0)
    # add km by fuel for small trucks (<= 3.5 tons)
    df.loc["small truck (max. 3.5 tons)"] = small_trucks.rename(dc, axis=0)
    # add km by fuel for medium trucks (3.5 < weight <= 7.5 tons)
    df.loc["medium truck (3.5 to 7.5 tons)"] = medium_trucks.rename(dc, axis=0)
    # add km by fuel for big trucks (> 7.5 tons)
    # assuming that non-diesel engines are 50% petrol and 50% other
    n = "big truck (over 7.5 tons)"
    df.loc[n, "diesel"] = big_trucks_diesel
    df.loc[n, ["petrol", "other"]] = (total[n] - big_trucks_diesel) / 2

    # For all remaining vehicle types, split the total mileage using the
    # fixed fuel shares from the "fuel share" config section.
    fuel_share = pd.DataFrame(
        cfg.get_dict_list("fuel share"), index=["diesel", "petrol", "other"]
    ).astype(float)
    for col in fuel_share.columns:
        df.loc[col] = fuel_share[col].mul(total[col])
    return df
def create_grouped_table_kfz():
    """Group the kfz-table by main groups.

    Returns
    -------
    pandas.DataFrame
        Vehicle counts per subregion (index) with columns grouped by the
        "KFZ" mapping from the configuration.
    """
    df = get_kba_table().kfz
    df.index = df.index.droplevel([0, 1])
    # Flatten the two-level column header into single strings.
    df.columns = [" ".join(col).strip() for col in df.columns]
    kfz_dict = cfg.get_dict("KFZ")
    # Cells may contain "-" placeholders; convert all columns to numbers.
    for col in df.columns:
        df[col] = pd.to_numeric(df[col].replace("-", ""))
    # NOTE(review): ``groupby(..., axis=1)`` is deprecated in pandas >= 2.1;
    # consider transposing before grouping when upgrading pandas.
    df = df.groupby(by=kfz_dict, axis=1).sum()
    # "traction engine" contains the agriculture/forestry vehicles; split the
    # remainder into its own column and drop the combined one.
    df["traction engine, general"] = (
        df["traction engine"] - df["traction engine, agriculture and forestry"]
    )
    df.drop("traction engine", axis=1, inplace=True)
    df.drop("ignore", axis=1, inplace=True)
    return df
def create_grouped_table_pkw():
    """
    Extract fuel groups of passenger cars

    Examples
    --------
    >>> pkw = create_grouped_table_pkw()
    >>> pkw['petrol'].sum()
    31031021.0
    >>> pkw['diesel'].sum()
    15153364.0
    """
    df = get_kba_table().pkw
    df.index = df.index.droplevel([0, 1])
    # Keep only the fuel-type breakdown ("Nach Kraftstoffarten").
    df = df["Nach Kraftstoffarten"]
    # Group fuels according to the "PKW" mapping; drop unused columns.
    df = df.groupby(by=cfg.get_dict("PKW"), axis=1).sum()
    df.drop("ignore", axis=1, inplace=True)
    return df
def get_admin_by_region(region):
    """Allocate the German administrative keys ("vg1000") to the given
    regions.

    Parameters
    ----------
    region : geopandas.GeoDataFrame

    Returns
    -------
    pd.DataFrame
        Administrative keys ("RS" index) mapped to region ids ("fs").
    """
    admin_fn = os.path.join(
        cfg.get("paths", "geometry"), "vg1000_geodata.geojson"
    )
    admin = geometries.load(fullname=admin_fn)
    admin.set_index("RS", inplace=True)
    # Match each administrative area to a region via its representative
    # point.
    mapping = geometries.spatial_join_with_buffer(
        admin.representative_point(), region, "fs", limit=0
    )
    return pd.DataFrame(mapping.drop("geometry", axis=1))
def get_grouped_kfz_by_region(region):
    """
    Get the main vehicle groups by region.

    Parameters
    ----------
    region : geopandas.GeoDataFrame

    Returns
    -------
    pd.DataFrame

    Examples
    --------
    >>> fs = geometries.get_federal_states_polygon()
    >>> total = get_grouped_kfz_by_region(fs).sum()
    >>> int(total["passenger car"])
    47095784
    >>> int(total["lorry, > 7500"])
    295826
    """
    df = create_grouped_table_kfz()
    reg2vg = get_admin_by_region(region)
    # Attach the region id ("fs") to each administrative key and sum the
    # vehicle counts per region; unmatched keys are collected in "unknown".
    df2reg = df.merge(reg2vg, left_index=True, right_index=True, how="left")
    df2reg["fs"] = df2reg["fs"].fillna("unknown")
    return df2reg.groupby("fs").sum()
def get_traffic_fuel_energy(year):
    """Return the fuel energy use of German road traffic
    ("Straßenverkehr") from the national energy balance.

    Parameters
    ----------
    year : int

    Returns
    -------
    pd.Series

    Examples
    --------
    >>> fuel_energy = get_traffic_fuel_energy(2017)
    >>> int(fuel_energy["Ottokraftstoffe"])
    719580
    >>> fuel_share = fuel_energy.div(fuel_energy.sum()) * 100
    >>> round(fuel_share["Dieselkraftstoffe"], 1)
    62.7
    """
    road = energy_balance.get_de_balance(year).loc["Straßenverkehr"]
    # Keep only fuels that are actually used and remove the subtotal rows.
    road = road[road != 0]
    return road.drop(
        ["primär (gesamt)", "sekundär (gesamt)", "Row", "gesamt"]
    )
def calculate_mobility_energy_use(year):
    """
    Calculate the energy use of road traffic by fuel from mileage,
    specific consumption and fuel energy content.

    Parameters
    ----------
    year : int

    Returns
    -------
    pandas.DataFrame
        Energy use [TJ] per vehicle type (index) and fuel (columns).

    Examples
    --------
    >>> mobility_balance = get_traffic_fuel_energy(2017)
    >>> energy_use = calculate_mobility_energy_use(2017)
    >>> p = "Petrol usage [TJ]"
    >>> d = "Diesel usage [TJ]"
    >>> o = "Overall fuel usage [TJ]"
    >>> print(p, "(energy balance):", int(mobility_balance["Ottokraftstoffe"]))
    Petrol usage [TJ] (energy balance): 719580
    >>> print(p, "(calculated):", int(energy_use["petrol"].sum()))
    Petrol usage [TJ] (calculated): 803603
    >>> print(d, "(energy balance):",
    ...     int(mobility_balance["Dieselkraftstoffe"]))
    Diesel usage [TJ] (energy balance): 1425424
    >>> print(d, "(calculated):", int(energy_use["diesel"].sum()))
    Diesel usage [TJ] (calculated): 1636199
    >>> print(o, "(energy balance):", int(mobility_balance.sum()))
    Overall fuel usage [TJ] (energy balance): 2275143
    >>> print(o, "(calculated):", int(energy_use.sum().sum()))
    Overall fuel usage [TJ] (calculated): 2439803
    """
    # fetch table of mileage by fuel and vehicle type
    mileage = get_mileage_by_type_and_fuel(year)

    # fetch table of specific demand by fuel and vehicle type (from 2011)
    spec_demand = (
        pd.DataFrame(
            cfg.get_dict_list("fuel consumption"),
            index=["diesel", "petrol", "other"],
        )
        .astype(float)
        .transpose()
    )

    # fetch the energy content of the different fuel types
    energy_content = pd.Series(cfg.get_dict("energy_per_liter"))[
        ["diesel", "petrol", "other"]
    ]

    # mileage * specific consumption * energy content, scaled by 10**6
    # (unit conversion to TJ — presumably; see mobility.ini, TODO confirm).
    return mileage.mul(spec_demand).mul(energy_content) / 10 ** 6
# The module is used as a library; there is no script behavior.
if __name__ == "__main__":
    pass
| 27.877604
| 79
| 0.616534
|
__copyright__ = "Uwe Krien <krien@uni-bremen.de>"
__license__ = "MIT"
import os
import pandas as pd
from collections import namedtuple
from reegis import geometries, config as cfg, tools, energy_balance
def format_kba_table(filename, sheet):
df = pd.read_excel(filename, sheet, skiprows=7, header=[0, 1])
df = df.drop([("Unnamed: 0_level_0", "Unnamed: 0_level_1")], axis=1)
idx1 = df.columns[0]
idx2 = df.columns[1]
idx3 = df.columns[2]
df.loc[(df[idx1] == "SONSTIGE"), idx2] = "SONSTIGE"
df.loc[(df[idx1] == "SONSTIGE"), idx3] = "00000 SONSTIGE"
df = df.drop(df.loc[df[idx3].isnull()].index)
df[df.columns[[0, 1, 2]]] = df[df.columns[[0, 1, 2]]].fillna(
method="ffill"
)
df[df.columns[2]] = df[df.columns[2]].str[:5]
df.set_index(list(df.columns[[0, 1, 2]]), inplace=True)
df.index = df.index.set_names(["state", "region", "subregion"])
level1 = (
df.columns.get_level_values(1)
.str.replace("\n", " ")
.str.replace("- ", "")
.str.replace(":", "")
)
level0 = (
df.columns.get_level_values(0)
.str.replace("\n", " ")
.str.replace("- ", "")
.str.replace(":", "")
)
df.columns = pd.MultiIndex.from_arrays([level0, level1])
return df
def get_kba_table():
kba_table = namedtuple("kba_table", "kfz pkw")
kba_filename = os.path.join(
cfg.get("paths", "general"), cfg.get("mobility", "table_kba")
)
if not os.path.isfile(kba_filename):
tools.download_file(kba_filename, cfg.get("mobility", "url_kba"))
return kba_table(
kfz=format_kba_table(kba_filename, "Kfz_u_Kfz_Anh"),
pkw=format_kba_table(kba_filename, "Pkw"),
)
def get_mileage_table():
url = (
"https://www.kba.de/SharedDocs/Publikationen/DE/Statistik/"
"Kraftverkehr/VK/2018/vk_2018_xlsx.xlsx?__blob=publicationFile&v=22"
)
mileage_filename = os.path.join(
cfg.get("paths", "general"), "mileage_table_kba.xlsx"
)
if not os.path.isfile(mileage_filename):
tools.download_file(mileage_filename, url)
return mileage_filename
def get_sheet_from_mileage_table(sheet):
fn = get_mileage_table()
df = pd.read_excel(
fn, sheet, skiprows=7, index_col=[0, 1, 2], skipfooter=9
)
df.index = df.index.droplevel(0).set_names(["", ""])
return df.drop(
df.loc[pd.IndexSlice[slice(None), "Insgesamt"], slice(None)].index
)
def get_mileage_by_type_and_fuel(year=2018):
total = (
get_sheet_from_mileage_table("VK 1.1")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
passenger = (
get_sheet_from_mileage_table("VK 1.7")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
small_trucks = (
get_sheet_from_mileage_table("VK 1.17")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
medium_trucks = (
get_sheet_from_mileage_table("VK 1.20")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
)
big_trucks_diesel = (
get_sheet_from_mileage_table("VK 1.23")
.loc["Jahresfahrleistung in 1.000 km", str(year)]
.mul(1000)
.sum()
)
df = pd.DataFrame(index=total.index, columns=["diesel", "petrol", "other"])
vt_dict = cfg.get_dict("vehicle_types_dictionary")
df.rename(vt_dict, axis=0, inplace=True)
total.rename(vt_dict, axis=0, inplace=True)
dc = cfg.get_dict("fuel_dictionary")
df.loc["passenger car"] = passenger.rename(dc, axis=0)
df.loc["small truck (max. 3.5 tons)"] = small_trucks.rename(dc, axis=0)
df.loc["medium truck (3.5 to 7.5 tons)"] = medium_trucks.rename(dc, axis=0)
n = "big truck (over 7.5 tons)"
df.loc[n, "diesel"] = big_trucks_diesel
df.loc[n, ["petrol", "other"]] = (total[n] - big_trucks_diesel) / 2
fuel_share = pd.DataFrame(
cfg.get_dict_list("fuel share"), index=["diesel", "petrol", "other"]
).astype(float)
for col in fuel_share.columns:
df.loc[col] = fuel_share[col].mul(total[col])
return df
def create_grouped_table_kfz():
df = get_kba_table().kfz
df.index = df.index.droplevel([0, 1])
df.columns = [" ".join(col).strip() for col in df.columns]
kfz_dict = cfg.get_dict("KFZ")
for col in df.columns:
df[col] = pd.to_numeric(df[col].replace("-", ""))
df = df.groupby(by=kfz_dict, axis=1).sum()
df["traction engine, general"] = (
df["traction engine"] - df["traction engine, agriculture and forestry"]
)
df.drop("traction engine", axis=1, inplace=True)
df.drop("ignore", axis=1, inplace=True)
return df
def create_grouped_table_pkw():
df = get_kba_table().pkw
df.index = df.index.droplevel([0, 1])
df = df["Nach Kraftstoffarten"]
df = df.groupby(by=cfg.get_dict("PKW"), axis=1).sum()
df.drop("ignore", axis=1, inplace=True)
return df
def get_admin_by_region(region):
fn = os.path.join(cfg.get("paths", "geometry"), "vg1000_geodata.geojson")
vg = geometries.load(fullname=fn)
vg.set_index("RS", inplace=True)
reg2vg = geometries.spatial_join_with_buffer(
vg.representative_point(), region, "fs", limit=0
)
return pd.DataFrame(reg2vg.drop("geometry", axis=1))
def get_grouped_kfz_by_region(region):
df = create_grouped_table_kfz()
reg2vg = get_admin_by_region(region)
df2reg = df.merge(reg2vg, left_index=True, right_index=True, how="left")
df2reg["fs"] = df2reg["fs"].fillna("unknown")
return df2reg.groupby("fs").sum()
def get_traffic_fuel_energy(year):
fuel_energy = energy_balance.get_de_balance(year).loc["Straßenverkehr"]
fuel_energy = fuel_energy[fuel_energy != 0]
fuel_energy.drop(
["primär (gesamt)", "sekundär (gesamt)", "Row", "gesamt"], inplace=True
)
return fuel_energy
def calculate_mobility_energy_use(year):
mileage = get_mileage_by_type_and_fuel(year)
spec_demand = (
pd.DataFrame(
cfg.get_dict_list("fuel consumption"),
index=["diesel", "petrol", "other"],
)
.astype(float)
.transpose()
)
energy_content = pd.Series(cfg.get_dict("energy_per_liter"))[
["diesel", "petrol", "other"]
]
return mileage.mul(spec_demand).mul(energy_content) / 10 ** 6
if __name__ == "__main__":
pass
| true
| true
|
f7087b51d7ba26204dd3feb9b980fba59565ce46
| 5,841
|
py
|
Python
|
configs/body/2d_kpt_sview_rgb_img/associative_embedding/aic/higherhrnet_w32_aic_512x512.py
|
robertpreda/mmpose
|
42d00d7b5742ce89105e73dec1b72b4fea2cacde
|
[
"Apache-2.0"
] | 1,775
|
2020-07-10T01:20:01.000Z
|
2022-03-31T16:31:50.000Z
|
configs/body/2d_kpt_sview_rgb_img/associative_embedding/aic/higherhrnet_w32_aic_512x512.py
|
ly015/mmpose
|
2b4a5cf3197cb14832cb6b7ecb883c76084d7131
|
[
"Apache-2.0"
] | 1,021
|
2020-07-11T11:40:24.000Z
|
2022-03-31T14:32:26.000Z
|
configs/body/2d_kpt_sview_rgb_img/associative_embedding/aic/higherhrnet_w32_aic_512x512.py
|
ly015/mmpose
|
2b4a5cf3197cb14832cb6b7ecb883c76084d7131
|
[
"Apache-2.0"
] | 477
|
2020-07-11T11:27:51.000Z
|
2022-03-31T09:42:25.000Z
|
# Runtime and training-schedule settings for HigherHRNet-W32 on the
# AI Challenger (AIC) keypoint dataset with 512x512 input.
_base_ = ['../../../../_base_/datasets/aic.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=50)
# Evaluate mAP every 50 epochs; keep the checkpoint with the best AP.
evaluation = dict(interval=50, metric='mAP', save_best='AP')

optimizer = dict(
    type='Adam',
    lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy: linear warmup for 500 iterations, then step decay at
# epochs 200 and 260.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[200, 260])
total_epochs = 300
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# AIC annotates 14 keypoints; all channels are used for both training and
# inference.
channel_cfg = dict(
    num_output_channels=14,
    dataset_joints=14,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
    ],
    inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])

data_cfg = dict(
    image_size=512,
    base_size=256,
    base_sigma=2,
    # Two heatmap resolutions (128 and 256) matching num_scales=2 below.
    heatmap_size=[128, 256],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    num_scales=2,
    scale_aware_sigma=False,
)
# model settings: bottom-up associative-embedding model with an HRNet-W32
# backbone and a HigherHRNet head producing heatmaps plus tag maps.
model = dict(
    type='AssociativeEmbedding',
    pretrained='https://download.openmmlab.com/mmpose/'
    'pretrain_models/hrnet_w32-36af842e.pth',
    backbone=dict(
        type='HRNet',
        in_channels=3,
        extra=dict(
            stage1=dict(
                num_modules=1,
                num_branches=1,
                block='BOTTLENECK',
                num_blocks=(4, ),
                num_channels=(64, )),
            stage2=dict(
                num_modules=1,
                num_branches=2,
                block='BASIC',
                num_blocks=(4, 4),
                num_channels=(32, 64)),
            stage3=dict(
                num_modules=4,
                num_branches=3,
                block='BASIC',
                num_blocks=(4, 4, 4),
                num_channels=(32, 64, 128)),
            stage4=dict(
                num_modules=3,
                num_branches=4,
                block='BASIC',
                num_blocks=(4, 4, 4, 4),
                num_channels=(32, 64, 128, 256))),
    ),
    keypoint_head=dict(
        type='AEHigherResolutionHead',
        in_channels=32,
        num_joints=14,
        tag_per_joint=True,
        extra=dict(final_conv_kernel=1, ),
        num_deconv_layers=1,
        num_deconv_filters=[32],
        num_deconv_kernels=[4],
        num_basic_blocks=4,
        cat_output=[True],
        # Associative-embedding (tag) loss only on the lower-resolution
        # output; heatmap loss on both outputs.
        with_ae_loss=[True, False],
        loss_keypoint=dict(
            type='MultiLossFactory',
            num_joints=14,
            num_stages=2,
            ae_loss_type='exp',
            with_ae_loss=[True, False],
            push_loss_factor=[0.01, 0.01],
            pull_loss_factor=[0.001, 0.001],
            with_heatmaps_loss=[True, True],
            heatmaps_loss_factor=[1.0, 1.0])),
    train_cfg=dict(),
    test_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        max_num_people=30,
        scale_factor=[1],
        with_heatmaps=[True, True],
        with_ae=[True, False],
        project2image=True,
        align_corners=False,
        nms_kernel=5,
        nms_padding=2,
        tag_per_joint=True,
        detection_threshold=0.1,
        tag_threshold=1,
        use_detection_val=True,
        ignore_too_much=False,
        adjust=True,
        refine=True,
        flip_test=True))
# Training pipeline: random affine/flip augmentation, normalization with
# ImageNet statistics, then target heatmap/tag generation.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='BottomUpRandomAffine',
        rot_factor=30,
        scale_factor=[0.75, 1.5],
        scale_type='short',
        trans_factor=40),
    dict(type='BottomUpRandomFlip', flip_prob=0.5),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='BottomUpGenerateTarget',
        sigma=2,
        max_num_people=30,
    ),
    dict(
        type='Collect',
        keys=['img', 'joints', 'targets', 'masks'],
        meta_keys=[]),
]

# Validation pipeline: single-scale resize/align plus the same
# normalization; no augmentation.
val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
    dict(
        type='BottomUpResizeAlign',
        transforms=[
            dict(type='ToTensor'),
            dict(
                type='NormalizeTensor',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'aug_data', 'test_scale_factor', 'base_size',
            'center', 'scale', 'flip_index'
        ]),
]

test_pipeline = val_pipeline

# Dataset locations; val and test share the AIC validation split.
data_root = 'data/aic'
data = dict(
    samples_per_gpu=24,
    workers_per_gpu=2,
    train=dict(
        type='BottomUpAicDataset',
        ann_file=f'{data_root}/annotations/aic_train.json',
        img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
        'keypoint_train_images_20170902/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='BottomUpAicDataset',
        ann_file=f'{data_root}/annotations/aic_val.json',
        img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
        'keypoint_validation_images_20170911/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='BottomUpAicDataset',
        ann_file=f'{data_root}/annotations/aic_val.json',
        img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
        'keypoint_validation_images_20170911/',
        data_cfg=data_cfg,
        pipeline=test_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
| 28.773399
| 77
| 0.574046
|
_base_ = ['../../../../_base_/datasets/aic.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=50)
evaluation = dict(interval=50, metric='mAP', save_best='AP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
channel_cfg = dict(
num_output_channels=14,
dataset_joints=14,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128, 256],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=2,
scale_aware_sigma=False,
)
model = dict(
type='AssociativeEmbedding',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='AEHigherResolutionHead',
in_channels=32,
num_joints=14,
tag_per_joint=True,
extra=dict(final_conv_kernel=1, ),
num_deconv_layers=1,
num_deconv_filters=[32],
num_deconv_kernels=[4],
num_basic_blocks=4,
cat_output=[True],
with_ae_loss=[True, False],
loss_keypoint=dict(
type='MultiLossFactory',
num_joints=14,
num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.01, 0.01],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0])),
train_cfg=dict(),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True, True],
with_ae=[True, False],
project2image=True,
align_corners=False,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True))
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/aic'
data = dict(
samples_per_gpu=24,
workers_per_gpu=2,
train=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_train.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
'keypoint_train_images_20170902/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=test_pipeline,
dataset_info={{_base_.dataset_info}}),
)
| true
| true
|
f7087ca213f3e2731d33f44af33711a2e82b380c
| 386
|
py
|
Python
|
KOMORANPy/training/trainer.py
|
shineware/KOMORANPy
|
b8c1904b42a0bdfcd26c4c85cb37cd8cb48ffb6a
|
[
"Apache-2.0"
] | 2
|
2021-07-02T04:41:03.000Z
|
2021-12-08T10:26:20.000Z
|
KOMORANPy/training/trainer.py
|
shineware/KOMORANPy
|
b8c1904b42a0bdfcd26c4c85cb37cd8cb48ffb6a
|
[
"Apache-2.0"
] | 1
|
2021-08-24T16:09:00.000Z
|
2021-08-24T16:09:00.000Z
|
KOMORANPy/training/trainer.py
|
shineware/KOMORANPy
|
b8c1904b42a0bdfcd26c4c85cb37cd8cb48ffb6a
|
[
"Apache-2.0"
] | 1
|
2021-07-25T10:35:56.000Z
|
2021-07-25T10:35:56.000Z
|
from KOMORANPy.training.model_builder import ModelBuilder
# corpus_builder = CorpusBuilder()
# # todo : 트레이닝 데이터 위치 ( 실제로는 바이너리 파일만 제공 될 예정 )
# corpus_builder.build_path("/Users/shinjunsoo/shineware/data/komoran_training_data", ".refine.txt")
# corpus_builder.save("corpus_build")
model_builder = ModelBuilder()
model_builder.build_path("corpus_build")
model_builder.save("../model")
| 35.090909
| 100
| 0.782383
|
from KOMORANPy.training.model_builder import ModelBuilder
er.build_path("corpus_build")
model_builder.save("../model")
| true
| true
|
f7087ce4c035b1fb1de188116f3653f00a4e8ccb
| 581
|
py
|
Python
|
web/project/settings/production.py
|
borzunov/django-forum
|
37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3
|
[
"MIT"
] | null | null | null |
web/project/settings/production.py
|
borzunov/django-forum
|
37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3
|
[
"MIT"
] | null | null | null |
web/project/settings/production.py
|
borzunov/django-forum
|
37ee43327575e59a4f7e1fcaa9f3a1c0de08d2b3
|
[
"MIT"
] | null | null | null |
from .common import *
DEBUG = False
ALLOWED_HOSTS = [os.environ['HOST']]
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_PORT = int(os.environ['EMAIL_PORT'])
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
| 18.741935
| 55
| 0.580034
|
from .common import *
DEBUG = False
ALLOWED_HOSTS = [os.environ['HOST']]
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_PORT = int(os.environ['EMAIL_PORT'])
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
| true
| true
|
f7087d2d98dd995de906a86e6eeae12f9b9f9d50
| 2,840
|
py
|
Python
|
chembl_webresource_client/new_client.py
|
RowAnalytics/chembl_webresource_client
|
74dc4cb463a118cff8be949a3acf79f0d43e1625
|
[
"Apache-2.0"
] | 1
|
2019-08-06T02:14:02.000Z
|
2019-08-06T02:14:02.000Z
|
chembl_webresource_client/new_client.py
|
RowAnalytics/chembl_webresource_client
|
74dc4cb463a118cff8be949a3acf79f0d43e1625
|
[
"Apache-2.0"
] | null | null | null |
chembl_webresource_client/new_client.py
|
RowAnalytics/chembl_webresource_client
|
74dc4cb463a118cff8be949a3acf79f0d43e1625
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'mnowotka'
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
import requests_cache
from chembl_webresource_client.spore_client import Client, make_spore_function
from chembl_webresource_client.query_set import QuerySet
from chembl_webresource_client.query_set import Model
from chembl_webresource_client.settings import Settings
from easydict import EasyDict
#-----------------------------------------------------------------------------------------------------------------------
class NewClient(object):
pass
#-----------------------------------------------------------------------------------------------------------------------
def client_from_url(url, base_url=None):
"""Builds a client from an url
:param url: the url you want to get the SPORE schema from
:param session: the :class:`request.Session` instance to use. Defaults to
the requests module itself.
"""
res = requests.get(url)
if not res.ok:
raise Exception('Error getting schema from url {0} with status {1} and msg {2}'.format(url, res.status_code, res.text))
schema = res.json()
if 'base_url' not in schema:
if base_url:
schema['base_url'] = base_url
else:
parsed_url = urlparse(url)
schema['base_url'] = parsed_url.scheme + '://' + parsed_url.netloc + '/'
if not schema['base_url'].endswith('/'):
schema['base_url'] += '/'
client = NewClient()
client.description = EasyDict(schema)
client.official = False # TODO: change
keys = client.description.methods.keys()
for method, definition in [(m,d) for (m,d) in client.description.methods.items() if
(m.startswith('POST_') or m.startswith('GET_')) and m.endswith('_detail')]:
searchable = False
if method.replace('dispatch_detail', 'get_search') in keys:
searchable = True
name = definition['resource_name']
collection_name = definition['collection_name']
formats = [format for format in definition['formats'] if format not in ('jsonp', 'html')]
default_format = definition['default_format'].split('/')[-1]
if not name:
continue
model = Model(name, collection_name, formats, searchable)
qs = QuerySet(model=model)
if default_format != 'xml':
qs.set_format(default_format)
setattr(client, name, qs)
return client
#-----------------------------------------------------------------------------------------------------------------------
new_client = client_from_url(Settings.Instance().NEW_CLIENT_URL + '/spore')
#-----------------------------------------------------------------------------------------------------------------------
| 39.444444
| 127
| 0.549648
|
__author__ = 'mnowotka'
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import requests
import requests_cache
from chembl_webresource_client.spore_client import Client, make_spore_function
from chembl_webresource_client.query_set import QuerySet
from chembl_webresource_client.query_set import Model
from chembl_webresource_client.settings import Settings
from easydict import EasyDict
class NewClient(object):
pass
def client_from_url(url, base_url=None):
res = requests.get(url)
if not res.ok:
raise Exception('Error getting schema from url {0} with status {1} and msg {2}'.format(url, res.status_code, res.text))
schema = res.json()
if 'base_url' not in schema:
if base_url:
schema['base_url'] = base_url
else:
parsed_url = urlparse(url)
schema['base_url'] = parsed_url.scheme + '://' + parsed_url.netloc + '/'
if not schema['base_url'].endswith('/'):
schema['base_url'] += '/'
client = NewClient()
client.description = EasyDict(schema)
client.official = False
keys = client.description.methods.keys()
for method, definition in [(m,d) for (m,d) in client.description.methods.items() if
(m.startswith('POST_') or m.startswith('GET_')) and m.endswith('_detail')]:
searchable = False
if method.replace('dispatch_detail', 'get_search') in keys:
searchable = True
name = definition['resource_name']
collection_name = definition['collection_name']
formats = [format for format in definition['formats'] if format not in ('jsonp', 'html')]
default_format = definition['default_format'].split('/')[-1]
if not name:
continue
model = Model(name, collection_name, formats, searchable)
qs = QuerySet(model=model)
if default_format != 'xml':
qs.set_format(default_format)
setattr(client, name, qs)
return client
new_client = client_from_url(Settings.Instance().NEW_CLIENT_URL + '/spore')
| true
| true
|
f7087e3040b02607a04724ec8594d793d799f809
| 1,140
|
py
|
Python
|
app/platforms/country_map_update.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
app/platforms/country_map_update.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
app/platforms/country_map_update.py
|
QuittyMR/etlas-collector
|
0d2c444f1f0e125ee4accd425591c5468041e7f1
|
[
"MIT"
] | null | null | null |
import pickle
from appcore.services import Factory
from platforms.base_platform import BasePlatform
from platforms.helpers.mysql_connection import MysqlConnection
class CountryMapUpdate(BasePlatform):
API_URL = 'my.sql.server'
DB_SETTINGS = {
'hostname': API_URL,
'username': 'db_user',
'password': 'db_pass',
'db': 'db_schema',
'table': 'countries'
}
def _run(self):
country_map = self._fetch()
self._store(country_map)
return True
def _fetch(self):
self.update('pull', 'started')
with MysqlConnection(**self.DB_SETTINGS) as connection:
countries = connection.execute(
'select country_name, country_code from ' + self.DB_SETTINGS['table']
).fetchall()
self.update('pull', 'completed')
country_map = {country[0].lower(): country[1].lower() for country in countries}
return country_map
def _store(self, country_map):
self.update('store', 'attempted')
Factory().get_storage_client('redis').set('maps', record={'country': pickle.dumps(country_map)})
| 29.230769
| 104
| 0.636842
|
import pickle
from appcore.services import Factory
from platforms.base_platform import BasePlatform
from platforms.helpers.mysql_connection import MysqlConnection
class CountryMapUpdate(BasePlatform):
API_URL = 'my.sql.server'
DB_SETTINGS = {
'hostname': API_URL,
'username': 'db_user',
'password': 'db_pass',
'db': 'db_schema',
'table': 'countries'
}
def _run(self):
country_map = self._fetch()
self._store(country_map)
return True
def _fetch(self):
self.update('pull', 'started')
with MysqlConnection(**self.DB_SETTINGS) as connection:
countries = connection.execute(
'select country_name, country_code from ' + self.DB_SETTINGS['table']
).fetchall()
self.update('pull', 'completed')
country_map = {country[0].lower(): country[1].lower() for country in countries}
return country_map
def _store(self, country_map):
self.update('store', 'attempted')
Factory().get_storage_client('redis').set('maps', record={'country': pickle.dumps(country_map)})
| true
| true
|
f7087efe256dff2d6a4ec38f5d6ad75443d254a4
| 645
|
py
|
Python
|
DQM/L1TMonitorClient/python/L1EmulatorObjHfBitCountsQualityTests_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 3
|
2018-08-24T19:10:26.000Z
|
2019-02-19T11:45:32.000Z
|
DQM/L1TMonitorClient/python/L1EmulatorObjHfBitCountsQualityTests_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 7
|
2016-07-17T02:34:54.000Z
|
2019-08-13T07:58:37.000Z
|
DQM/L1TMonitorClient/python/L1EmulatorObjHfBitCountsQualityTests_cfi.py
|
NTrevisani/cmssw
|
a212a27526f34eb9507cf8b875c93896e6544781
|
[
"Apache-2.0"
] | 5
|
2018-08-21T16:37:52.000Z
|
2020-01-09T13:33:17.000Z
|
# quality tests for L1 HfBitCounts trigger objects
import FWCore.ParameterSet.Config as cms
l1EmulatorObjHfBitCountsQualityTests = cms.EDAnalyzer("QualityTester",
qtList=cms.untracked.FileInPath('DQM/L1TMonitorClient/data/L1EmulatorObjHfBitCountsQualityTests.xml'),
QualityTestPrescaler=cms.untracked.int32(1),
getQualityTestsFromFile=cms.untracked.bool(True),
testInEventloop=cms.untracked.bool(False),
qtestOnEndLumi=cms.untracked.bool(True),
qtestOnEndRun=cms.untracked.bool(True),
qtestOnEndJob=cms.untracked.bool(False),
reportThreshold=cms.untracked.string(""),
verboseQT=cms.untracked.bool(True)
)
| 37.941176
| 106
| 0.789147
|
import FWCore.ParameterSet.Config as cms
l1EmulatorObjHfBitCountsQualityTests = cms.EDAnalyzer("QualityTester",
qtList=cms.untracked.FileInPath('DQM/L1TMonitorClient/data/L1EmulatorObjHfBitCountsQualityTests.xml'),
QualityTestPrescaler=cms.untracked.int32(1),
getQualityTestsFromFile=cms.untracked.bool(True),
testInEventloop=cms.untracked.bool(False),
qtestOnEndLumi=cms.untracked.bool(True),
qtestOnEndRun=cms.untracked.bool(True),
qtestOnEndJob=cms.untracked.bool(False),
reportThreshold=cms.untracked.string(""),
verboseQT=cms.untracked.bool(True)
)
| true
| true
|
f7087f18674824aa39489d7b07d80db3e4b0e9b8
| 543
|
py
|
Python
|
tests/test_renderer.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 17
|
2021-07-29T08:26:08.000Z
|
2022-03-26T23:26:38.000Z
|
tests/test_renderer.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 37
|
2021-07-28T08:19:23.000Z
|
2021-09-24T17:31:07.000Z
|
tests/test_renderer.py
|
saeedou/adia
|
86dc0c96c9b0bd804dff208e91c71a1958df56b0
|
[
"MIT"
] | 3
|
2021-09-14T10:54:51.000Z
|
2022-01-04T15:37:35.000Z
|
from adia.sequence import Module
from adia.renderer import ModulePlan, ItemStartPlan, ItemEndPlan, LEFT, RIGHT
def test_moduleplan():
p = ModulePlan(Module('foo'))
assert repr(p) == 'ModulePlan: foo'
def test_itemplans():
class Item:
def __repr__(self):
return 'foo -> bar'
item = Item()
p = ItemStartPlan(item, Module('foo'), Module('bar'), RIGHT, 0)
assert repr(p) == '~~~> foo -> bar'
p = ItemEndPlan(item, Module('foo'), Module('bar'), LEFT, 0)
assert repr(p) == '<--- foo -> bar'
| 25.857143
| 77
| 0.609576
|
from adia.sequence import Module
from adia.renderer import ModulePlan, ItemStartPlan, ItemEndPlan, LEFT, RIGHT
def test_moduleplan():
p = ModulePlan(Module('foo'))
assert repr(p) == 'ModulePlan: foo'
def test_itemplans():
class Item:
def __repr__(self):
return 'foo -> bar'
item = Item()
p = ItemStartPlan(item, Module('foo'), Module('bar'), RIGHT, 0)
assert repr(p) == '~~~> foo -> bar'
p = ItemEndPlan(item, Module('foo'), Module('bar'), LEFT, 0)
assert repr(p) == '<--- foo -> bar'
| true
| true
|
f70880fce66d165efef7e7785145b657a31e1092
| 8,699
|
py
|
Python
|
tests/test_sflow.py
|
venkatmahalingam/sonic-swss
|
d9f28b64255db54310d3398119f13dfb3203f311
|
[
"Apache-2.0"
] | 1
|
2021-09-01T07:10:04.000Z
|
2021-09-01T07:10:04.000Z
|
tests/test_sflow.py
|
venkatmahalingam/sonic-swss
|
d9f28b64255db54310d3398119f13dfb3203f311
|
[
"Apache-2.0"
] | null | null | null |
tests/test_sflow.py
|
venkatmahalingam/sonic-swss
|
d9f28b64255db54310d3398119f13dfb3203f311
|
[
"Apache-2.0"
] | null | null | null |
import time
class TestSflow:
speed_rate_table = {
"400000": "400000",
"200000": "200000",
"100000": "100000",
"50000": "50000",
"40000": "40000",
"25000": "25000",
"10000": "10000",
"1000": "1000"
}
def setup_sflow(self, dvs):
self.adb = dvs.get_asic_db()
self.cdb = dvs.get_config_db()
self.cdb.create_entry("SFLOW", "global", {"admin_state": "up"})
def test_defaultGlobal(self, dvs, testlog):
self.setup_sflow(dvs)
# Verify that the session is up
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.update_entry("SFLOW", "global", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_globalAll(self, dvs, testlog):
self.setup_sflow(dvs)
# Verify that the session is up first
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
# Then shut down the session
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "up"})
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_InterfaceSet(self, dvs, testlog):
self.setup_sflow(dvs)
# Get the global session info as a baseline
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = ["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
fvs = self.adb.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
global_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
# Then create the interface session
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet0", session_params)
# Verify that the new interface session has been created and is different from the global one
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": global_session}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": "1000"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.create_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.create_entry("SFLOW", "global", {"admin_state": "down"})
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet0")
def test_defaultRate(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
port_oid = self.adb.port_name_map["Ethernet4"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet4")
def test_ConfigDel(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION_TABLE", "Ethernet0", session_params)
self.cdb.delete_entry("SFLOW_SESSION_TABLE", "Ethernet0")
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
def test_SamplingRatePortCfgUpdate(self, dvs, testlog):
'''
This test checks if the SflowMgr updates the sampling rate
1) When the Speed is Updated on the port and no local configuration has been given on the port
Eg:
config sflow enable
config interface speed Ethernet0 25000 (Let's suppose Original Speed for Ethernet0 is 100G)
show sflow interface | grep Ethernet0 (Should see a sampling rate of 25000 not 100000)
'''
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
#dvs.runcmd("portconfig -p {} -s {}".format("Ethernet0", "25000"))
self.cdb.update_entry("PORT", "Ethernet0", {'speed' : "25000"})
expected_fields = {"sample_rate": self.speed_rate_table["25000"]}
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet0", expected_fields)
def test_SamplingRateManualUpdate(self, dvs, testlog):
'''
This test checks if the SflowMgr updates the sampling rate
1) When the Cfg Sflow Table is updated with sampling rate by the user, this rate should not be impacted by Port Speed Changes
Eg:
config sflow enable
config sflow interface sample-rate Ethernet4 256
config interface Ethernet0 speed 25000 (Original Speed for Ethernet0 is 100G)
show sflow interface | grep Ethernet0 (Should see a sampling rate of 256 not 100000 or 25000
'''
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
session_params = {"admin_state": "up", "sample_rate": "256"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
self.cdb.wait_for_field_match("SFLOW_SESSION", "Ethernet4", session_params)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
self.cdb.update_entry("PORT", "Ethernet4", {'speed' : "25000"})
# The Check here is about the original value not getting changed.
# If some bug was to appear, let's give it some time to get noticed
time.sleep(1)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
def test_Teardown(self, dvs, testlog):
self.setup_sflow(dvs)
self.cdb.delete_entry("SFLOW", "global")
self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", 0)
# Add Dummy always-pass test at end as workaroud
# for issue when Flaky fail on final test it invokes module tear-down before retrying
def test_nonflaky_dummy():
pass
| 45.784211
| 133
| 0.697092
|
import time
class TestSflow:
speed_rate_table = {
"400000": "400000",
"200000": "200000",
"100000": "100000",
"50000": "50000",
"40000": "40000",
"25000": "25000",
"10000": "10000",
"1000": "1000"
}
def setup_sflow(self, dvs):
self.adb = dvs.get_asic_db()
self.cdb = dvs.get_config_db()
self.cdb.create_entry("SFLOW", "global", {"admin_state": "up"})
def test_defaultGlobal(self, dvs, testlog):
self.setup_sflow(dvs)
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.update_entry("SFLOW", "global", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_globalAll(self, dvs, testlog):
self.setup_sflow(dvs)
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.update_entry("SFLOW_SESSION", "all", {"admin_state": "up"})
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
def test_InterfaceSet(self, dvs, testlog):
self.setup_sflow(dvs)
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = ["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
fvs = self.adb.wait_for_fields("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
global_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet0", session_params)
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": global_session}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": "1000"}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.create_entry("SFLOW_SESSION", "all", {"admin_state": "down"})
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.create_entry("SFLOW", "global", {"admin_state": "down"})
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "all")
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet0")
def test_defaultRate(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
port_oid = self.adb.port_name_map["Ethernet4"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
self.cdb.delete_entry("SFLOW_SESSION", "Ethernet4")
def test_ConfigDel(self, dvs, testlog):
self.setup_sflow(dvs)
session_params = {"admin_state": "up", "sample_rate": "1000"}
self.cdb.create_entry("SFLOW_SESSION_TABLE", "Ethernet0", session_params)
self.cdb.delete_entry("SFLOW_SESSION_TABLE", "Ethernet0")
port_oid = self.adb.port_name_map["Ethernet0"]
expected_fields = {"SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE": "oid:0x0"}
fvs = self.adb.wait_for_field_negative_match("ASIC_STATE:SAI_OBJECT_TYPE_PORT", port_oid, expected_fields)
sample_session = fvs["SAI_PORT_ATTR_INGRESS_SAMPLEPACKET_ENABLE"]
speed = fvs["SAI_PORT_ATTR_SPEED"]
rate = self.speed_rate_table.get(speed, None)
assert rate
expected_fields = {"SAI_SAMPLEPACKET_ATTR_SAMPLE_RATE": rate}
self.adb.wait_for_field_match("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", sample_session, expected_fields)
def test_SamplingRatePortCfgUpdate(self, dvs, testlog):
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
self.cdb.update_entry("PORT", "Ethernet0", {'speed' : "25000"})
expected_fields = {"sample_rate": self.speed_rate_table["25000"]}
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet0", expected_fields)
def test_SamplingRateManualUpdate(self, dvs, testlog):
self.setup_sflow(dvs)
appldb = dvs.get_app_db()
session_params = {"admin_state": "up", "sample_rate": "256"}
self.cdb.create_entry("SFLOW_SESSION", "Ethernet4", session_params)
self.cdb.wait_for_field_match("SFLOW_SESSION", "Ethernet4", session_params)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
self.cdb.update_entry("PORT", "Ethernet4", {'speed' : "25000"})
time.sleep(1)
appldb.wait_for_field_match("SFLOW_SESSION_TABLE", "Ethernet4", {"sample_rate": "256"})
def test_Teardown(self, dvs, testlog):
self.setup_sflow(dvs)
self.cdb.delete_entry("SFLOW", "global")
self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_SAMPLEPACKET", 0)
# Add Dummy always-pass test at end as workaroud
# for issue when Flaky fail on final test it invokes module tear-down before retrying
def test_nonflaky_dummy():
pass
| true
| true
|
f708832eb4bf7df5624f7937c92fc996b2938f06
| 9,117
|
py
|
Python
|
command.py
|
vapier/git-repo
|
a2e1854e0015f3335959e08ee1aa817fcb8779d9
|
[
"Apache-2.0"
] | 1
|
2021-03-24T01:51:50.000Z
|
2021-03-24T01:51:50.000Z
|
command.py
|
vapier/git-repo
|
a2e1854e0015f3335959e08ee1aa817fcb8779d9
|
[
"Apache-2.0"
] | null | null | null |
command.py
|
vapier/git-repo
|
a2e1854e0015f3335959e08ee1aa817fcb8779d9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
# Number of projects to submit to a single worker process at a time.
# This number represents a tradeoff between the overhead of IPC and finer
# grained opportunity for parallelism. This particular value was chosen by
# iterating through powers of two until the overall performance no longer
# improved. The performance of this batch size is not a function of the
# number of cores on the system.
WORKER_BATCH_SIZE = 32
# How many jobs to run in parallel by default? This assumes the jobs are
# largely I/O bound and do not hit the network.
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
class Command(object):
"""Base class for any command line action in repo.
"""
common = False
event_log = EventLog()
manifest = None
_optparse = None
# Whether this command supports running in parallel. If greater than 0,
# it is the number of parallel jobs to default to.
PARALLEL_JOBS = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
""" Set options from environment variables. """
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
# Get the user-set option value if any
opt_value = getattr(opts, opt_key)
# If the value is set, it means the user has passed it as a command
# line option, and we should use that. Otherwise we can try to set it
# with the value from the corresponding environment variable.
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
epilog = 'Run `repo help %s` to view the detailed manual.' % self.NAME
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
"""Initialize the option parser.
"""
if self.PARALLEL_JOBS is not None:
p.add_option(
'-j', '--jobs',
type=int, default=self.PARALLEL_JOBS,
help='number of jobs to run in parallel (default: %s)' % self.PARALLEL_JOBS)
def _RegisteredEnvironmentOptions(self):
"""Get options that can be set from environment variables.
Return a dictionary mapping environment variable name
to option key name that it can override.
Example: {'REPO_MY_OPTION': 'my_option'}
Will allow the option with key value 'my_option' to be set
from the value in the environment variable named 'REPO_MY_OPTION'.
Note: This does not work properly for options that are explicitly
set to None by the user, or options that are defined with a
default value other than None.
"""
return {}
def Usage(self):
"""Display usage and terminate.
"""
self.OptionParser.print_usage()
sys.exit(1)
def ValidateOptions(self, opt, args):
"""Validate the user options & arguments before executing.
This is meant to help break the code up into logical steps. Some tips:
* Use self.OptionParser.error to display CLI related errors.
* Adjust opt member defaults as makes sense.
* Adjust the args list, but do so inplace so the caller sees updates.
* Try to avoid updating self state. Leave that to Execute.
"""
def Execute(self, opt, args):
"""Perform the action, after option parsing is complete.
"""
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while (path and
path != oldpath and
path != manifest.topdir):
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
submodules_ok=False):
"""A list of projects that match the arguments.
"""
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
mp = manifest.manifestProject
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(groups):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
# We have to filter by manifest groups in case the requested project is
# checked out multiple times or differently based on them.
projects = [project for project in manifest.GetProjectsWithName(arg)
if project.MatchesGroups(groups)]
if not projects:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(manifest, path)
# If it's not a derived project, update path->project mapping and
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and (submodules_ok or
project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(manifest, path) or project
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError('%s (%s)' % (arg, project.relpath))
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
match = pattern.search(project.name) or pattern.search(project.relpath)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(key=lambda project: project.relpath)
return result
class InteractiveCommand(Command):
  """Command which requires user interaction on the tty and
  must not run within a pager, even if the user asks to.
  """

  def WantPager(self, _opt):
    # A pager would capture the tty that the interactive prompt needs.
    return False
class PagedCommand(Command):
  """Command which defaults to output in a pager, as its
  display tends to be larger than one screen full.
  """

  def WantPager(self, _opt):
    # Output typically exceeds one screen; opt into paging by default.
    return True
class MirrorSafeCommand(object):
  """Marker mixin: command permits itself to run within a mirror,
  and does not require a working directory.
  """
class GitcAvailableCommand(object):
  """Marker mixin: command requires GITC to be available, but does
  not require the local client to be a GITC client.
  """
class GitcClientCommand(object):
  """Marker mixin: command requires the local client to be a GITC
  client.
  """
| 31.546713
| 86
| 0.666338
|
import os
import optparse
import platform
import re
import sys
from event_log import EventLog
from error import NoSuchProjectError
from error import InvalidProjectGroupsError
WORKER_BATCH_SIZE = 32
DEFAULT_LOCAL_JOBS = min(os.cpu_count(), 8)
class Command(object):
common = False
event_log = EventLog()
manifest = None
_optparse = None
PARALLEL_JOBS = None
def WantPager(self, _opt):
return False
def ReadEnvironmentOptions(self, opts):
env_options = self._RegisteredEnvironmentOptions()
for env_key, opt_key in env_options.items():
opt_value = getattr(opts, opt_key)
if opt_value is not None:
continue
env_value = os.environ.get(env_key)
if env_value is not None:
setattr(opts, opt_key, env_value)
return opts
@property
def OptionParser(self):
if self._optparse is None:
try:
me = 'repo %s' % self.NAME
usage = self.helpUsage.strip().replace('%prog', me)
except AttributeError:
usage = 'repo %s' % self.NAME
epilog = 'Run `repo help %s` to view the detailed manual.' % self.NAME
self._optparse = optparse.OptionParser(usage=usage, epilog=epilog)
self._Options(self._optparse)
return self._optparse
def _Options(self, p):
if self.PARALLEL_JOBS is not None:
p.add_option(
'-j', '--jobs',
type=int, default=self.PARALLEL_JOBS,
help='number of jobs to run in parallel (default: %s)' % self.PARALLEL_JOBS)
def _RegisteredEnvironmentOptions(self):
return {}
def Usage(self):
self.OptionParser.print_usage()
sys.exit(1)
def ValidateOptions(self, opt, args):
def Execute(self, opt, args):
raise NotImplementedError
def _ResetPathToProjectMap(self, projects):
self._by_path = dict((p.worktree, p) for p in projects)
def _UpdatePathToProjectMap(self, project):
self._by_path[project.worktree] = project
def _GetProjectByPath(self, manifest, path):
project = None
if os.path.exists(path):
oldpath = None
while (path and
path != oldpath and
path != manifest.topdir):
try:
project = self._by_path[path]
break
except KeyError:
oldpath = path
path = os.path.dirname(path)
if not project and path == manifest.topdir:
try:
project = self._by_path[path]
except KeyError:
pass
else:
try:
project = self._by_path[path]
except KeyError:
pass
return project
def GetProjects(self, args, manifest=None, groups='', missing_ok=False,
submodules_ok=False):
if not manifest:
manifest = self.manifest
all_projects_list = manifest.projects
result = []
mp = manifest.manifestProject
if not groups:
groups = manifest.GetGroupsStr()
groups = [x for x in re.split(r'[,\s]+', groups) if x]
if not args:
derived_projects = {}
for project in all_projects_list:
if submodules_ok or project.sync_s:
derived_projects.update((p.name, p)
for p in project.GetDerivedSubprojects())
all_projects_list.extend(derived_projects.values())
for project in all_projects_list:
if (missing_ok or project.Exists) and project.MatchesGroups(groups):
result.append(project)
else:
self._ResetPathToProjectMap(all_projects_list)
for arg in args:
projects = [project for project in manifest.GetProjectsWithName(arg)
if project.MatchesGroups(groups)]
if not projects:
path = os.path.abspath(arg).replace('\\', '/')
project = self._GetProjectByPath(manifest, path)
# search again, as arg might actually point to a derived subproject.
if (project and not project.Derived and (submodules_ok or
project.sync_s)):
search_again = False
for subproject in project.GetDerivedSubprojects():
self._UpdatePathToProjectMap(subproject)
search_again = True
if search_again:
project = self._GetProjectByPath(manifest, path) or project
if project:
projects = [project]
if not projects:
raise NoSuchProjectError(arg)
for project in projects:
if not missing_ok and not project.Exists:
raise NoSuchProjectError('%s (%s)' % (arg, project.relpath))
if not project.MatchesGroups(groups):
raise InvalidProjectGroupsError(arg)
result.extend(projects)
def _getpath(x):
return x.relpath
result.sort(key=_getpath)
return result
def FindProjects(self, args, inverse=False):
result = []
patterns = [re.compile(r'%s' % a, re.IGNORECASE) for a in args]
for project in self.GetProjects(''):
for pattern in patterns:
match = pattern.search(project.name) or pattern.search(project.relpath)
if not inverse and match:
result.append(project)
break
if inverse and match:
break
else:
if inverse:
result.append(project)
result.sort(key=lambda project: project.relpath)
return result
class InteractiveCommand(Command):
def WantPager(self, _opt):
return False
class PagedCommand(Command):
def WantPager(self, _opt):
return True
class MirrorSafeCommand(object):
class GitcAvailableCommand(object):
class GitcClientCommand(object):
| true
| true
|
f708844850409e8b816eb505e866c0d4f24940d2
| 2,400
|
py
|
Python
|
tutorials/cdr/utils.py
|
eloriundo/snorkel
|
746374b94c1558357ecb5bc07927dcc453239b3e
|
[
"Apache-2.0"
] | 2
|
2019-01-08T02:30:35.000Z
|
2019-03-13T07:00:34.000Z
|
tutorials/cdr/utils.py
|
sduttap16/snorkel
|
bbbc1a38295d9411dbb792777e7d834865c0fd63
|
[
"Apache-2.0"
] | null | null | null |
tutorials/cdr/utils.py
|
sduttap16/snorkel
|
bbbc1a38295d9411dbb792777e7d834865c0fd63
|
[
"Apache-2.0"
] | 2
|
2018-12-01T17:10:01.000Z
|
2018-12-28T09:16:41.000Z
|
import bz2
from six.moves.cPickle import load
from string import punctuation
def offsets_to_token(left, right, offset_array, lemmas, punc=set(punctuation)):
    """Map a character span [left, right] onto a range of token indices.

    *offset_array* holds the starting character offset of each token;
    the last token in the span is dropped when its lemma is punctuation.
    """
    start = None
    end = None
    for idx, offset in enumerate(offset_array):
        # Last token whose start is at or before the span start.
        if left >= offset:
            start = idx
        # First token that begins strictly after the span end.
        if offset > right and end is None:
            end = idx
            break
    if end is None:
        end = len(offset_array) - 1
    if lemmas[end - 1] in punc:
        end -= 1
    return range(start, end)
class CDRTagger(object):
    """Applies pre-computed per-document entity tags to parsed sentences."""

    def __init__(self, fname='data/unary_tags.pkl.bz2'):
        # tag_dict: presumably {pubmed_id: [(type|cid, start, stop), ...]}
        # based on how tag() consumes it -- confirm against the pickle.
        with bz2.BZ2File(fname, 'rb') as f:
            self.tag_dict = load(f)

    def tag(self, parts):
        """Fill entity_types / entity_cids for one sentence dict in place."""
        doc_id, _, _, sent_start, sent_end = parts['stable_id'].split(':')
        sent_start, sent_end = int(sent_start), int(sent_end)
        for entity in self.tag_dict.get(doc_id, {}):
            # Only entities whose start falls inside this sentence apply.
            if not (sent_start <= entity[1] <= sent_end):
                continue
            abs_offsets = [off + sent_start for off in parts['char_offsets']]
            token_ids = offsets_to_token(entity[1], entity[2],
                                         abs_offsets, parts['lemmas'])
            for idx in token_ids:
                ts = entity[0].split('|')
                parts['entity_types'][idx] = ts[0]
                parts['entity_cids'][idx] = ts[1]
        return parts
class TaggerOneTagger(CDRTagger):
    """CDRTagger variant that backs off to MeSH dictionary lookups."""

    def __init__(self, fname_tags='data/taggerone_unary_tags_cdr.pkl.bz2',
                 fname_mesh='data/chem_dis_mesh_dicts.pkl.bz2'):
        with bz2.BZ2File(fname_tags, 'rb') as f:
            self.tag_dict = load(f)
        with bz2.BZ2File(fname_mesh, 'rb') as f:
            self.chem_mesh_dict, self.dis_mesh_dict = load(f)

    def tag(self, parts):
        """Run the base tagger, then dictionary-tag long untagged words."""
        parts = super(TaggerOneTagger, self).tag(parts)
        for idx, token in enumerate(parts['words']):
            # Only consider words longer than 4 chars that are still untagged.
            if parts['entity_types'][idx] is not None or len(token) <= 4:
                continue
            key = token.lower()
            # Disease lookup takes precedence over chemical lookup.
            if key in self.dis_mesh_dict:
                parts['entity_types'][idx] = 'Disease'
                parts['entity_cids'][idx] = self.dis_mesh_dict[key]
            elif key in self.chem_mesh_dict:
                parts['entity_types'][idx] = 'Chemical'
                parts['entity_cids'][idx] = self.chem_mesh_dict[key]
        return parts
| 37.5
| 79
| 0.585417
|
import bz2
from six.moves.cPickle import load
from string import punctuation
def offsets_to_token(left, right, offset_array, lemmas, punc=set(punctuation)):
token_start, token_end = None, None
for i, c in enumerate(offset_array):
if left >= c:
token_start = i
if c > right and token_end is None:
token_end = i
break
token_end = len(offset_array) - 1 if token_end is None else token_end
token_end = token_end - 1 if lemmas[token_end - 1] in punc else token_end
return range(token_start, token_end)
class CDRTagger(object):
def __init__(self, fname='data/unary_tags.pkl.bz2'):
with bz2.BZ2File(fname, 'rb') as f:
self.tag_dict = load(f)
def tag(self, parts):
pubmed_id, _, _, sent_start, sent_end = parts['stable_id'].split(':')
sent_start, sent_end = int(sent_start), int(sent_end)
tags = self.tag_dict.get(pubmed_id, {})
for tag in tags:
if not (sent_start <= tag[1] <= sent_end):
continue
offsets = [offset + sent_start for offset in parts['char_offsets']]
toks = offsets_to_token(tag[1], tag[2], offsets, parts['lemmas'])
for tok in toks:
ts = tag[0].split('|')
parts['entity_types'][tok] = ts[0]
parts['entity_cids'][tok] = ts[1]
return parts
class TaggerOneTagger(CDRTagger):
def __init__(self, fname_tags='data/taggerone_unary_tags_cdr.pkl.bz2',
fname_mesh='data/chem_dis_mesh_dicts.pkl.bz2'):
with bz2.BZ2File(fname_tags, 'rb') as f:
self.tag_dict = load(f)
with bz2.BZ2File(fname_mesh, 'rb') as f:
self.chem_mesh_dict, self.dis_mesh_dict = load(f)
def tag(self, parts):
parts = super(TaggerOneTagger, self).tag(parts)
for i, word in enumerate(parts['words']):
tag = parts['entity_types'][i]
if len(word) > 4 and tag is None:
wl = word.lower()
if wl in self.dis_mesh_dict:
parts['entity_types'][i] = 'Disease'
parts['entity_cids'][i] = self.dis_mesh_dict[wl]
elif wl in self.chem_mesh_dict:
parts['entity_types'][i] = 'Chemical'
parts['entity_cids'][i] = self.chem_mesh_dict[wl]
return parts
| true
| true
|
f70884914a35420087f57ef29fe00e211e674b21
| 219
|
py
|
Python
|
misc/udp_sender2.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 18
|
2020-10-16T00:38:55.000Z
|
2022-03-03T06:01:49.000Z
|
misc/udp_sender2.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 20
|
2020-07-23T03:50:50.000Z
|
2021-11-09T04:00:26.000Z
|
misc/udp_sender2.py
|
RyanC1681/RCAI1122
|
c9683110b58c255a7a78d880ff73df7ff2329405
|
[
"Apache-2.0"
] | 140
|
2019-11-20T22:46:02.000Z
|
2022-03-29T13:26:17.000Z
|
import cv2
import numpy as np
import socket
if __name__ == '__main__':
    # Datagram (UDP) socket; no connection setup is needed before sendto().
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    port = 12345  # NOTE(review): unused -- sends below target hard-coded port 8001.
    while True:
        # Busy loop: floods the target host as fast as possible.
        # NOTE(review): presumably a throughput/stress sender -- confirm a
        # sleep or rate limit isn't intended here.
        s.sendto(b'hello world', ("192.168.1.10", 8001))
| 21.9
| 56
| 0.6621
|
import cv2
import numpy as np
import socket
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = 12345
while True:
s.sendto(b'hello world', ("192.168.1.10", 8001))
| true
| true
|
f70885a0c1f0e264313599ed4882bcdc4fbd90cc
| 6,207
|
py
|
Python
|
qcfractal/services/service_util.py
|
dgasmith/QCFractal
|
137cb91d4409a1395273239a9df668a314a1914b
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/services/service_util.py
|
dgasmith/QCFractal
|
137cb91d4409a1395273239a9df668a314a1914b
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/services/service_util.py
|
dgasmith/QCFractal
|
137cb91d4409a1395273239a9df668a314a1914b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Utilities and base functions for Services.
"""
import abc
import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
from pydantic import validator
from qcelemental.models import ComputeError
from ..interface.models import ObjectId, ProtoModel
from ..interface.models.rest_models import TaskQueuePOSTBody
from ..interface.models.task_models import PriorityEnum
from ..procedures import get_procedure_parser
class TaskManager(ProtoModel):
    """Tracks queue tasks a service has submitted and is waiting on."""

    # Excluded from serialization (see Config.serialize_default_excludes).
    storage_socket: Optional[Any] = None
    logger: Optional[Any] = None

    # Maps a service-local key to the id of the queued procedure.
    required_tasks: Dict[str, str] = {}
    tag: Optional[str] = None
    priority: PriorityEnum = PriorityEnum.HIGH

    class Config(ProtoModel.Config):
        allow_mutation = True
        serialize_default_excludes = {"storage_socket", "logger"}

    def done(self) -> bool:
        """
        Check if requested tasks are complete.

        Returns:
            True when every required task is COMPLETE (or none were
            required); False while any are still pending.

        Raises:
            KeyError: if any required task finished in an ERROR state.
        """
        if len(self.required_tasks) == 0:
            return True

        task_query = self.storage_socket.get_procedures(
            id=list(self.required_tasks.values()), include=["status", "error"]
        )

        status_values = set(x["status"] for x in task_query["data"])
        if status_values == {"COMPLETE"}:
            return True

        elif "ERROR" in status_values:
            for x in task_query["data"]:
                if x["status"] != "ERROR":
                    continue

                self.logger.error("Error in service compute as follows:")
                tasks = self.storage_socket.get_queue()["data"]
                # FIX: inner loop no longer shadows the outer loop variable `x`.
                for task in tasks:
                    if "error" not in task:
                        continue
                    self.logger.error(task["error"]["error_message"])

            raise KeyError("All tasks did not execute successfully.")
        else:
            return False

    def get_tasks(self) -> Dict[str, Any]:
        """
        Pulls currently held tasks.

        Returns one procedure record per tracked task, keyed as submitted.
        """
        ret = {}
        for k, task_id in self.required_tasks.items():
            ret[k] = self.storage_socket.get_procedures(id=task_id)["data"][0]

        return ret

    def submit_tasks(self, procedure_type: str, tasks: Dict[str, Any]) -> bool:
        """
        Submits new tasks to the queue and provides a waiter until they are done.

        Raises:
            KeyError: if the queue reports submission errors.
        """
        procedure_parser = get_procedure_parser(procedure_type, self.storage_socket, self.logger)

        required_tasks = {}

        # Add in all new tasks
        for key, packet in tasks.items():
            packet["meta"].update({"tag": self.tag, "priority": self.priority})
            packet = TaskQueuePOSTBody(**packet)

            # Turn packet into a full task; if there are duplicates, get the ID.
            r = procedure_parser.submit_tasks(packet)
            if len(r["meta"]["errors"]):
                # BUG FIX: `errors` was previously an undefined name here and
                # this path raised NameError instead of the intended KeyError.
                raise KeyError(
                    "Problem submitting task: {}.".format(r["meta"]["errors"]))

            required_tasks[key] = r["data"]["ids"][0]

        self.required_tasks = required_tasks

        return True
class BaseService(ProtoModel, abc.ABC):
    """Abstract base model for a long-running service procedure.

    Subclasses implement initialize_from_api() and iterate(); the
    TaskManager member handles queue submission/tracking.
    """

    # Excluded fields
    storage_socket: Optional[Any]
    logger: Optional[Any]

    # Base identification
    id: Optional[ObjectId] = None
    hash_index: str
    service: str
    program: str
    procedure: str

    # Output data
    output: Any

    # Links
    task_id: Optional[ObjectId] = None
    procedure_id: Optional[ObjectId] = None

    # Task manager
    task_tag: Optional[str] = None
    task_priority: PriorityEnum
    # NOTE(review): shared default instance -- pydantic is presumed to copy
    # field defaults per model instance; verify, else state could leak.
    task_manager: TaskManager = TaskManager()

    status: str = "WAITING"
    error: Optional[ComputeError] = None
    tag: Optional[str] = None

    # Sorting and priority
    priority: PriorityEnum = PriorityEnum.NORMAL
    modified_on: datetime.datetime = None
    created_on: datetime.datetime = None

    class Config(ProtoModel.Config):
        allow_mutation = True
        serialize_default_excludes = {"storage_socket", "logger"}

    def __init__(self, **data):
        # Stamp both timestamps with the same "now" unless provided.
        dt = datetime.datetime.utcnow()
        data.setdefault("modified_on", dt)
        data.setdefault("created_on", dt)

        super().__init__(**data)

        # Wire the task manager to this service's socket/logger/tag/priority.
        self.task_manager.logger = self.logger
        self.task_manager.storage_socket = self.storage_socket
        self.task_manager.tag = self.task_tag
        self.task_manager.priority = self.task_priority

    @validator("task_priority", pre=True)
    def munge_priority(cls, v):
        # Accept enum names as strings ("high" -> PriorityEnum.HIGH);
        # None defaults to HIGH.
        if isinstance(v, str):
            v = PriorityEnum[v.upper()]
        elif v is None:
            v = PriorityEnum.HIGH
        return v

    @classmethod
    @abc.abstractmethod
    def initialize_from_api(cls, storage_socket, meta, molecule, tag=None, priority=None):
        """
        Initalizes a Service from the API.
        """

    @abc.abstractmethod
    def iterate(self):
        """
        Takes a "step" of the service. Should return False if not finished.
        """
def expand_ndimensional_grid(
    dimensions: Tuple[int, ...], seeds: Set[Tuple[int, ...]], complete: Set[Tuple[int, ...]]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
    """
    Expands an n-dimensional key/value grid.

    For each seed point, step +/-1 along every axis and collect the
    (seed, neighbour) edges for neighbours inside the grid that are not
    already scheduled or complete.

    Example
    -------

    >>> expand_ndimensional_grid((3, 3), {(1, 1)}, set())
    [((1, 1), (0, 1)), ((1, 1), (2, 1)), ((1, 1), (1, 0)), ((1, 1), (1, 2))]
    """
    dimensions = tuple(dimensions)
    scheduled = set()
    connections = []

    for axis in range(len(dimensions)):
        for seed in seeds:
            for step in (-1, 1):
                pos = seed[axis] + step

                # Drop neighbours that fall off either edge of this axis.
                if not (0 <= pos < dimensions[axis]):
                    continue

                neighbour = seed[:axis] + (pos,) + seed[axis + 1:]

                # Skip anything already scheduled here or finished earlier.
                if neighbour in scheduled or neighbour in complete:
                    continue

                scheduled.add(neighbour)
                connections.append((seed, neighbour))

    return connections
| 28.213636
| 97
| 0.585629
|
import abc
import datetime
from typing import Any, Dict, List, Optional, Set, Tuple
from pydantic import validator
from qcelemental.models import ComputeError
from ..interface.models import ObjectId, ProtoModel
from ..interface.models.rest_models import TaskQueuePOSTBody
from ..interface.models.task_models import PriorityEnum
from ..procedures import get_procedure_parser
class TaskManager(ProtoModel):
storage_socket: Optional[Any] = None
logger: Optional[Any] = None
required_tasks: Dict[str, str] = {}
tag: Optional[str] = None
priority: PriorityEnum = PriorityEnum.HIGH
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def done(self) -> bool:
if len(self.required_tasks) == 0:
return True
task_query = self.storage_socket.get_procedures(
id=list(self.required_tasks.values()), include=["status", "error"]
)
status_values = set(x["status"] for x in task_query["data"])
if status_values == {"COMPLETE"}:
return True
elif "ERROR" in status_values:
for x in task_query["data"]:
if x["status"] != "ERROR":
continue
self.logger.error("Error in service compute as follows:")
tasks = self.storage_socket.get_queue()["data"]
for x in tasks:
if "error" not in x:
continue
self.logger.error(x["error"]["error_message"])
raise KeyError("All tasks did not execute successfully.")
else:
return False
def get_tasks(self) -> Dict[str, Any]:
ret = {}
for k, id in self.required_tasks.items():
ret[k] = self.storage_socket.get_procedures(id=id)["data"][0]
return ret
def submit_tasks(self, procedure_type: str, tasks: Dict[str, Any]) -> bool:
procedure_parser = get_procedure_parser(procedure_type, self.storage_socket, self.logger)
required_tasks = {}
for key, packet in tasks.items():
packet["meta"].update({"tag": self.tag, "priority": self.priority})
packet = TaskQueuePOSTBody(**packet)
r = procedure_parser.submit_tasks(packet)
if len(r["meta"]["errors"]):
raise KeyError("Problem submitting task: {}.".format(errors))
required_tasks[key] = r["data"]["ids"][0]
self.required_tasks = required_tasks
return True
class BaseService(ProtoModel, abc.ABC):
storage_socket: Optional[Any]
logger: Optional[Any]
id: Optional[ObjectId] = None
hash_index: str
service: str
program: str
procedure: str
output: Any
task_id: Optional[ObjectId] = None
procedure_id: Optional[ObjectId] = None
task_tag: Optional[str] = None
task_priority: PriorityEnum
task_manager: TaskManager = TaskManager()
status: str = "WAITING"
error: Optional[ComputeError] = None
tag: Optional[str] = None
priority: PriorityEnum = PriorityEnum.NORMAL
modified_on: datetime.datetime = None
created_on: datetime.datetime = None
class Config(ProtoModel.Config):
allow_mutation = True
serialize_default_excludes = {"storage_socket", "logger"}
def __init__(self, **data):
dt = datetime.datetime.utcnow()
data.setdefault("modified_on", dt)
data.setdefault("created_on", dt)
super().__init__(**data)
self.task_manager.logger = self.logger
self.task_manager.storage_socket = self.storage_socket
self.task_manager.tag = self.task_tag
self.task_manager.priority = self.task_priority
@validator("task_priority", pre=True)
def munge_priority(cls, v):
if isinstance(v, str):
v = PriorityEnum[v.upper()]
elif v is None:
v = PriorityEnum.HIGH
return v
@classmethod
@abc.abstractmethod
def initialize_from_api(cls, storage_socket, meta, molecule, tag=None, priority=None):
@abc.abstractmethod
def iterate(self):
def expand_ndimensional_grid(
dimensions: Tuple[int, ...], seeds: Set[Tuple[int, ...]], complete: Set[Tuple[int, ...]]
) -> List[Tuple[Tuple[int, ...], Tuple[int, ...]]]:
dimensions = tuple(dimensions)
compute = set()
connections = []
for d in range(len(dimensions)):
for seed in seeds:
for disp in [-1, 1]:
new_dim = seed[d] + disp
if new_dim >= dimensions[d]:
continue
if new_dim < 0:
continue
new = list(seed)
new[d] = new_dim
new = tuple(new)
if new in compute:
continue
if new in complete:
continue
compute |= {new}
connections.append((seed, new))
return connections
| true
| true
|
f70887653f4a99c859d4974b9a671614af60f65c
| 1,445
|
py
|
Python
|
nipype/interfaces/fsl/tests/test_auto_ImageMeants.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/fsl/tests/test_auto_ImageMeants.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
nipype/interfaces/fsl/tests/test_auto_ImageMeants.py
|
grlee77/nipype
|
73f3a733ac1b7d9b09ec32a387905a9302423b87
|
[
"BSD-3-Clause"
] | null | null | null |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import ImageMeants
def test_ImageMeants_inputs():
    """Verify ImageMeants input-trait metadata matches the generated table.

    AUTO-GENERATED style check (see file header: DO NOT EDIT by hand);
    yields one assertion per (trait, metadata-key) pair.
    """
    input_map = dict(args=dict(argstr='%s',
    ),
    eig=dict(argstr='--eig',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    in_file=dict(argstr='-i %s',
    mandatory=True,
    position=0,
    ),
    mask=dict(argstr='-m %s',
    ),
    nobin=dict(argstr='--no_bin',
    ),
    order=dict(argstr='--order=%d',
    usedefault=True,
    ),
    out_file=dict(argstr='-o %s',
    genfile=True,
    hash_files=False,
    ),
    output_type=dict(),
    show_all=dict(argstr='--showall',
    ),
    spatial_coord=dict(argstr='-c %s',
    ),
    terminal_output=dict(nohash=True,
    ),
    transpose=dict(argstr='--transpose',
    ),
    use_mm=dict(argstr='--usemm',
    ),
    )
    inputs = ImageMeants.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ImageMeants_outputs():
    """Verify ImageMeants output-trait metadata matches the generated table.

    AUTO-GENERATED style check (see file header: DO NOT EDIT by hand).
    """
    output_map = dict(out_file=dict(),
    )
    outputs = ImageMeants.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 24.913793
| 78
| 0.626298
|
from nipype.testing import assert_equal
from nipype.interfaces.fsl.utils import ImageMeants
def test_ImageMeants_inputs():
input_map = dict(args=dict(argstr='%s',
),
eig=dict(argstr='--eig',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='-i %s',
mandatory=True,
position=0,
),
mask=dict(argstr='-m %s',
),
nobin=dict(argstr='--no_bin',
),
order=dict(argstr='--order=%d',
usedefault=True,
),
out_file=dict(argstr='-o %s',
genfile=True,
hash_files=False,
),
output_type=dict(),
show_all=dict(argstr='--showall',
),
spatial_coord=dict(argstr='-c %s',
),
terminal_output=dict(nohash=True,
),
transpose=dict(argstr='--transpose',
),
use_mm=dict(argstr='--usemm',
),
)
inputs = ImageMeants.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ImageMeants_outputs():
output_map = dict(out_file=dict(),
)
outputs = ImageMeants.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| true
| true
|
f70888cccef2dea4dc7409027873cec829ecaa0e
| 3,682
|
py
|
Python
|
backend/shopping-cart-service/add_to_cart.py
|
qingshui-hui/aws-serverless-shopping-cart
|
3838c981b02726e1ff7b504f1aa0f99b1ddf9b5a
|
[
"MIT-0"
] | null | null | null |
backend/shopping-cart-service/add_to_cart.py
|
qingshui-hui/aws-serverless-shopping-cart
|
3838c981b02726e1ff7b504f1aa0f99b1ddf9b5a
|
[
"MIT-0"
] | null | null | null |
backend/shopping-cart-service/add_to_cart.py
|
qingshui-hui/aws-serverless-shopping-cart
|
3838c981b02726e1ff7b504f1aa0f99b1ddf9b5a
|
[
"MIT-0"
] | null | null | null |
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from shared import (
NotFoundException,
generate_ttl,
get_cart_id,
get_headers,
get_user_sub,
)
from utils import get_product_from_external_service
# Lambda Powertools singletons, created once per container (cold start).
logger = Logger()
tracer = Tracer()
metrics = Metrics()

dynamodb = boto3.resource("dynamodb")
# Required environment: missing variables fail fast at import time.
table = dynamodb.Table(os.environ["TABLE_NAME"])
product_service_url = os.environ["PRODUCT_SERVICE_URL"]
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(log_event=True)
@tracer.capture_lambda_handler
def lambda_handler(event, context):
    """
    Add the provided quantity of a product to a cart. Where an item already
    exists in the cart, the quantities will be summed.

    Returns an API Gateway proxy response: 400 for a missing request body,
    404 for an unknown product id, 200 once the cart row has been upserted.
    """
    try:
        request_payload = json.loads(event["body"])
    except KeyError:
        return {
            "statusCode": 400,
            "headers": get_headers(""),
            "body": json.dumps({"message": "No Request payload"}),
        }

    product_id = request_payload["productId"]
    quantity = request_payload.get("quantity", 1)
    cart_id, _ = get_cart_id(event["headers"])

    # Because this method can be called anonymously, we need to check there's a logged in user
    user_sub = None
    jwt_token = event["headers"].get("Authorization")
    if jwt_token:
        user_sub = get_user_sub(jwt_token)

    try:
        product = get_product_from_external_service(product_id)
    except NotFoundException:
        # BUG FIX: this log line previously sat on the success path inside
        # the try block and fired for every *found* product; it belongs here.
        logger.info("No product found with product_id: %s", product_id)
        return {
            "statusCode": 404,
            "headers": get_headers(cart_id=cart_id),
            "body": json.dumps({"message": "product not found"}),
        }

    if user_sub:
        logger.info("Authenticated user")
        pk = f"user#{user_sub}"
        ttl = generate_ttl(
            7
        )  # Set a longer ttl for logged in users - we want to keep their cart for longer.
    else:
        logger.info("Unauthenticated user")
        pk = f"cart#{cart_id}"
        ttl = generate_ttl()

    if int(quantity) < 0:
        # Decrement path: a condition expression prevents the stored
        # quantity from dropping below zero.
        table.update_item(
            Key={"pk": pk, "sk": f"product#{product_id}"},
            ExpressionAttributeNames={
                "#quantity": "quantity",
                "#expirationTime": "expirationTime",
                "#productDetail": "productDetail",
            },
            ExpressionAttributeValues={
                ":val": quantity,
                ":ttl": ttl,
                ":productDetail": product,
                ":limit": abs(quantity),
            },
            UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
            # Prevent quantity less than 0
            ConditionExpression="quantity >= :limit",
        )
    else:
        table.update_item(
            Key={"pk": pk, "sk": f"product#{product_id}"},
            ExpressionAttributeNames={
                "#quantity": "quantity",
                "#expirationTime": "expirationTime",
                "#productDetail": "productDetail",
            },
            ExpressionAttributeValues={
                ":val": quantity,
                # BUG FIX: previously recomputed generate_ttl() here, which
                # silently discarded the 7-day TTL chosen above for
                # authenticated users on the normal add path.
                ":ttl": ttl,
                ":productDetail": product,
            },
            UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
        )
    metrics.add_metric(name="CartUpdated", unit="Count", value=1)

    return {
        "statusCode": 200,
        "headers": get_headers(cart_id),
        "body": json.dumps(
            {"productId": product_id, "message": "product added to cart"}
        ),
    }
| 32.017391
| 117
| 0.591798
|
import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from shared import (
NotFoundException,
generate_ttl,
get_cart_id,
get_headers,
get_user_sub,
)
from utils import get_product_from_external_service
logger = Logger()
tracer = Tracer()
metrics = Metrics()
dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["TABLE_NAME"])
product_service_url = os.environ["PRODUCT_SERVICE_URL"]
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(log_event=True)
@tracer.capture_lambda_handler
def lambda_handler(event, context):
try:
request_payload = json.loads(event["body"])
except KeyError:
return {
"statusCode": 400,
"headers": get_headers(""),
"body": json.dumps({"message": "No Request payload"}),
}
product_id = request_payload["productId"]
quantity = request_payload.get("quantity", 1)
cart_id, _ = get_cart_id(event["headers"])
user_sub = None
jwt_token = event["headers"].get("Authorization")
if jwt_token:
user_sub = get_user_sub(jwt_token)
try:
product = get_product_from_external_service(product_id)
logger.info("No product found with product_id: %s", product_id)
except NotFoundException:
return {
"statusCode": 404,
"headers": get_headers(cart_id=cart_id),
"body": json.dumps({"message": "product not found"}),
}
if user_sub:
logger.info("Authenticated user")
pk = f"user#{user_sub}"
ttl = generate_ttl(
7
) # Set a longer ttl for logged in users - we want to keep their cart for longer.
else:
logger.info("Unauthenticated user")
pk = f"cart#{cart_id}"
ttl = generate_ttl()
if int(quantity) < 0:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": ttl,
":productDetail": product,
":limit": abs(quantity),
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
# Prevent quantity less than 0
ConditionExpression="quantity >= :limit",
)
else:
table.update_item(
Key={"pk": pk, "sk": f"product#{product_id}"},
ExpressionAttributeNames={
"#quantity": "quantity",
"#expirationTime": "expirationTime",
"#productDetail": "productDetail",
},
ExpressionAttributeValues={
":val": quantity,
":ttl": generate_ttl(),
":productDetail": product,
},
UpdateExpression="ADD #quantity :val SET #expirationTime = :ttl, #productDetail = :productDetail",
)
metrics.add_metric(name="CartUpdated", unit="Count", value=1)
return {
"statusCode": 200,
"headers": get_headers(cart_id),
"body": json.dumps(
{"productId": product_id, "message": "product added to cart"}
),
}
| true
| true
|
f70888fefa0932966b4e0cafeacfa5a514ce37b1
| 13,148
|
py
|
Python
|
src/config/api-server/vnc_cfg_api_server/tests/resources/test_sync_node_profile.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/vnc_cfg_api_server/tests/resources/test_sync_node_profile.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | null | null | null |
src/config/api-server/vnc_cfg_api_server/tests/resources/test_sync_node_profile.py
|
Dmitry-Eremeev/contrail-controller
|
1238bcff697981662225ec5a15bc4d3d2237ae93
|
[
"Apache-2.0"
] | null | null | null |
import logging
import pprint
from vnc_api.gen.resource_client import Card
from vnc_api.gen.resource_client import Hardware
from vnc_api.gen.resource_client import Node
from vnc_api.gen.resource_client import NodeProfile
from vnc_api.gen.resource_client import Port
from vnc_api.gen.resource_client import Tag
from vnc_api.gen.resource_xsd import BaremetalPortInfo
from vnc_api.gen.resource_xsd import InterfaceMapType
from vnc_api.gen.resource_xsd import LocalLinkConnection
from vnc_api.gen.resource_xsd import PortInfoType
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestNodeProfile(test_case.ApiServerTestCase):
    """API-server integration tests for node <-> node-profile synchronisation.

    Creates nodes, ports, tags and node-profiles through the VNC API,
    associates/dissociates profiles from a node, and logs the resulting
    port and port-group state at each step.
    """

    @classmethod
    def setUpClass(cls, *args, **kwargs):
        """Attach a DEBUG stream handler so test logging is visible."""
        cls.console_handler = logging.StreamHandler()
        cls.console_handler.setLevel(logging.DEBUG)
        logger.addHandler(cls.console_handler)
        super(TestNodeProfile, cls).setUpClass(*args, **kwargs)

    @classmethod
    def tearDownClass(cls, *args, **kwargs):
        """Detach the handler installed in setUpClass."""
        logger.removeHandler(cls.console_handler)
        super(TestNodeProfile, cls).tearDownClass(*args, **kwargs)

    @property
    def api(self):
        """Shortcut to the VNC API client owned by the test case."""
        return self._vnc_lib

    def print_node_profile(self, node_profile_uuid="", np_fq_name=None):
        """Log a node-profile and its hardware/card/port-info tree.

        Looks the profile up by UUID when given, else by FQ name; silently
        returns when neither identifier is supplied.
        """
        # Fix: the original default for np_fq_name was a shared mutable list
        # ([]); None is equivalent under the truthiness test below and avoids
        # the mutable-default pitfall.
        if node_profile_uuid:
            np_read = self.api.node_profile_read(id=node_profile_uuid)
        elif np_fq_name:
            np_read = self.api.node_profile_read(fq_name=np_fq_name)
        else:
            return
        # logger.warning replaces the deprecated logger.warn alias throughout.
        logger.warning("============ Node Profile Dict ===================")
        logger.warning(pprint.pformat(np_read.__dict__))
        hw_refs = np_read.get_hardware_refs()
        for hw_ref in hw_refs:
            hw_obj = self.api.hardware_read(id=hw_ref.get('uuid'))
            logger.warning(pprint.pformat(hw_obj.__dict__))
            card_refs = hw_obj.get_card_refs()
            for card_ref in card_refs:
                card_obj = self.api.card_read(id=card_ref.get('uuid'))
                logger.warning(pprint.pformat(card_obj.__dict__))
                port_map = card_obj.get_interface_map()
                port_info = port_map.get_port_info()
                for port in port_info:
                    logger.warning("============== Port Info =================")
                    logger.warning(pprint.pformat(port))

    def create_node_and_port(self, node_and_port):
        """Create each node in *node_and_port* plus its Ports.

        ``node_and_port`` maps node name -> list of port dicts with keys
        ``name``, ``address`` and optionally ``sw_name``/``port_id`` for the
        local-link connection.
        """
        for node in node_and_port:
            node_obj = Node(node, node_hostname=node)
            self.api.node_create(node_obj)
            for port in node_and_port[node]:
                logger.warning(port['name'])
                ll_obj = None
                # Only build a local-link connection when both the switch
                # name and the switch port are supplied.
                if port.get('sw_name') and port.get('port_id'):
                    ll_obj = LocalLinkConnection(
                        switch_info=port.get('sw_name'),
                        port_id=port.get('port_id'))
                bm_info = BaremetalPortInfo(address=port.get('address'),
                                            local_link_connection=ll_obj)
                node_port_obj = Port(port.get('name'),
                                     node_obj,
                                     bms_port_info=bm_info)
                self.api.port_create(node_port_obj)

    def remove_node_and_port(self, node_and_port):
        """Delete port-groups, ports and nodes created from *node_and_port*.

        Port-groups must go first: they reference the ports, which in turn
        are children of the node.
        """
        logger.warning("Removing Node and Port")
        for node in node_and_port:
            logger.warning("Removing Node ")
            port_groups = self.api.port_groups_list(
                parent_fq_name=['default-global-system-config', node])
            logger.warning(pprint.pformat(port_groups))
            for pg in port_groups['port-groups']:
                logger.warning('DELETING Port-Group : ' + str(pg['fq_name'][-1]))
                self.api.port_group_delete(fq_name=pg['fq_name'])
            for port in node_and_port[node]:
                logger.warning("Removing Port " + port.get('name'))
                self.api.port_delete(fq_name=['default-global-system-config',
                                              node, port.get('name')])
                logger.warning("PORT : " + port.get('name'))
            self.api.node_delete(fq_name=['default-global-system-config',
                                          node])
            logger.warning("NODE: " + node)
        return

    def create_tags(self):
        """Create the label tags referenced by the node-profile port maps."""
        tag_list = {
            'provisioning': {'tag_type_name': 'label'},
            'tenant': {'tag_type_name': 'label'},
            'tenant1': {'tag_type_name': 'label'},
            'tenant2': {'tag_type_name': 'label'},
            'tenant3': {'tag_type_name': 'label'},
            'provisioning1': {'tag_type_name': 'label'},
            'control-data1': {'tag_type_name': 'label'},
            'control-data': {'tag_type_name': 'label'}}
        for tag in tag_list:
            tag_obj = Tag(tag_type_name=tag_list[tag]['tag_type_name'],
                          tag_value=tag)
            self.api.tag_create(tag_obj)
            tag_read_obj = self.api.tag_read(id=tag_obj.uuid)
            logger.warning("TAGS %s", pprint.pformat(tag_read_obj.__dict__))

    def create_node_profile(self, node_profile_data):
        """Create NodeProfile objects (plus Hardware/Card) from a spec dict.

        ``node_profile_data`` maps profile name -> dict with
        ``node_profile_vendor``, ``node_profile_device_family`` and a
        ``hardware`` sub-dict holding one ``card`` with an ``interface-map``.
        """
        for np in node_profile_data:
            hardware = node_profile_data[np]['hardware']
            interface_map = hardware['card']['interface-map']
            ifmap_list = []
            for iface in interface_map:
                logger.warning(iface)
                logger.warning(pprint.pformat(interface_map[iface]))
                port_info = PortInfoType(
                    name=iface,
                    type="xe",
                    port_speed=interface_map[iface].get('port_speed'),
                    labels=interface_map[iface].get('labels'),
                    port_group=interface_map[iface].get('port_group'))
                ifmap_list.append(port_info)
            iface_map = InterfaceMapType(port_info=ifmap_list)
            logger.warning("PORT-MPA %s", pprint.pformat(iface_map.__dict__))
            card_obj = Card(hardware['card'].get('name'),
                            interface_map=iface_map)
            self.api.card_create(card_obj)
            hw_obj = Hardware(hardware.get('name'))
            hw_obj.add_card(card_obj)
            self.api.hardware_create(hw_obj)
            node_profile_obj = NodeProfile(
                np,
                node_profile_vendor=node_profile_data[np].get(
                    'node_profile_vendor'),
                node_profile_device_family=node_profile_data[np].get(
                    'node_profile_device_family'))
            node_profile_obj.add_hardware(hw_obj)
            self.api.node_profile_create(node_profile_obj)
            self.print_node_profile(node_profile_uuid=node_profile_obj.uuid)
        return

    def test_create_node_profile(self):
        """Test node-profile association with Node.

        create node (node1), and ports.
        create node-profiles qfx1-np and qfx2-np
        create tags to be used
        associate node with qfx1-np, now node-ports should
        ref to tags from node-profile.
        assoicate node with qfx2-np, now node-ports should
        ref to new tags from node-profile.
        remove ref from node, tags from node-ports should
        be removed.
        remove ports and node, there should not be any error.
        """
        node_and_port = {
            'node1':
                [{'name': 'eth0',
                  'address': "11:22:33:44:55:55",
                  'sw_name': 'unit_test_qfx1',
                  'port_id': 'xe-0/0/0'},
                 {'name': 'eth1',
                  'address': "11:22:33:44:55:56",
                  'sw_name': 'unit_test_qfx1',
                  'port_id': 'xe-0/0/1'},
                 {'name': 'eth2',
                  'address': "11:22:33:44:55:57",
                  'sw_name': 'unit_test_qfx1',
                  'port_id': 'xe-0/0/2'}]}
        node_profile_data = {
            'qfx1-np': {
                'node_profile_vendor': 'Juniper',
                'node_profile_device_family': 'qfx',
                'hardware': {
                    'name': 'hw1',
                    'card': {
                        'name': 'card1',
                        'interface-map': {
                            'eth0': {
                                'labels': ["provisioning", "tenant"],
                                'port_group': 'bond0',
                                'port_speed': '10G'
                            },
                            'eth1': {
                                'labels': ["tenant"],
                                'port_group': 'bond0',
                                'port_speed': '10G'
                            },
                            'eth2': {
                                'labels': ["provisioning",
                                           "tenant",
                                           "control-data"],
                                'port_speed': '10G'
                            }
                        }
                    }
                }
            }
        }
        node_profile_data1 = {
            'qfx2-np': {
                'node_profile_vendor': 'Juniper',
                'node_profile_device_family': 'qfx',
                'hardware': {
                    'name': 'hw2',
                    'card': {
                        'name': 'card2',
                        'interface-map': {
                            'eth0': {
                                'labels': [
                                    "provisioning1",
                                    "tenant1"],
                                'port_group': 'bond1',
                                'port_speed': '10G'
                            },
                            'eth1': {
                                'labels': ["tenant2"],
                                'port_group': 'bond1',
                                'port_speed': '10G'
                            },
                            'eth2': {
                                'labels': [
                                    "provisioning1",
                                    "tenant3",
                                    "control-data1"],
                                'port_speed': '10G'
                            }
                        }
                    }
                }
            }
        }
        self.create_tags()
        self.create_node_profile(node_profile_data)
        self.create_node_profile(node_profile_data1)
        self.create_node_and_port(node_and_port)
        node_object = self.api.node_read(
            fq_name=['default-global-system-config', 'node1'])
        np_object = self.api.node_profile_read(
            fq_name=['default-global-system-config', 'qfx1-np'])
        np2_object = self.api.node_profile_read(
            fq_name=['default-global-system-config', 'qfx2-np'])
        logger.warning(pprint.pformat(node_object.__dict__))
        # Associate the node with qfx1-np, then re-associate with qfx2-np;
        # the ports should pick up the tags of the latest profile.
        node_object.set_node_profile(np_object)
        self.api.node_update(node_object)
        node_object.set_node_profile(np2_object)
        self.api.node_update(node_object)
        for node in node_and_port:
            node_object_update = self.api.node_read(
                fq_name=['default-global-system-config', node])
            logger.warning(pprint.pformat(node_object_update.__dict__))
            for port in node_and_port[node]:
                port_obj = self.api.port_read(
                    fq_name=['default-global-system-config',
                             node,
                             port.get('name')])
                logger.warning(pprint.pformat(port_obj.__dict__))
        # Drop the node -> node-profile ref; port tags should be removed.
        self.api.ref_update('node',
                            node_object.uuid,
                            'node-profile',
                            np2_object.uuid,
                            ['default-global-system-config', 'qfx2-np'],
                            'DELETE')
        for node in node_and_port:
            node_object_update = self.api.node_read(
                fq_name=['default-global-system-config', node])
            logger.warning(pprint.pformat(node_object_update.__dict__))
            for port in node_and_port[node]:
                port_obj = self.api.port_read(
                    fq_name=['default-global-system-config',
                             node,
                             port.get('name')])
                logger.warning("==============")
                logger.warning(pprint.pformat(port_obj.__dict__))
            port_groups = self.api.port_groups_list(
                parent_fq_name=['default-global-system-config', node])
            logger.warning('Port-Groups Printing ==============')
            logger.warning(pprint.pformat(port_groups))
            for pg in port_groups['port-groups']:
                logger.warning("==============")
                pg_obj = self.api.port_group_read(fq_name=pg['fq_name'])
                logger.warning(pprint.pformat(pg_obj.__dict__))
        self.remove_node_and_port(node_and_port)
        logger.warning('PASS - NodeProfile Created')
| 42.688312
| 78
| 0.5054
|
import logging
import pprint
from vnc_api.gen.resource_client import Card
from vnc_api.gen.resource_client import Hardware
from vnc_api.gen.resource_client import Node
from vnc_api.gen.resource_client import NodeProfile
from vnc_api.gen.resource_client import Port
from vnc_api.gen.resource_client import Tag
from vnc_api.gen.resource_xsd import BaremetalPortInfo
from vnc_api.gen.resource_xsd import InterfaceMapType
from vnc_api.gen.resource_xsd import LocalLinkConnection
from vnc_api.gen.resource_xsd import PortInfoType
from vnc_cfg_api_server.tests import test_case
logger = logging.getLogger(__name__)
class TestNodeProfile(test_case.ApiServerTestCase):
@classmethod
def setUpClass(cls, *args, **kwargs):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
super(TestNodeProfile, cls).setUpClass(*args, **kwargs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestNodeProfile, cls).tearDownClass(*args, **kwargs)
@property
def api(self):
return self._vnc_lib
def print_node_profile(self, node_profile_uuid="", np_fq_name=[]):
if node_profile_uuid:
np_read = self.api.node_profile_read(id=node_profile_uuid)
elif np_fq_name:
np_read = self.api.node_profile_read(fq_name=np_fq_name)
else:
return
logger.warn("============ Node Profile Dict ===================")
logger.warn(pprint.pformat(np_read.__dict__))
hw_refs = np_read.get_hardware_refs()
for hw_ref in hw_refs:
hw_obj = self.api.hardware_read(id=hw_ref.get('uuid'))
logger.warn(pprint.pformat(hw_obj.__dict__))
card_refs = hw_obj.get_card_refs()
for card_ref in card_refs:
card_obj = self.api.card_read(id=card_ref.get('uuid'))
logger.warn(pprint.pformat(card_obj.__dict__))
port_map = card_obj.get_interface_map()
port_info = port_map.get_port_info()
for port in port_info:
logger.warn("============== Port Info =================")
logger.warn(pprint.pformat(port))
def create_node_and_port(self, node_and_port):
for node in node_and_port:
node_obj = Node(node, node_hostname=node)
self.api.node_create(node_obj)
for port in node_and_port[node]:
logger.warn(port['name'])
ll_obj = None
if port.get('sw_name') and port.get('port_id'):
ll_obj = LocalLinkConnection(
switch_info=port.get('sw_name'),
port_id=port.get('port_id'))
bm_info = BaremetalPortInfo(address=port.get('address'),
local_link_connection=ll_obj)
node_port_obj = Port(port.get('name'),
node_obj,
bms_port_info=bm_info)
self.api.port_create(node_port_obj)
def remove_node_and_port(self, node_and_port):
logger.warn("Removing Node and Port")
for node in node_and_port:
logger.warn("Removing Node ")
port_groups = self.api.port_groups_list(
parent_fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(port_groups))
for pg in port_groups['port-groups']:
logger.warn('DELETING Port-Group : ' + str(pg['fq_name'][-1]))
self.api.port_group_delete(fq_name=pg['fq_name'])
for port in node_and_port[node]:
logger.warn("Removing Port " + port.get('name'))
self.api.port_delete(fq_name=['default-global-system-config',
node, port.get('name')])
logger.warn("PORT : " + port.get('name'))
self.api.node_delete(fq_name=['default-global-system-config',
node])
logger.warn("NODE: " + node)
return
def create_tags(self):
tag_list = {
'provisioning': {'tag_type_name': 'label'},
'tenant': {'tag_type_name': 'label'},
'tenant1': {'tag_type_name': 'label'},
'tenant2': {'tag_type_name': 'label'},
'tenant3': {'tag_type_name': 'label'},
'provisioning1': {'tag_type_name': 'label'},
'control-data1': {'tag_type_name': 'label'},
'control-data': {'tag_type_name': 'label'}}
for tag in tag_list:
tag_obj = Tag(tag_type_name=tag_list[tag]['tag_type_name'],
tag_value=tag)
self.api.tag_create(tag_obj)
tag_read_obj = self.api.tag_read(id=tag_obj.uuid)
logger.warn("TAGS %s", pprint.pformat(tag_read_obj.__dict__))
def create_node_profile(self, node_profile_data):
for np in node_profile_data:
hardware = node_profile_data[np]['hardware']
interface_map = hardware['card']['interface-map']
ifmap_list = []
for iface in interface_map:
logger.warn(iface)
logger.warn(pprint.pformat(interface_map[iface]))
port_info = PortInfoType(
name=iface,
type="xe",
port_speed=interface_map[iface].get('port_speed'),
labels=interface_map[iface].get('labels'),
port_group=interface_map[iface].get('port_group'))
ifmap_list.append(port_info)
iface_map = InterfaceMapType(port_info=ifmap_list)
logger.warn("PORT-MPA %s", pprint.pformat(iface_map.__dict__))
card_obj = Card(hardware['card'].get('name'),
interface_map=iface_map)
self.api.card_create(card_obj)
hw_obj = Hardware(hardware.get('name'))
hw_obj.add_card(card_obj)
self.api.hardware_create(hw_obj)
node_profile_obj = NodeProfile(
np,
node_profile_vendor=node_profile_data[np].get(
'node_profile_vendor'),
node_profile_device_family=node_profile_data[np].get(
'node_profile_device_family'))
node_profile_obj.add_hardware(hw_obj)
self.api.node_profile_create(node_profile_obj)
self.print_node_profile(node_profile_uuid=node_profile_obj.uuid)
return
def test_create_node_profile(self):
node_and_port = {
'node1':
[{'name': 'eth0',
'address': "11:22:33:44:55:55",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/0'},
{'name': 'eth1',
'address': "11:22:33:44:55:56",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/1'},
{'name': 'eth2',
'address': "11:22:33:44:55:57",
'sw_name': 'unit_test_qfx1',
'port_id': 'xe-0/0/2'}]}
node_profile_data = {
'qfx1-np': {
'node_profile_vendor': 'Juniper',
'node_profile_device_family': 'qfx',
'hardware': {
'name': 'hw1',
'card': {
'name': 'card1',
'interface-map': {
'eth0': {
'labels': ["provisioning", "tenant"],
'port_group': 'bond0',
'port_speed': '10G'
},
'eth1': {
'labels': ["tenant"],
'port_group': 'bond0',
'port_speed': '10G'
},
'eth2': {
'labels': ["provisioning",
"tenant",
"control-data"],
'port_speed': '10G'
}
}
}
}
}
}
node_profile_data1 = {
'qfx2-np': {
'node_profile_vendor': 'Juniper',
'node_profile_device_family': 'qfx',
'hardware': {
'name': 'hw2',
'card': {
'name': 'card2',
'interface-map': {
'eth0': {
'labels': [
"provisioning1",
"tenant1"],
'port_group': 'bond1',
'port_speed': '10G'
},
'eth1': {
'labels': ["tenant2"],
'port_group': 'bond1',
'port_speed': '10G'
},
'eth2': {
'labels': [
"provisioning1",
"tenant3",
"control-data1"],
'port_speed': '10G'
}
}
}
}
}
}
self.create_tags()
self.create_node_profile(node_profile_data)
self.create_node_profile(node_profile_data1)
self.create_node_and_port(node_and_port)
node_object = self.api.node_read(
fq_name=['default-global-system-config', 'node1'])
np_object = self.api.node_profile_read(
fq_name=['default-global-system-config', 'qfx1-np'])
np2_object = self.api.node_profile_read(
fq_name=['default-global-system-config', 'qfx2-np'])
logger.warn(pprint.pformat(node_object.__dict__))
node_object.set_node_profile(np_object)
self.api.node_update(node_object)
node_object.set_node_profile(np2_object)
self.api.node_update(node_object)
for node in node_and_port:
node_object_update = self.api.node_read(
fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(node_object_update.__dict__))
for port in node_and_port[node]:
port_obj = self.api.port_read(
fq_name=['default-global-system-config',
node,
port.get('name')])
logger.warn(pprint.pformat(port_obj.__dict__))
self.api.ref_update('node',
node_object.uuid,
'node-profile',
np2_object.uuid,
['default-global-system-config', 'qfx2-np'],
'DELETE')
for node in node_and_port:
node_object_update = self.api.node_read(
fq_name=['default-global-system-config', node])
logger.warn(pprint.pformat(node_object_update.__dict__))
for port in node_and_port[node]:
port_obj = self.api.port_read(
fq_name=['default-global-system-config',
node,
port.get('name')])
logger.warn("==============")
logger.warn(pprint.pformat(port_obj.__dict__))
port_groups = self.api.port_groups_list(
parent_fq_name=['default-global-system-config', node])
logger.warn('Port-Groups Printing ==============')
logger.warn(pprint.pformat(port_groups))
for pg in port_groups['port-groups']:
logger.warn("==============")
pg_obj = self.api.port_group_read(fq_name=pg['fq_name'])
logger.warn(pprint.pformat(pg_obj.__dict__))
self.remove_node_and_port(node_and_port)
logger.warn('PASS - NodeProfile Created')
| true
| true
|
f70889aa8067984aad29790c5f57f13c36b50ae1
| 8,204
|
py
|
Python
|
CDAN-GD/pre_process.py
|
MoriZSJ/GVB
|
9b954660ef377ead81c8e631c4a0f4a17075b2ea
|
[
"MIT"
] | null | null | null |
CDAN-GD/pre_process.py
|
MoriZSJ/GVB
|
9b954660ef377ead81c8e631c4a0f4a17075b2ea
|
[
"MIT"
] | null | null | null |
CDAN-GD/pre_process.py
|
MoriZSJ/GVB
|
9b954660ef377ead81c8e631c4a0f4a17075b2ea
|
[
"MIT"
] | null | null | null |
import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
import numbers
import torch
class ResizeImage():
    """Resize a PIL image to a fixed (height, width).

    Args:
        size: target size as a (h, w) pair, or an int for a square output.
    """

    def __init__(self, size):
        # Normalize an int edge length into a square (h, w) pair.
        self.size = (int(size), int(size)) if isinstance(size, int) else size

    def __call__(self, img):
        """Return ``img`` resized to the configured dimensions."""
        target_h, target_w = self.size
        return img.resize((target_h, target_w))
class RandomSizedCrop(object):
    """Randomly crop a fixed ``size`` x ``size`` window from a (C, H, W) array.

    Picks a uniformly random top-left corner and returns that spatial window
    with all channels. Despite the class name (kept for backward
    compatibility), no rescaling or aspect-ratio jitter is performed.

    Args:
        size: edge length (pixels) of the square crop.
        interpolation: unused; retained for interface compatibility.
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """Return a random ``size`` x ``size`` crop of ``img`` (C, H, W)."""
        # Fix: the original referenced ``random`` without importing it
        # anywhere in the module, raising NameError on the first call.
        import random
        h_off = random.randint(0, img.shape[1] - self.size)
        w_off = random.randint(0, img.shape[2] - self.size)
        return img[:, h_off:h_off + self.size, w_off:w_off + self.size]
class Normalize(object):
    """In-place per-channel mean subtraction for a (C, H, W) tensor.

    The mean is either given directly as a per-channel sequence, or loaded
    from a ``.npy`` pixel-mean file, scaled to [0, 1] and channel-reordered
    from BGR to RGB.

    Args:
        mean: sequence of per-channel means; used when truthy.
        meanfile: path to a ``.npy`` mean array, used when ``mean`` is falsy.
    """

    def __init__(self, mean=None, meanfile=None):
        if mean:
            self.mean = mean
            return
        raw = np.load(meanfile)
        scaled = torch.from_numpy(raw.astype('float32') / 255.0)
        # Reorder channels BGR -> RGB.
        self.mean = scaled[[2, 1, 0], :, :]

    def __call__(self, tensor):
        """Subtract the stored mean from each channel of ``tensor`` in place.

        Returns:
            The same tensor object, normalized.
        """
        for channel, channel_mean in zip(tensor, self.mean):
            channel.sub_(channel_mean)
        return tensor
class PlaceCrop(object):
    """Crop a fixed window from a PIL image at a given top-left corner.

    Args:
        size: output size as (h, w), or an int for a square crop.
        start_x: left coordinate of the crop box.
        start_y: top coordinate of the crop box.
    """

    def __init__(self, size, start_x, start_y):
        self.size = (int(size), int(size)) if isinstance(size, int) else size
        self.start_x = start_x
        self.start_y = start_y

    def __call__(self, img):
        """Return the configured rectangular region of ``img``."""
        height, width = self.size
        left, top = self.start_x, self.start_y
        return img.crop((left, top, left + width, top + height))
class ForceFlip(object):
    """Deterministically mirror a PIL image left-to-right.

    Unlike torchvision's RandomHorizontalFlip this transform ALWAYS flips;
    it builds the mirrored half of the 10-crop evaluation set.
    """

    def __call__(self, img):
        """Return ``img`` flipped horizontally."""
        mirrored = img.transpose(Image.FLIP_LEFT_RIGHT)
        return mirrored
class CenterCrop(object):
    """Crop the center window from a channel-first (C, H, W) array.

    Note: this transform indexes ``img.shape`` and slices ``img[:, ...]``,
    so it expects a numpy array or tensor in (C, H, W) layout, not a
    PIL image (the original docstring was wrong about that).

    Args:
        size: output size as (h, w), or a number for a square crop.
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    def __call__(self, img):
        """Return the centered crop of ``img``.

        Args:
            img: channel-first array of shape (C, H, W).

        Returns:
            The central crop of shape (C, th, tw).
        """
        # Bug fix: the original did ``w, h = (img.shape[1], img.shape[2])``,
        # swapping height and width. For (C, H, W) input shape[1] is the
        # height and shape[2] the width, so any non-square crop was
        # mis-centered and could run past the array edge.
        h, w = img.shape[1], img.shape[2]
        th, tw = self.size
        w_off = int((w - tw) / 2.)
        h_off = int((h - th) / 2.)
        return img[:, h_off:h_off + th, w_off:w_off + tw]
def image_train(resize_size=256, crop_size=224, alexnet=False):
    """Build the training-time augmentation pipeline.

    Args:
        resize_size: edge length the image is first resized to.
        crop_size: edge length of the random crop fed to the network.
        alexnet: when True, normalize with the ILSVRC-2012 pixel-mean file
            instead of the standard ImageNet channel statistics.

    Returns:
        A ``transforms.Compose``: resize -> random resized crop -> random
        horizontal flip -> tensor conversion -> normalization.
    """
    if alexnet:
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    steps = [
        transforms.Resize((resize_size, resize_size)),
        transforms.RandomResizedCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)
def image_target(resize_size=256, crop_size=224, alexnet=False):
    """Build the target-domain training pipeline (plain random crop).

    Args:
        resize_size: edge length the image is first resized to.
        crop_size: edge length of the random crop fed to the network.
        alexnet: when True, normalize with the ILSVRC-2012 pixel-mean file
            instead of the standard ImageNet channel statistics.

    Returns:
        A ``transforms.Compose``: resize -> random crop -> random horizontal
        flip -> tensor conversion -> normalization.
    """
    if alexnet:
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    steps = [
        transforms.Resize((resize_size, resize_size)),
        transforms.RandomCrop(crop_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ]
    return transforms.Compose(steps)
def image_test(resize_size=256, crop_size=224, alexnet=False):
    """Build the single-crop evaluation pipeline.

    Args:
        resize_size: edge length the image is first resized to.
        crop_size: edge length of the center crop. Fix: the original
            hard-coded ``CenterCrop(224)`` and silently ignored this
            parameter; the default (224) keeps default behavior unchanged.
        alexnet: when True, normalize with the ILSVRC-2012 pixel-mean file
            instead of the standard ImageNet channel statistics.

    Returns:
        A ``transforms.Compose``: resize -> center crop -> tensor ->
        normalization.
    """
    if not alexnet:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    else:
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    # The unused start_first/start_center/start_last locals were removed.
    return transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.CenterCrop(crop_size),
        transforms.ToTensor(),
        normalize
    ])
def image_test_10crop(resize_size=256, crop_size=224, alexnet=False):
    """Build the classic 10-crop evaluation pipelines.

    Produces ten transform pipelines: the four corner crops plus the center
    crop, each evaluated both mirrored and unmirrored (the five flipped
    variants come first, matching the original ordering).

    Args:
        resize_size: edge length the image is first resized to.
        crop_size: edge length of each crop.
        alexnet: when True, normalize with the ILSVRC-2012 pixel-mean file
            instead of the standard ImageNet channel statistics.

    Returns:
        A list of ten ``transforms.Compose`` pipelines.
    """
    if alexnet:
        normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
    else:
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    start_first = 0
    start_center = (resize_size - crop_size - 1) / 2
    start_last = resize_size - crop_size - 1
    # Crop anchors in the original's order: top-left, bottom-right,
    # bottom-left, top-right, center.
    anchors = [
        (start_first, start_first),
        (start_last, start_last),
        (start_last, start_first),
        (start_first, start_last),
        (start_center, start_center),
    ]
    pipelines = []
    for flipped in (True, False):
        for start_x, start_y in anchors:
            steps = [ResizeImage(resize_size)]
            if flipped:
                steps.append(ForceFlip())
            steps.append(PlaceCrop(crop_size, start_x, start_y))
            steps.append(transforms.ToTensor())
            steps.append(normalize)
            pipelines.append(transforms.Compose(steps))
    return pipelines
| 31.43295
| 91
| 0.576548
|
import numpy as np
from torchvision import transforms
import os
from PIL import Image, ImageOps
import numbers
import torch
class ResizeImage():
def __init__(self, size):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
th, tw = self.size
return img.resize((th, tw))
class RandomSizedCrop(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
def __call__(self, img):
h_off = random.randint(0, img.shape[1]-self.size)
w_off = random.randint(0, img.shape[2]-self.size)
img = img[:, h_off:h_off+self.size, w_off:w_off+self.size]
return img
class Normalize(object):
def __init__(self, mean=None, meanfile=None):
if mean:
self.mean = mean
else:
arr = np.load(meanfile)
self.mean = torch.from_numpy(arr.astype('float32')/255.0)[[2, 1, 0], :, :]
def __call__(self, tensor):
for t, m in zip(tensor, self.mean):
t.sub_(m)
return tensor
class PlaceCrop(object):
def __init__(self, size, start_x, start_y):
if isinstance(size, int):
self.size = (int(size), int(size))
else:
self.size = size
self.start_x = start_x
self.start_y = start_y
def __call__(self, img):
th, tw = self.size
return img.crop((self.start_x, self.start_y, self.start_x + tw, self.start_y + th))
class ForceFlip(object):
def __call__(self, img):
return img.transpose(Image.FLIP_LEFT_RIGHT)
class CenterCrop(object):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, img):
w, h = (img.shape[1], img.shape[2])
th, tw = self.size
w_off = int((w - tw) / 2.)
h_off = int((h - th) / 2.)
img = img[:, h_off:h_off+th, w_off:w_off+tw]
return img
def image_train(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_target(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.RandomCrop(crop_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
def image_test(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) / 2
start_last = resize_size - crop_size - 1
return transforms.Compose([
transforms.Resize((resize_size, resize_size)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
def image_test_10crop(resize_size=256, crop_size=224, alexnet=False):
if not alexnet:
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
else:
normalize = Normalize(meanfile='./ilsvrc_2012_mean.npy')
start_first = 0
start_center = (resize_size - crop_size - 1) / 2
start_last = resize_size - crop_size - 1
data_transforms = [
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size), ForceFlip(),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_last, start_first),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_first, start_last),
transforms.ToTensor(),
normalize
]),
transforms.Compose([
ResizeImage(resize_size),
PlaceCrop(crop_size, start_center, start_center),
transforms.ToTensor(),
normalize
])
]
return data_transforms
| true
| true
|
f70889fb73d826435c3a78574b11a432ea63d154
| 17,638
|
py
|
Python
|
View/cadastro_fornecedor.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
View/cadastro_fornecedor.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
View/cadastro_fornecedor.py
|
felipezago/ControleEstoque
|
229659c4f9888fd01df34375ec92af7a1f734d10
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI/cadastro_fornecedor.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ct_FormFornecedor(object):
def setupUi(self, ct_FormFornecedor):
ct_FormFornecedor.setObjectName("ct_FormFornecedor")
ct_FormFornecedor.resize(653, 371)
self.fr_FormFornecedor = QtWidgets.QFrame(ct_FormFornecedor)
self.fr_FormFornecedor.setGeometry(QtCore.QRect(0, 0, 1000, 500))
self.fr_FormFornecedor.setStyleSheet("background: #FFF;\n"
"border: none")
self.fr_FormFornecedor.setObjectName("fr_FormFornecedor")
self.lb_FormFornecedor = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor.setGeometry(QtCore.QRect(20, 10, 880, 30))
self.lb_FormFornecedor.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"\n"
"border-bottom: 2px solid #A2A2A2\n"
"}")
self.lb_FormFornecedor.setObjectName("lb_FormFornecedor")
self.lb_FormFornecedor_2 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_2.setGeometry(QtCore.QRect(370, 60, 150, 20))
self.lb_FormFornecedor_2.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_2.setObjectName("lb_FormFornecedor_2")
self.tx_NomeFantasia = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_NomeFantasia.setGeometry(QtCore.QRect(370, 80, 271, 25))
self.tx_NomeFantasia.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_NomeFantasia.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase;\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_NomeFantasia.setObjectName("tx_NomeFantasia")
self.lb_FormFornecedor_3 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_3.setGeometry(QtCore.QRect(20, 60, 190, 20))
self.lb_FormFornecedor_3.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_3.setObjectName("lb_FormFornecedor_3")
self.lb_FormFornecedor_5 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_5.setGeometry(QtCore.QRect(20, 120, 196, 20))
self.lb_FormFornecedor_5.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_5.setObjectName("lb_FormFornecedor_5")
self.tx_Telefone = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Telefone.setGeometry(QtCore.QRect(20, 140, 196, 25))
self.tx_Telefone.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Telefone.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Telefone.setPlaceholderText("")
self.tx_Telefone.setObjectName("tx_Telefone")
self.lb_FormFornecedor_8 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_8.setGeometry(QtCore.QRect(20, 180, 630, 30))
self.lb_FormFornecedor_8.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: normal;\n"
"\n"
"border-bottom: 2px solid #A2A2A2;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_8.setObjectName("lb_FormFornecedor_8")
self.tx_Cep = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cep.setGeometry(QtCore.QRect(20, 240, 101, 25))
self.tx_Cep.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cep.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cep.setAlignment(QtCore.Qt.AlignCenter)
self.tx_Cep.setObjectName("tx_Cep")
self.lb_FormFornecedor_10 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_10.setGeometry(QtCore.QRect(20, 215, 50, 20))
self.lb_FormFornecedor_10.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_10.setObjectName("lb_FormFornecedor_10")
self.fr_BotoesFormFornecedor = QtWidgets.QFrame(self.fr_FormFornecedor)
self.fr_BotoesFormFornecedor.setGeometry(QtCore.QRect(-340, 340, 1000, 30))
self.fr_BotoesFormFornecedor.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_BotoesFormFornecedor.setObjectName("fr_BotoesFormFornecedor")
self.bt_Voltar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Voltar.setGeometry(QtCore.QRect(880, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Voltar.setFont(font)
self.bt_Voltar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Voltar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Voltar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Voltar.setStyleSheet("QPushButton {\n"
"background-color: #1E87F0;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Voltar.setIconSize(QtCore.QSize(75, 35))
self.bt_Voltar.setObjectName("bt_Voltar")
self.bt_Salvar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Salvar.setGeometry(QtCore.QRect(750, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Salvar.setFont(font)
self.bt_Salvar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Salvar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Salvar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Salvar.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Salvar.setIconSize(QtCore.QSize(75, 35))
self.bt_Salvar.setObjectName("bt_Salvar")
self.tx_cnpj = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_cnpj.setGeometry(QtCore.QRect(20, 80, 221, 25))
self.tx_cnpj.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_cnpj.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_cnpj.setPlaceholderText("")
self.tx_cnpj.setObjectName("tx_cnpj")
self.lb_FormFornecedor_23 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_23.setGeometry(QtCore.QRect(230, 120, 190, 20))
self.lb_FormFornecedor_23.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_23.setObjectName("lb_FormFornecedor_23")
self.tx_Email = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Email.setGeometry(QtCore.QRect(230, 140, 196, 25))
self.tx_Email.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Email.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Email.setPlaceholderText("")
self.tx_Email.setObjectName("tx_Email")
self.lb_FormFornecedor_11 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_11.setGeometry(QtCore.QRect(160, 215, 250, 20))
self.lb_FormFornecedor_11.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_11.setObjectName("lb_FormFornecedor_11")
self.tx_Endereco = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Endereco.setGeometry(QtCore.QRect(160, 240, 400, 25))
self.tx_Endereco.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Endereco.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Endereco.setInputMask("")
self.tx_Endereco.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Endereco.setPlaceholderText("")
self.tx_Endereco.setObjectName("tx_Endereco")
self.lb_FormFornecedor_12 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_12.setGeometry(QtCore.QRect(580, 215, 50, 20))
self.lb_FormFornecedor_12.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_12.setObjectName("lb_FormFornecedor_12")
self.tx_Numero = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Numero.setGeometry(QtCore.QRect(580, 240, 70, 25))
self.tx_Numero.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Numero.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Numero.setInputMask("")
self.tx_Numero.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Numero.setPlaceholderText("")
self.tx_Numero.setObjectName("tx_Numero")
self.tx_Bairro = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Bairro.setGeometry(QtCore.QRect(20, 295, 260, 25))
self.tx_Bairro.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Bairro.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Bairro.setInputMask("")
self.tx_Bairro.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Bairro.setPlaceholderText("")
self.tx_Bairro.setObjectName("tx_Bairro")
self.lb_FormFornecedor_13 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_13.setGeometry(QtCore.QRect(20, 270, 120, 20))
self.lb_FormFornecedor_13.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_13.setObjectName("lb_FormFornecedor_13")
self.tx_Cidade = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cidade.setGeometry(QtCore.QRect(300, 295, 260, 25))
self.tx_Cidade.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cidade.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cidade.setInputMask("")
self.tx_Cidade.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Cidade.setPlaceholderText("")
self.tx_Cidade.setObjectName("tx_Cidade")
self.lb_FormFornecedor_14 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_14.setGeometry(QtCore.QRect(300, 270, 120, 20))
self.lb_FormFornecedor_14.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_14.setObjectName("lb_FormFornecedor_14")
self.lb_FormFornecedor_15 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_15.setGeometry(QtCore.QRect(580, 270, 70, 20))
self.lb_FormFornecedor_15.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_15.setObjectName("lb_FormFornecedor_15")
self.tx_Estado = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Estado.setGeometry(QtCore.QRect(580, 295, 70, 25))
self.tx_Estado.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Estado.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Estado.setInputMask("")
self.tx_Estado.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Estado.setPlaceholderText("")
self.tx_Estado.setObjectName("tx_Estado")
self.bt_busca_cep = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cep.setGeometry(QtCore.QRect(130, 240, 21, 31))
self.bt_busca_cep.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("UI/../../Imagens/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_busca_cep.setIcon(icon)
self.bt_busca_cep.setObjectName("bt_busca_cep")
self.bt_busca_cnpj = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cnpj.setGeometry(QtCore.QRect(250, 80, 111, 31))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_busca_cnpj.setFont(font)
self.bt_busca_cnpj.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_busca_cnpj.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_busca_cnpj.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_busca_cnpj.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_busca_cnpj.setIconSize(QtCore.QSize(75, 35))
self.bt_busca_cnpj.setObjectName("bt_busca_cnpj")
self.retranslateUi(ct_FormFornecedor)
QtCore.QMetaObject.connectSlotsByName(ct_FormFornecedor)
ct_FormFornecedor.setTabOrder(self.tx_cnpj, self.tx_NomeFantasia)
ct_FormFornecedor.setTabOrder(self.tx_NomeFantasia, self.tx_Telefone)
ct_FormFornecedor.setTabOrder(self.tx_Telefone, self.tx_Email)
ct_FormFornecedor.setTabOrder(self.tx_Email, self.tx_Cep)
ct_FormFornecedor.setTabOrder(self.tx_Cep, self.bt_busca_cep)
ct_FormFornecedor.setTabOrder(self.bt_busca_cep, self.tx_Endereco)
ct_FormFornecedor.setTabOrder(self.tx_Endereco, self.tx_Numero)
ct_FormFornecedor.setTabOrder(self.tx_Numero, self.tx_Bairro)
ct_FormFornecedor.setTabOrder(self.tx_Bairro, self.tx_Cidade)
ct_FormFornecedor.setTabOrder(self.tx_Cidade, self.tx_Estado)
def retranslateUi(self, ct_FormFornecedor):
_translate = QtCore.QCoreApplication.translate
ct_FormFornecedor.setWindowTitle(_translate("ct_FormFornecedor", "Cadastro Fornecedores"))
self.lb_FormFornecedor.setText(_translate("ct_FormFornecedor", "FICHA CADASTRAL FORNECEDOR"))
self.lb_FormFornecedor_2.setText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.tx_NomeFantasia.setPlaceholderText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.lb_FormFornecedor_3.setText(_translate("ct_FormFornecedor", "CNPJ"))
self.lb_FormFornecedor_5.setText(_translate("ct_FormFornecedor", "TELEFONE "))
self.tx_Telefone.setInputMask(_translate("ct_FormFornecedor", "(00) 0000-00000"))
self.tx_Telefone.setText(_translate("ct_FormFornecedor", "() -"))
self.lb_FormFornecedor_8.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.tx_Cep.setInputMask(_translate("ct_FormFornecedor", "99999-999"))
self.tx_Cep.setPlaceholderText(_translate("ct_FormFornecedor", "123456789"))
self.lb_FormFornecedor_10.setText(_translate("ct_FormFornecedor", "CEP"))
self.bt_Voltar.setText(_translate("ct_FormFornecedor", "VOLTAR"))
self.bt_Salvar.setText(_translate("ct_FormFornecedor", "SALVAR"))
self.tx_cnpj.setInputMask(_translate("ct_FormFornecedor", "##.###.###/####-##"))
self.tx_cnpj.setText(_translate("ct_FormFornecedor", "../-----"))
self.lb_FormFornecedor_23.setText(_translate("ct_FormFornecedor", "Email"))
self.lb_FormFornecedor_11.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.lb_FormFornecedor_12.setText(_translate("ct_FormFornecedor", "Nº"))
self.lb_FormFornecedor_13.setText(_translate("ct_FormFornecedor", "BAIRRO"))
self.lb_FormFornecedor_14.setText(_translate("ct_FormFornecedor", "CIDADE"))
self.lb_FormFornecedor_15.setText(_translate("ct_FormFornecedor", "ESTADO"))
self.bt_busca_cep.setAccessibleName(_translate("ct_FormFornecedor", "BUSCA CEP"))
self.bt_busca_cnpj.setText(_translate("ct_FormFornecedor", "BUSCAR CNPJ"))
| 43.875622
| 105
| 0.707677
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ct_FormFornecedor(object):
def setupUi(self, ct_FormFornecedor):
ct_FormFornecedor.setObjectName("ct_FormFornecedor")
ct_FormFornecedor.resize(653, 371)
self.fr_FormFornecedor = QtWidgets.QFrame(ct_FormFornecedor)
self.fr_FormFornecedor.setGeometry(QtCore.QRect(0, 0, 1000, 500))
self.fr_FormFornecedor.setStyleSheet("background: #FFF;\n"
"border: none")
self.fr_FormFornecedor.setObjectName("fr_FormFornecedor")
self.lb_FormFornecedor = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor.setGeometry(QtCore.QRect(20, 10, 880, 30))
self.lb_FormFornecedor.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"\n"
"border-bottom: 2px solid #A2A2A2\n"
"}")
self.lb_FormFornecedor.setObjectName("lb_FormFornecedor")
self.lb_FormFornecedor_2 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_2.setGeometry(QtCore.QRect(370, 60, 150, 20))
self.lb_FormFornecedor_2.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_2.setObjectName("lb_FormFornecedor_2")
self.tx_NomeFantasia = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_NomeFantasia.setGeometry(QtCore.QRect(370, 80, 271, 25))
self.tx_NomeFantasia.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_NomeFantasia.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase;\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_NomeFantasia.setObjectName("tx_NomeFantasia")
self.lb_FormFornecedor_3 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_3.setGeometry(QtCore.QRect(20, 60, 190, 20))
self.lb_FormFornecedor_3.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_3.setObjectName("lb_FormFornecedor_3")
self.lb_FormFornecedor_5 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_5.setGeometry(QtCore.QRect(20, 120, 196, 20))
self.lb_FormFornecedor_5.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_5.setObjectName("lb_FormFornecedor_5")
self.tx_Telefone = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Telefone.setGeometry(QtCore.QRect(20, 140, 196, 25))
self.tx_Telefone.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Telefone.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Telefone.setPlaceholderText("")
self.tx_Telefone.setObjectName("tx_Telefone")
self.lb_FormFornecedor_8 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_8.setGeometry(QtCore.QRect(20, 180, 630, 30))
self.lb_FormFornecedor_8.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: normal;\n"
"\n"
"border-bottom: 2px solid #A2A2A2;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_8.setObjectName("lb_FormFornecedor_8")
self.tx_Cep = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cep.setGeometry(QtCore.QRect(20, 240, 101, 25))
self.tx_Cep.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cep.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cep.setAlignment(QtCore.Qt.AlignCenter)
self.tx_Cep.setObjectName("tx_Cep")
self.lb_FormFornecedor_10 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_10.setGeometry(QtCore.QRect(20, 215, 50, 20))
self.lb_FormFornecedor_10.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_10.setObjectName("lb_FormFornecedor_10")
self.fr_BotoesFormFornecedor = QtWidgets.QFrame(self.fr_FormFornecedor)
self.fr_BotoesFormFornecedor.setGeometry(QtCore.QRect(-340, 340, 1000, 30))
self.fr_BotoesFormFornecedor.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_BotoesFormFornecedor.setObjectName("fr_BotoesFormFornecedor")
self.bt_Voltar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Voltar.setGeometry(QtCore.QRect(880, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Voltar.setFont(font)
self.bt_Voltar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Voltar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Voltar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Voltar.setStyleSheet("QPushButton {\n"
"background-color: #1E87F0;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Voltar.setIconSize(QtCore.QSize(75, 35))
self.bt_Voltar.setObjectName("bt_Voltar")
self.bt_Salvar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Salvar.setGeometry(QtCore.QRect(750, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Salvar.setFont(font)
self.bt_Salvar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Salvar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Salvar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Salvar.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Salvar.setIconSize(QtCore.QSize(75, 35))
self.bt_Salvar.setObjectName("bt_Salvar")
self.tx_cnpj = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_cnpj.setGeometry(QtCore.QRect(20, 80, 221, 25))
self.tx_cnpj.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_cnpj.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_cnpj.setPlaceholderText("")
self.tx_cnpj.setObjectName("tx_cnpj")
self.lb_FormFornecedor_23 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_23.setGeometry(QtCore.QRect(230, 120, 190, 20))
self.lb_FormFornecedor_23.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_23.setObjectName("lb_FormFornecedor_23")
self.tx_Email = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Email.setGeometry(QtCore.QRect(230, 140, 196, 25))
self.tx_Email.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Email.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Email.setPlaceholderText("")
self.tx_Email.setObjectName("tx_Email")
self.lb_FormFornecedor_11 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_11.setGeometry(QtCore.QRect(160, 215, 250, 20))
self.lb_FormFornecedor_11.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_11.setObjectName("lb_FormFornecedor_11")
self.tx_Endereco = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Endereco.setGeometry(QtCore.QRect(160, 240, 400, 25))
self.tx_Endereco.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Endereco.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Endereco.setInputMask("")
self.tx_Endereco.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Endereco.setPlaceholderText("")
self.tx_Endereco.setObjectName("tx_Endereco")
self.lb_FormFornecedor_12 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_12.setGeometry(QtCore.QRect(580, 215, 50, 20))
self.lb_FormFornecedor_12.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_12.setObjectName("lb_FormFornecedor_12")
self.tx_Numero = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Numero.setGeometry(QtCore.QRect(580, 240, 70, 25))
self.tx_Numero.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Numero.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Numero.setInputMask("")
self.tx_Numero.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Numero.setPlaceholderText("")
self.tx_Numero.setObjectName("tx_Numero")
self.tx_Bairro = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Bairro.setGeometry(QtCore.QRect(20, 295, 260, 25))
self.tx_Bairro.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Bairro.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Bairro.setInputMask("")
self.tx_Bairro.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Bairro.setPlaceholderText("")
self.tx_Bairro.setObjectName("tx_Bairro")
self.lb_FormFornecedor_13 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_13.setGeometry(QtCore.QRect(20, 270, 120, 20))
self.lb_FormFornecedor_13.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_13.setObjectName("lb_FormFornecedor_13")
self.tx_Cidade = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cidade.setGeometry(QtCore.QRect(300, 295, 260, 25))
self.tx_Cidade.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cidade.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cidade.setInputMask("")
self.tx_Cidade.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Cidade.setPlaceholderText("")
self.tx_Cidade.setObjectName("tx_Cidade")
self.lb_FormFornecedor_14 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_14.setGeometry(QtCore.QRect(300, 270, 120, 20))
self.lb_FormFornecedor_14.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_14.setObjectName("lb_FormFornecedor_14")
self.lb_FormFornecedor_15 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_15.setGeometry(QtCore.QRect(580, 270, 70, 20))
self.lb_FormFornecedor_15.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_15.setObjectName("lb_FormFornecedor_15")
self.tx_Estado = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Estado.setGeometry(QtCore.QRect(580, 295, 70, 25))
self.tx_Estado.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Estado.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Estado.setInputMask("")
self.tx_Estado.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Estado.setPlaceholderText("")
self.tx_Estado.setObjectName("tx_Estado")
self.bt_busca_cep = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cep.setGeometry(QtCore.QRect(130, 240, 21, 31))
self.bt_busca_cep.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("UI/../../Imagens/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_busca_cep.setIcon(icon)
self.bt_busca_cep.setObjectName("bt_busca_cep")
self.bt_busca_cnpj = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cnpj.setGeometry(QtCore.QRect(250, 80, 111, 31))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_busca_cnpj.setFont(font)
self.bt_busca_cnpj.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_busca_cnpj.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_busca_cnpj.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_busca_cnpj.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_busca_cnpj.setIconSize(QtCore.QSize(75, 35))
self.bt_busca_cnpj.setObjectName("bt_busca_cnpj")
self.retranslateUi(ct_FormFornecedor)
QtCore.QMetaObject.connectSlotsByName(ct_FormFornecedor)
ct_FormFornecedor.setTabOrder(self.tx_cnpj, self.tx_NomeFantasia)
ct_FormFornecedor.setTabOrder(self.tx_NomeFantasia, self.tx_Telefone)
ct_FormFornecedor.setTabOrder(self.tx_Telefone, self.tx_Email)
ct_FormFornecedor.setTabOrder(self.tx_Email, self.tx_Cep)
ct_FormFornecedor.setTabOrder(self.tx_Cep, self.bt_busca_cep)
ct_FormFornecedor.setTabOrder(self.bt_busca_cep, self.tx_Endereco)
ct_FormFornecedor.setTabOrder(self.tx_Endereco, self.tx_Numero)
ct_FormFornecedor.setTabOrder(self.tx_Numero, self.tx_Bairro)
ct_FormFornecedor.setTabOrder(self.tx_Bairro, self.tx_Cidade)
ct_FormFornecedor.setTabOrder(self.tx_Cidade, self.tx_Estado)
def retranslateUi(self, ct_FormFornecedor):
_translate = QtCore.QCoreApplication.translate
ct_FormFornecedor.setWindowTitle(_translate("ct_FormFornecedor", "Cadastro Fornecedores"))
self.lb_FormFornecedor.setText(_translate("ct_FormFornecedor", "FICHA CADASTRAL FORNECEDOR"))
self.lb_FormFornecedor_2.setText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.tx_NomeFantasia.setPlaceholderText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.lb_FormFornecedor_3.setText(_translate("ct_FormFornecedor", "CNPJ"))
self.lb_FormFornecedor_5.setText(_translate("ct_FormFornecedor", "TELEFONE "))
self.tx_Telefone.setInputMask(_translate("ct_FormFornecedor", "(00) 0000-00000"))
self.tx_Telefone.setText(_translate("ct_FormFornecedor", "() -"))
self.lb_FormFornecedor_8.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.tx_Cep.setInputMask(_translate("ct_FormFornecedor", "99999-999"))
self.tx_Cep.setPlaceholderText(_translate("ct_FormFornecedor", "123456789"))
self.lb_FormFornecedor_10.setText(_translate("ct_FormFornecedor", "CEP"))
self.bt_Voltar.setText(_translate("ct_FormFornecedor", "VOLTAR"))
self.bt_Salvar.setText(_translate("ct_FormFornecedor", "SALVAR"))
self.tx_cnpj.setInputMask(_translate("ct_FormFornecedor", "##.###.###/####-##"))
self.tx_cnpj.setText(_translate("ct_FormFornecedor", "../-----"))
self.lb_FormFornecedor_23.setText(_translate("ct_FormFornecedor", "Email"))
self.lb_FormFornecedor_11.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.lb_FormFornecedor_12.setText(_translate("ct_FormFornecedor", "Nº"))
self.lb_FormFornecedor_13.setText(_translate("ct_FormFornecedor", "BAIRRO"))
self.lb_FormFornecedor_14.setText(_translate("ct_FormFornecedor", "CIDADE"))
self.lb_FormFornecedor_15.setText(_translate("ct_FormFornecedor", "ESTADO"))
self.bt_busca_cep.setAccessibleName(_translate("ct_FormFornecedor", "BUSCA CEP"))
self.bt_busca_cnpj.setText(_translate("ct_FormFornecedor", "BUSCAR CNPJ"))
| true
| true
|
f7088ab28b5d544214f4b2534e46dd9c09bd6fea
| 3,830
|
py
|
Python
|
code/cmoa-img-desc-parallel/scraper.py
|
CreativeInquiry/TeenieHarrisProject
|
c7c2e1730ade29ed086a4bd21d5d21315fcde5e5
|
[
"MIT"
] | null | null | null |
code/cmoa-img-desc-parallel/scraper.py
|
CreativeInquiry/TeenieHarrisProject
|
c7c2e1730ade29ed086a4bd21d5d21315fcde5e5
|
[
"MIT"
] | 9
|
2019-03-27T18:42:41.000Z
|
2019-03-31T17:04:24.000Z
|
code/cmoa-img-desc-parallel/scraper.py
|
CreativeInquiry/TeenieHarrisProject
|
c7c2e1730ade29ed086a4bd21d5d21315fcde5e5
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import os
import sys
from glob import glob
import re
import json
import random
import shutil
import re
import codecs
# Files already scraped in a previous run (one base filename per line).
dones = [x for x in open("done.txt",'r').read().split("\n") if len(x)]
# Manual corrections: base filename -> the object URL it must map to.
correct = json.loads(open("bad-corrected.json",'r').read())
# Which box of the archive this worker processes (parallelised per box).
box = sys.argv[1]
print("DOING BOX: ",box)
# Headless Chrome with Safe Browsing disabled so downloads are not blocked.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
# Each line of canonical_filename_order.txt is "<box>/<name>.<ext>"; keep only
# the base names that belong to this worker's box.
filenames = [x.split(".")[0].split("/") for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
filenames = [x[1] for x in filenames if x[0] == box]
print("NUM STUFF IN BOX: ",len(filenames))
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
    """Start a Chrome webdriver configured with the module-level options.

    `path` defaults to a `chromedriver` binary next to the working directory.
    """
    return webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
def parse_info_html(html):
    """Scrape (url, description, creator, date) out of one search-result snippet.

    Relies on the fixed markup of collection.cmoa.org search results; raises
    IndexError if a delimiter is missing.
    """
    def _between(text, left, right):
        # Substring between the first `left` marker and the next `right` marker.
        return text.split(left)[1].split(right)[0]

    url = _between(html, 'href="', '">')
    creator = _between(html, 'creator-link">', '</a>')
    date = _between(html, 'Date:</dt><dd class="search-result__value">', '</dd>')
    # The description is the anchor text following the second '">' in the snippet.
    desc = html.split('">')[2].split('</a>')[0]
    return url, desc, creator, date
def parse_accession_number(html):
    """Extract the accession number from an object-page HTML snippet."""
    after_label = html.split('Accession number:')[1]
    value_cell = after_label.split('object__attributes-value">')[1]
    return value_cell.split('</dd>')[0]
driver = init_driver()
time.sleep(3)
# For every scan in this box: search the CMOA collection for the filename,
# resolve the matching object page, and append one tab-separated record
# (filename, description, date, accession number, object id) to out/<box>.txt.
for idx, fname in enumerate(filenames):
    if fname in dones:
        print(fname, "is done, skip")
        continue
    print("now processing ", fname)
    # Fallback record written when the search fails or an exception occurs.
    entry = ("no description", "no date", "no accession number", "no object id")
    try:
        driver.get("https://collection.cmoa.org/?q=" + fname)
        # Results render client-side; poll up to ~6 times, 3 s apart.
        search_results = []
        trials = 0
        while len(search_results) == 0:
            time.sleep(3)
            if trials > 5:
                print("give up")
                break
            print("trial ", trials)
            search_results = driver.find_elements_by_class_name("search-result__info")
            trials += 1
        cands = []
        for x in search_results:
            html = x.get_attribute('innerHTML')
            iurl, desc, creator, date = parse_info_html(html)
            print(iurl, desc, creator, date)
            # Manual corrections pin a filename to a single object id; skip
            # any hit whose id does not match the correction.
            if fname in correct:
                if correct[fname].split("/")[-1] != iurl.split("/")[-1]:
                    print("SKIPPING BECAUSE OF MANUAL LABEL", fname, iurl)
                    continue
            # NOTE: the creator filter is deliberately disabled ("True or ..."),
            # so every remaining hit is visited.
            if True or (u"Teenie" in creator):
                driver.get("https://collection.cmoa.org" + iurl)
                time.sleep(2)
                obj = driver.find_elements_by_class_name("object")[1].get_attribute('innerHTML')
                acc = parse_accession_number(obj)
                print(acc)
                cands.append((desc, date, acc, iurl.split("/")[-1]))
        if (len(cands) > 1):
            entry = cands[0]
            print("WARNING!!!!!! MULIPLE POSSIBLE RESULTS FOUND!!! MANUAL CHECK!!!", fname)
        elif (len(cands) == 0):
            print("WARNING!!!!!! NO RELAVENT RESULT FOUND!!! MANUAL CHECK!!!", fname)
        else:
            entry = cands[0]
        print("ENTRY:", fname, entry)
    except Exception:
        # Was a bare `except:`; narrowed so Ctrl-C / SystemExit still abort the run.
        print("SHIT!!!! DONT KNOW WHAT WENT WRONG", fname)
        print(sys.exc_info())
    # Append the record even on failure so every filename gets a line.
    codecs.open("out/" + box + ".txt", 'a+', encoding='utf8').write(
        fname + "\t" + entry[0] + "\t" + entry[1] + "\t" + entry[2] + "\t" + entry[3] + "\n")
| 33.893805
| 129
| 0.6047
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
import os
import sys
from glob import glob
import re
import json
import random
import shutil
import re
import codecs
dones = [x for x in open("done.txt",'r').read().split("\n") if len(x)]
correct = json.loads(open("bad-corrected.json",'r').read())
box = sys.argv[1]
print("DOING BOX: ",box)
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("safebrowsing-disable-extension-blacklist")
chrome_options.add_argument("--safebrowsing-disable-download-protection")
chrome_options.add_experimental_option("prefs", {'safebrowsing.enabled': 'false'})
filenames = [x.split(".")[0].split("/") for x in str(open("canonical_filename_order.txt",'r').read()).split("\n") if len(x)]
filenames = [x[1] for x in filenames if x[0] == box]
print("NUM STUFF IN BOX: ",len(filenames))
def init_driver(path=os.path.join(os.getcwd(),"chromedriver")):
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=path)
return driver
def parse_info_html(html):
url = html.split('href="')[1].split('">')[0]
creator = html.split('creator-link\">')[1].split('</a>')[0]
date = html.split('Date:</dt><dd class="search-result__value">')[1].split("</dd>")[0]
desc = html.split('">')[2].split('</a>')[0]
return url,desc,creator,date
def parse_accession_number(html):
return html.split('Accession number:')[1].split('object__attributes-value">')[1].split('</dd>')[0]
driver = init_driver();
time.sleep(3);
for idx,fname in enumerate(filenames):
if fname in dones:
print(fname,"is done, skip")
continue
print("now processing ",fname)
entry = ("no description","no date","no accession number","no object id")
try:
driver.get("https://collection.cmoa.org/?q="+fname)
search_results = []
trials = 0
while len(search_results) == 0:
time.sleep(3)
if (trials > 5):
print("give up")
break
print("trial ",trials)
search_results = driver.find_elements_by_class_name("search-result__info")
trials += 1
cands = []
for x in search_results:
html = x.get_attribute('innerHTML')
iurl,desc,creator,date = parse_info_html(html)
print(iurl,desc,creator,date)
if (fname in correct):
if correct[fname].split("/")[-1] != iurl.split("/")[-1]:
print("SKIPPING BECAUSE OF MANUAL LABEL", fname,iurl)
continue
if True or (u"Teenie" in creator):
driver.get("https://collection.cmoa.org"+iurl);
time.sleep(2)
obj = driver.find_elements_by_class_name("object")[1].get_attribute('innerHTML')
# print(obj)
acc = parse_accession_number(obj)
print(acc)
cands.append((desc,date,acc,iurl.split("/")[-1]))
if (len(cands) > 1):
entry = cands[0]
print("WARNING!!!!!! MULIPLE POSSIBLE RESULTS FOUND!!! MANUAL CHECK!!!", fname)
elif (len(cands) == 0):
print("WARNING!!!!!! NO RELAVENT RESULT FOUND!!! MANUAL CHECK!!!", fname)
else:
entry = cands[0]
print("ENTRY:",fname,entry)
except:
print("SHIT!!!! DONT KNOW WHAT WENT WRONG",fname)
print(sys.exc_info())
codecs.open("out/"+box+".txt",'a+',encoding='utf8').write(fname+"\t"+entry[0]+"\t"+entry[1]+"\t"+entry[2]+"\t"+entry[3]+"\n")
| true
| true
|
f7088b733943ec4deaa0fa6c680bb6105d440e68
| 2,151
|
py
|
Python
|
src/automotive/core/can/tools/reader/usb_can_reader.py
|
philosophy912/automotive
|
de918611652b789a83545f346c1569c2c2c955a6
|
[
"Apache-2.0"
] | null | null | null |
src/automotive/core/can/tools/reader/usb_can_reader.py
|
philosophy912/automotive
|
de918611652b789a83545f346c1569c2c2c955a6
|
[
"Apache-2.0"
] | null | null | null |
src/automotive/core/can/tools/reader/usb_can_reader.py
|
philosophy912/automotive
|
de918611652b789a83545f346c1569c2c2c955a6
|
[
"Apache-2.0"
] | 1
|
2022-02-28T07:23:28.000Z
|
2022-02-28T07:23:28.000Z
|
# -*- coding:utf-8 -*-
# --------------------------------------------------------
# Copyright (C), 2016-2020, lizhe, All rights reserved
# --------------------------------------------------------
# @Name: usb_can_reader.py
# @Author: lizhe
# @Created: 2021/5/1 - 23:45
# --------------------------------------------------------
import re
from typing import List, Tuple
from automotive.core.can.message import Message
from .trace_reader import TraceReader
from automotive.logger.logger import logger
class UsbCanReader(TraceReader):
    """Trace reader for USB-CAN tool exports: one CAN frame per line."""

    def read(self, file: str) -> List[Tuple[float, Message]]:
        """Parse *file* and return a list of (seconds, Message) tuples."""
        lines = self.__filter_content(file)
        logger.debug(f"trace size = {len(lines)}")
        return self.__convert(lines)

    @staticmethod
    def __filter_content(file: str):
        # Read all lines and discard the header row; every remaining line
        # describes one CAN frame.
        with open(file, "r") as f:
            lines = f.readlines()
        del lines[0]
        return lines

    def __convert(self, contents: list) -> List[Tuple[float, Message]]:
        """Turn raw trace lines into (timestamp, Message) pairs.

        Example line:
        00345,="09:35:34.992",0x376549,ch1,接收,0x0406,数据帧,标准帧,0x08,x| 06 01 00 00 00 00 00 00
        """
        trace = []
        for line in contents:
            # Wall-clock stamp, eight payload bytes, and the frame id.
            stamp = re.search(r"\d{2}:\d{2}:\d{2}\.\d{3}", line).group(0)
            payload = re.search(r"(\s\w{2}){8}", line).group(0).strip().split(" ")
            frame_id = re.search(r"0x\w{4},", line).group(0)[:-1]
            logger.debug(f"{stamp}, {payload}, {frame_id}")
            message = Message()
            message.msg_id = int(frame_id, 16)
            message.data = [int(byte, 16) for byte in payload]
            trace.append((self.__get_time(stamp), message))
        return trace

    @staticmethod
    def __get_time(hex_time):
        # Convert "HH:MM:SS.mmm" to seconds-since-midnight as a float.
        clock, millis = hex_time.split(".")
        hour, minutes, seconds = clock.split(":")
        total_ms = (int(hour) * 60 * 60 + int(minutes) * 60 + int(seconds)) * 1000 + int(millis)
        return total_ms / 1000
| 35.85
| 105
| 0.535565
|
import re
from typing import List, Tuple
from automotive.core.can.message import Message
from .trace_reader import TraceReader
from automotive.logger.logger import logger
class UsbCanReader(TraceReader):
def read(self, file: str) -> List[Tuple[float, Message]]:
contents = self.__filter_content(file)
logger.debug(f"trace size = {len(contents)}")
return self.__convert(contents)
@staticmethod
def __filter_content(file: str):
with open(file, "r") as f:
lines = f.readlines()
lines.pop(0)
return lines
def __convert(self, contents: list) -> List[Tuple[float, Message]]:
trace = []
for content in contents:
time = re.search(r"\d{2}:\d{2}:\d{2}\.\d{3}", content).group(0)
data = re.search(r"(\s\w{2}){8}", content).group(0).strip().split(" ")
msg_id = re.search(r"0x\w{4},", content).group(0)[:-1]
logger.debug(f"{time}, {data}, {msg_id}")
message = Message()
message.msg_id = int(msg_id, 16)
message.data = list(map(lambda x: int(x, 16), data))
trace.append((self.__get_time(time), message))
return trace
@staticmethod
def __get_time(hex_time):
splits = hex_time.split(".")
date_time = splits[0].split(":")
hour = date_time[0]
minutes = date_time[1]
seconds = date_time[2]
millisecond = splits[1]
current_time = (int(hour) * 60 * 60 + int(minutes) * 60 + int(seconds)) * 1000 + int(millisecond)
return current_time / 1000
| true
| true
|
f7088bd373aef47e9887dc1b0e5be23ea3cb543e
| 190
|
py
|
Python
|
premium/backend/src/baserow_premium/api/urls.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
premium/backend/src/baserow_premium/api/urls.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
premium/backend/src/baserow_premium/api/urls.py
|
cjh0613/baserow
|
62871f5bf53c9d25446976031aacb706c0abe584
|
[
"MIT"
] | null | null | null |
from django.urls import path, include
from .admin import urls as admin_urls
# URL namespace for this app's API routes.
app_name = "baserow_premium.api"
# Mount the premium admin API endpoints under the "admin/" prefix.
urlpatterns = [
    path("admin/", include(admin_urls, namespace="admin")),
]
| 19
| 59
| 0.731579
|
from django.urls import path, include
from .admin import urls as admin_urls
app_name = "baserow_premium.api"
urlpatterns = [
path("admin/", include(admin_urls, namespace="admin")),
]
| true
| true
|
f7088bdebaf4c3ef67a5162ba9bd81693a179b45
| 1,619
|
py
|
Python
|
octoprint_codemods/yield_from_generator.py
|
OctoPrint/codemods
|
6c6cd4bd689582f906571951b0eb7729c4923b51
|
[
"MIT"
] | 5
|
2020-10-06T12:02:23.000Z
|
2021-04-26T00:31:55.000Z
|
octoprint_codemods/yield_from_generator.py
|
OctoPrint/codemods
|
6c6cd4bd689582f906571951b0eb7729c4923b51
|
[
"MIT"
] | null | null | null |
octoprint_codemods/yield_from_generator.py
|
OctoPrint/codemods
|
6c6cd4bd689582f906571951b0eb7729c4923b51
|
[
"MIT"
] | 1
|
2020-10-10T17:18:39.000Z
|
2020-10-10T17:18:39.000Z
|
from typing import Union, cast
import libcst as cst
import libcst.matchers as m
from .util import CodeMod, runner
"""
libcst based transformer to convert 'for x in generator: yield x' to 'yield from generator'.
"""
__author__ = "Gina Häußge <gina@octoprint.org>"
__license__ = "MIT"
class YieldFromGenerator(CodeMod):
    """Codemod that rewrites ``for x in gen: yield x`` loops as ``yield from gen``."""

    DESCRIPTION: str = "Converts 'for x in generator: yield x' to 'yield from generator'."

    def leave_For(
        self, original_node: cst.For, updated_node: cst.For
    ) -> Union[cst.For, cst.SimpleStatementLine]:
        """Replace a matching ``for`` loop with a single ``yield from`` statement.

        Only fires when the loop target is a bare name and the body is exactly
        one ``yield <name>`` statement whose name equals the loop target.
        """
        if m.matches(
            updated_node,
            m.For(
                target=m.Name(),
                body=m.IndentedBlock(
                    body=[m.SimpleStatementLine(body=[m.Expr(value=m.Yield(m.Name()))])]
                ),
            ),
        ):
            # Dig the loop variable and the yielded identifier out of the tree.
            target = updated_node.target.value
            block = cast(cst.IndentedBlock, updated_node.body)
            simple_stmt = cast(cst.SimpleStatementLine, block.body[0])
            expr_stmt = cast(cst.Expr, simple_stmt.body[0])
            yield_stmt = cast(cst.Yield, expr_stmt.value)
            yielded = cast(cst.Name, yield_stmt.value).value
            # Rewrite only if the loop yields exactly its own loop variable.
            if target == yielded:
                self._report_node(original_node)
                self.count += 1
                # Build the replacement statement: ``yield from <original iterable>``.
                updated_node = cst.SimpleStatementLine(
                    body=[
                        cst.Expr(value=cst.Yield(value=cst.From(item=updated_node.iter)))
                    ]
                )
        return updated_node
def main():
    """CLI entry point: run this codemod via the shared runner."""
    runner(YieldFromGenerator)


if __name__ == "__main__":
    main()
| 29.436364
| 92
| 0.579988
|
from typing import Union, cast
import libcst as cst
import libcst.matchers as m
from .util import CodeMod, runner
__author__ = "Gina Häußge <gina@octoprint.org>"
__license__ = "MIT"
class YieldFromGenerator(CodeMod):
DESCRIPTION: str = "Converts 'for x in generator: yield x' to 'yield from generator'."
def leave_For(
self, original_node: cst.For, updated_node: cst.For
) -> Union[cst.For, cst.SimpleStatementLine]:
if m.matches(
updated_node,
m.For(
target=m.Name(),
body=m.IndentedBlock(
body=[m.SimpleStatementLine(body=[m.Expr(value=m.Yield(m.Name()))])]
),
),
):
target = updated_node.target.value
block = cast(cst.IndentedBlock, updated_node.body)
simple_stmt = cast(cst.SimpleStatementLine, block.body[0])
expr_stmt = cast(cst.Expr, simple_stmt.body[0])
yield_stmt = cast(cst.Yield, expr_stmt.value)
yielded = cast(cst.Name, yield_stmt.value).value
if target == yielded:
self._report_node(original_node)
self.count += 1
updated_node = cst.SimpleStatementLine(
body=[
cst.Expr(value=cst.Yield(value=cst.From(item=updated_node.iter)))
]
)
return updated_node
def main():
runner(YieldFromGenerator)
if __name__ == "__main__":
main()
| true
| true
|
f7088c087cad60100e39ebb04ca112f5066abe65
| 11,764
|
py
|
Python
|
google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py
|
Nawod/python-aiplatform
|
ffc70d148868489161797cc25a63298dda322d5f
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py
|
Nawod/python-aiplatform
|
ffc70d148868489161797cc25a63298dda322d5f
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform_v1beta1/services/pipeline_service/transports/base.py
|
Nawod/python-aiplatform
|
ffc70d148868489161797cc25a63298dda322d5f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1beta1.types import pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_service
from google.cloud.aiplatform_v1beta1.types import training_pipeline
from google.cloud.aiplatform_v1beta1.types import (
training_pipeline as gca_training_pipeline,
)
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
# Default client info reports the installed google-cloud-aiplatform version in
# the user-agent header; fall back to a bare ClientInfo when it is not installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-aiplatform",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()

# Detect the installed google-auth version (None when it cannot be determined);
# consumed by PipelineServiceTransport._get_scopes_kwargs below.
try:
    # google.auth.__version__ was added in 1.26.0
    _GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
    try:  # try pkg_resources if it is available
        _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
    except pkg_resources.DistributionNotFound:  # pragma: NO COVER
        _GOOGLE_AUTH_VERSION = None
class PipelineServiceTransport(abc.ABC):
    """Abstract transport class for PipelineService."""

    # OAuth scopes requested when no explicit scopes are supplied.
    AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)

    DEFAULT_HOST: str = "aiplatform.googleapis.com"

    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    # TODO(busunkim): This method is in the base transport
    # to avoid duplicating code across the transport classes. These functions
    # should be deleted once the minimum required versions of google-auth is increased.

    # TODO: Remove this function once google-auth >= 1.25.0 is required
    @classmethod
    def _get_scopes_kwargs(
        cls, host: str, scopes: Optional[Sequence[str]]
    ) -> Dict[str, Optional[Sequence[str]]]:
        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
        scopes_kwargs = {}

        if _GOOGLE_AUTH_VERSION and (
            packaging.version.parse(_GOOGLE_AUTH_VERSION)
            >= packaging.version.parse("1.25.0")
        ):
            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
        else:
            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}

        return scopes_kwargs

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.create_training_pipeline: gapic_v1.method.wrap_method(
                self.create_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.get_training_pipeline: gapic_v1.method.wrap_method(
                self.get_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.list_training_pipelines: gapic_v1.method.wrap_method(
                self.list_training_pipelines,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.delete_training_pipeline: gapic_v1.method.wrap_method(
                self.delete_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.cancel_training_pipeline: gapic_v1.method.wrap_method(
                self.cancel_training_pipeline,
                default_timeout=5.0,
                client_info=client_info,
            ),
            self.create_pipeline_job: gapic_v1.method.wrap_method(
                self.create_pipeline_job, default_timeout=None, client_info=client_info,
            ),
            self.get_pipeline_job: gapic_v1.method.wrap_method(
                self.get_pipeline_job, default_timeout=None, client_info=client_info,
            ),
            self.list_pipeline_jobs: gapic_v1.method.wrap_method(
                self.list_pipeline_jobs, default_timeout=None, client_info=client_info,
            ),
            self.delete_pipeline_job: gapic_v1.method.wrap_method(
                self.delete_pipeline_job, default_timeout=None, client_info=client_info,
            ),
            self.cancel_pipeline_job: gapic_v1.method.wrap_method(
                self.cancel_pipeline_job, default_timeout=None, client_info=client_info,
            ),
        }

    # Each property below declares one RPC of the PipelineService surface.
    # Concrete transports (grpc / grpc-asyncio) override them to return the
    # actual callable, which is either synchronous or awaitable.
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def create_training_pipeline(
        self,
    ) -> Callable[
        [pipeline_service.CreateTrainingPipelineRequest],
        Union[
            gca_training_pipeline.TrainingPipeline,
            Awaitable[gca_training_pipeline.TrainingPipeline],
        ],
    ]:
        raise NotImplementedError()

    @property
    def get_training_pipeline(
        self,
    ) -> Callable[
        [pipeline_service.GetTrainingPipelineRequest],
        Union[
            training_pipeline.TrainingPipeline,
            Awaitable[training_pipeline.TrainingPipeline],
        ],
    ]:
        raise NotImplementedError()

    @property
    def list_training_pipelines(
        self,
    ) -> Callable[
        [pipeline_service.ListTrainingPipelinesRequest],
        Union[
            pipeline_service.ListTrainingPipelinesResponse,
            Awaitable[pipeline_service.ListTrainingPipelinesResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def delete_training_pipeline(
        self,
    ) -> Callable[
        [pipeline_service.DeleteTrainingPipelineRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_training_pipeline(
        self,
    ) -> Callable[
        [pipeline_service.CancelTrainingPipelineRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def create_pipeline_job(
        self,
    ) -> Callable[
        [pipeline_service.CreatePipelineJobRequest],
        Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
    ]:
        raise NotImplementedError()

    @property
    def get_pipeline_job(
        self,
    ) -> Callable[
        [pipeline_service.GetPipelineJobRequest],
        Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
    ]:
        raise NotImplementedError()

    @property
    def list_pipeline_jobs(
        self,
    ) -> Callable[
        [pipeline_service.ListPipelineJobsRequest],
        Union[
            pipeline_service.ListPipelineJobsResponse,
            Awaitable[pipeline_service.ListPipelineJobsResponse],
        ],
    ]:
        raise NotImplementedError()

    @property
    def delete_pipeline_job(
        self,
    ) -> Callable[
        [pipeline_service.DeletePipelineJobRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def cancel_pipeline_job(
        self,
    ) -> Callable[
        [pipeline_service.CancelPipelineJobRequest],
        Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
    ]:
        raise NotImplementedError()


__all__ = ("PipelineServiceTransport",)
| 37.11041
| 103
| 0.663125
|
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.oauth2 import service_account
from google.cloud.aiplatform_v1beta1.types import pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1beta1.types import pipeline_service
from google.cloud.aiplatform_v1beta1.types import training_pipeline
from google.cloud.aiplatform_v1beta1.types import (
training_pipeline as gca_training_pipeline,
)
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try:
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound:
_GOOGLE_AUTH_VERSION = None
class PipelineServiceTransport(abc.ABC):
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
self._scopes = scopes
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
self._credentials = credentials
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=5.0,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=5.0,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job, default_timeout=None, client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job, default_timeout=None, client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs, default_timeout=None, client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job, default_timeout=None, client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job, default_timeout=None, client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
| true
| true
|
f7088c22019381ab4d1c04624a2767d334cb277b
| 1,111
|
py
|
Python
|
PythonBaseDemo/ModulesAndPackages/9.1/import_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/ModulesAndPackages/9.1/import_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
PythonBaseDemo/ModulesAndPackages/9.1/import_test.py
|
CypHelp/TestNewWorldDemo
|
ee6f73df05756f191c1c56250fa290461fdd1b9a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
# Import the whole sys module.
import sys
# Access a module member with the module name as prefix:
# prints the script path (sys.argv[0]).
print(sys.argv[0])
| 58.473684
| 73
| 0.193519
| true
| true
|
|
f7088c63313f91b709e1ed0c583841d1f3be3c28
| 92,660
|
py
|
Python
|
tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
|
Halimaz/tensorflow-1
|
3437fba39d5bca77fd7627aad15ba76fb75f5731
|
[
"Apache-2.0"
] | 1
|
2018-08-15T10:03:38.000Z
|
2018-08-15T10:03:38.000Z
|
tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
|
Halimaz/tensorflow-1
|
3437fba39d5bca77fd7627aad15ba76fb75f5731
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
|
Halimaz/tensorflow-1
|
3437fba39d5bca77fd7627aad15ba76fb75f5731
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import rnn as rnn_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_lib.RNNCell):
  """Trivial cell: maps (input, state) to (input + 1, state + 1).

  Both output and state are fixed at width 5, which lets tests predict the
  unrolled RNN's values exactly.
  """

  def __call__(self, input_, state, scope=None):
    incremented_output = input_ + 1
    incremented_state = state + 1
    return incremented_output, incremented_state

  @property
  def state_size(self):
    return 5

  @property
  def output_size(self):
    return 5
class DummyMultiDimensionalLSTM(rnn_lib.RNNCell):
  """Cell that adds one to its input and to both halves of its (h, c) state.

  Inputs and outputs may carry arbitrary extra dimensions beyond the leading
  'Time' and 'Batch' dimensions; those trailing dimensions are fixed at
  construction time.
  """

  def __init__(self, dims):
    """Create the cell.

    Args:
      dims: tuple of ints giving the per-step output shape, excluding the
        'Time' and 'Batch' dimensions.
    """
    if not isinstance(dims, tuple):
      raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM "
                      "should be a tuple of ints.")
    self._dims = dims
    self._output_size = tensor_shape.TensorShape(dims)
    # State is an LSTM-like (h, c) pair, each shaped like the output.
    self._state_size = (tensor_shape.TensorShape(dims),
                        tensor_shape.TensorShape(dims))

  @property
  def output_size(self):
    return self._output_size

  @property
  def state_size(self):
    return self._state_size

  def __call__(self, input_, state, scope=None):
    h, c = state
    next_state = (h + 1, c + 1)
    return input_ + 1, next_state
class NestedRNNCell(rnn_lib.RNNCell):
  """Cell whose input, output, and state are each a pair of tensors.

  Every component is incremented by one per step, so nested-structure
  plumbing through the RNN machinery can be verified exactly.
  """

  @property
  def output_size(self):
    return (5, 5)

  @property
  def state_size(self):
    return (6, 6)

  def __call__(self, input_, state, scope=None):
    x, y = input_
    h, c = state
    next_output = (x + 1, y + 1)
    next_state = (h + 1, c + 1)
    return next_output, next_state
class TestStateSaver(object):
  """Minimal state-saver stub: hands out zero states and records saved ones."""

  def __init__(self, batch_size, state_size):
    self._batch_size = batch_size
    self._state_size = state_size
    # Maps state name -> the tensor most recently passed to save_state().
    self.saved_state = {}

  def state(self, name):
    """Return a zeros tensor shaped (batch_size,) + state_size for `name`."""
    size = self._state_size
    if isinstance(size, dict):
      size = size[name]
    if isinstance(size, int):
      size = (size,)
    elif not isinstance(size, tuple):
      raise TypeError("state_size should either be an int or a tuple")
    return array_ops.zeros((self._batch_size,) + size)

  def save_state(self, name, state):
    """Record `state` under `name` and return a pass-through identity of it."""
    self.saved_state[name] = state
    return array_ops.identity(state)

  @property
  def batch_size(self):
    return self._batch_size

  @property
  def state_size(self):
    return self._state_size
class TestStateSaverWithCounters(TestStateSaver):
  """Class wrapper around TestStateSaver.
  A dummy class used for testing of static_state_saving_rnn. It helps test if
  save_state and state functions got called same number of time when we
  evaluate output of rnn cell and state or either of them separately. It
  inherits from the TestStateSaver and adds the counters for calls of functions.
  """
  def __init__(self, batch_size, state_size):
    super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)
    # Graph-mode call counters; incremented via control dependencies so they
    # only advance when the corresponding op is actually executed.
    self._num_state_calls = variables_lib.Variable(0)
    self._num_save_state_calls = variables_lib.Variable(0)
  def state(self, name):
    # The assign_add runs before (and only when) the returned state is used.
    with ops_lib.control_dependencies(
        [state_ops.assign_add(self._num_state_calls, 1)]):
      return super(TestStateSaverWithCounters, self).state(name)
  def save_state(self, name, state):
    # Same pattern: count an execution of the save_state op.
    with ops_lib.control_dependencies([state_ops.assign_add(
        self._num_save_state_calls, 1)]):
      return super(TestStateSaverWithCounters, self).save_state(name, state)
  @property
  def num_state_calls(self):
    # Variable tracking how many times state() ops were executed.
    return self._num_state_calls
  @property
  def num_save_state_calls(self):
    # Variable tracking how many times save_state() ops were executed.
    return self._num_save_state_calls
class RNNTest(test.TestCase):
  """Tests for static RNN unrolling using the predictable Plus1RNNCell."""
  def setUp(self):
    # Fixed NumPy seed so the random test inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)
  def testInvalidSequenceLengthShape(self):
    """A scalar sequence_length must be rejected with a clear error."""
    cell = Plus1RNNCell()
    inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
  def testRNN(self):
    """static_rnn applies the cell at each step and threads the state."""
    cell = Plus1RNNCell()
    batch_size = 2
    input_size = 5
    max_length = 8  # unrolled up to this length
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape(), inp.get_shape())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      # Only inputs[0] is fed; the same placeholder object is reused for all
      # time steps, so every step sees input_value.
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      # Outputs
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      # Final state
      self.assertAllClose(values[-1],
                          max_length * np.ones(
                              (batch_size, input_size), dtype=np.float32))
  def testDropout(self):
    """DropoutWrapper with near-zero input_keep_prob zeroes inputs pre-cell."""
    cell = Plus1RNNCell()
    full_dropout_cell = rnn_cell.DropoutWrapper(
        cell, input_keep_prob=1e-12, seed=0)
    # The wrapper should track the inner cell as a checkpoint dependency.
    (name, dep), = full_dropout_cell._checkpoint_dependencies
    self.assertIs(dep, cell)
    self.assertEqual("cell", name)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("share_scope"):
      outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("drop_scope"):
      dropped_outputs, _ = rnn.static_rnn(
          full_dropout_cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      full_dropout_values = sess.run(
          dropped_outputs, feed_dict={
              inputs[0]: input_value
          })
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Add 1.0 to dropped_out (all zeros)
        self.assertAllClose(d_v, np.ones_like(input_value))
  def testDynamicCalculation(self):
    """sequence_length truncates per-example computation and final states."""
    cell = Plus1RNNCell()
    sequence_length = array_ops.placeholder(dtypes.int64)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("drop_scope"):
      dynamic_outputs, dynamic_state = rnn.static_rnn(
          cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
    self.assertEqual(len(dynamic_outputs), len(inputs))
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      dynamic_values = sess.run(
          dynamic_outputs,
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      dynamic_state_value = sess.run(
          [dynamic_state],
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      # outputs are fully calculated for t = 0, 1
      for v in dynamic_values[:2]:
        self.assertAllClose(v, input_value + 1.0)
      # outputs at t = 2 are zero for entry 0, calculated for entry 1
      self.assertAllClose(dynamic_values[2],
                          np.vstack((np.zeros((input_size)),
                                     1.0 + input_value[1, :])))
      # outputs at t = 3+ are zero
      for v in dynamic_values[3:]:
        self.assertAllEqual(v, np.zeros_like(input_value))
      # the final states are:
      # entry 0: the values from the calculation at t=1
      # entry 1: the values from the calculation at t=2
      self.assertAllEqual(dynamic_state_value[0],
                          np.vstack((1.0 * (1 + 1) * np.ones((input_size)),
                                     1.0 * (2 + 1) * np.ones((input_size)))))
  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Build an RNN via `factory` and assert all variables live under `prefix`.

    Args:
      factory: callable taking a scope (or scope name) and building the RNN.
      prefix: expected variable-name prefix; None means the default "rnn".
      use_outer_scope: if True, pass a real VariableScope; else a string.
    """
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      # check that all the variables names starts
      # with the proper scope.
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))
  def testScope(self):
    """static_rnn respects explicit scopes, string scopes, and the default."""
    def factory(scope):
      cell = Plus1RNNCell()
      batch_size = 2
      input_size = 5
      max_length = 8  # unrolled up to this length
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
  def testDType(self):
    """LSTMCell variables default to float32 and honor an explicit dtype."""
    # Test case for GitHub issue 16228
    # Not passing dtype in constructor results in default float32
    lstm = rnn_cell.LSTMCell(10)
    input_tensor = array_ops.ones([10, 50])
    lstm.build(input_tensor.get_shape())
    self.assertEqual(lstm._bias.dtype, dtypes.float32_ref)
    # Explicitly pass dtype in constructor
    for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
      lstm = rnn_cell.LSTMCell(10, dtype=dtype)
      input_tensor = array_ops.ones([10, 50])
      lstm.build(input_tensor.get_shape())
      self.assertEqual(lstm._bias.dtype, dtype._as_ref)
  def testNoProjNoSharding(self):
    """Smoke test: plain LSTM (no projection, no sharding) builds and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units, initializer=initializer, state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def testCellClipping(self):
    """With cell_clip=0.0 every cell state is clipped to 0, so outputs are 0."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          cell_clip=0.0,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs, feed_dict={inputs[0]: input_value})
      for value in values:
        # if cell c is clipped to 0, tanh(c) = 0 => m==0
        self.assertAllEqual(value, np.zeros((batch_size, num_units)))
  def testNoProjNoShardingSimpleStateSaver(self):
    """State-saving RNN stores the final state under the given state_name."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # Non-tuple state: c and m are concatenated, hence 2 * num_units.
      state_saver = TestStateSaver(batch_size, 2 * num_units)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=False,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name="save_lstm")
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      (last_state_value, saved_state_value) = sess.run(
          [state, state_saver.saved_state["save_lstm"]],
          feed_dict={
              inputs[0]: input_value
          })
      # What the saver recorded must be exactly the RNN's final state.
      self.assertAllEqual(last_state_value, saved_state_value)
  def testNoProjNoShardingTupleStateSaver(self):
    """Tuple-state saving RNN stores (c, m) under a tuple of state names."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      state_saver = TestStateSaver(batch_size, num_units)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=False,
          initializer=initializer,
          state_is_tuple=True)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name=("c", "m"))
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      # Fetch (final_c, final_m, saved_c, saved_m) in one run call.
      last_and_saved_states = sess.run(
          state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
          feed_dict={
              inputs[0]: input_value
          })
      self.assertEqual(4, len(last_and_saved_states))
      self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
  def testNoProjNoShardingNestedTupleStateSaver(self):
    """Nested tuple states (MultiRNNCell of 4 LSTMs) round-trip the saver."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # One (c, m) name pair per layer; layer i has width num_units + i.
      state_saver = TestStateSaver(
          batch_size, {
              "c0": num_units,
              "m0": num_units,
              "c1": num_units + 1,
              "m1": num_units + 1,
              "c2": num_units + 2,
              "m2": num_units + 2,
              "c3": num_units + 3,
              "m3": num_units + 3
          })
      def _cell(i):
        # Layer constructor; each layer gets a distinct state width.
        return rnn_cell.LSTMCell(
            num_units + i,
            use_peepholes=False,
            initializer=initializer,
            state_is_tuple=True)
      # This creates a state tuple which has 4 sub-tuples of length 2 each.
      cell = rnn_cell.MultiRNNCell(
          [_cell(i) for i in range(4)], state_is_tuple=True)
      self.assertEqual(len(cell.state_size), 4)
      for i in range(4):
        self.assertEqual(len(cell.state_size[i]), 2)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name=state_names)
      self.assertEqual(len(outputs), len(inputs))
      # Final output comes from _cell(3) which has state size num_units + 3
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      last_states = sess.run(
          list(nest.flatten(state)), feed_dict={
              inputs[0]: input_value
          })
      saved_states = sess.run(
          list(state_saver.saved_state.values()),
          feed_dict={
              inputs[0]: input_value
          })
      self.assertEqual(8, len(last_states))
      self.assertEqual(8, len(saved_states))
      # Match saved values to final states by flattened state name.
      flat_state_names = nest.flatten(state_names)
      named_saved_states = dict(
          zip(state_saver.saved_state.keys(), saved_states))
      for i in range(8):
        self.assertAllEqual(last_states[i],
                            named_saved_states[flat_state_names[i]])
  def testProjNoSharding(self):
    """Smoke test: LSTM with an output projection (num_proj) builds and runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def _testStateTupleWithProjAndSequenceLength(self):
    """Tuple-state and concatenated-state LSTMs agree when sharing weights.

    NOTE(review): the leading underscore keeps this out of automatic test
    discovery; it appears to be intentionally disabled — confirm before
    renaming.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell_notuple = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      cell_tuple = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=True)
      with variable_scope.variable_scope("root") as scope:
        outputs_notuple, state_notuple = rnn.static_rnn(
            cell_notuple,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        # TODO(ebrevdo): For this test, we ensure values are identical and
        # therefore the weights here are tied.  In the future, we may consider
        # making the state_is_tuple property mutable so we can avoid
        # having to do this - especially if users ever need to reuse
        # the parameters from different RNNCell instances.  Right now,
        # this seems an unrealistic use case except for testing.
        cell_tuple._scope = cell_notuple._scope  # pylint: disable=protected-access
        outputs_tuple, state_tuple = rnn.static_rnn(
            cell_tuple,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
      self.assertEqual(len(outputs_notuple), len(inputs))
      self.assertEqual(len(outputs_tuple), len(inputs))
      self.assertTrue(isinstance(state_tuple, tuple))
      self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      outputs_notuple_v = sess.run(
          outputs_notuple, feed_dict={
              inputs[0]: input_value
          })
      outputs_tuple_v = sess.run(
          outputs_tuple, feed_dict={
              inputs[0]: input_value
          })
      self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
      (state_notuple_v,) = sess.run(
          (state_notuple,), feed_dict={
              inputs[0]: input_value
          })
      state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
      # Concatenated (c, m) must equal the hstacked tuple state.
      self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
  def testProjSharding(self):
    """Smoke test: LSTM with sharded unit and projection weights runs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          initializer=initializer,
          state_is_tuple=False)
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def testDoubleInput(self):
    """LSTM built on float64 inputs keeps float64 through to the outputs."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float64, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          initializer=initializer,
          state_is_tuple=False)
      # dtype is inferred from the float64 initial state.
      outputs, _ = rnn.static_rnn(
          cell,
          inputs,
          initial_state=cell.zero_state(batch_size, dtypes.float64))
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run()
      input_value = np.asarray(
          np.random.randn(batch_size, input_size), dtype=np.float64)
      values = sess.run(outputs, feed_dict={inputs[0]: input_value})
      self.assertEqual(values[0].dtype, input_value.dtype)
  def testShardNoShardEquivalentOutput(self):
    """Sharded and unsharded LSTMs with identical constant init match."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      # Constant initializer makes both parameterizations numerically equal.
      initializer = init_ops.constant_initializer(0.001)
      cell_noshard = rnn_cell.LSTMCell(
          num_units,
          num_proj=num_proj,
          use_peepholes=True,
          initializer=initializer,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          state_is_tuple=False)
      cell_shard = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          initializer=initializer,
          num_proj=num_proj,
          state_is_tuple=False)
      with variable_scope.variable_scope("noshard_scope"):
        outputs_noshard, state_noshard = rnn.static_rnn(
            cell_noshard, inputs, dtype=dtypes.float32)
      with variable_scope.variable_scope("shard_scope"):
        outputs_shard, state_shard = rnn.static_rnn(
            cell_shard, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs_noshard), len(inputs))
      self.assertEqual(len(outputs_noshard), len(outputs_shard))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      feeds = dict((x, input_value) for x in inputs)
      values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
      values_shard = sess.run(outputs_shard, feed_dict=feeds)
      state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
      state_values_shard = sess.run([state_shard], feed_dict=feeds)
      self.assertEqual(len(values_noshard), len(values_shard))
      self.assertEqual(len(state_values_noshard), len(state_values_shard))
      for (v_noshard, v_shard) in zip(values_noshard, values_shard):
        self.assertAllClose(v_noshard, v_shard, atol=1e-3)
      for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
        self.assertAllClose(s_noshard, s_shard, atol=1e-3)
  def testDoubleInputWithDropoutAndDynamicCalculation(self):
    """Smoke test for using LSTM with doubles, dropout, dynamic calculation."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      sequence_length = array_ops.placeholder(dtypes.int64)
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float64, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          initializer=initializer,
          state_is_tuple=False)
      dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
      outputs, state = rnn.static_rnn(
          dropout_cell,
          inputs,
          sequence_length=sequence_length,
          initial_state=cell.zero_state(batch_size, dtypes.float64))
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run(feed_dict={
          sequence_length: [2, 3]
      })
      input_value = np.asarray(
          np.random.randn(batch_size, input_size), dtype=np.float64)
      values = sess.run(
          outputs, feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      state_value = sess.run(
          [state], feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      # float64 must be preserved end to end.
      self.assertEqual(values[0].dtype, input_value.dtype)
      self.assertEqual(state_value[0].dtype, input_value.dtype)
  def testSharingWeightsWithReuse(self):
    """reuse=True shares weights; a different scope yields different weights."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
      # Different seed => different weights for the "diff_scope" cell.
      initializer_d = init_ops.random_uniform_initializer(
          -1, 1, seed=self._seed + 1)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      cell_d = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer_d,
          state_is_tuple=False)
      with variable_scope.variable_scope("share_scope"):
        outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      with variable_scope.variable_scope("share_scope", reuse=True):
        outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      with variable_scope.variable_scope("diff_scope"):
        outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      output_values = sess.run(
          outputs0 + outputs1 + outputs2, feed_dict={
              inputs[0]: input_value
          })
      outputs0_values = output_values[:max_length]
      outputs1_values = output_values[max_length:2 * max_length]
      outputs2_values = output_values[2 * max_length:]
      self.assertEqual(len(outputs0_values), len(outputs1_values))
      self.assertEqual(len(outputs0_values), len(outputs2_values))
      for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
        # Same weights used by both RNNs so outputs should be the same.
        self.assertAllEqual(o1, o2)
        # Different weights used so outputs should be different.
        self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)
  def testSharingWeightsWithDifferentNamescope(self):
    """Variable sharing works across distinct name scopes (same var scope)."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      # name_scope only affects op names; variable_scope governs sharing.
      with ops_lib.name_scope("scope0"):
        with variable_scope.variable_scope("share_scope"):
          outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      with ops_lib.name_scope("scope1"):
        with variable_scope.variable_scope("share_scope", reuse=True):
          outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      output_values = sess.run(
          outputs0 + outputs1, feed_dict={
              inputs[0]: input_value
          })
      outputs0_values = output_values[:max_length]
      outputs1_values = output_values[max_length:]
      self.assertEqual(len(outputs0_values), len(outputs1_values))
      for out0, out1 in zip(outputs0_values, outputs1_values):
        self.assertAllEqual(out0, out1)
  def testDynamicRNNAllowsUnknownTimeDimension(self):
    """dynamic_rnn must accept inputs whose time dimension is None."""
    inputs = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
    cell = rnn_cell.GRUCell(30)
    # Smoke test, this should not raise an error
    rnn.dynamic_rnn(cell, inputs, dtype=dtypes.float32)
  @test_util.run_in_graph_and_eager_modes
  def testDynamicRNNWithTupleStates(self):
    """static_rnn and dynamic_rnn agree on outputs and tuple final states."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    in_graph_mode = not context.executing_eagerly()
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      if in_graph_mode:
        inputs = max_length * [
            array_ops.placeholder(dtypes.float32, shape=(None, input_size))
        ]
      else:
        # Eager mode has no placeholders: use concrete constant inputs.
        inputs = max_length * [
            constant_op.constant(
                np.random.randn(batch_size, input_size).astype(np.float32))
        ]
      inputs_c = array_ops.stack(inputs)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=True)
      with variable_scope.variable_scope("root") as scope:
        outputs_static, state_static = rnn.static_rnn(
            cell,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        # Reuse the same weights so both unrollings are comparable.
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs_c,
            dtype=dtypes.float32,
            time_major=True,
            sequence_length=sequence_length,
            scope=scope)
      self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
      self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
      # Named fields must alias the positional tuple entries.
      self.assertEqual(state_static[0], state_static.c)
      self.assertEqual(state_static[1], state_static.h)
      self.assertEqual(state_dynamic[0], state_dynamic.c)
      self.assertEqual(state_dynamic[1], state_dynamic.h)
      if in_graph_mode:
        variables_lib.global_variables_initializer().run()
        input_value = np.random.randn(batch_size, input_size)
        outputs_static = sess.run(
            outputs_static, feed_dict={
                inputs[0]: input_value
            })
        outputs_dynamic = sess.run(
            outputs_dynamic, feed_dict={
                inputs[0]: input_value
            })
        state_static = sess.run(
            state_static, feed_dict={
                inputs[0]: input_value
            })
        state_dynamic = sess.run(
            state_dynamic, feed_dict={
                inputs[0]: input_value
            })
      if in_graph_mode:
        self.assertAllEqual(outputs_static, outputs_dynamic)
      else:
        self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
      self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
  @test_util.run_in_graph_and_eager_modes
  def testDynamicRNNWithNestedTupleStates(self):
    """static_rnn and dynamic_rnn agree for a 4-layer nested-tuple state."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    in_graph_mode = not context.executing_eagerly()
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      if in_graph_mode:
        inputs = max_length * [
            array_ops.placeholder(dtypes.float32, shape=(None, input_size))
        ]
      else:
        # Eager mode has no placeholders: use concrete constant inputs.
        inputs = max_length * [
            constant_op.constant(
                np.random.randn(batch_size, input_size).astype(np.float32))
        ]
      inputs_c = array_ops.stack(inputs)
      def _cell(i):
        # Layer constructor; each layer has distinct unit/projection widths.
        return rnn_cell.LSTMCell(
            num_units + i,
            use_peepholes=True,
            num_proj=num_proj + i,
            initializer=initializer,
            state_is_tuple=True)
      # This creates a state tuple which has 4 sub-tuples of length 2 each.
      cell = rnn_cell.MultiRNNCell(
          [_cell(i) for i in range(4)], state_is_tuple=True)
      self.assertEqual(len(cell.state_size), 4)
      for i in range(4):
        self.assertEqual(len(cell.state_size[i]), 2)
      test_zero = cell.zero_state(1, dtypes.float32)
      self.assertEqual(len(test_zero), 4)
      for i in range(4):
        self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
        self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
      with variable_scope.variable_scope("root") as scope:
        outputs_static, state_static = rnn.static_rnn(
            cell,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        # Reuse the same weights so both unrollings are comparable.
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs_c,
            dtype=dtypes.float32,
            time_major=True,
            sequence_length=sequence_length,
            scope=scope)
      if in_graph_mode:
        input_value = np.random.randn(batch_size, input_size)
        variables_lib.global_variables_initializer().run()
        outputs_static = sess.run(
            outputs_static, feed_dict={
                inputs[0]: input_value
            })
        outputs_dynamic = sess.run(
            outputs_dynamic, feed_dict={
                inputs[0]: input_value
            })
        state_static = sess.run(
            nest.flatten(state_static), feed_dict={
                inputs[0]: input_value
            })
        state_dynamic = sess.run(
            nest.flatten(state_dynamic), feed_dict={
                inputs[0]: input_value
            })
      if in_graph_mode:
        self.assertAllEqual(outputs_static, outputs_dynamic)
      else:
        self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
        state_static = nest.flatten(state_static)
        state_dynamic = nest.flatten(state_dynamic)
      self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
  def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):
    """Checks static_rnn and dynamic_rnn produce identical results.

    Builds the same LSTM twice (once via static_rnn, once via dynamic_rnn,
    in separate graphs seeded identically), then compares forward outputs,
    final states, and gradients w.r.t. inputs and trainable variables.

    Args:
      use_sequence_length: if True, use a random per-batch sequence length;
        otherwise run the full `time_steps` for every batch entry.
    """
    time_steps = 8
    num_units = 3
    num_proj = 4
    input_size = 5
    batch_size = 2
    input_values = np.random.randn(time_steps, batch_size, input_size).astype(
        np.float32)
    if use_sequence_length:
      sequence_length = np.random.randint(0, time_steps, size=batch_size)
    else:
      sequence_length = None
    in_graph_mode = not context.executing_eagerly()
    # TODO(b/68017812): Eager ignores operation seeds, so we need to create a
    # single cell and reuse it across the static and dynamic RNNs. Remove this
    # special case once is fixed.
    if not in_graph_mode:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          initializer=initializer,
          num_proj=num_proj,
          state_is_tuple=False)

    ########### Step 1: Run static graph and generate readouts
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      if in_graph_mode:
        concat_inputs = array_ops.placeholder(
            dtypes.float32, shape=(time_steps, batch_size, input_size))
      else:
        concat_inputs = constant_op.constant(input_values)
      inputs = array_ops.unstack(concat_inputs)
      # Same seed as Step 2 so both graphs get identical initial weights.
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # TODO(akshayka): Remove special case once b/68017812 is fixed.
      if in_graph_mode:
        cell = rnn_cell.LSTMCell(
            num_units,
            use_peepholes=True,
            initializer=initializer,
            num_proj=num_proj,
            state_is_tuple=False)
      with variable_scope.variable_scope("dynamic_scope"):
        outputs_static, state_static = rnn.static_rnn(
            cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)

      if in_graph_mode:
        # Generate gradients and run sessions to obtain outputs
        feeds = {concat_inputs: input_values}
        # Initialize
        variables_lib.global_variables_initializer().run(feed_dict=feeds)
        # Generate gradients of sum of outputs w.r.t. inputs
        static_gradients = gradients_impl.gradients(
            outputs_static + [state_static], [concat_inputs])
        # Generate gradients of individual outputs w.r.t. inputs
        static_individual_gradients = nest.flatten([
            gradients_impl.gradients(y, [concat_inputs])
            for y in [outputs_static[0], outputs_static[-1], state_static]
        ])
        # Generate gradients of individual variables w.r.t. inputs
        trainable_variables = ops_lib.get_collection(
            ops_lib.GraphKeys.TRAINABLE_VARIABLES)
        assert len(trainable_variables) > 1, (
            "Count of trainable variables: %d" % len(trainable_variables))
        # pylint: disable=bad-builtin
        static_individual_variable_gradients = nest.flatten([
            gradients_impl.gradients(y, trainable_variables)
            for y in [outputs_static[0], outputs_static[-1], state_static]
        ])
        # Test forward pass
        values_static = sess.run(outputs_static, feed_dict=feeds)
        (state_value_static,) = sess.run((state_static,), feed_dict=feeds)
        # Test gradients to inputs and variables w.r.t. outputs & final state
        static_grad_values = sess.run(static_gradients, feed_dict=feeds)
        static_individual_grad_values = sess.run(
            static_individual_gradients, feed_dict=feeds)
        static_individual_var_grad_values = sess.run(
            static_individual_variable_gradients, feed_dict=feeds)

    ########## Step 2: Run dynamic graph and generate readouts
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      if in_graph_mode:
        concat_inputs = array_ops.placeholder(
            dtypes.float32, shape=(time_steps, batch_size, input_size))
      else:
        concat_inputs = constant_op.constant(input_values)
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # TODO(akshayka): Remove this special case once b/68017812 is
      # fixed.
      if in_graph_mode:
        cell = rnn_cell.LSTMCell(
            num_units,
            use_peepholes=True,
            initializer=initializer,
            num_proj=num_proj,
            state_is_tuple=False)
      with variable_scope.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_inputs,
            sequence_length=sequence_length,
            time_major=True,
            dtype=dtypes.float32)
        # Unstack so the per-time-step readouts line up with the static ones.
        split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)

      if in_graph_mode:
        feeds = {concat_inputs: input_values}
        # Initialize
        variables_lib.global_variables_initializer().run(feed_dict=feeds)
        # Generate gradients of sum of outputs w.r.t. inputs
        dynamic_gradients = gradients_impl.gradients(
            split_outputs_dynamic + [state_dynamic], [concat_inputs])
        # Generate gradients of several individual outputs w.r.t. inputs
        dynamic_individual_gradients = nest.flatten([
            gradients_impl.gradients(y, [concat_inputs])
            for y in [
                split_outputs_dynamic[0], split_outputs_dynamic[-1],
                state_dynamic
            ]
        ])
        # Generate gradients of individual variables w.r.t. inputs
        trainable_variables = ops_lib.get_collection(
            ops_lib.GraphKeys.TRAINABLE_VARIABLES)
        assert len(trainable_variables) > 1, (
            "Count of trainable variables: %d" % len(trainable_variables))
        dynamic_individual_variable_gradients = nest.flatten([
            gradients_impl.gradients(y, trainable_variables)
            for y in [
                split_outputs_dynamic[0], split_outputs_dynamic[-1],
                state_dynamic
            ]
        ])
        # Test forward pass
        values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
        (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
        # Test gradients to inputs and variables w.r.t. outputs & final state
        dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
        dynamic_individual_grad_values = sess.run(
            dynamic_individual_gradients, feed_dict=feeds)
        dynamic_individual_var_grad_values = sess.run(
            dynamic_individual_variable_gradients, feed_dict=feeds)

    ######### Step 3: Comparisons
    if not in_graph_mode:
      # In eager mode the tensors above already hold concrete values.
      values_static = outputs_static
      values_dynamic = split_outputs_dynamic
      state_value_static = state_static
      state_value_dynamic = state_dynamic
    self.assertEqual(len(values_static), len(values_dynamic))
    for (value_static, value_dynamic) in zip(values_static, values_dynamic):
      self.assertAllEqual(value_static, value_dynamic)
    self.assertAllEqual(state_value_static, state_value_dynamic)
    if in_graph_mode:
      self.assertAllEqual(static_grad_values, dynamic_grad_values)
      self.assertEqual(
          len(static_individual_grad_values),
          len(dynamic_individual_grad_values))
      self.assertEqual(
          len(static_individual_var_grad_values),
          len(dynamic_individual_var_grad_values))
      for i, (a, b) in enumerate(
          zip(static_individual_grad_values, dynamic_individual_grad_values)):
        tf_logging.info("Comparing individual gradients iteration %d" % i)
        self.assertAllEqual(a, b)
      for i, (a, b) in enumerate(
          zip(static_individual_var_grad_values,
              dynamic_individual_var_grad_values)):
        tf_logging.info(
            "Comparing individual variable gradients iteration %d" % i)
        self.assertAllEqual(a, b)
@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNN(self):
self._testDynamicEquivalentToStaticRNN(use_sequence_length=True)
self._testDynamicEquivalentToStaticRNN(use_sequence_length=False)
class BidirectionalRNNTest(test.TestCase):
  """Tests for static and dynamic bidirectional RNNs.

  Forward and backward cells are built with the same seed, so (for a given
  sequence) the backward pass must reproduce the forward outputs reversed
  in time; the tests below check exactly that.
  """

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
    """Builds a static bidirectional LSTM over placeholder inputs.

    Returns:
      Tuple of (input_value, inputs placeholders, stacked outputs,
      forward state, backward state, sequence_length placeholder or None).
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = array_ops.placeholder(
        dtypes.int64) if use_sequence_length else None
    # fw and bw cells use the same initializer/seed so that outputs mirror.
    cell_fw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    cell_bw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
        cell_fw,
        cell_bw,
        inputs,
        dtype=dtypes.float32,
        sequence_length=sequence_length,
        scope=scope)
    self.assertEqual(len(outputs), len(inputs))
    for out in outputs:
      self.assertEqual(out.get_shape().as_list(),
                       [batch_size if use_shape else None, 2 * num_units])

    input_value = np.random.randn(batch_size, input_size)
    outputs = array_ops.stack(outputs)

    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalRNN(self, use_shape):
    """Checks fw/bw output symmetry with per-batch sequence lengths."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalRNN(use_shape, True))
      variables_lib.global_variables_initializer().run()
      # Run with pre-specified sequence length of 2, 3
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw],
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # First sequence in batch is length=2
      # Check that the time=0 forward output is equal to time=1 backward output
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      # Check that the time=1 forward output is equal to time=0 backward output
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])

      # Second sequence in batch is length=3
      # Check that the time=0 forward output is equal to time=2 backward output
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      # Check that the time=1 forward output is equal to time=1 backward output
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      # Check that the time=2 forward output is equal to time=0 backward output
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
    """Checks fw/bw output symmetry when no sequence length is given."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = (
          self._createBidirectionalRNN(use_shape, False))
      variables_lib.global_variables_initializer().run()
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw], feed_dict={
              inputs[0]: input_value
          })

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      # Both sequences in batch are length=8. Check that the time=i
      # forward output is equal to time=8-1-i backward output
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      # Via the reasoning above, the forward and backward final state should be
      # exactly the same
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalRNN(self):
    self._testBidirectionalRNN(use_shape=False)
    self._testBidirectionalRNN(use_shape=True)

  def testBidirectionalRNNWithoutSequenceLength(self):
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)

  def _createBidirectionalDynamicRNN(self,
                                     use_shape,
                                     use_state_tuple,
                                     use_time_major,
                                     use_sequence_length,
                                     scope=None):
    """Builds a bidirectional_dynamic_rnn and checks its static output shape.

    Returns:
      Tuple of (input_value, inputs placeholders, concatenated outputs,
      forward state, backward state, sequence_length placeholder or None).
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = (
        array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
    cell_fw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    cell_bw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size if use_shape else None, input_size))
    ]
    inputs_c = array_ops.stack(inputs)
    if not use_time_major:
      # dynamic_rnn expects batch-major input when time_major=False.
      inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
    outputs, states = rnn.bidirectional_dynamic_rnn(
        cell_fw,
        cell_bw,
        inputs_c,
        sequence_length,
        dtype=dtypes.float32,
        time_major=use_time_major,
        scope=scope)
    # Concatenate fw/bw outputs along the depth dimension.
    outputs = array_ops.concat(outputs, 2)
    state_fw, state_bw = states
    outputs_shape = [None, max_length, 2 * num_units]
    if use_shape:
      outputs_shape[0] = batch_size
    if use_time_major:
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    self.assertEqual(outputs.get_shape().as_list(), outputs_shape)

    input_value = np.random.randn(batch_size, input_size)

    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
                                   use_time_major, use_sequence_length):
    """Checks fw/bw symmetry of bidirectional_dynamic_rnn for one option set."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalDynamicRNN(
              use_shape, use_state_tuple, use_time_major, use_sequence_length))
      variables_lib.global_variables_initializer().run()
      # Run with pre-specified sequence length of 2, 3
      feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
      feed_dict.update({inputs[0]: input_value})
      if use_state_tuple:
        out, c_fw, m_fw, c_bw, m_bw = sess.run(
            [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
            feed_dict=feed_dict)
        s_fw = (c_fw, m_fw)
        s_bw = (c_bw, m_bw)
      else:
        feed_dict.update({inputs[0]: input_value})
        out, s_fw, s_bw = sess.run(
            [outputs, state_fw, state_bw], feed_dict=feed_dict)

      # Since the forward and backward LSTM cells were initialized with the
      # same parameters, the forward and backward output has to be the same,
      # but reversed in time. The format is output[time][batch][depth], and
      # due to depth concatenation (as num_units=3 for both RNNs):
      # - forward output: out[][][depth] for 0 <= depth < 3
      # - backward output: out[][][depth] for 3 <= depth < 6
      #
      if not use_time_major:
        # Normalize to time-major so the index checks below apply either way.
        out = np.swapaxes(out, 0, 1)

      if use_sequence_length:
        # First sequence in batch is length=2
        # Check that the t=0 forward output is equal to t=1 backward output
        self.assertEqual(out[0][0][0], out[1][0][3])
        self.assertEqual(out[0][0][1], out[1][0][4])
        self.assertEqual(out[0][0][2], out[1][0][5])
        # Check that the t=1 forward output is equal to t=0 backward output
        self.assertEqual(out[1][0][0], out[0][0][3])
        self.assertEqual(out[1][0][1], out[0][0][4])
        self.assertEqual(out[1][0][2], out[0][0][5])

        # Second sequence in batch is length=3
        # Check that the t=0 forward output is equal to t=2 backward output
        self.assertEqual(out[0][1][0], out[2][1][3])
        self.assertEqual(out[0][1][1], out[2][1][4])
        self.assertEqual(out[0][1][2], out[2][1][5])
        # Check that the t=1 forward output is equal to t=1 backward output
        self.assertEqual(out[1][1][0], out[1][1][3])
        self.assertEqual(out[1][1][1], out[1][1][4])
        self.assertEqual(out[1][1][2], out[1][1][5])
        # Check that the t=2 forward output is equal to t=0 backward output
        self.assertEqual(out[2][1][0], out[0][1][3])
        self.assertEqual(out[2][1][1], out[0][1][4])
        self.assertEqual(out[2][1][2], out[0][1][5])
        # Via the reasoning above, the forward and backward final state should
        # be exactly the same
        self.assertAllClose(s_fw, s_bw)
      else:  # not use_sequence_length
        max_length = 8  # from createBidirectionalDynamicRNN
        for t in range(max_length):
          self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
        self.assertAllClose(s_fw, s_bw)

  def testBidirectionalDynamicRNN(self):
    # Generate all 2^4 option combinations,
    # from (True, True, True, True) to (False, False, False, False).
    options = itertools.product([True, False], repeat=4)
    for option in options:
      self._testBidirectionalDynamicRNN(
          use_shape=option[0],
          use_state_tuple=option[1],
          use_time_major=option[2],
          use_sequence_length=option[3])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Verifies every variable created by `factory` lives under `prefix`."""
    # REMARKS: factory(scope) is a function accepting a scope
    #          as an argument, such scope can be None, a string
    #          or a VariableScope instance.
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      # check that all the variables names starts
      # with the proper scope.
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      prefix = prefix or "bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("BiRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testBidirectionalRNNScope(self):

    def factory(scope):
      return self._createBidirectionalRNN(
          use_shape=True, use_sequence_length=True, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)

  def testBidirectionalDynamicRNNScope(self):

    def get_factory(use_time_major):

      def factory(scope):
        return self._createBidirectionalDynamicRNN(
            use_shape=True,
            use_state_tuple=True,
            use_sequence_length=True,
            use_time_major=use_time_major,
            scope=scope)

      return factory

    self._testScope(get_factory(True), use_outer_scope=True)
    self._testScope(get_factory(True), use_outer_scope=False)
    self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
    self._testScope(get_factory(False), use_outer_scope=True)
    self._testScope(get_factory(False), use_outer_scope=False)
    self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(test.TestCase):
  """Tests RNN containers with cells whose inputs/outputs have rank > 2.

  Uses the file's DummyMultiDimensionalLSTM helper cell; each time step's
  feature is a multi-dimensional tensor rather than a flat vector.
  """

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testMultiDimensionalLSTMAllRNNContainers(self):
    """Checks static, dynamic, bidirectional, and state-saving RNNs agree."""
    feature_dims = (3, 4, 5)
    input_size = feature_dims
    batch_size = 2
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
      ]
      inputs_using_dim = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size,) + input_size)
      ]
      inputs_c = array_ops.stack(inputs)
      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = DummyMultiDimensionalLSTM(feature_dims)
      state_saver = TestStateSaver(batch_size, input_size)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))
      # Output shapes must mirror the input shapes for every container.
      self.assertEqual(outputs_dynamic.get_shape().as_list(),
                       inputs_c.get_shape().as_list())
      for out, inp in zip(outputs_static, inputs):
        self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      for out, inp in zip(outputs_bid, inputs_using_dim):
        input_shape_list = inp.get_shape().as_list()
        # fwd and bwd activations are concatenated along the second dim.
        input_shape_list[1] *= 2
        self.assertEqual(out.get_shape().as_list(), input_shape_list)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size,) + input_size
      input_value = np.random.randn(*input_total_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              inputs[0]: input_value
          })
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              inputs[0]: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              inputs_using_dim[0]: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })

      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      # Bidirectional output is the static output duplicated along depth
      # (the same cell runs forward and backward).
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=2)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_static_v = sess.run(
          state_static, feed_dict={
              inputs[0]: input_value
          })
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              inputs[0]: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(test.TestCase):
  """Tests RNN containers with cells whose input/output is a nested tuple.

  Uses the file's NestedRNNCell helper; each time step's input is a 2-tuple
  of tensors rather than a single tensor.
  """

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testNestedIOLSTMAllRNNContainers(self):
    """Checks static, dynamic, bidirectional, and state-saving RNNs agree."""
    input_size = 5
    batch_size = 2
    state_size = 6
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      state_saver = TestStateSaver(batch_size, state_size)
      single_input = (array_ops.placeholder(
          dtypes.float32, shape=(None, input_size)),
                      array_ops.placeholder(
                          dtypes.float32, shape=(None, input_size)))
      inputs = max_length * [single_input]
      inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
                  array_ops.stack([input_[1] for input_ in inputs]))
      single_input_using_dim = (array_ops.placeholder(
          dtypes.float32, shape=(batch_size, input_size)),
                                array_ops.placeholder(
                                    dtypes.float32,
                                    shape=(batch_size, input_size)))
      inputs_using_dim = max_length * [single_input_using_dim]

      # Create a cell for the whole test. This is fine because the cell has no
      # variables.
      cell = NestedRNNCell()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      def _assert_same_shape(input1, input2, double=False):
        # Flatten both nested structures and compare leaf shapes pairwise;
        # `double` accounts for fw/bw depth concatenation.
        flat_input1 = nest.flatten(input1)
        flat_input2 = nest.flatten(input2)
        for inp1, inp2 in zip(flat_input1, flat_input2):
          input_shape = inp1.get_shape().as_list()
          if double:
            input_shape[1] *= 2
          self.assertEqual(input_shape, inp2.get_shape().as_list())

      _assert_same_shape(inputs_c, outputs_dynamic)
      _assert_same_shape(inputs, outputs_static)
      _assert_same_shape(inputs_using_dim, outputs_sav)
      _assert_same_shape(inputs_using_dim, outputs_bid, double=True)

      variables_lib.global_variables_initializer().run()

      input_total_size = (batch_size, input_size)
      input_value = (np.random.randn(*input_total_size),
                     np.random.randn(*input_total_size))
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              single_input: input_value
          })
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              single_input: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              single_input_using_dim: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              single_input_using_dim: input_value
          })

      # Dynamic output is (tuple, time, ...); transpose to match static's
      # (time, tuple, ...) layout before comparing.
      self.assertAllEqual(outputs_static_v,
                          np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=3)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)

      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              single_input: input_value
          })
      state_static_v = sess.run(
          state_static, feed_dict={
              single_input: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              single_input_using_dim: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              single_input_using_dim: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              single_input_using_dim: input_value
          })
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class StateSaverRNNTest(test.TestCase):
  """Tests for static_state_saving_rnn scoping and state-saver interaction."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def _factory(self, scope, state_saver):
    """Builds a state-saving LSTM RNN over zero inputs.

    Returns:
      Tuple of (outputs, final state, the state_saver passed in).
    """
    # state_size holds both c and h, so the cell uses half of it.
    num_units = state_saver.state_size // 2
    batch_size = state_saver.batch_size
    input_size = 5
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=False,
        initializer=initializer,
        state_is_tuple=False)
    inputs = max_length * [
        array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))
    ]
    out, state = rnn.static_state_saving_rnn(
        cell,
        inputs,
        state_saver=state_saver,
        state_name="save_lstm",
        scope=scope)
    return out, state, state_saver

  def _testScope(self, prefix="prefix", use_outer_scope=True):
    """Verifies every variable the RNN creates lives under `prefix`."""
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaver(batch_size, 2 * num_units)
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          self._factory(scope=scope, state_saver=state_saver)
      else:
        self._factory(scope=prefix, state_saver=state_saver)
        variables_lib.global_variables_initializer()

      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testStateSaverRNNScope(self):
    self._testScope(use_outer_scope=True)
    self._testScope(use_outer_scope=False)
    self._testScope(prefix=None, use_outer_scope=False)

  def testStateSaverCallsSaveState(self):
    """Test that number of calls to state and save_state is equal.

    Test if the order of actual evaluating or skipping evaluation of out,
    state tensors, which are the output tensors from static_state_saving_rnn,
    have influence on number of calls to save_state and state methods of
    state_saver object (the number of calls should be same.)
    """
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)
    out, state, state_saver = self._factory(scope=None, state_saver=state_saver)

    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      sess.run(variables_lib.local_variables_initializer())

      # Fetch both out and state: counters must match.
      _, _, num_state_calls, num_save_state_calls = sess.run([
          out,
          state,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)

      # Fetch only out: counters must still match.
      _, num_state_calls, num_save_state_calls = sess.run([
          out,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)

      # Fetch only state: counters must still match.
      _, num_state_calls, num_save_state_calls = sess.run([
          state,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)
class GRUTest(test.TestCase):
  """Smoke and scoping tests for dynamic_rnn with a GRUCell."""

  def setUp(self):
    self._seed = 23489
    np.random.seed(self._seed)

  def testDynamic(self):
    """Builds and runs a time-major dynamic GRU; just checks it executes."""
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2

    input_values = np.random.randn(time_steps, batch_size, input_size)

    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      cell = rnn_cell.GRUCell(num_units=num_units)

      with variable_scope.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_inputs,
            sequence_length=sequence_length,
            time_major=True,
            dtype=dtypes.float32)

      feeds = {concat_inputs: input_values}

      # Initialize
      variables_lib.global_variables_initializer().run(feed_dict=feeds)

      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Verifies every variable created by `factory` lives under `prefix`."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        variables_lib.global_variables_initializer()

      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testDynamicScope(self):
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    def factory(scope):
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      cell = rnn_cell.GRUCell(num_units=num_units)
      return rnn.dynamic_rnn(
          cell,
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32,
          scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class RawRNNTest(test.TestCase):
  """Tests for `rnn.raw_rnn`: equivalence with `dynamic_rnn`, loop state,
  custom emit structures, and variable scoping."""
  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)
  def _testRawRNN(self, max_time):
    """Checks raw_rnn outputs/state/gradients match dynamic_rnn.

    Args:
      max_time: Number of time steps; 0 exercises the empty-sequence path.
    """
    with self.test_session(graph=ops_lib.Graph()) as sess:
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      # Stage the time-major inputs in a TensorArray so loop_fn can read
      # one time step at a time.
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state  # copy state through
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)
      # Reuse one scope so raw_rnn and dynamic_rnn share the same weights.
      reuse_scope = variable_scope.get_variable_scope()
      outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
      outputs = outputs_ta.stack()
      reuse_scope.reuse_variables()
      outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
          cell,
          inputs,
          time_major=True,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=reuse_scope)
      variables = variables_lib.trainable_variables()
      gradients = gradients_impl.gradients([outputs, final_state],
                                           [inputs] + variables)
      gradients_dynamic_rnn = gradients_impl.gradients(
          [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
      variables_lib.global_variables_initializer().run()
      rand_input = np.random.randn(max_time, batch_size, input_depth)
      if max_time == 0:
        rand_seq_len = np.zeros(batch_size)
      else:
        rand_seq_len = np.random.randint(max_time, size=batch_size)
        # To ensure same output lengths for dynamic_rnn and raw_rnn
        rand_seq_len[0] = max_time
      (outputs_val, outputs_dynamic_rnn_val, final_state_val,
       final_state_dynamic_rnn_val) = sess.run(
           [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
           feed_dict={
               inputs: rand_input,
               sequence_length: rand_seq_len
           })
      self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
      self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
      # NOTE: Because with 0 time steps, raw_rnn does not have shape
      # information about the input, it is impossible to perform
      # gradients comparisons as the gradients eval will fail. So
      # this case skips the gradients test.
      if max_time > 0:
        self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
        gradients_val = sess.run(
            gradients,
            feed_dict={
                inputs: rand_input,
                sequence_length: rand_seq_len
            })
        gradients_dynamic_rnn_val = sess.run(
            gradients_dynamic_rnn,
            feed_dict={
                inputs: rand_input,
                sequence_length: rand_seq_len
            })
        self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
        input_gradients_val = gradients_val[0]
        input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
        self.assertAllClose(input_gradients_val,
                            input_gradients_dynamic_rnn_val)
        for i in range(1, len(gradients_val)):
          self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
  def testRawRNNZeroLength(self):
    # NOTE: Because with 0 time steps, raw_rnn does not have shape
    # information about the input, it is impossible to perform
    # gradients comparisons as the gradients eval will fail. So this
    # case skips the gradients test.
    self._testRawRNN(max_time=0)
  def testRawRNN(self):
    self._testRawRNN(max_time=10)
  def testLoopState(self):
    """Verifies the per-iteration loop_state is threaded through raw_rnn."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          # First call: initialize the loop_state counter to 0.
          loop_state = constant_op.constant([0])
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Each subsequent call increments the counter by one.
          loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)
      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      # Counter was incremented once per time step after the first call.
      self.assertEqual([10], loop_state.eval())
  def testLoopStateWithTensorArray(self):
    """Verifies loop_state may itself be a TensorArray."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 4
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          loop_state = tensor_array_ops.TensorArray(
              dynamic_size=True,
              size=0,
              dtype=dtypes.int32,
              clear_after_read=False)
          loop_state = loop_state.write(0, 1)
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Running sum: each entry is the previous entry plus the time index.
          loop_state = loop_state.write(time_,
                                        loop_state.read(time_ - 1) + time_)
          next_state = cell_state
        emit_output = cell_output  # == None for time == 0
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)
      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      loop_state = loop_state.stack()
      self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())
  def testEmitDifferentStructureThanCellOutput(self):
    """Verifies emit structure may differ from the cell output structure."""
    with self.test_session(graph=ops_lib.Graph()) as sess:
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3
      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      # Verify emit shapes may be unknown by feeding a placeholder that
      # determines an emit shape.
      unknown_dim = array_ops.placeholder(dtype=dtypes.int32)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
      def loop_fn(time_, cell_output, cell_state, _):
        if cell_output is None:
          # Zeroth call describes the emit structure: a (int32, int64) pair.
          emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
                         array_ops.zeros([unknown_dim], dtype=dtypes.int64))
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
                         array_ops.ones(
                             [batch_size, unknown_dim], dtype=dtypes.int64))
          next_state = cell_state
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)
      r = rnn.raw_rnn(cell, loop_fn)
      output_ta = r[0]
      self.assertEqual(2, len(output_ta))
      self.assertEqual([dtypes.int32, dtypes.int64],
                       [ta.dtype for ta in output_ta])
      output = [ta.stack() for ta in output_ta]
      output_vals = sess.run(output, feed_dict={unknown_dim: 1})
      self.assertAllEqual(
          np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
      self.assertAllEqual(
          np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds via `factory` and asserts all variables live under `prefix`."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        variables_lib.global_variables_initializer()
      # check that all the variables names starts
      # with the proper scope.
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))
  def testRawRNNScope(self):
    """Checks raw_rnn variable scoping for scope object, string, and None."""
    max_time = 10
    batch_size = 16
    input_depth = 4
    num_units = 3
    def factory(scope):
      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output  # == None for time == 0
        if cell_output is None:  # time == 0
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # For the very final iteration, we must emit a dummy input
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)
      return rnn.raw_rnn(cell, loop_fn, scope=scope)
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(rnn_cell.RNNCell):
  """Class to ensure cell calculation happens on a specific device."""
  def __init__(self, cell, device):
    # Keep the wrapped cell and the (possibly None) target device string.
    self._wrapped = cell
    self._target_device = device
  @property
  def output_size(self):
    # Delegate straight to the wrapped cell.
    return self._wrapped.output_size
  @property
  def state_size(self):
    # Delegate straight to the wrapped cell.
    return self._wrapped.state_size
  def __call__(self, input_, state, scope=None):
    # Without a device constraint, let normal placement decide.
    if self._target_device is None:
      return self._wrapped(input_, state, scope=scope)
    # Otherwise pin the cell computation to the requested device.
    with ops_lib.device(self._target_device):
      return self._wrapped(input_, state, scope=scope)
class TensorArrayOnCorrectDeviceTest(test.TestCase):
  """Checks that dynamic_rnn's internal TensorArray ops are placed on the
  device implied by the RNN/cell/input device assignments."""
  def _execute_rnn_on(self,
                      rnn_device=None,
                      cell_device=None,
                      input_device=None):
    """Runs a dynamic_rnn with the given device pinnings; returns RunMetadata.

    Args:
      rnn_device: Device for the dynamic_rnn construction, or None.
      cell_device: Device the wrapped cell is forced onto, or None.
      input_device: Device the constant input is created on, or None.
    """
    batch_size = 3
    time_steps = 7
    input_size = 5
    num_units = 10
    cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
    gpu_cell = DeviceWrapperCell(cell, cell_device)
    inputs = np.random.randn(batch_size, time_steps, input_size).astype(
        np.float32)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
    if input_device is not None:
      with ops_lib.device(input_device):
        inputs = constant_op.constant(inputs)
    if rnn_device is not None:
      with ops_lib.device(rnn_device):
        outputs, _ = rnn.dynamic_rnn(
            gpu_cell,
            inputs,
            sequence_length=sequence_length,
            dtype=dtypes.float32)
    else:
      outputs, _ = rnn.dynamic_rnn(
          gpu_cell,
          inputs,
          sequence_length=sequence_length,
          dtype=dtypes.float32)
    with self.test_session(use_gpu=True) as sess:
      # Full tracing so per-device node stats are recorded.
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      variables_lib.global_variables_initializer().run()
      sess.run(outputs, options=opts, run_metadata=run_metadata)
      return run_metadata
  def _retrieve_cpu_gpu_stats(self, run_metadata):
    """Splits step stats into (cpu_stats, gpu_stats) node lists by device
    suffix; either may be None if that device produced no stats."""
    cpu_stats = None
    gpu_stats = None
    step_stats = run_metadata.step_stats
    for ds in step_stats.dev_stats:
      if "cpu:0" in ds.device[-5:].lower():
        cpu_stats = ds.node_stats
      if "gpu:0" == ds.device[-5:].lower():
        gpu_stats = ds.node_stats
    return cpu_stats, gpu_stats
  def testRNNOnCPUCellOnGPU(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
    def _assert_in(op_str, in_stats, out_stats):
      # Ops matching op_str must appear in in_stats and never in out_stats.
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # Writes happen at output of RNN cell
    _assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
    # Gather happens on final TensorArray
    _assert_in("TensorArrayGather", gpu_stats, cpu_stats)
    # Reads happen at input to RNN cell
    _assert_in("TensorArrayRead", cpu_stats, gpu_stats)
    # Scatters happen to get initial input into TensorArray
    _assert_in("TensorArrayScatter", cpu_stats, gpu_stats)
  def testRNNOnCPUCellOnCPU(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # All TensorArray operations happen on CPU
    _assert_in("TensorArray", cpu_stats, gpu_stats)
  def testInputOnGPUCellNotDeclared(self):
    if not test.is_gpu_available():
      return  # Test requires access to a GPU
    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(input_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)
    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))
    # Everything happens on GPU
    _assert_in("TensorArray", gpu_stats, cpu_stats)
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  test.main()
| 37.897751
| 85
| 0.657555
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import xrange
from tensorflow.contrib import rnn as rnn_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import nest
class Plus1RNNCell(rnn_lib.RNNCell):
  """Trivial RNN cell: each step adds one to its input and to its state."""
  @property
  def output_size(self):
    # Fixed output width used throughout the tests.
    return 5
  @property
  def state_size(self):
    # State shares the same fixed width as the output.
    return 5
  def __call__(self, input_, state, scope=None):
    new_output = input_ + 1
    new_state = state + 1
    return (new_output, new_state)
class DummyMultiDimensionalLSTM(rnn_lib.RNNCell):
  """Fake LSTM-like cell whose output and (h, c) state are multi-dimensional.

  The cell simply increments its input and both state components by one.
  """
  def __init__(self, dims):
    # `dims` is the per-step shape of the output and of each state component.
    if not isinstance(dims, tuple):
      raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM "
                      "should be a tuple of ints.")
    self._dims = dims
    self._output_size = tensor_shape.TensorShape(dims)
    self._state_size = (tensor_shape.TensorShape(dims),
                        tensor_shape.TensorShape(dims))
  @property
  def output_size(self):
    return self._output_size
  @property
  def state_size(self):
    return self._state_size
  def __call__(self, input_, state, scope=None):
    hidden, cell_mem = state
    bumped_state = (hidden + 1, cell_mem + 1)
    return (input_ + 1, bumped_state)
class NestedRNNCell(rnn_lib.RNNCell):
  """Cell whose input, output, and state are all 2-tuples of tensors."""
  @property
  def output_size(self):
    return (5, 5)
  @property
  def state_size(self):
    return (6, 6)
  def __call__(self, input_, state, scope=None):
    first_in, second_in = input_
    first_state, second_state = state
    next_outputs = (first_in + 1, second_in + 1)
    next_states = (first_state + 1, second_state + 1)
    return (next_outputs, next_states)
class TestStateSaver(object):
  """Minimal state-saver stub: hands out zero states and records saves."""
  def __init__(self, batch_size, state_size):
    self._batch_size = batch_size
    self._state_size = state_size
    self.saved_state = {}  # name -> last tensor passed to save_state
  def state(self, name):
    """Returns a zero tensor of shape (batch_size,) + state_size for `name`."""
    size_spec = self._state_size
    if isinstance(size_spec, dict):
      # Per-name sizes when state_size is a mapping.
      size_spec = size_spec[name]
    if isinstance(size_spec, int):
      size_spec = (size_spec,)
    elif not isinstance(size_spec, tuple):
      raise TypeError("state_size should either be an int or a tuple")
    return array_ops.zeros((self._batch_size,) + size_spec)
  def save_state(self, name, state):
    """Records `state` under `name` and returns it through an identity op."""
    self.saved_state[name] = state
    return array_ops.identity(state)
  @property
  def batch_size(self):
    return self._batch_size
  @property
  def state_size(self):
    return self._state_size
class TestStateSaverWithCounters(TestStateSaver):
  """State-saver stub that also counts state()/save_state() invocations."""
  def __init__(self, batch_size, state_size):
    super(TestStateSaverWithCounters, self).__init__(batch_size, state_size)
    # Graph-level counters, bumped through control dependencies below.
    self._num_state_calls = variables_lib.Variable(0)
    self._num_save_state_calls = variables_lib.Variable(0)
  def state(self, name):
    bump = state_ops.assign_add(self._num_state_calls, 1)
    with ops_lib.control_dependencies([bump]):
      return super(TestStateSaverWithCounters, self).state(name)
  def save_state(self, name, state):
    bump = state_ops.assign_add(self._num_save_state_calls, 1)
    with ops_lib.control_dependencies([bump]):
      return super(TestStateSaverWithCounters, self).save_state(name, state)
  @property
  def num_state_calls(self):
    return self._num_state_calls
  @property
  def num_save_state_calls(self):
    return self._num_save_state_calls
class RNNTest(test.TestCase):
  """Tests for `rnn.static_rnn` using the trivial Plus1RNNCell."""
  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)
  def testInvalidSequenceLengthShape(self):
    """A scalar sequence_length must be rejected (it must be a vector)."""
    cell = Plus1RNNCell()
    inputs = [array_ops.placeholder(dtypes.float32, shape=(3, 4))]
    with self.assertRaisesRegexp(ValueError, "must be a vector"):
      rnn.static_rnn(cell, inputs, dtype=dtypes.float32, sequence_length=4)
  def testRNN(self):
    """static_rnn with Plus1RNNCell adds 1 per step; final state = max_length."""
    cell = Plus1RNNCell()
    batch_size = 2
    input_size = 5
    max_length = 8
    # NOTE: the same placeholder is repeated max_length times, so feeding
    # inputs[0] feeds every step.
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape(), inp.get_shape())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      # State started at zero and gained +1 per time step.
      self.assertAllClose(values[-1],
                          max_length * np.ones(
                              (batch_size, input_size), dtype=np.float32))
  def testDropout(self):
    """input_keep_prob ~ 0 zeroes inputs, so outputs collapse to all-ones."""
    cell = Plus1RNNCell()
    full_dropout_cell = rnn_cell.DropoutWrapper(
        cell, input_keep_prob=1e-12, seed=0)
    # The wrapper must track the wrapped cell as a checkpoint dependency.
    (name, dep), = full_dropout_cell._checkpoint_dependencies
    self.assertIs(dep, cell)
    self.assertEqual("cell", name)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("share_scope"):
      outputs, state = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("drop_scope"):
      dropped_outputs, _ = rnn.static_rnn(
          full_dropout_cell, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs), len(inputs))
    for out, inp in zip(outputs, inputs):
      self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      self.assertEqual(out.dtype, inp.dtype)
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs + [state], feed_dict={inputs[0]: input_value})
      full_dropout_values = sess.run(
          dropped_outputs, feed_dict={
              inputs[0]: input_value
          })
      for v in values[:-1]:
        self.assertAllClose(v, input_value + 1.0)
      for d_v in full_dropout_values[:-1]:  # Add 1.0 to dropped_out (all zeros)
        self.assertAllClose(d_v, np.ones_like(input_value))
  def testDynamicCalculation(self):
    """With sequence_length [2, 3], outputs past each length are zeroed."""
    cell = Plus1RNNCell()
    sequence_length = array_ops.placeholder(dtypes.int64)
    batch_size = 2
    input_size = 5
    max_length = 8
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
    ]
    with variable_scope.variable_scope("drop_scope"):
      dynamic_outputs, dynamic_state = rnn.static_rnn(
          cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
    self.assertEqual(len(dynamic_outputs), len(inputs))
    with self.test_session(use_gpu=True) as sess:
      input_value = np.random.randn(batch_size, input_size)
      dynamic_values = sess.run(
          dynamic_outputs,
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      dynamic_state_value = sess.run(
          [dynamic_state],
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      # Both rows still active for the first 2 steps.
      for v in dynamic_values[:2]:
        self.assertAllClose(v, input_value + 1.0)
      # Step 2: row 0 (length 2) is finished -> zeros; row 1 still active.
      self.assertAllClose(dynamic_values[2],
                          np.vstack((np.zeros((input_size)),
                                     1.0 + input_value[1, :])))
      for v in dynamic_values[3:]:
        self.assertAllEqual(v, np.zeros_like(input_value))
      # Final state per row is (sequence_length + 1) since state starts at 1.
      self.assertAllEqual(dynamic_state_value[0],
                          np.vstack((1.0 * (1 + 1) * np.ones((input_size)),
                                     1.0 * (2 + 1) * np.ones((input_size)))))
  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Builds via `factory` and asserts all variables live under `prefix`."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
        variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))
  def testScope(self):
    """Checks static_rnn variable scoping for scope object, string, None."""
    def factory(scope):
      cell = Plus1RNNCell()
      batch_size = 2
      input_size = 5
      max_length = 8
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      return rnn.static_rnn(cell, inputs, dtype=dtypes.float32, scope=scope)
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class LSTMTest(test.TestCase):
  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)
  def testDType(self):
    """LSTMCell bias dtype defaults to float32 and follows the `dtype` arg."""
    # Default construction: bias is a float32 ref variable.
    lstm = rnn_cell.LSTMCell(10)
    input_tensor = array_ops.ones([10, 50])
    lstm.build(input_tensor.get_shape())
    self.assertEqual(lstm._bias.dtype, dtypes.float32_ref)
    # An explicit dtype must be honored for each supported float type.
    for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
      lstm = rnn_cell.LSTMCell(10, dtype=dtype)
      input_tensor = array_ops.ones([10, 50])
      lstm.build(input_tensor.get_shape())
      self.assertEqual(lstm._bias.dtype, dtype._as_ref)
  def testNoProjNoSharding(self):
    """Builds and runs a plain LSTM static_rnn (no projection, no sharding)."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units, initializer=initializer, state_is_tuple=False)
      # Same placeholder repeated; feeding inputs[0] feeds every step.
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def testCellClipping(self):
    """With cell_clip=0.0 the cell state is clipped to zero, so every
    output must be exactly zero."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          cell_clip=0.0,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      values = sess.run(outputs, feed_dict={inputs[0]: input_value})
      for value in values:  # if cell c is clipped to 0, tanh(c) = 0 => m==0
        self.assertAllEqual(value, np.zeros((batch_size, num_units)))
  def testNoProjNoShardingSimpleStateSaver(self):
    """static_state_saving_rnn must hand its final state to the state saver."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # Non-tuple state: a single concatenated (c, m) vector of 2*num_units.
      state_saver = TestStateSaver(batch_size, 2 * num_units)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=False,
          initializer=initializer,
          state_is_tuple=False)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name="save_lstm")
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      (last_state_value, saved_state_value) = sess.run(
          [state, state_saver.saved_state["save_lstm"]],
          feed_dict={
              inputs[0]: input_value
          })
      # The state handed to the saver is exactly the RNN's final state.
      self.assertAllEqual(last_state_value, saved_state_value)
  def testNoProjNoShardingTupleStateSaver(self):
    """Tuple-state variant: (c, m) saved under separate state names."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      state_saver = TestStateSaver(batch_size, num_units)
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=False,
          initializer=initializer,
          state_is_tuple=True)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name=("c", "m"))
      self.assertEqual(len(outputs), len(inputs))
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      last_and_saved_states = sess.run(
          state + (state_saver.saved_state["c"], state_saver.saved_state["m"]),
          feed_dict={
              inputs[0]: input_value
          })
      self.assertEqual(4, len(last_and_saved_states))
      # (last c, last m) must equal (saved c, saved m).
      self.assertAllEqual(last_and_saved_states[:2], last_and_saved_states[2:])
  def testNoProjNoShardingNestedTupleStateSaver(self):
    """Nested tuple-state variant: a 4-layer MultiRNNCell whose per-layer
    (c, m) states are saved under nested state names."""
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      # Layer i has num_units + i units; sizes keyed per saved state name.
      state_saver = TestStateSaver(
          batch_size, {
              "c0": num_units,
              "m0": num_units,
              "c1": num_units + 1,
              "m1": num_units + 1,
              "c2": num_units + 2,
              "m2": num_units + 2,
              "c3": num_units + 3,
              "m3": num_units + 3
          })
      def _cell(i):
        return rnn_cell.LSTMCell(
            num_units + i,
            use_peepholes=False,
            initializer=initializer,
            state_is_tuple=True)
      # This creates a state tuple which has 4 sub-tuples of length 2 each.
      cell = rnn_cell.MultiRNNCell(
          [_cell(i) for i in range(4)], state_is_tuple=True)
      self.assertEqual(len(cell.state_size), 4)
      for i in range(4):
        self.assertEqual(len(cell.state_size[i]), 2)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(batch_size, input_size))
      ]
      state_names = (("c0", "m0"), ("c1", "m1"), ("c2", "m2"), ("c3", "m3"))
      with variable_scope.variable_scope("share_scope"):
        outputs, state = rnn.static_state_saving_rnn(
            cell, inputs, state_saver=state_saver, state_name=state_names)
      self.assertEqual(len(outputs), len(inputs))
      # Final layer's output width is num_units + 3.
      for out in outputs:
        self.assertEqual(out.get_shape().as_list(), [batch_size, num_units + 3])
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      last_states = sess.run(
          list(nest.flatten(state)), feed_dict={
              inputs[0]: input_value
          })
      saved_states = sess.run(
          list(state_saver.saved_state.values()),
          feed_dict={
              inputs[0]: input_value
          })
      self.assertEqual(8, len(last_states))
      self.assertEqual(8, len(saved_states))
      flat_state_names = nest.flatten(state_names)
      named_saved_states = dict(
          zip(state_saver.saved_state.keys(), saved_states))
      # Each flattened final state must match its saved counterpart by name.
      for i in range(8):
        self.assertAllEqual(last_states[i],
                            named_saved_states[flat_state_names[i]])
  def testProjNoSharding(self):
    """Builds and runs an LSTM static_rnn with an output projection."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
  def _testStateTupleWithProjAndSequenceLength(self):
    """Tuple-state and concatenated-state cells sharing weights must produce
    identical outputs, with the tuple state hstack-equal to the flat state."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell_notuple = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=False)
      cell_tuple = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          initializer=initializer,
          state_is_tuple=True)
      with variable_scope.variable_scope("root") as scope:
        outputs_notuple, state_notuple = rnn.static_rnn(
            cell_notuple,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
        scope.reuse_variables()
        # TODO: fix cell_tuple to give the same result as cell_notuple --
        # here we force both cells onto the same underlying variable scope.
        cell_tuple._scope = cell_notuple._scope
        outputs_tuple, state_tuple = rnn.static_rnn(
            cell_tuple,
            inputs,
            dtype=dtypes.float32,
            sequence_length=sequence_length,
            scope=scope)
      self.assertEqual(len(outputs_notuple), len(inputs))
      self.assertEqual(len(outputs_tuple), len(inputs))
      self.assertTrue(isinstance(state_tuple, tuple))
      self.assertTrue(isinstance(state_notuple, ops_lib.Tensor))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      outputs_notuple_v = sess.run(
          outputs_notuple, feed_dict={
              inputs[0]: input_value
          })
      outputs_tuple_v = sess.run(
          outputs_tuple, feed_dict={
              inputs[0]: input_value
          })
      self.assertAllEqual(outputs_notuple_v, outputs_tuple_v)
      (state_notuple_v,) = sess.run(
          (state_notuple,), feed_dict={
              inputs[0]: input_value
          })
      state_tuple_v = sess.run(state_tuple, feed_dict={inputs[0]: input_value})
      # Concatenating the (c, m) tuple gives the flat state.
      self.assertAllEqual(state_notuple_v, np.hstack(state_tuple_v))
  def testProjSharding(self):
    """Builds and runs a projected LSTM with sharded unit/projection weights."""
    num_units = 3
    input_size = 5
    batch_size = 2
    num_proj = 4
    num_proj_shards = 3
    num_unit_shards = 2
    max_length = 8
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      initializer = init_ops.random_uniform_initializer(
          -0.01, 0.01, seed=self._seed)
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
      cell = rnn_cell.LSTMCell(
          num_units,
          use_peepholes=True,
          num_proj=num_proj,
          num_unit_shards=num_unit_shards,
          num_proj_shards=num_proj_shards,
          initializer=initializer,
          state_is_tuple=False)
      outputs, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
      self.assertEqual(len(outputs), len(inputs))
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      sess.run(outputs, feed_dict={inputs[0]: input_value})
def testDoubleInput(self):
  """static_rnn over float64 inputs must produce float64 outputs."""
  n_units, n_input, n_batch = 3, 5, 2
  n_proj, proj_shards, unit_shards = 4, 3, 2
  steps = 8
  with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
    uniform_init = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
    # One float64 placeholder reused for all unrolled steps.
    step_input = array_ops.placeholder(
        dtypes.float64, shape=(None, n_input))
    step_inputs = [step_input] * steps
    lstm = rnn_cell.LSTMCell(
        n_units,
        use_peepholes=True,
        num_proj=n_proj,
        num_unit_shards=unit_shards,
        num_proj_shards=proj_shards,
        initializer=uniform_init,
        state_is_tuple=False)
    outputs, _ = rnn.static_rnn(
        lstm,
        step_inputs,
        initial_state=lstm.zero_state(n_batch, dtypes.float64))
    self.assertEqual(len(outputs), len(step_inputs))
    variables_lib.global_variables_initializer().run()
    batch = np.random.randn(n_batch, n_input).astype(np.float64)
    values = sess.run(outputs, feed_dict={step_inputs[0]: batch})
    # The output dtype must match the double-precision input dtype.
    self.assertEqual(values[0].dtype, batch.dtype)
def testShardNoShardEquivalentOutput(self):
  """Sharded and unsharded LSTM cells must produce nearly equal results.

  Both cells use the same constant initializer, so any divergence can only
  come from the sharding implementation.

  NOTE(review): the `cell_noshard`/`cell_shard` names appear swapped —
  `cell_noshard` is the one constructed WITH shard counts. The test is
  symmetric, so behavior is unaffected; confirm before renaming.
  """
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  num_proj_shards = 3
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(None, input_size))
    ]
    # Constant initializer so both cells start from identical weights.
    initializer = init_ops.constant_initializer(0.001)
    cell_noshard = rnn_cell.LSTMCell(
        num_units,
        num_proj=num_proj,
        use_peepholes=True,
        initializer=initializer,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards,
        state_is_tuple=False)
    cell_shard = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        initializer=initializer,
        num_proj=num_proj,
        state_is_tuple=False)
    # Separate variable scopes so each cell creates its own variables.
    with variable_scope.variable_scope("noshard_scope"):
      outputs_noshard, state_noshard = rnn.static_rnn(
          cell_noshard, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("shard_scope"):
      outputs_shard, state_shard = rnn.static_rnn(
          cell_shard, inputs, dtype=dtypes.float32)
    self.assertEqual(len(outputs_noshard), len(inputs))
    self.assertEqual(len(outputs_noshard), len(outputs_shard))
    variables_lib.global_variables_initializer().run()
    input_value = np.random.randn(batch_size, input_size)
    # All steps share one placeholder, so this feeds the whole sequence.
    feeds = dict((x, input_value) for x in inputs)
    values_noshard = sess.run(outputs_noshard, feed_dict=feeds)
    values_shard = sess.run(outputs_shard, feed_dict=feeds)
    state_values_noshard = sess.run([state_noshard], feed_dict=feeds)
    state_values_shard = sess.run([state_shard], feed_dict=feeds)
    self.assertEqual(len(values_noshard), len(values_shard))
    self.assertEqual(len(state_values_noshard), len(state_values_shard))
    # Sharding can reorder float ops, so compare with a small tolerance.
    for (v_noshard, v_shard) in zip(values_noshard, values_shard):
      self.assertAllClose(v_noshard, v_shard, atol=1e-3)
    for (s_noshard, s_shard) in zip(state_values_noshard, state_values_shard):
      self.assertAllClose(s_noshard, s_shard, atol=1e-3)
def testDoubleInputWithDropoutAndDynamicCalculation(self):
  """float64 inputs through DropoutWrapper + sequence lengths stay float64."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  num_proj_shards = 3
  num_unit_shards = 2
  max_length = 8
  with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
    sequence_length = array_ops.placeholder(dtypes.int64)
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    inputs = max_length * [
        array_ops.placeholder(dtypes.float64, shape=(None, input_size))
    ]
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        num_proj=num_proj,
        num_unit_shards=num_unit_shards,
        num_proj_shards=num_proj_shards,
        initializer=initializer,
        state_is_tuple=False)
    # Fixed dropout seed keeps the run deterministic.
    dropout_cell = rnn_cell.DropoutWrapper(cell, 0.5, seed=0)
    outputs, state = rnn.static_rnn(
        dropout_cell,
        inputs,
        sequence_length=sequence_length,
        initial_state=cell.zero_state(batch_size, dtypes.float64))
    self.assertEqual(len(outputs), len(inputs))
    variables_lib.global_variables_initializer().run(feed_dict={
        sequence_length: [2, 3]
    })
    input_value = np.asarray(
        np.random.randn(batch_size, input_size), dtype=np.float64)
    values = sess.run(
        outputs, feed_dict={
            inputs[0]: input_value,
            sequence_length: [2, 3]
        })
    state_value = sess.run(
        [state], feed_dict={
            inputs[0]: input_value,
            sequence_length: [2, 3]
        })
    # Outputs and final state must preserve the float64 input dtype.
    self.assertEqual(values[0].dtype, input_value.dtype)
    self.assertEqual(state_value[0].dtype, input_value.dtype)
def testSharingWeightsWithReuse(self):
  """Scope reuse shares weights; a fresh scope must create different ones."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  with self.test_session(graph=ops_lib.Graph()) as sess:
    initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
    # Different seed so the "diff_scope" cell cannot match by accident.
    initializer_d = init_ops.random_uniform_initializer(
        -1, 1, seed=self._seed + 1)
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(None, input_size))
    ]
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        num_proj=num_proj,
        initializer=initializer,
        state_is_tuple=False)
    cell_d = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        num_proj=num_proj,
        initializer=initializer_d,
        state_is_tuple=False)
    # outputs0/outputs1 share variables via reuse; outputs2 gets its own.
    with variable_scope.variable_scope("share_scope"):
      outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("share_scope", reuse=True):
      outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with variable_scope.variable_scope("diff_scope"):
      outputs2, _ = rnn.static_rnn(cell_d, inputs, dtype=dtypes.float32)
    variables_lib.global_variables_initializer().run()
    input_value = np.random.randn(batch_size, input_size)
    output_values = sess.run(
        outputs0 + outputs1 + outputs2, feed_dict={
            inputs[0]: input_value
        })
    outputs0_values = output_values[:max_length]
    outputs1_values = output_values[max_length:2 * max_length]
    outputs2_values = output_values[2 * max_length:]
    self.assertEqual(len(outputs0_values), len(outputs1_values))
    self.assertEqual(len(outputs0_values), len(outputs2_values))
    for o1, o2, o3 in zip(outputs0_values, outputs1_values, outputs2_values):
      # Shared weights -> identical; different init -> measurably different.
      self.assertAllEqual(o1, o2)
      self.assertTrue(np.linalg.norm(o1 - o3) > 1e-6)
def testSharingWeightsWithDifferentNamescope(self):
  """name_scope must not interfere with variable sharing across runs."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  with self.test_session(graph=ops_lib.Graph()) as sess:
    initializer = init_ops.random_uniform_initializer(-1, 1, seed=self._seed)
    inputs = max_length * [
        array_ops.placeholder(dtypes.float32, shape=(None, input_size))
    ]
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        num_proj=num_proj,
        initializer=initializer,
        state_is_tuple=False)
    # Same variable_scope under two different name_scopes: weights shared.
    with ops_lib.name_scope("scope0"):
      with variable_scope.variable_scope("share_scope"):
        outputs0, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    with ops_lib.name_scope("scope1"):
      with variable_scope.variable_scope("share_scope", reuse=True):
        outputs1, _ = rnn.static_rnn(cell, inputs, dtype=dtypes.float32)
    variables_lib.global_variables_initializer().run()
    input_value = np.random.randn(batch_size, input_size)
    output_values = sess.run(
        outputs0 + outputs1, feed_dict={
            inputs[0]: input_value
        })
    outputs0_values = output_values[:max_length]
    outputs1_values = output_values[max_length:]
    self.assertEqual(len(outputs0_values), len(outputs1_values))
    for out0, out1 in zip(outputs0_values, outputs1_values):
      self.assertAllEqual(out0, out1)
def testDynamicRNNAllowsUnknownTimeDimension(self):
  """dynamic_rnn must accept an input whose time dimension is unknown."""
  batched = array_ops.placeholder(dtypes.float32, shape=[1, None, 20])
  gru = rnn_cell.GRUCell(30)
  # Graph construction itself is the assertion: no exception may be raised.
  rnn.dynamic_rnn(gru, batched, dtype=dtypes.float32)
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithTupleStates(self):
  """static_rnn and dynamic_rnn agree when the LSTM state is a tuple."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  sequence_length = [4, 6]
  in_graph_mode = not context.executing_eagerly()
  with self.test_session(graph=ops_lib.Graph()) as sess:
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    if in_graph_mode:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
    else:
      # Eager mode has no placeholders: use concrete random constants.
      inputs = max_length * [
          constant_op.constant(
              np.random.randn(batch_size, input_size).astype(np.float32))
      ]
    inputs_c = array_ops.stack(inputs)
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        num_proj=num_proj,
        initializer=initializer,
        state_is_tuple=True)
    with variable_scope.variable_scope("root") as scope:
      outputs_static, state_static = rnn.static_rnn(
          cell,
          inputs,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=scope)
      # Reuse the same weights for the dynamic run so results can match.
      scope.reuse_variables()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length,
          scope=scope)
    self.assertTrue(isinstance(state_static, rnn_cell.LSTMStateTuple))
    self.assertTrue(isinstance(state_dynamic, rnn_cell.LSTMStateTuple))
    # Tuple indexing and the named c/h fields must refer to the same tensors.
    self.assertEqual(state_static[0], state_static.c)
    self.assertEqual(state_static[1], state_static.h)
    self.assertEqual(state_dynamic[0], state_dynamic.c)
    self.assertEqual(state_dynamic[1], state_dynamic.h)
    if in_graph_mode:
      # Graph mode: evaluate everything through the session.
      variables_lib.global_variables_initializer().run()
      input_value = np.random.randn(batch_size, input_size)
      outputs_static = sess.run(
          outputs_static, feed_dict={
              inputs[0]: input_value
          })
      outputs_dynamic = sess.run(
          outputs_dynamic, feed_dict={
              inputs[0]: input_value
          })
      state_static = sess.run(
          state_static, feed_dict={
              inputs[0]: input_value
          })
      state_dynamic = sess.run(
          state_dynamic, feed_dict={
              inputs[0]: input_value
          })
    if in_graph_mode:
      self.assertAllEqual(outputs_static, outputs_dynamic)
    else:
      # Eager: static outputs are a list of tensors; stack before comparing.
      self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
    self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
@test_util.run_in_graph_and_eager_modes
def testDynamicRNNWithNestedTupleStates(self):
  """static_rnn and dynamic_rnn agree for a MultiRNNCell of tuple LSTMs."""
  num_units = 3
  input_size = 5
  batch_size = 2
  num_proj = 4
  max_length = 8
  sequence_length = [4, 6]
  in_graph_mode = not context.executing_eagerly()
  with self.test_session(graph=ops_lib.Graph()) as sess:
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    if in_graph_mode:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None, input_size))
      ]
    else:
      # Eager mode has no placeholders: use concrete random constants.
      inputs = max_length * [
          constant_op.constant(
              np.random.randn(batch_size, input_size).astype(np.float32))
      ]
    inputs_c = array_ops.stack(inputs)

    def _cell(i):
      # Layer i gets distinct unit/projection sizes to exercise nesting.
      return rnn_cell.LSTMCell(
          num_units + i,
          use_peepholes=True,
          num_proj=num_proj + i,
          initializer=initializer,
          state_is_tuple=True)

    cell = rnn_cell.MultiRNNCell(
        [_cell(i) for i in range(4)], state_is_tuple=True)
    # Nested state: 4 layers, each an (c, h) pair.
    self.assertEqual(len(cell.state_size), 4)
    for i in range(4):
      self.assertEqual(len(cell.state_size[i]), 2)
    test_zero = cell.zero_state(1, dtypes.float32)
    self.assertEqual(len(test_zero), 4)
    for i in range(4):
      self.assertEqual(test_zero[i][0].get_shape()[1], cell.state_size[i][0])
      self.assertEqual(test_zero[i][1].get_shape()[1], cell.state_size[i][1])
    with variable_scope.variable_scope("root") as scope:
      outputs_static, state_static = rnn.static_rnn(
          cell,
          inputs,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=scope)
      # Reuse the same weights for the dynamic run.
      scope.reuse_variables()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length,
          scope=scope)
    if in_graph_mode:
      input_value = np.random.randn(batch_size, input_size)
      variables_lib.global_variables_initializer().run()
      outputs_static = sess.run(
          outputs_static, feed_dict={
              inputs[0]: input_value
          })
      outputs_dynamic = sess.run(
          outputs_dynamic, feed_dict={
              inputs[0]: input_value
          })
      # Flatten the nested state tuples before evaluation.
      state_static = sess.run(
          nest.flatten(state_static), feed_dict={
              inputs[0]: input_value
          })
      state_dynamic = sess.run(
          nest.flatten(state_dynamic), feed_dict={
              inputs[0]: input_value
          })
    if in_graph_mode:
      self.assertAllEqual(outputs_static, outputs_dynamic)
    else:
      self.assertAllEqual(array_ops.stack(outputs_static), outputs_dynamic)
      # Eager path: states were not flattened above, so flatten here.
      state_static = nest.flatten(state_static)
      state_dynamic = nest.flatten(state_dynamic)
    self.assertAllEqual(np.hstack(state_static), np.hstack(state_dynamic))
def _testDynamicEquivalentToStaticRNN(self, use_sequence_length):
  """Check static_rnn and dynamic_rnn give identical values and gradients.

  NOTE(review): this block appears corrupted in extraction — several runs of
  lines (session setup, `concat_inputs`/`inputs` construction, parts of the
  cell constructors) are missing, and orphaned keyword arguments remain
  below. Restore from version control before relying on this code.
  """
  time_steps = 8
  num_units = 3
  num_proj = 4
  input_size = 5
  batch_size = 2
  input_values = np.random.randn(time_steps, batch_size, input_size).astype(
      np.float32)
  if use_sequence_length:
    sequence_length = np.random.randint(0, time_steps, size=batch_size)
  else:
    sequence_length = None
  in_graph_mode = not context.executing_eagerly()
  if not in_graph_mode:
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=True,
        initializer=initializer,
        num_proj=num_proj,
        state_is_tuple=False)
    # NOTE(review): truncated source — orphaned keyword arguments follow.
    use_peepholes=True,
    initializer=initializer,
    num_proj=num_proj,
    state_is_tuple=False)
  with variable_scope.variable_scope("dynamic_scope"):
    outputs_static, state_static = rnn.static_rnn(
        cell, inputs, sequence_length=sequence_length, dtype=dtypes.float32)
  if in_graph_mode:
    # Static path: evaluate outputs, final state, and all gradient flavors.
    feeds = {concat_inputs: input_values}
    variables_lib.global_variables_initializer().run(feed_dict=feeds)
    static_gradients = gradients_impl.gradients(
        outputs_static + [state_static], [concat_inputs])
    static_individual_gradients = nest.flatten([
        gradients_impl.gradients(y, [concat_inputs])
        for y in [outputs_static[0], outputs_static[-1], state_static]
    ])
    trainable_variables = ops_lib.get_collection(
        ops_lib.GraphKeys.TRAINABLE_VARIABLES)
    assert len(trainable_variables) > 1, (
        "Count of trainable variables: %d" % len(trainable_variables))
    static_individual_variable_gradients = nest.flatten([
        gradients_impl.gradients(y, trainable_variables)
        for y in [outputs_static[0], outputs_static[-1], state_static]
    ])
    values_static = sess.run(outputs_static, feed_dict=feeds)
    (state_value_static,) = sess.run((state_static,), feed_dict=feeds)
    static_grad_values = sess.run(static_gradients, feed_dict=feeds)
    static_individual_grad_values = sess.run(
        static_individual_gradients, feed_dict=feeds)
    static_individual_var_grad_values = sess.run(
        static_individual_variable_gradients, feed_dict=feeds)
  # NOTE(review): truncated source — the dynamic-path cell construction is
  # partially missing; orphaned fragments follow.
  ts,
  use_peepholes=True,
  initializer=initializer,
  num_proj=num_proj,
  state_is_tuple=False)
  with variable_scope.variable_scope("dynamic_scope"):
    outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
        cell,
        inputs=concat_inputs,
        sequence_length=sequence_length,
        time_major=True,
        dtype=dtypes.float32)
    split_outputs_dynamic = array_ops.unstack(outputs_dynamic, time_steps)
  if in_graph_mode:
    # Dynamic path: same evaluations as the static path above.
    feeds = {concat_inputs: input_values}
    variables_lib.global_variables_initializer().run(feed_dict=feeds)
    dynamic_gradients = gradients_impl.gradients(
        split_outputs_dynamic + [state_dynamic], [concat_inputs])
    dynamic_individual_gradients = nest.flatten([
        gradients_impl.gradients(y, [concat_inputs])
        for y in [
            split_outputs_dynamic[0], split_outputs_dynamic[-1],
            state_dynamic
        ]
    ])
    trainable_variables = ops_lib.get_collection(
        ops_lib.GraphKeys.TRAINABLE_VARIABLES)
    assert len(trainable_variables) > 1, (
        "Count of trainable variables: %d" % len(trainable_variables))
    dynamic_individual_variable_gradients = nest.flatten([
        gradients_impl.gradients(y, trainable_variables)
        for y in [
            split_outputs_dynamic[0], split_outputs_dynamic[-1],
            state_dynamic
        ]
    ])
    values_dynamic = sess.run(split_outputs_dynamic, feed_dict=feeds)
    (state_value_dynamic,) = sess.run((state_dynamic,), feed_dict=feeds)
    dynamic_grad_values = sess.run(dynamic_gradients, feed_dict=feeds)
    dynamic_individual_grad_values = sess.run(
        dynamic_individual_gradients, feed_dict=feeds)
    dynamic_individual_var_grad_values = sess.run(
        dynamic_individual_variable_gradients, feed_dict=feeds)
  # Static and dynamic paths must match exactly: values, state, gradients.
  self.assertEqual(len(values_static), len(values_dynamic))
  for (value_static, value_dynamic) in zip(values_static, values_dynamic):
    self.assertAllEqual(value_static, value_dynamic)
  self.assertAllEqual(state_value_static, state_value_dynamic)
  if in_graph_mode:
    self.assertAllEqual(static_grad_values, dynamic_grad_values)
    self.assertEqual(
        len(static_individual_grad_values),
        len(dynamic_individual_grad_values))
    self.assertEqual(
        len(static_individual_var_grad_values),
        len(dynamic_individual_var_grad_values))
    for i, (a, b) in enumerate(
        zip(static_individual_grad_values, dynamic_individual_grad_values)):
      tf_logging.info("Comparing individual gradients iteration %d" % i)
      self.assertAllEqual(a, b)
    for i, (a, b) in enumerate(
        zip(static_individual_var_grad_values,
            dynamic_individual_var_grad_values)):
      tf_logging.info(
          "Comparing individual variable gradients iteration %d" % i)
      self.assertAllEqual(a, b)
@test_util.run_in_graph_and_eager_modes
def testDynamicEquivalentToStaticRNN(self):
  """Run the static/dynamic equivalence check with and without lengths."""
  for with_lengths in (True, False):
    self._testDynamicEquivalentToStaticRNN(use_sequence_length=with_lengths)
class BidirectionalRNNTest(test.TestCase):
  """Tests for static and dynamic bidirectional RNN wrappers."""

  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def _createBidirectionalRNN(self, use_shape, use_sequence_length, scope=None):
    """Build a static bidirectional LSTM graph.

    Args:
      use_shape: if True, placeholders carry a concrete batch dimension.
      use_sequence_length: if True, create an int64 sequence-length feed.
      scope: optional scope (VariableScope or name) for the RNN.

    Returns:
      (input_value, inputs, stacked outputs, state_fw, state_bw,
       sequence_length placeholder or None).
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = array_ops.placeholder(
        dtypes.int64) if use_sequence_length else None
    cell_fw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    cell_bw = rnn_cell.LSTMCell(
        num_units, input_size, initializer=initializer, state_is_tuple=False)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(
        cell_fw,
        cell_bw,
        inputs,
        dtype=dtypes.float32,
        sequence_length=sequence_length,
        scope=scope)
    self.assertEqual(len(outputs), len(inputs))
    # Each step output concatenates forward and backward: 2 * num_units wide.
    for out in outputs:
      self.assertEqual(out.get_shape().as_list(),
                       [batch_size if use_shape else None, 2 * num_units])
    input_value = np.random.randn(batch_size, input_size)
    outputs = array_ops.stack(outputs)
    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalRNN(self, use_shape):
    """Forward outputs must mirror backward outputs up to sequence length."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalRNN(use_shape, True))
      variables_lib.global_variables_initializer().run()
      # out is indexed [time][batch][depth]; lengths fed are 2 and 3.
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw],
          feed_dict={
              inputs[0]: input_value,
              sequence_length: [2, 3]
          })
      # Batch entry 0 (length 2): forward step t equals backward step 1-t
      # in the backward half (depth indices 3..5) of the output.
      self.assertEqual(out[0][0][0], out[1][0][3])
      self.assertEqual(out[0][0][1], out[1][0][4])
      self.assertEqual(out[0][0][2], out[1][0][5])
      self.assertEqual(out[1][0][0], out[0][0][3])
      self.assertEqual(out[1][0][1], out[0][0][4])
      self.assertEqual(out[1][0][2], out[0][0][5])
      # Batch entry 1 (length 3): mirror across steps 0..2.
      self.assertEqual(out[0][1][0], out[2][1][3])
      self.assertEqual(out[0][1][1], out[2][1][4])
      self.assertEqual(out[0][1][2], out[2][1][5])
      self.assertEqual(out[1][1][0], out[1][1][3])
      self.assertEqual(out[1][1][1], out[1][1][4])
      self.assertEqual(out[1][1][2], out[1][1][5])
      self.assertEqual(out[2][1][0], out[0][1][3])
      self.assertEqual(out[2][1][1], out[0][1][4])
      self.assertEqual(out[2][1][2], out[0][1][5])
      # Both directions see the same weights/input, so states coincide.
      self.assertAllClose(s_fw, s_bw)

  def _testBidirectionalRNNWithoutSequenceLength(self, use_shape):
    """Without lengths, all 8 steps mirror across the full sequence."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, _ = (
          self._createBidirectionalRNN(use_shape, False))
      variables_lib.global_variables_initializer().run()
      out, s_fw, s_bw = sess.run(
          [outputs, state_fw, state_bw], feed_dict={
              inputs[0]: input_value
          })
      for i in xrange(8):
        self.assertEqual(out[i][0][0], out[8 - 1 - i][0][3])
        self.assertEqual(out[i][0][1], out[8 - 1 - i][0][4])
        self.assertEqual(out[i][0][2], out[8 - 1 - i][0][5])
      for i in xrange(8):
        self.assertEqual(out[i][1][0], out[8 - 1 - i][1][3])
        self.assertEqual(out[i][1][1], out[8 - 1 - i][1][4])
        self.assertEqual(out[i][1][2], out[8 - 1 - i][1][5])
      self.assertAllClose(s_fw, s_bw)

  def testBidirectionalRNN(self):
    self._testBidirectionalRNN(use_shape=False)
    self._testBidirectionalRNN(use_shape=True)

  def testBidirectionalRNNWithoutSequenceLength(self):
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=False)
    self._testBidirectionalRNNWithoutSequenceLength(use_shape=True)

  def _createBidirectionalDynamicRNN(self,
                                     use_shape,
                                     use_state_tuple,
                                     use_time_major,
                                     use_sequence_length,
                                     scope=None):
    """Build a dynamic bidirectional LSTM graph.

    Mirrors _createBidirectionalRNN but through bidirectional_dynamic_rnn,
    with extra knobs for tuple states and time- vs batch-major layout.
    """
    num_units = 3
    input_size = 5
    batch_size = 2
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = (
        array_ops.placeholder(dtypes.int64) if use_sequence_length else None)
    cell_fw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    cell_bw = rnn_cell.LSTMCell(
        num_units, initializer=initializer, state_is_tuple=use_state_tuple)
    inputs = max_length * [
        array_ops.placeholder(
            dtypes.float32,
            shape=(batch_size if use_shape else None, input_size))
    ]
    inputs_c = array_ops.stack(inputs)
    if not use_time_major:
      # Convert from time-major [T, B, D] to batch-major [B, T, D].
      inputs_c = array_ops.transpose(inputs_c, [1, 0, 2])
    outputs, states = rnn.bidirectional_dynamic_rnn(
        cell_fw,
        cell_bw,
        inputs_c,
        sequence_length,
        dtype=dtypes.float32,
        time_major=use_time_major,
        scope=scope)
    outputs = array_ops.concat(outputs, 2)
    state_fw, state_bw = states
    outputs_shape = [None, max_length, 2 * num_units]
    if use_shape:
      outputs_shape[0] = batch_size
    if use_time_major:
      # Time-major swaps the leading two dimensions of the expected shape.
      outputs_shape[0], outputs_shape[1] = outputs_shape[1], outputs_shape[0]
    self.assertEqual(outputs.get_shape().as_list(), outputs_shape)
    input_value = np.random.randn(batch_size, input_size)
    return input_value, inputs, outputs, state_fw, state_bw, sequence_length

  def _testBidirectionalDynamicRNN(self, use_shape, use_state_tuple,
                                   use_time_major, use_sequence_length):
    """Mirror-symmetry checks for bidirectional_dynamic_rnn."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      input_value, inputs, outputs, state_fw, state_bw, sequence_length = (
          self._createBidirectionalDynamicRNN(
              use_shape, use_state_tuple, use_time_major, use_sequence_length))
      variables_lib.global_variables_initializer().run()
      feed_dict = ({sequence_length: [2, 3]} if use_sequence_length else {})
      feed_dict.update({inputs[0]: input_value})
      if use_state_tuple:
        # Tuple states: fetch c and h separately, then regroup.
        out, c_fw, m_fw, c_bw, m_bw = sess.run(
            [outputs, state_fw[0], state_fw[1], state_bw[0], state_bw[1]],
            feed_dict=feed_dict)
        s_fw = (c_fw, m_fw)
        s_bw = (c_bw, m_bw)
      else:
        feed_dict.update({inputs[0]: input_value})
        out, s_fw, s_bw = sess.run(
            [outputs, state_fw, state_bw], feed_dict=feed_dict)
      if not use_time_major:
        # Normalize to time-major so the index checks below apply.
        out = np.swapaxes(out, 0, 1)
      if use_sequence_length:
        # Batch entry 0 has length 2, entry 1 has length 3 (see feed above).
        self.assertEqual(out[0][0][0], out[1][0][3])
        self.assertEqual(out[0][0][1], out[1][0][4])
        self.assertEqual(out[0][0][2], out[1][0][5])
        self.assertEqual(out[1][0][0], out[0][0][3])
        self.assertEqual(out[1][0][1], out[0][0][4])
        self.assertEqual(out[1][0][2], out[0][0][5])
        self.assertEqual(out[0][1][0], out[2][1][3])
        self.assertEqual(out[0][1][1], out[2][1][4])
        self.assertEqual(out[0][1][2], out[2][1][5])
        self.assertEqual(out[1][1][0], out[1][1][3])
        self.assertEqual(out[1][1][1], out[1][1][4])
        self.assertEqual(out[1][1][2], out[1][1][5])
        self.assertEqual(out[2][1][0], out[0][1][3])
        self.assertEqual(out[2][1][1], out[0][1][4])
        self.assertEqual(out[2][1][2], out[0][1][5])
        self.assertAllClose(s_fw, s_bw)
      else:
        max_length = 8  # Unrolled steps when no sequence length is fed.
        for t in range(max_length):
          self.assertAllEqual(out[t, :, 0:3], out[max_length - t - 1, :, 3:6])
        self.assertAllClose(s_fw, s_bw)

  def testBidirectionalDynamicRNN(self):
    # Exercise every combination of the four boolean options.
    options = itertools.product([True, False], repeat=4)
    for option in options:
      self._testBidirectionalDynamicRNN(
          use_shape=option[0],
          use_state_tuple=option[1],
          use_time_major=option[2],
          use_sequence_length=option[3])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """All variables created by `factory` must live under the given prefix."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      # None means the RNN falls back to its default scope name.
      prefix = prefix or "bidirectional_rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("BiRNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testBidirectionalRNNScope(self):

    def factory(scope):
      return self._createBidirectionalRNN(
          use_shape=True, use_sequence_length=True, scope=scope)

    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)

  def testBidirectionalDynamicRNNScope(self):

    def get_factory(use_time_major):

      def factory(scope):
        return self._createBidirectionalDynamicRNN(
            use_shape=True,
            use_state_tuple=True,
            use_sequence_length=True,
            use_time_major=use_time_major,
            scope=scope)

      return factory

    self._testScope(get_factory(True), use_outer_scope=True)
    self._testScope(get_factory(True), use_outer_scope=False)
    self._testScope(get_factory(True), prefix=None, use_outer_scope=False)
    self._testScope(get_factory(False), use_outer_scope=True)
    self._testScope(get_factory(False), use_outer_scope=False)
    self._testScope(get_factory(False), prefix=None, use_outer_scope=False)
class MultiDimensionalLSTMTest(test.TestCase):
  """Checks RNN containers with a cell whose I/O is multi-dimensional."""

  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def testMultiDimensionalLSTMAllRNNContainers(self):
    """static, dynamic, bidirectional and state-saving RNNs must agree."""
    feature_dims = (3, 4, 5)
    input_size = feature_dims
    batch_size = 2
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      inputs = max_length * [
          array_ops.placeholder(dtypes.float32, shape=(None,) + input_size)
      ]
      # A second input set with a fully-known batch dimension, required by
      # the containers below that need static shapes.
      inputs_using_dim = max_length * [
          array_ops.placeholder(
              dtypes.float32, shape=(batch_size,) + input_size)
      ]
      inputs_c = array_ops.stack(inputs)
      cell = DummyMultiDimensionalLSTM(feature_dims)
      state_saver = TestStateSaver(batch_size, input_size)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))
      # Static-shape checks: dynamic matches the stacked input; static
      # matches per-step inputs; bidirectional doubles axis 1.
      self.assertEqual(outputs_dynamic.get_shape().as_list(),
                       inputs_c.get_shape().as_list())
      for out, inp in zip(outputs_static, inputs):
        self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
      for out, inp in zip(outputs_bid, inputs_using_dim):
        input_shape_list = inp.get_shape().as_list()
        input_shape_list[1] *= 2
        self.assertEqual(out.get_shape().as_list(), input_shape_list)
      variables_lib.global_variables_initializer().run()
      input_total_size = (batch_size,) + input_size
      input_value = np.random.randn(*input_total_size)
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              inputs[0]: input_value
          })
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              inputs[0]: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              inputs_using_dim[0]: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })
      self.assertAllEqual(outputs_static_v, outputs_dynamic_v)
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      # Bidirectional output should be the static output concatenated with
      # itself along the feature axis.
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=2)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
      state_static_v = sess.run(
          state_static, feed_dict={
              inputs[0]: input_value
          })
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              inputs[0]: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              inputs_using_dim[0]: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              inputs_using_dim[0]: input_value
          })
      # Final states from every container must coincide.
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class NestedLSTMTest(test.TestCase):
  """Checks RNN containers with a cell whose inputs/outputs are tuples."""

  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def testNestedIOLSTMAllRNNContainers(self):
    """All RNN containers must handle nested (tuple) inputs and outputs."""
    input_size = 5
    batch_size = 2
    state_size = 6
    max_length = 8
    sequence_length = [4, 6]
    with self.test_session(graph=ops_lib.Graph()) as sess:
      state_saver = TestStateSaver(batch_size, state_size)
      # Each time step's input is a 2-tuple of placeholders.
      single_input = (array_ops.placeholder(
          dtypes.float32, shape=(None, input_size)),
                      array_ops.placeholder(
                          dtypes.float32, shape=(None, input_size)))
      inputs = max_length * [single_input]
      inputs_c = (array_ops.stack([input_[0] for input_ in inputs]),
                  array_ops.stack([input_[1] for input_ in inputs]))
      # Same structure, but with a fully-known batch dimension.
      single_input_using_dim = (array_ops.placeholder(
          dtypes.float32, shape=(batch_size, input_size)),
                                array_ops.placeholder(
                                    dtypes.float32,
                                    shape=(batch_size, input_size)))
      inputs_using_dim = max_length * [single_input_using_dim]
      cell = NestedRNNCell()
      outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
          cell,
          inputs_c,
          dtype=dtypes.float32,
          time_major=True,
          sequence_length=sequence_length)
      outputs_static, state_static = rnn.static_rnn(
          cell, inputs, dtype=dtypes.float32, sequence_length=sequence_length)
      outputs_bid, state_fw, state_bw = rnn.static_bidirectional_rnn(
          cell,
          cell,
          inputs_using_dim,
          dtype=dtypes.float32,
          sequence_length=sequence_length)
      outputs_sav, state_sav = rnn.static_state_saving_rnn(
          cell,
          inputs_using_dim,
          sequence_length=sequence_length,
          state_saver=state_saver,
          state_name=("h", "c"))

      def _assert_same_shape(input1, input2, double=False):
        # Compare flattened nested structures shape-by-shape; `double`
        # accounts for the bidirectional concatenation on axis 1.
        flat_input1 = nest.flatten(input1)
        flat_input2 = nest.flatten(input2)
        for inp1, inp2 in zip(flat_input1, flat_input2):
          input_shape = inp1.get_shape().as_list()
          if double:
            input_shape[1] *= 2
          self.assertEqual(input_shape, inp2.get_shape().as_list())

      _assert_same_shape(inputs_c, outputs_dynamic)
      _assert_same_shape(inputs, outputs_static)
      _assert_same_shape(inputs_using_dim, outputs_sav)
      _assert_same_shape(inputs_using_dim, outputs_bid, double=True)
      variables_lib.global_variables_initializer().run()
      input_total_size = (batch_size, input_size)
      input_value = (np.random.randn(*input_total_size),
                     np.random.randn(*input_total_size))
      outputs_dynamic_v = sess.run(
          outputs_dynamic, feed_dict={
              single_input: input_value
          })
      outputs_static_v = sess.run(
          outputs_static, feed_dict={
              single_input: input_value
          })
      outputs_sav_v = sess.run(
          outputs_sav, feed_dict={
              single_input_using_dim: input_value
          })
      outputs_bid_v = sess.run(
          outputs_bid, feed_dict={
              single_input_using_dim: input_value
          })
      # Dynamic output is time-major with the tuple axis first; transpose
      # it to line up with the static per-step list.
      self.assertAllEqual(outputs_static_v,
                          np.transpose(outputs_dynamic_v, (1, 0, 2, 3)))
      self.assertAllEqual(outputs_static_v, outputs_sav_v)
      outputs_static_array = np.array(outputs_static_v)
      outputs_static_array_double = np.concatenate(
          (outputs_static_array, outputs_static_array), axis=3)
      outputs_bid_array = np.array(outputs_bid_v)
      self.assertAllEqual(outputs_static_array_double, outputs_bid_array)
      state_dynamic_v = sess.run(
          state_dynamic, feed_dict={
              single_input: input_value
          })
      state_static_v = sess.run(
          state_static, feed_dict={
              single_input: input_value
          })
      state_bid_fw_v = sess.run(
          state_fw, feed_dict={
              single_input_using_dim: input_value
          })
      state_bid_bw_v = sess.run(
          state_bw, feed_dict={
              single_input_using_dim: input_value
          })
      state_sav_v = sess.run(
          state_sav, feed_dict={
              single_input_using_dim: input_value
          })
      # Final states from every container must coincide.
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_dynamic_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_sav_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_fw_v))
      self.assertAllEqual(np.hstack(state_static_v), np.hstack(state_bid_bw_v))
class StateSaverRNNTest(test.TestCase):
  """Tests for static_state_saving_rnn scoping and save-state behavior."""

  def setUp(self):
    # Fixed seed so randomly generated inputs are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def _factory(self, scope, state_saver):
    """Build a state-saving RNN; returns (outputs, state, state_saver)."""
    # The saver stores the concatenated (c, h) state, hence the // 2.
    num_units = state_saver.state_size // 2
    batch_size = state_saver.batch_size
    input_size = 5
    max_length = 8
    initializer = init_ops.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    cell = rnn_cell.LSTMCell(
        num_units,
        use_peepholes=False,
        initializer=initializer,
        state_is_tuple=False)
    inputs = max_length * [
        array_ops.zeros(dtype=dtypes.float32, shape=(batch_size, input_size))
    ]
    out, state = rnn.static_state_saving_rnn(
        cell,
        inputs,
        state_saver=state_saver,
        state_name="save_lstm",
        scope=scope)
    return out, state, state_saver

  def _testScope(self, prefix="prefix", use_outer_scope=True):
    """All variables created by the RNN must live under the given prefix."""
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaver(batch_size, 2 * num_units)
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          self._factory(scope=scope, state_saver=state_saver)
      else:
        self._factory(scope=prefix, state_saver=state_saver)
      variables_lib.global_variables_initializer()
      all_vars = variables_lib.global_variables()
      # None means the RNN falls back to its default scope name.
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testStateSaverRNNScope(self):
    self._testScope(use_outer_scope=True)
    self._testScope(use_outer_scope=False)
    self._testScope(prefix=None, use_outer_scope=False)

  def testStateSaverCallsSaveState(self):
    """Every state() fetch must be paired with exactly one save_state()."""
    num_units = 3
    batch_size = 2
    state_saver = TestStateSaverWithCounters(batch_size, 2 * num_units)
    out, state, state_saver = self._factory(scope=None, state_saver=state_saver)
    with self.test_session() as sess:
      sess.run(variables_lib.global_variables_initializer())
      sess.run(variables_lib.local_variables_initializer())
      # Fetching outputs and/or state should trigger matched counters,
      # regardless of which combination of tensors is fetched.
      _, _, num_state_calls, num_save_state_calls = sess.run([
          out,
          state,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)
      _, num_state_calls, num_save_state_calls = sess.run([
          out,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)
      _, num_state_calls, num_save_state_calls = sess.run([
          state,
          state_saver.num_state_calls,
          state_saver.num_save_state_calls])
      self.assertEqual(num_state_calls, num_save_state_calls)
class GRUTest(test.TestCase):
  """Smoke and scoping tests for dynamic_rnn with a GRUCell."""

  def setUp(self):
    # Seed NumPy so random inputs and sequence lengths are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def testDynamic(self):
    """Builds and runs a time-major dynamic GRU; only checks it executes."""
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2

    input_values = np.random.randn(time_steps, batch_size, input_size)
    # Per-example lengths in [0, time_steps) exercise the padding logic.
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    with self.test_session(use_gpu=True, graph=ops_lib.Graph()) as sess:
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))

      cell = rnn_cell.GRUCell(num_units=num_units)

      with variable_scope.variable_scope("dynamic_scope"):
        outputs_dynamic, state_dynamic = rnn.dynamic_rnn(
            cell,
            inputs=concat_inputs,
            sequence_length=sequence_length,
            time_major=True,
            dtype=dtypes.float32)

      feeds = {concat_inputs: input_values}

      variables_lib.global_variables_initializer().run(feed_dict=feeds)

      sess.run([outputs_dynamic, state_dynamic], feed_dict=feeds)

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Checks all variables created by `factory` land under the scope."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      variables_lib.global_variables_initializer()

      # Every global variable must live under `prefix/` ("rnn/" by default).
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testDynamicScope(self):
    time_steps = 8
    num_units = 3
    input_size = 5
    batch_size = 2
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    def factory(scope):
      concat_inputs = array_ops.placeholder(
          dtypes.float32, shape=(time_steps, batch_size, input_size))
      cell = rnn_cell.GRUCell(num_units=num_units)
      return rnn.dynamic_rnn(
          cell,
          inputs=concat_inputs,
          sequence_length=sequence_length,
          time_major=True,
          dtype=dtypes.float32,
          scope=scope)

    # Cover scope-object, string-prefix and default ("rnn") scope naming.
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class RawRNNTest(test.TestCase):
  """Tests for rnn.raw_rnn: equivalence with dynamic_rnn, loop state, and
  custom emit structures."""

  def setUp(self):
    # Seed NumPy so random inputs and sequence lengths are reproducible.
    self._seed = 23489
    np.random.seed(self._seed)

  def _testRawRNN(self, max_time):
    """Checks raw_rnn matches dynamic_rnn in outputs, state and gradients.

    Args:
      max_time: number of time steps; 0 exercises the empty-sequence path.
    """
    with self.test_session(graph=ops_lib.Graph()) as sess:
      batch_size = 16
      input_depth = 4
      num_units = 3

      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        # First call (cell_output is None) supplies the initial state; later
        # calls thread the cell state straight through.
        emit_output = cell_output
        if cell_output is None:
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        # Feed zeros once all sequences have finished; reading past the end
        # of the input TensorArray would fail.
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      reuse_scope = variable_scope.get_variable_scope()

      outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
      outputs = outputs_ta.stack()

      # Build dynamic_rnn over the *same* (reused) variables so both graphs
      # compute with identical weights.
      reuse_scope.reuse_variables()
      outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
          cell,
          inputs,
          time_major=True,
          dtype=dtypes.float32,
          sequence_length=sequence_length,
          scope=reuse_scope)

      variables = variables_lib.trainable_variables()
      gradients = gradients_impl.gradients([outputs, final_state],
                                           [inputs] + variables)
      gradients_dynamic_rnn = gradients_impl.gradients(
          [outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)

      variables_lib.global_variables_initializer().run()

      rand_input = np.random.randn(max_time, batch_size, input_depth)
      if max_time == 0:
        rand_seq_len = np.zeros(batch_size)
      else:
        rand_seq_len = np.random.randint(max_time, size=batch_size)
      # Ensure at least one sequence spans the full max_time (no-op when
      # max_time == 0, since rand_seq_len is already all zeros).
      rand_seq_len[0] = max_time

      (outputs_val, outputs_dynamic_rnn_val, final_state_val,
       final_state_dynamic_rnn_val) = sess.run(
           [outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
           feed_dict={
               inputs: rand_input,
               sequence_length: rand_seq_len
           })

      self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
      self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)

      # Gradient comparison is skipped for max_time == 0 — presumably the
      # zero-step graph has no usable gradients to compare; confirm if this
      # guard is ever revisited.
      if max_time > 0:
        self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
        gradients_val = sess.run(
            gradients,
            feed_dict={
                inputs: rand_input,
                sequence_length: rand_seq_len
            })
        gradients_dynamic_rnn_val = sess.run(
            gradients_dynamic_rnn,
            feed_dict={
                inputs: rand_input,
                sequence_length: rand_seq_len
            })
        self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
        # gradients[0] is d/d(inputs); the rest are variable gradients.
        input_gradients_val = gradients_val[0]
        input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
        self.assertAllClose(input_gradients_val,
                            input_gradients_dynamic_rnn_val)
        for i in range(1, len(gradients_val)):
          self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])

  def testRawRNNZeroLength(self):
    self._testRawRNN(max_time=0)

  def testRawRNN(self):
    self._testRawRNN(max_time=10)

  def testLoopState(self):
    """loop_state threaded through loop_fn counts iterations: ends at [10]."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3

      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          # Initial call: start the per-loop counter at 0.
          loop_state = constant_op.constant([0])
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Subsequent calls: increment the counter.
          loop_state = array_ops.stack([array_ops.squeeze(loop_state) + 1])
          next_state = cell_state
        emit_output = cell_output
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)

      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      self.assertEqual([10], loop_state.eval())

  def testLoopStateWithTensorArray(self):
    """loop_state may itself be a TensorArray; checks its cumulative sums."""
    with self.test_session(graph=ops_lib.Graph()):
      max_time = 4
      batch_size = 16
      input_depth = 4
      num_units = 3

      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, loop_state):
        if cell_output is None:
          # clear_after_read=False because entry t-1 is re-read at step t.
          loop_state = tensor_array_ops.TensorArray(
              dynamic_size=True,
              size=0,
              dtype=dtypes.int32,
              clear_after_read=False)
          loop_state = loop_state.write(0, 1)
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          # Each entry is previous entry + current time: 1, 2, 4, 7, 11...
          loop_state = loop_state.write(time_,
                                        loop_state.read(time_ - 1) + time_)
          next_state = cell_state
        emit_output = cell_output
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output,
                loop_state)

      r = rnn.raw_rnn(cell, loop_fn)
      loop_state = r[-1]
      loop_state = loop_state.stack()
      self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state.eval())

  def testEmitDifferentStructureThanCellOutput(self):
    """emit_output may be an arbitrary structure, not just the cell output."""
    with self.test_session(graph=ops_lib.Graph()) as sess:
      max_time = 10
      batch_size = 16
      input_depth = 4
      num_units = 3

      inputs = np.random.randn(max_time, batch_size, input_depth)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)
      # unknown_dim exercises emits whose static shape is not fully known.
      unknown_dim = array_ops.placeholder(dtype=dtypes.int32)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, _):
        if cell_output is None:
          # Initial call defines the per-step emit structure (shapes here
          # exclude the batch dimension).
          emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
                         array_ops.zeros([unknown_dim], dtype=dtypes.int64))
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
                         array_ops.ones(
                             [batch_size, unknown_dim], dtype=dtypes.int64))
          next_state = cell_state
        elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
        finished = math_ops.reduce_all(elements_finished)
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      r = rnn.raw_rnn(cell, loop_fn)
      output_ta = r[0]
      self.assertEqual(2, len(output_ta))
      self.assertEqual([dtypes.int32, dtypes.int64],
                       [ta.dtype for ta in output_ta])
      output = [ta.stack() for ta in output_ta]
      output_vals = sess.run(output, feed_dict={unknown_dim: 1})
      self.assertAllEqual(
          np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
      self.assertAllEqual(
          np.ones((max_time, batch_size, 1), np.int64), output_vals[1])

  def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
    """Checks all variables created by `factory` land under the scope."""
    with self.test_session(use_gpu=True, graph=ops_lib.Graph()):
      if use_outer_scope:
        with variable_scope.variable_scope(prefix) as scope:
          factory(scope)
      else:
        factory(prefix)

      variables_lib.global_variables_initializer()

      # Every global variable must live under `prefix/` ("rnn/" by default).
      all_vars = variables_lib.global_variables()
      prefix = prefix or "rnn"
      scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
      tf_logging.info("RNN with scope: %s (%s)" %
                      (prefix, "scope" if use_outer_scope else "str"))
      for v in scope_vars:
        tf_logging.info(v.name)
      self.assertEqual(len(scope_vars), len(all_vars))

  def testRawRNNScope(self):
    max_time = 10
    batch_size = 16
    input_depth = 4
    num_units = 3

    def factory(scope):
      inputs = array_ops.placeholder(
          shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
      sequence_length = array_ops.placeholder(
          shape=(batch_size,), dtype=dtypes.int32)
      inputs_ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
      inputs_ta = inputs_ta.unstack(inputs)

      cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)

      def loop_fn(time_, cell_output, cell_state, unused_loop_state):
        emit_output = cell_output
        if cell_output is None:
          next_state = cell.zero_state(batch_size, dtypes.float32)
        else:
          next_state = cell_state
        elements_finished = (time_ >= sequence_length)
        finished = math_ops.reduce_all(elements_finished)
        next_input = control_flow_ops.cond(
            finished,
            lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
            lambda: inputs_ta.read(time_))
        return (elements_finished, next_input, next_state, emit_output, None)

      return rnn.raw_rnn(cell, loop_fn, scope=scope)

    # Cover scope-object, string-prefix and default ("rnn") scope naming.
    self._testScope(factory, use_outer_scope=True)
    self._testScope(factory, use_outer_scope=False)
    self._testScope(factory, prefix=None, use_outer_scope=False)
class DeviceWrapperCell(rnn_cell.RNNCell):
  """An RNN cell wrapper that pins the wrapped cell's computation to a
  device (or leaves placement untouched when no device is given)."""

  def __init__(self, cell, device):
    self._wrapped_cell = cell
    self._target_device = device

  @property
  def output_size(self):
    # Delegate straight to the wrapped cell.
    return self._wrapped_cell.output_size

  @property
  def state_size(self):
    # Delegate straight to the wrapped cell.
    return self._wrapped_cell.state_size

  def __call__(self, input_, state, scope=None):
    """Runs the wrapped cell, under a device scope when one was given."""
    if self._target_device is None:
      return self._wrapped_cell(input_, state, scope=scope)
    with ops_lib.device(self._target_device):
      return self._wrapped_cell(input_, state, scope=scope)
class TensorArrayOnCorrectDeviceTest(test.TestCase):
  """Checks which device dynamic_rnn's internal TensorArray ops land on."""

  def _execute_rnn_on(self,
                      rnn_device=None,
                      cell_device=None,
                      input_device=None):
    """Runs a dynamic LSTM with devices pinned as requested.

    Args:
      rnn_device: device for the dynamic_rnn call itself, or None.
      cell_device: device the wrapped cell computes on, or None.
      input_device: device the input constant is placed on, or None.

    Returns:
      The RunMetadata collected from a full-trace session run.
    """
    batch_size = 3
    time_steps = 7
    input_size = 5
    num_units = 10

    cell = rnn_cell.LSTMCell(num_units, use_peepholes=True)
    gpu_cell = DeviceWrapperCell(cell, cell_device)
    inputs = np.random.randn(batch_size, time_steps, input_size).astype(
        np.float32)
    sequence_length = np.random.randint(0, time_steps, size=batch_size)

    if input_device is not None:
      with ops_lib.device(input_device):
        inputs = constant_op.constant(inputs)

    if rnn_device is not None:
      with ops_lib.device(rnn_device):
        outputs, _ = rnn.dynamic_rnn(
            gpu_cell,
            inputs,
            sequence_length=sequence_length,
            dtype=dtypes.float32)
    else:
      outputs, _ = rnn.dynamic_rnn(
          gpu_cell,
          inputs,
          sequence_length=sequence_length,
          dtype=dtypes.float32)

    with self.test_session(use_gpu=True) as sess:
      # Full tracing so per-device node stats can be inspected afterwards.
      opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
      run_metadata = config_pb2.RunMetadata()
      variables_lib.global_variables_initializer().run()
      sess.run(outputs, options=opts, run_metadata=run_metadata)

    return run_metadata

  def _retrieve_cpu_gpu_stats(self, run_metadata):
    """Splits step stats into (cpu_stats, gpu_stats) node-stat lists.

    Matches on the last five characters of the device name, e.g.
    ".../cpu:0" or ".../gpu:0"; either element may be None if the device
    does not appear in the trace.
    """
    cpu_stats = None
    gpu_stats = None
    step_stats = run_metadata.step_stats
    for ds in step_stats.dev_stats:
      if "cpu:0" in ds.device[-5:].lower():
        cpu_stats = ds.node_stats
      if "gpu:0" == ds.device[-5:].lower():
        gpu_stats = ds.node_stats
    return cpu_stats, gpu_stats

  def testRNNOnCPUCellOnGPU(self):
    # Requires a GPU; silently skip otherwise.
    if not test.is_gpu_available():
      return

    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)

    def _assert_in(op_str, in_stats, out_stats):
      # The op must appear on the first device and never on the second.
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # Expected placement: write/gather with the cell on GPU; read/scatter
    # with the RNN loop on CPU.
    _assert_in("TensorArrayWrite", gpu_stats, cpu_stats)
    _assert_in("TensorArrayGather", gpu_stats, cpu_stats)
    _assert_in("TensorArrayRead", cpu_stats, gpu_stats)
    _assert_in("TensorArrayScatter", cpu_stats, gpu_stats)

  def testRNNOnCPUCellOnCPU(self):
    # Requires a GPU; silently skip otherwise.
    if not test.is_gpu_available():
      return

    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(
        rnn_device="/cpu:0", cell_device="/cpu:0", input_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # With RNN and cell both on CPU, no TensorArray op may land on GPU.
    _assert_in("TensorArray", cpu_stats, gpu_stats)

  def testInputOnGPUCellNotDeclared(self):
    # Requires a GPU; silently skip otherwise.
    if not test.is_gpu_available():
      return

    gpu_dev = test.gpu_device_name()
    run_metadata = self._execute_rnn_on(input_device=gpu_dev)
    cpu_stats, gpu_stats = self._retrieve_cpu_gpu_stats(run_metadata)

    def _assert_in(op_str, in_stats, out_stats):
      self.assertTrue(any(op_str in s.node_name for s in in_stats))
      self.assertFalse(any(op_str in s.node_name for s in out_stats))

    # With no explicit placement, the TensorArray follows the GPU input.
    _assert_in("TensorArray", gpu_stats, cpu_stats)
if __name__ == "__main__":
  # Standard TensorFlow test entry point: runs every test case above.
  test.main()
| true
| true
|
f7088d12531d2cc76918d77327731449d698a09b
| 2,304
|
py
|
Python
|
tests/test_run.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
tests/test_run.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
tests/test_run.py
|
hfchong/dvc
|
2e3ce3b3dbb02f6524b0383e3f599c4561413634
|
[
"Apache-2.0"
] | null | null | null |
import os
import filecmp
from dvc.main import main
from dvc.utils import file_md5
from dvc.stage import Stage
from dvc.command.run import CmdRun
from tests.basic_env import TestDvc
class TestRun(TestDvc):
    """End-to-end check of Project.run(): executes a command and verifies the
    resulting stage file, its dependencies and its outputs."""

    def test(self):
        cmd = 'python {} {} {}'.format(self.CODE, self.FOO, 'out')
        deps = [self.FOO, self.CODE]
        outs = [os.path.join(self.dvc.root_dir, 'out')]
        outs_no_cache = []
        fname = os.path.join(self.dvc.root_dir, 'out.dvc')
        cwd = os.curdir

        self.dvc.add(self.FOO)
        stage = self.dvc.run(cmd=cmd,
                             deps=deps,
                             outs=outs,
                             outs_no_cache=outs_no_cache,
                             fname=fname,
                             cwd=cwd)

        # The command copies FOO to 'out', so the files must be identical.
        self.assertTrue(filecmp.cmp(self.FOO, 'out'))
        self.assertTrue(os.path.isfile(stage.path))
        self.assertEqual(stage.cmd, cmd)
        self.assertEqual(len(stage.deps), len(deps))
        self.assertEqual(len(stage.outs), len(outs + outs_no_cache))
        self.assertEqual(stage.outs[0].path, outs[0])
        self.assertEqual(stage.outs[0].md5, file_md5(self.FOO)[0])
        # BUG FIX: the original `assertTrue(stage.path, fname)` treated
        # `fname` as the failure *message*, so it passed for any truthy
        # path and never compared anything. assertEqual actually checks
        # that the stage was written to the requested filename.
        self.assertEqual(stage.path, fname)
class TestRunEmpty(TestDvc):
    """A run with no command, deps or outputs should still create a stage."""

    def test(self):
        # Collect the arguments first so the degenerate configuration is
        # easy to scan; everything is empty except the stage filename.
        run_kwargs = {
            'cmd': '',
            'deps': [],
            'outs': [],
            'outs_no_cache': [],
            'fname': 'empty.dvc',
            'cwd': os.curdir,
        }
        self.dvc.run(**run_kwargs)
class TestRunNoExec(TestDvc):
    """With no_exec=True the command is recorded but must not be executed."""

    def test(self):
        copy_cmd = 'python {} {} {}'.format(self.CODE, self.FOO, 'out')
        self.dvc.run(cmd=copy_cmd, no_exec=True)
        # Since the command never ran, its output file must not exist.
        self.assertFalse(os.path.exists('out'))
class TestCmdRun(TestDvc):
    """Exercises `dvc run` through the command-line entry point."""

    def test_run(self):
        argv = [
            'run',
            '-d', self.FOO,
            '-d', self.CODE,
            '-o', 'out',
            '-f', 'out.dvc',
            'python', self.CODE, self.FOO, 'out',
        ]
        ret = main(argv)

        self.assertEqual(ret, 0)
        # Both the produced output and the stage file must exist on disk.
        for produced in ('out', 'out.dvc'):
            self.assertTrue(os.path.isfile(produced))
        self.assertTrue(filecmp.cmp(self.FOO, 'out'))

    def test_run_bad_command(self):
        # An unknown executable must surface as a non-zero exit status.
        ret = main(['run', 'non-existing-command'])
        self.assertNotEqual(ret, 0)
| 31.561644
| 78
| 0.523438
|
import os
import filecmp
from dvc.main import main
from dvc.utils import file_md5
from dvc.stage import Stage
from dvc.command.run import CmdRun
from tests.basic_env import TestDvc
class TestRun(TestDvc):
def test(self):
cmd = 'python {} {} {}'.format(self.CODE, self.FOO, 'out')
deps = [self.FOO, self.CODE]
outs = [os.path.join(self.dvc.root_dir, 'out')]
outs_no_cache = []
fname = os.path.join(self.dvc.root_dir, 'out.dvc')
cwd = os.curdir
self.dvc.add(self.FOO)
stage = self.dvc.run(cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname=fname,
cwd=cwd)
self.assertTrue(filecmp.cmp(self.FOO, 'out'))
self.assertTrue(os.path.isfile(stage.path))
self.assertEqual(stage.cmd, cmd)
self.assertEqual(len(stage.deps), len(deps))
self.assertEqual(len(stage.outs), len(outs + outs_no_cache))
self.assertEqual(stage.outs[0].path, outs[0])
self.assertEqual(stage.outs[0].md5, file_md5(self.FOO)[0])
self.assertTrue(stage.path, fname)
class TestRunEmpty(TestDvc):
def test(self):
self.dvc.run(cmd='',
deps=[],
outs=[],
outs_no_cache=[],
fname='empty.dvc',
cwd=os.curdir)
class TestRunNoExec(TestDvc):
def test(self):
self.dvc.run(cmd='python {} {} {}'.format(self.CODE, self.FOO, 'out'),
no_exec=True)
self.assertFalse(os.path.exists('out'))
class TestCmdRun(TestDvc):
def test_run(self):
ret = main(['run',
'-d', self.FOO,
'-d', self.CODE,
'-o', 'out',
'-f', 'out.dvc',
'python', self.CODE, self.FOO, 'out'])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile('out'))
self.assertTrue(os.path.isfile('out.dvc'))
self.assertTrue(filecmp.cmp(self.FOO, 'out'))
def test_run_bad_command(self):
ret = main(['run',
'non-existing-command'])
self.assertNotEqual(ret, 0)
| true
| true
|
f7088d26d6e642b7dffd25f3df9157dad7084972
| 8,462
|
py
|
Python
|
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
|
omad/airflow
|
663259d4b541ab10ce55fec4d2460e23917062c2
|
[
"Apache-2.0"
] | 1
|
2021-07-07T15:13:51.000Z
|
2021-07-07T15:13:51.000Z
|
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
|
omad/airflow
|
663259d4b541ab10ce55fec4d2460e23917062c2
|
[
"Apache-2.0"
] | 1
|
2020-10-15T22:39:05.000Z
|
2020-10-15T22:39:05.000Z
|
airflow/providers/cncf/kubernetes/hooks/kubernetes.py
|
tanjinP/airflow
|
f0b9aae564805fb09328faf0c47f441ee0699ed8
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tempfile
from typing import Any, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _load_body_to_dict(body):
    """Parse a YAML resource definition into a Python object.

    :param body: the resource definition as a YAML string
    :return: the parsed definition (typically a dict)
    :raises AirflowException: if the body is not valid YAML
    """
    try:
        body_dict = yaml.safe_load(body)
    except yaml.YAMLError as e:
        # Chain the original YAMLError so the parser's error location is
        # preserved in the traceback instead of being discarded.
        raise AirflowException("Exception when loading resource definition: %s\n" % e) from e
    return body_dict
class KubernetesHook(BaseHook):
    """
    Creates Kubernetes API connection.

    - use in cluster configuration by using ``extra__kubernetes__in_cluster`` in connection
    - use custom config by providing path to the file using ``extra__kubernetes__kube_config_path``
    - use custom configuration by providing content of kubeconfig file via
        ``extra__kubernetes__kube_config`` in connection
    - use default config by providing no extras

    This hook check for configuration option in the above order. Once an option is present it will
    use this configuration.

    .. seealso::
        For more information about Kubernetes connection:
        :ref:`apache-airflow:howto/connection:kubernetes`

    :param conn_id: the connection to Kubernetes cluster
    :type conn_id: str
    :param client_configuration: optional ``kubernetes.client.Configuration``
        forwarded to ``config.load_kube_config`` on the file-based code paths
    :type client_configuration: Optional[client.Configuration]
    """

    def __init__(
        self, conn_id: str = "kubernetes_default", client_configuration: Optional[client.Configuration] = None
    ) -> None:
        super().__init__()
        self.conn_id = conn_id
        self.client_configuration = client_configuration

    def get_conn(self) -> Any:
        """Returns kubernetes api session for use with requests"""
        connection = self.get_connection(self.conn_id)
        extras = connection.extra_dejson
        in_cluster = extras.get("extra__kubernetes__in_cluster")
        kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
        kubeconfig = extras.get("extra__kubernetes__kube_config")
        # The three configuration sources are mutually exclusive; count the
        # truthy ones and reject connections that set more than one.
        num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])

        if num_selected_configuration > 1:
            raise AirflowException(
                "Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
                "extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
                "You can only use one option at a time."
            )
        if in_cluster:
            self.log.debug("loading kube_config from: in_cluster configuration")
            config.load_incluster_config()
            return client.ApiClient()

        if kubeconfig_path is not None:
            self.log.debug("loading kube_config from: %s", kubeconfig_path)
            config.load_kube_config(
                config_file=kubeconfig_path, client_configuration=self.client_configuration
            )
            return client.ApiClient()

        if kubeconfig is not None:
            with tempfile.NamedTemporaryFile() as temp_config:
                self.log.debug("loading kube_config from: connection kube_config")
                # load_kube_config only accepts a file path, so the inline
                # kubeconfig is written to a temp file; the config is fully
                # loaded before the file is deleted on context exit.
                temp_config.write(kubeconfig.encode())
                temp_config.flush()
                config.load_kube_config(
                    config_file=temp_config.name, client_configuration=self.client_configuration
                )
            return client.ApiClient()

        # No extras set: fall back to the default kubeconfig location.
        self.log.debug("loading kube_config from: default file")
        config.load_kube_config(client_configuration=self.client_configuration)
        return client.ApiClient()

    @cached_property
    def api_client(self) -> Any:
        """Cached Kubernetes API client"""
        return self.get_conn()

    def create_custom_object(
        self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
    ):
        """
        Creates custom resource definition object in Kubernetes

        :param group: api group
        :type group: str
        :param version: api version
        :type version: str
        :param plural: api plural
        :type plural: str
        :param body: crd object definition (YAML string or already-parsed dict)
        :type body: Union[str, dict]
        :param namespace: kubernetes namespace; defaults to the connection's
        :type namespace: str
        """
        api = client.CustomObjectsApi(self.api_client)
        if namespace is None:
            namespace = self.get_namespace()
        if isinstance(body, str):
            body = _load_body_to_dict(body)
        try:
            response = api.create_namespaced_custom_object(
                group=group, version=version, namespace=namespace, plural=plural, body=body
            )
            self.log.debug("Response: %s", response)
            return response
        except client.rest.ApiException as e:
            raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)

    def get_custom_object(
        self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
    ):
        """
        Get custom resource definition object from Kubernetes

        :param group: api group
        :type group: str
        :param version: api version
        :type version: str
        :param plural: api plural
        :type plural: str
        :param name: crd object name
        :type name: str
        :param namespace: kubernetes namespace; defaults to the connection's
        :type namespace: str
        """
        api = client.CustomObjectsApi(self.api_client)
        if namespace is None:
            namespace = self.get_namespace()
        try:
            response = api.get_namespaced_custom_object(
                group=group, version=version, namespace=namespace, plural=plural, name=name
            )
            return response
        except client.rest.ApiException as e:
            raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)

    def get_namespace(self) -> str:
        """Returns the namespace that defined in the connection"""
        connection = self.get_connection(self.conn_id)
        extras = connection.extra_dejson
        # Fall back to "default" when the connection sets no namespace.
        namespace = extras.get("extra__kubernetes__namespace", "default")
        return namespace

    def get_pod_log_stream(
        self,
        pod_name: str,
        container: Optional[str] = "",
        namespace: Optional[str] = None,
    ) -> Tuple[watch.Watch, Generator[str, None, None]]:
        """
        Retrieves a log stream for a container in a kubernetes pod.

        :param pod_name: pod name
        :type pod_name: str
        :param container: container name; empty string selects the default
        :type container: str
        :param namespace: kubernetes namespace; defaults to the connection's
        :type namespace: str
        :return: the watcher (callers may use it to stop streaming) and the
            generator of log lines
        """
        api = client.CoreV1Api(self.api_client)
        watcher = watch.Watch()
        return (
            watcher,
            watcher.stream(
                api.read_namespaced_pod_log,
                name=pod_name,
                container=container,
                namespace=namespace if namespace else self.get_namespace(),
            ),
        )

    def get_pod_logs(
        self,
        pod_name: str,
        container: Optional[str] = "",
        namespace: Optional[str] = None,
    ):
        """
        Retrieves a container's log from the specified pod.

        :param pod_name: pod name
        :type pod_name: str
        :param container: container name; empty string selects the default
        :type container: str
        :param namespace: kubernetes namespace; defaults to the connection's
        :type namespace: str
        """
        api = client.CoreV1Api(self.api_client)
        # _preload_content=False returns the raw response object so the log
        # can be consumed without loading it all into memory at once.
        return api.read_namespaced_pod_log(
            name=pod_name,
            container=container,
            _preload_content=False,
            namespace=namespace if namespace else self.get_namespace(),
        )
| 37.608889
| 110
| 0.650201
|
import tempfile
from typing import Any, Generator, Optional, Tuple, Union
import yaml
from cached_property import cached_property
from kubernetes import client, config, watch
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
def _load_body_to_dict(body):
try:
body_dict = yaml.safe_load(body)
except yaml.YAMLError as e:
raise AirflowException("Exception when loading resource definition: %s\n" % e)
return body_dict
class KubernetesHook(BaseHook):
def __init__(
self, conn_id: str = "kubernetes_default", client_configuration: Optional[client.Configuration] = None
) -> None:
super().__init__()
self.conn_id = conn_id
self.client_configuration = client_configuration
def get_conn(self) -> Any:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
in_cluster = extras.get("extra__kubernetes__in_cluster")
kubeconfig_path = extras.get("extra__kubernetes__kube_config_path")
kubeconfig = extras.get("extra__kubernetes__kube_config")
num_selected_configuration = len([o for o in [in_cluster, kubeconfig, kubeconfig_path] if o])
if num_selected_configuration > 1:
raise AirflowException(
"Invalid connection configuration. Options extra__kubernetes__kube_config_path, "
"extra__kubernetes__kube_config, extra__kubernetes__in_cluster are mutually exclusive. "
"You can only use one option at a time."
)
if in_cluster:
self.log.debug("loading kube_config from: in_cluster configuration")
config.load_incluster_config()
return client.ApiClient()
if kubeconfig_path is not None:
self.log.debug("loading kube_config from: %s", kubeconfig_path)
config.load_kube_config(
config_file=kubeconfig_path, client_configuration=self.client_configuration
)
return client.ApiClient()
if kubeconfig is not None:
with tempfile.NamedTemporaryFile() as temp_config:
self.log.debug("loading kube_config from: connection kube_config")
temp_config.write(kubeconfig.encode())
temp_config.flush()
config.load_kube_config(
config_file=temp_config.name, client_configuration=self.client_configuration
)
return client.ApiClient()
self.log.debug("loading kube_config from: default file")
config.load_kube_config(client_configuration=self.client_configuration)
return client.ApiClient()
@cached_property
def api_client(self) -> Any:
return self.get_conn()
def create_custom_object(
self, group: str, version: str, plural: str, body: Union[str, dict], namespace: Optional[str] = None
):
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
if isinstance(body, str):
body = _load_body_to_dict(body)
try:
response = api.create_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, body=body
)
self.log.debug("Response: %s", response)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> create_custom_object: %s\n" % e)
def get_custom_object(
self, group: str, version: str, plural: str, name: str, namespace: Optional[str] = None
):
api = client.CustomObjectsApi(self.api_client)
if namespace is None:
namespace = self.get_namespace()
try:
response = api.get_namespaced_custom_object(
group=group, version=version, namespace=namespace, plural=plural, name=name
)
return response
except client.rest.ApiException as e:
raise AirflowException("Exception when calling -> get_custom_object: %s\n" % e)
def get_namespace(self) -> str:
connection = self.get_connection(self.conn_id)
extras = connection.extra_dejson
namespace = extras.get("extra__kubernetes__namespace", "default")
return namespace
def get_pod_log_stream(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
) -> Tuple[watch.Watch, Generator[str, None, None]]:
api = client.CoreV1Api(self.api_client)
watcher = watch.Watch()
return (
watcher,
watcher.stream(
api.read_namespaced_pod_log,
name=pod_name,
container=container,
namespace=namespace if namespace else self.get_namespace(),
),
)
def get_pod_logs(
self,
pod_name: str,
container: Optional[str] = "",
namespace: Optional[str] = None,
):
api = client.CoreV1Api(self.api_client)
return api.read_namespaced_pod_log(
name=pod_name,
container=container,
_preload_content=False,
namespace=namespace if namespace else self.get_namespace(),
)
| true
| true
|
f7088d8ddc8d7584686f5d81c7a2b9b5233b813c
| 128
|
py
|
Python
|
src/init.py
|
HofmannCh/PythonSnake
|
e737414f1e9150bdd22d6267a53e25b85f3b5ccc
|
[
"MIT"
] | null | null | null |
src/init.py
|
HofmannCh/PythonSnake
|
e737414f1e9150bdd22d6267a53e25b85f3b5ccc
|
[
"MIT"
] | null | null | null |
src/init.py
|
HofmannCh/PythonSnake
|
e737414f1e9150bdd22d6267a53e25b85f3b5ccc
|
[
"MIT"
] | null | null | null |
from Window import Window
from Logic import Logic


def main():
    """Start the game: build the logic model, attach the window, run the UI loop."""
    print("Init")
    logic = Logic()
    win = Window(logic)
    win.mainloop()
    print("End")


# Guard the entry point so importing this module has no side effects.
if __name__ == "__main__":
    main()
| 16
| 25
| 0.734375
|
from Window import Window
from Logic import Logic
print("Init")
logic = Logic()
win = Window(logic)
win.mainloop()
print("End")
| true
| true
|
f7088ed2ed08b3c8c7ac9ec0bca022ea7e57a025
| 735
|
py
|
Python
|
students/K33422/Iskhakova_Emina/labs/lab1/task2_server.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33422/Iskhakova_Emina/labs/lab1/task2_server.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33422/Iskhakova_Emina/labs/lab1/task2_server.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
import socket


def main():
    """Serve one client, computing trapezoid areas from "a b h" messages.

    Protocol (unchanged): send a prompt, read a reply; "exit" or a closed
    connection ends the session. Fixes over the original: sockets are closed
    on every exit path (context managers), the port can be reused right after
    a restart, and malformed input no longer crashes the server.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # Allow quick restarts on the same port.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 8080))
        sock.listen(1)
        clientsoc, addr = sock.accept()
        print('connected:', addr)
        with clientsoc:
            message = ''
            while True:
                clientsoc.sendall(
                    (message + 'Enter two bases and its hight or "exit" to finish the program').encode("utf-8")
                )
                try:
                    data = clientsoc.recv(1024)
                    if not data:
                        break
                    text = data.decode("utf-8")
                    if text == "exit":
                        break
                    try:
                        a, b, h = text.split(' ')
                        s = (int(a) + int(b)) / 2 * int(h)
                    except ValueError:
                        # Wrong field count or non-integer input: report, keep serving.
                        message = 'Invalid input, expected "a b h"; \n'
                        continue
                    message = f'Square of the figure is {s}; \n'
                except KeyboardInterrupt:
                    break


if __name__ == "__main__":
    main()
| 27.222222
| 114
| 0.560544
|
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 8080))
sock.listen(1)
clientsoc, addr = sock.accept()
print('connected:', addr)
message = ''
while True:
clientsoc.sendall(bytes(message + f'Enter two bases and its hight or "exit" to finish the program', "utf-8"))
try:
data = clientsoc.recv(1024)
if not data:
break
if data.decode("utf-8") == "exit":
clientsoc.close()
break
a, b, h = data.decode("utf-8").split(' ')
s = (int(a) + int(b)) / 2 * int(h)
message = f'Square of the figure is {s}; \n'
except KeyboardInterrupt:
clientsoc.close()
break
| true
| true
|
f708911d8933c643cca11ef42a3909ff1dc435e3
| 595
|
py
|
Python
|
angr/procedures/libc/fscanf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 2
|
2020-04-29T02:39:42.000Z
|
2020-04-29T08:07:44.000Z
|
angr/procedures/libc/fscanf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | null | null | null |
angr/procedures/libc/fscanf.py
|
Kyle-Kyle/angr
|
345b2131a7a67e3a6ffc7d9fd475146a3e12f837
|
[
"BSD-2-Clause"
] | 3
|
2019-10-17T07:47:36.000Z
|
2022-01-24T23:38:13.000Z
|
from angr.procedures.stubs.format_parser import FormatParser
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
class fscanf(FormatParser):
    """Simulated libc ``fscanf``: reads formatted input from a FILE* stream."""
    #pylint:disable=arguments-differ
    def run(self, file_ptr):
        """Resolve the FILE*'s fd, then interpret the format string against it.

        Returns -1 when the fd does not map to a simulated file descriptor.
        """
        # TODO handle errors
        # Locate the ``fd`` member inside the FILE struct for this architecture.
        fd_offset = io_file_data_for_arch(self.state.arch)['fd']
        fd = self.state.mem[file_ptr + fd_offset:].int.resolved
        simfd = self.state.posix.get_fd(fd)
        if simfd is None:
            return -1
        # Argument 1 is the format string; varargs start at argument index 2.
        fmt_str = self._parse(1)
        items = fmt_str.interpret(2, self.arg, simfd=simfd)
        return items
| 29.75
| 70
| 0.678992
|
from angr.procedures.stubs.format_parser import FormatParser
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
class fscanf(FormatParser):
def run(self, file_ptr):
fd_offset = io_file_data_for_arch(self.state.arch)['fd']
fd = self.state.mem[file_ptr + fd_offset:].int.resolved
simfd = self.state.posix.get_fd(fd)
if simfd is None:
return -1
fmt_str = self._parse(1)
items = fmt_str.interpret(2, self.arg, simfd=simfd)
return items
| true
| true
|
f708920170b2d6285358225fb37557f4b233ef57
| 5,448
|
py
|
Python
|
gomatic/gocd/artifacts.py
|
ayr-ton/gomatic
|
314d0bed56888f44326f15ca6b2da20e7909cf67
|
[
"MIT"
] | 96
|
2015-01-06T22:08:29.000Z
|
2017-06-01T08:14:11.000Z
|
gomatic/gocd/artifacts.py
|
ayr-ton/gomatic
|
314d0bed56888f44326f15ca6b2da20e7909cf67
|
[
"MIT"
] | 50
|
2017-06-10T20:10:12.000Z
|
2021-12-21T15:41:25.000Z
|
gomatic/gocd/artifacts.py
|
ayr-ton/gomatic
|
314d0bed56888f44326f15ca6b2da20e7909cf67
|
[
"MIT"
] | 39
|
2017-06-10T20:06:16.000Z
|
2021-10-30T14:18:09.000Z
|
from xml.etree import ElementTree as ET
from gomatic.mixins import CommonEqualityMixin
def fetch_artifact_src_from(element):
    """Build the matching fetch-artifact source object from an XML element.

    Chooses ``FetchArtifactFile`` for a ``srcfile`` attribute and
    ``FetchArtifactDir`` for ``srcdir``; anything else is an error.
    """
    attrs = element.attrib
    if 'srcfile' in attrs:
        return FetchArtifactFile(attrs['srcfile'])
    if 'srcdir' in attrs:
        return FetchArtifactDir(attrs['srcdir'])
    raise RuntimeError("Expected srcfile or srcdir. Do not know what src type to use for " + ET.tostring(element, 'utf-8'))
def fetch_properties_from(element):
    """Collect all ``<property><key>/<value>`` pairs under *element*.

    Returns a dict mapping key text to value text, or ``None`` when the
    element holds no properties.
    """
    pairs = {
        prop.find('key').text: prop.find('value').text
        for prop in element.iter('property')
    }
    return pairs or None
class FetchArtifactFile(CommonEqualityMixin):
    """The ``srcfile`` variant of a fetch-artifact source."""

    def __init__(self, src_value):
        self.__src = src_value

    def __repr__(self):
        return 'FetchArtifactFile("{}")'.format(self.__src)

    @property
    def as_xml_type_and_value(self):
        """Attribute name / value pair used when writing config XML."""
        return "srcfile", self.__src
class FetchArtifactDir(CommonEqualityMixin):
    """The ``srcdir`` variant of a fetch-artifact source."""

    def __init__(self, src_value):
        self.__src = src_value

    def __repr__(self):
        return 'FetchArtifactDir("{}")'.format(self.__src)

    @property
    def as_xml_type_and_value(self):
        """Attribute name / value pair used when writing config XML."""
        return "srcdir", self.__src
class Artifact(CommonEqualityMixin):
    """A GoCD pipeline artifact (build, test, or external), serializable to config XML.

    Build/test artifacts carry src/dest; external artifacts carry id/store_id
    and an optional configuration dict.
    """
    def __init__(self, src=None, dest=None, id=None, store_id=None, config=None, artifact_type='build'):
        self._src = src
        self._dest = dest
        self._artifact_id = id
        self._store_id = store_id
        self._config = config
        self._type = artifact_type
    def __repr__(self):
        # External artifacts render via id/store_id (+ config); others via src/dest.
        if self._artifact_id is not None:
            if self._config is None:
                return '%s("%s", "%s")' % (self.constructor, self._artifact_id, self._store_id)
            else:
                return '%s("%s", "%s", %s)' % (self.constructor, self._artifact_id, self._store_id, self._config)
        if self._dest is None:
            return '%s("%s")' % (self.constructor, self._src)
        else:
            return '%s("%s", "%s")' % (self.constructor, self._src, self._dest)
    @property
    def constructor(self):
        """Name of the factory matching this artifact's type; raises on unknown types."""
        if self._type == "build":
            return "BuildArtifact"
        if self._type == "test":
            return "TestArtifact"
        if self._type == "external":
            return "ExternalArtifact"
        raise RuntimeError("Unknown artifact type %s" % self._type)
    def append_to(self, element, gocd_18_3_and_above=False):
        """Append this artifact's XML under *element*, in the schema for the given GoCD version."""
        if gocd_18_3_and_above:
            self._append_to_gocd_18_3_and_above(element)
        else:
            self._append_to_gocd_18_2_and_below(element)
    def _append_to_gocd_18_3_and_above(self, element):
        # GoCD >= 18.3 uses a unified <artifact> tag with a type attribute.
        if self._artifact_id is not None:
            if self._config is None:
                element.append(ET.fromstring('<artifact id="%s" storeId="%s" type="%s" />' % (self._artifact_id, self._store_id, self._type)))
            else:
                # None config values serialize as empty strings.
                properties_xml = "".join(["<property><key>{}</key><value>{}</value></property>".format(k, str(v or '')) for k, v in self._config.items()])
                new_element = ET.fromstring('<artifact id="{}" storeId="{}" type="{}"><configuration>{}</configuration></artifact>'.format(self._artifact_id, self._store_id, self._type, properties_xml))
                element.append(new_element)
        elif self._dest is None:
            element.append(ET.fromstring('<artifact src="%s" type="%s" />' % (self._src, self._type)))
        else:
            element.append(ET.fromstring('<artifact src="%s" dest="%s" type="%s" />' % (self._src, self._dest, self._type)))
    def _append_to_gocd_18_2_and_below(self, element):
        # Older GoCD only knows <artifact> (build) and <test> tags.
        if not self._type == 'build' and not self._type == 'test':
            raise RuntimeError("Artifact type '%s' not supported in GoCD 18.2 and below" % self._type)
        tag = 'artifact' if self._type == 'build' else 'test'
        if self._dest is None:
            element.append(ET.fromstring('<%s src="%s" />' % (tag, self._src)))
        else:
            element.append(ET.fromstring('<%s src="%s" dest="%s" />' % (tag, self._src, self._dest)))
    @classmethod
    def get_artifact_for(cls, element):
        """Build an Artifact from a config XML element (any supported schema)."""
        src = element.attrib.get('src', None)
        dest = element.attrib.get('dest', None)
        id = element.attrib.get('id', None)
        store_id = element.attrib.get('storeId', None)
        artifact_type_attribute = element.attrib.get('type', None)
        if id is not None:
            return cls(id=id, store_id=store_id, config=fetch_properties_from(element), artifact_type=artifact_type_attribute)
        if artifact_type_attribute is None:
            # Pre-18.3 schema: the tag name encodes the type.
            _type = 'build' if element.tag == 'artifact' else 'test'
            return cls(src=src, dest=dest, artifact_type=_type)
        else:
            return cls(src=src, dest=dest, artifact_type=artifact_type_attribute)
    @classmethod
    def get_build_artifact(cls, src, dest=None):
        """Factory for a build artifact."""
        return cls(src=src, dest=dest, artifact_type='build')
    @classmethod
    def get_test_artifact(cls, src, dest=None):
        """Factory for a test artifact."""
        return cls(src=src, dest=dest, artifact_type='test')
    @classmethod
    def get_external_artifact(cls, id, store_id, config=None):
        """Factory for an external artifact."""
        return cls(id=id, store_id=store_id, config=config, artifact_type='external')
# Public constructor-style aliases for the Artifact factories.
ArtifactFor = Artifact.get_artifact_for
BuildArtifact = Artifact.get_build_artifact
TestArtifact = Artifact.get_test_artifact
ExternalArtifact = Artifact.get_external_artifact
| 41.587786
| 202
| 0.643172
|
from xml.etree import ElementTree as ET
from gomatic.mixins import CommonEqualityMixin
def fetch_artifact_src_from(element):
if 'srcfile' in element.attrib:
return FetchArtifactFile(element.attrib['srcfile'])
if 'srcdir' in element.attrib:
return FetchArtifactDir(element.attrib['srcdir'])
raise RuntimeError("Expected srcfile or srcdir. Do not know what src type to use for " + ET.tostring(element, 'utf-8'))
def fetch_properties_from(element):
props = {}
for prop in element.iter('property'):
props[prop.find('key').text] = prop.find('value').text
return props if props else None
class FetchArtifactFile(CommonEqualityMixin):
def __init__(self, src_value):
self.__src_value = src_value
def __repr__(self):
return 'FetchArtifactFile("%s")' % self.__src_value
@property
def as_xml_type_and_value(self):
return "srcfile", self.__src_value
class FetchArtifactDir(CommonEqualityMixin):
def __init__(self, src_value):
self.__src_value = src_value
def __repr__(self):
return 'FetchArtifactDir("%s")' % self.__src_value
@property
def as_xml_type_and_value(self):
return "srcdir", self.__src_value
class Artifact(CommonEqualityMixin):
def __init__(self, src=None, dest=None, id=None, store_id=None, config=None, artifact_type='build'):
self._src = src
self._dest = dest
self._artifact_id = id
self._store_id = store_id
self._config = config
self._type = artifact_type
def __repr__(self):
if self._artifact_id is not None:
if self._config is None:
return '%s("%s", "%s")' % (self.constructor, self._artifact_id, self._store_id)
else:
return '%s("%s", "%s", %s)' % (self.constructor, self._artifact_id, self._store_id, self._config)
if self._dest is None:
return '%s("%s")' % (self.constructor, self._src)
else:
return '%s("%s", "%s")' % (self.constructor, self._src, self._dest)
@property
def constructor(self):
if self._type == "build":
return "BuildArtifact"
if self._type == "test":
return "TestArtifact"
if self._type == "external":
return "ExternalArtifact"
raise RuntimeError("Unknown artifact type %s" % self._type)
def append_to(self, element, gocd_18_3_and_above=False):
if gocd_18_3_and_above:
self._append_to_gocd_18_3_and_above(element)
else:
self._append_to_gocd_18_2_and_below(element)
def _append_to_gocd_18_3_and_above(self, element):
if self._artifact_id is not None:
if self._config is None:
element.append(ET.fromstring('<artifact id="%s" storeId="%s" type="%s" />' % (self._artifact_id, self._store_id, self._type)))
else:
properties_xml = "".join(["<property><key>{}</key><value>{}</value></property>".format(k, str(v or '')) for k, v in self._config.items()])
new_element = ET.fromstring('<artifact id="{}" storeId="{}" type="{}"><configuration>{}</configuration></artifact>'.format(self._artifact_id, self._store_id, self._type, properties_xml))
element.append(new_element)
elif self._dest is None:
element.append(ET.fromstring('<artifact src="%s" type="%s" />' % (self._src, self._type)))
else:
element.append(ET.fromstring('<artifact src="%s" dest="%s" type="%s" />' % (self._src, self._dest, self._type)))
def _append_to_gocd_18_2_and_below(self, element):
if not self._type == 'build' and not self._type == 'test':
raise RuntimeError("Artifact type '%s' not supported in GoCD 18.2 and below" % self._type)
tag = 'artifact' if self._type == 'build' else 'test'
if self._dest is None:
element.append(ET.fromstring('<%s src="%s" />' % (tag, self._src)))
else:
element.append(ET.fromstring('<%s src="%s" dest="%s" />' % (tag, self._src, self._dest)))
@classmethod
def get_artifact_for(cls, element):
src = element.attrib.get('src', None)
dest = element.attrib.get('dest', None)
id = element.attrib.get('id', None)
store_id = element.attrib.get('storeId', None)
artifact_type_attribute = element.attrib.get('type', None)
if id is not None:
return cls(id=id, store_id=store_id, config=fetch_properties_from(element), artifact_type=artifact_type_attribute)
if artifact_type_attribute is None:
_type = 'build' if element.tag == 'artifact' else 'test'
return cls(src=src, dest=dest, artifact_type=_type)
else:
return cls(src=src, dest=dest, artifact_type=artifact_type_attribute)
@classmethod
def get_build_artifact(cls, src, dest=None):
return cls(src=src, dest=dest, artifact_type='build')
@classmethod
def get_test_artifact(cls, src, dest=None):
return cls(src=src, dest=dest, artifact_type='test')
@classmethod
def get_external_artifact(cls, id, store_id, config=None):
return cls(id=id, store_id=store_id, config=config, artifact_type='external')
ArtifactFor = Artifact.get_artifact_for
BuildArtifact = Artifact.get_build_artifact
TestArtifact = Artifact.get_test_artifact
ExternalArtifact = Artifact.get_external_artifact
| true
| true
|
f7089239c29071e4fcbee245e9912b787606573e
| 6,882
|
py
|
Python
|
multiphonon/ui/getdos0.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 1
|
2019-05-22T08:46:09.000Z
|
2019-05-22T08:46:09.000Z
|
multiphonon/ui/getdos0.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 118
|
2016-04-04T12:27:15.000Z
|
2021-08-18T01:46:13.000Z
|
multiphonon/ui/getdos0.py
|
granrothge/multiphonon
|
486a998eeb6b73b964a58ba0f98fe3ece15bdf6e
|
[
"MIT"
] | 5
|
2017-09-28T16:01:12.000Z
|
2020-01-31T18:58:09.000Z
|
def notebookUI(samplenxs, mtnxs, initdos=None, options=None, load_options_path=None):
    """Display an ipywidgets form for GetDOS parameters and run getDOS on submit.

    :param samplenxs: sample nexus path(s), passed through to getDOS
    :param mtnxs: empty-can (MT) nexus path(s), passed through to getDOS
    :param initdos: initial DOS, passed through to getDOS
    :param options: dict of widget defaults; mutually exclusive with load_options_path
    :param load_options_path: YAML file holding widget defaults
    :raises RuntimeError: when both options and load_options_path are given
    """
    import yaml
    if options is not None and load_options_path:
        raise RuntimeError(
            "Both options and load_options_path were set: %s, %s" %(
                options, load_options_path)
        )
    if load_options_path:
        with open(load_options_path) as stream:
            # safe_load: the options file only holds plain scalars/maps, and
            # plain yaml.load would execute arbitrary YAML tags.
            options = yaml.safe_load(stream)
    if options is None:
        options = default_options
    #
    import ipywidgets as widgets
    from IPython.display import display
    w_mt_fraction = widgets.BoundedFloatText(description="mt_fraction", min=0., max=100., value=options['mt_fraction'])
    w_const_bg_fraction = widgets.BoundedFloatText(description="const_bg_fraction", min=0., max=1., value=options.get('const_bg_fraction', 0.0))
    w_Emin = widgets.BoundedFloatText(description="Emin", min=-1000., max=0., value=options['Emin'])
    w_Emax = widgets.BoundedFloatText(description="Emax", min=0., max=1000., value=options['Emax'])
    w_dE = widgets.BoundedFloatText(description="dE", min=0, max=50., value=options['dE'])
    w_Qmin = widgets.BoundedFloatText(description="Qmin", min=0, max=50., value=options['Qmin'])
    w_Qmax = widgets.BoundedFloatText(description="Qmax", min=0., max=50., value=options['Qmax'])
    w_dQ = widgets.BoundedFloatText(description="dQ", min=0, max=5., value=options['dQ'])
    w_T = widgets.BoundedFloatText(description="Temperature", min=0., max=5000., value=options['T'])
    w_Ecutoff = widgets.BoundedFloatText(description="Max energy of phonons", min=0, max=1000., value=options['Ecutoff'])
    w_ElasticPeakMin = widgets.BoundedFloatText(description="Emin of elastic peak", min=-300., max=0., value=options['ElasticPeakMin'])
    w_ElasticPeakMax = widgets.BoundedFloatText(description="Emax of elastic peak", min=0., max=300., value=options['ElasticPeakMax'])
    w_M = widgets.BoundedFloatText(description="Average atom mass", min=1., max=1000., value=options['M'])
    w_C_ms = widgets.BoundedFloatText(description="C_ms", min=0., max=10., value=options['C_ms'])
    w_Ei = widgets.BoundedFloatText(description="Ei", min=0, max=2000., value=options['Ei'])
    w_workdir = widgets.Text(description="work dir", value=options['workdir'])
    update_strategy_weights = options.get('update_strategy_weights', (.5, .5))
    w_update_weight_continuity = widgets.BoundedFloatText(
        description='"enforce continuity" weight for DOS update strategy',
        min=0., max=1., value=update_strategy_weights[0])
    w_update_weight_area = widgets.BoundedFloatText(
        description='"area conservation" weight for DOS update strategy',
        min=0., max=1., value=update_strategy_weights[1])
    w_inputs = (
        w_mt_fraction, w_const_bg_fraction,
        w_Emin, w_Emax, w_dE,
        w_Qmin, w_Qmax, w_dQ,
        w_T, w_Ecutoff,
        w_ElasticPeakMin, w_ElasticPeakMax,
        w_M, w_C_ms, w_Ei, w_workdir,
        w_update_weight_continuity, w_update_weight_area
    )
    w_Run = widgets.Button(description="Run")
    w_all = w_inputs + (w_Run,)
    def submit(b):
        """Button callback: persist options, close widgets, run getDOS with progress."""
        # suppress warning from h5py
        import warnings
        warnings.simplefilter(action = "ignore", category = FutureWarning)
        dos_update_weights = _get_dos_update_weights(w_update_weight_continuity.value, w_update_weight_area.value)
        #
        kargs = dict(
            mt_fraction = w_mt_fraction.value,
            const_bg_fraction = w_const_bg_fraction.value,
            Emin=w_Emin.value, Emax=w_Emax.value, dE=w_dE.value,
            Qmin=w_Qmin.value, Qmax=w_Qmax.value, dQ=w_dQ.value,
            T=w_T.value, Ecutoff=w_Ecutoff.value,
            elastic_E_cutoff=(w_ElasticPeakMin.value, w_ElasticPeakMax.value),
            M=w_M.value,
            C_ms=w_C_ms.value, Ei=w_Ei.value,
            workdir=w_workdir.value,
            initdos=initdos,
            update_strategy_weights = dos_update_weights,
        )
        import pprint, os, yaml
        workdir = kargs['workdir']
        if not os.path.exists(workdir):
            os.makedirs(workdir)
        # Save the chosen options next to the results for reproducibility.
        options = dict(kargs)
        options['ElasticPeakMin']=w_ElasticPeakMin.value
        options['ElasticPeakMax']=w_ElasticPeakMax.value
        with open(os.path.join(workdir, 'getdos-opts.yaml'), 'wt') as stream:
            yaml.dump(options, stream)
        maxiter = 10
        close = lambda w: w.close()
        list(map(close, w_all))
        from ..getdos import getDOS
        # +2: getDOS yields two extra progress messages beyond the iterations.
        log_progress(getDOS(samplenxs, mt_nxs=mtnxs, maxiter=maxiter, **kargs), every=1, size=maxiter+2)
        return
    w_Run.on_click( submit )
    display(*w_all)
    return
def _get_dos_update_weights(*w):
# w should be all positive
wsum = sum(w)
if wsum <= 0:
N = len(w)
return [1./N]*N
return [t/wsum for t in w]
# modified from https://github.com/alexanderkuk/log-progress
def log_progress(sequence, every=None, size=None):
    """Consume *sequence* while showing an IPython progress bar with its messages.

    :param sequence: iterable of progress messages (each shown in the label)
    :param every: update the display every *every* items; derived from size if omitted
    :param size: expected number of items; len(sequence) is tried when omitted
    """
    from ipywidgets import IntProgress, HTML, VBox
    from IPython.display import display
    is_iterator = False
    if size is None:
        try:
            size = len(sequence)
        except TypeError:
            # No len(): treat as an open-ended iterator.
            is_iterator = True
    if size is not None:
        if every is None:
            if size <= 200:
                every = 1
            else:
                every = int(size / 200)     # every 0.5%
    else:
        assert every is not None, 'sequence is iterator, set every'
    if is_iterator:
        # Unknown total: show an indeterminate ("info") bar.
        progress = IntProgress(min=0, max=1, value=1)
        progress.bar_style = 'info'
    else:
        progress = IntProgress(min=0, max=size, value=0)
    label = HTML()
    box = VBox(children=[label, progress])
    display(box)
    index = 0
    try:
        for index, msg in enumerate(sequence, 1):
            if index == 1 or index % every == 0:
                if is_iterator:
                    label.value = 'Running: {index} / ?: {msg}...'.format(index=index, msg=msg)
                else:
                    progress.value = index
                    label.value = 'Running: {index} / {size}: {msg}...'.format(
                        index=index,
                        size=size,
                        msg=msg
                    )
    except:
        # Any failure (including KeyboardInterrupt) turns the bar red, then propagates.
        progress.bar_style = 'danger'
        raise
    else:
        progress.bar_style = 'success'
        progress.value = size
        label.value = 'Done.'
# Default widget values for the GetDOS notebook UI (used when neither
# ``options`` nor ``load_options_path`` is supplied to notebookUI).
default_options = dict(
    mt_fraction = 0.9,
    const_bg_fraction = 0.,
    Emin = -70,
    Emax = 70,
    dE = 1.,
    Qmin = 0.,
    Qmax = 14.,
    dQ = 0.1,
    T = 300.,
    Ecutoff = 50.,
    ElasticPeakMin = -20,
    ElasticPeakMax = 7.,
    M = 50.94,
    C_ms = 0.3,
    Ei = 100.,
    workdir = 'work',
)
| 39.102273
| 144
| 0.622784
|
def notebookUI(samplenxs, mtnxs, initdos=None, options=None, load_options_path=None):
import yaml
if options is not None and load_options_path:
raise RuntimeError(
"Both options and load_options_path were set: %s, %s" %(
options, load_options_path)
)
if load_options_path:
with open(load_options_path) as stream:
options = yaml.load(stream)
if options is None:
options = default_options
import ipywidgets as widgets
from IPython.display import display
w_mt_fraction = widgets.BoundedFloatText(description="mt_fraction", min=0., max=100., value=options['mt_fraction'])
w_const_bg_fraction = widgets.BoundedFloatText(description="const_bg_fraction", min=0., max=1., value=options.get('const_bg_fraction', 0.0))
w_Emin = widgets.BoundedFloatText(description="Emin", min=-1000., max=0., value=options['Emin'])
w_Emax = widgets.BoundedFloatText(description="Emax", min=0., max=1000., value=options['Emax'])
w_dE = widgets.BoundedFloatText(description="dE", min=0, max=50., value=options['dE'])
w_Qmin = widgets.BoundedFloatText(description="Qmin", min=0, max=50., value=options['Qmin'])
w_Qmax = widgets.BoundedFloatText(description="Qmax", min=0., max=50., value=options['Qmax'])
w_dQ = widgets.BoundedFloatText(description="dQ", min=0, max=5., value=options['dQ'])
w_T = widgets.BoundedFloatText(description="Temperature", min=0., max=5000., value=options['T'])
w_Ecutoff = widgets.BoundedFloatText(description="Max energy of phonons", min=0, max=1000., value=options['Ecutoff'])
w_ElasticPeakMin = widgets.BoundedFloatText(description="Emin of elastic peak", min=-300., max=0., value=options['ElasticPeakMin'])
w_ElasticPeakMax = widgets.BoundedFloatText(description="Emax of elastic peak", min=0., max=300., value=options['ElasticPeakMax'])
w_M = widgets.BoundedFloatText(description="Average atom mass", min=1., max=1000., value=options['M'])
w_C_ms = widgets.BoundedFloatText(description="C_ms", min=0., max=10., value=options['C_ms'])
w_Ei = widgets.BoundedFloatText(description="Ei", min=0, max=2000., value=options['Ei'])
w_workdir = widgets.Text(description="work dir", value=options['workdir'])
update_strategy_weights = options.get('update_strategy_weights', (.5, .5))
w_update_weight_continuity = widgets.BoundedFloatText(
description='"enforce continuity" weight for DOS update strategy',
min=0., max=1., value=update_strategy_weights[0])
w_update_weight_area = widgets.BoundedFloatText(
description='"area conservation" weight for DOS update strategy',
min=0., max=1., value=update_strategy_weights[1])
w_inputs = (
w_mt_fraction, w_const_bg_fraction,
w_Emin, w_Emax, w_dE,
w_Qmin, w_Qmax, w_dQ,
w_T, w_Ecutoff,
w_ElasticPeakMin, w_ElasticPeakMax,
w_M, w_C_ms, w_Ei, w_workdir,
w_update_weight_continuity, w_update_weight_area
)
w_Run = widgets.Button(description="Run")
w_all = w_inputs + (w_Run,)
def submit(b):
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
dos_update_weights = _get_dos_update_weights(w_update_weight_continuity.value, w_update_weight_area.value)
kargs = dict(
mt_fraction = w_mt_fraction.value,
const_bg_fraction = w_const_bg_fraction.value,
Emin=w_Emin.value, Emax=w_Emax.value, dE=w_dE.value,
Qmin=w_Qmin.value, Qmax=w_Qmax.value, dQ=w_dQ.value,
T=w_T.value, Ecutoff=w_Ecutoff.value,
elastic_E_cutoff=(w_ElasticPeakMin.value, w_ElasticPeakMax.value),
M=w_M.value,
C_ms=w_C_ms.value, Ei=w_Ei.value,
workdir=w_workdir.value,
initdos=initdos,
update_strategy_weights = dos_update_weights,
)
import pprint, os, yaml
workdir = kargs['workdir']
if not os.path.exists(workdir):
os.makedirs(workdir)
options = dict(kargs)
options['ElasticPeakMin']=w_ElasticPeakMin.value
options['ElasticPeakMax']=w_ElasticPeakMax.value
with open(os.path.join(workdir, 'getdos-opts.yaml'), 'wt') as stream:
yaml.dump(options, stream)
maxiter = 10
close = lambda w: w.close()
list(map(close, w_all))
from ..getdos import getDOS
log_progress(getDOS(samplenxs, mt_nxs=mtnxs, maxiter=maxiter, **kargs), every=1, size=maxiter+2)
return
w_Run.on_click( submit )
display(*w_all)
return
def _get_dos_update_weights(*w):
wsum = sum(w)
if wsum <= 0:
N = len(w)
return [1./N]*N
return [t/wsum for t in w]
def log_progress(sequence, every=None, size=None):
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200)
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, msg in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = 'Running: {index} / ?: {msg}...'.format(index=index, msg=msg)
else:
progress.value = index
label.value = 'Running: {index} / {size}: {msg}...'.format(
index=index,
size=size,
msg=msg
)
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = size
label.value = 'Done.'
default_options = dict(
mt_fraction = 0.9,
const_bg_fraction = 0.,
Emin = -70,
Emax = 70,
dE = 1.,
Qmin = 0.,
Qmax = 14.,
dQ = 0.1,
T = 300.,
Ecutoff = 50.,
ElasticPeakMin = -20,
ElasticPeakMax = 7.,
M = 50.94,
C_ms = 0.3,
Ei = 100.,
workdir = 'work',
)
| true
| true
|
f708928e9cd5afabafcf6b37d400298162bd755f
| 15,941
|
py
|
Python
|
mars/dataframe/base/drop.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 2,413
|
2018-12-06T09:37:11.000Z
|
2022-03-30T15:47:39.000Z
|
mars/dataframe/base/drop.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 1,335
|
2018-12-07T03:06:18.000Z
|
2022-03-31T11:45:57.000Z
|
mars/dataframe/base/drop.py
|
hxri/mars
|
f7864f00911883b94800b63856f0e57648d3d9b4
|
[
"Apache-2.0"
] | 329
|
2018-12-07T03:12:41.000Z
|
2022-03-29T21:49:57.000Z
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from collections import OrderedDict
import numpy as np
from ... import opcodes
from ...core import Entity, Chunk, CHUNK_TYPE, OutputType, recursive_tile
from ...serialization.serializables import AnyField, StringField
from ..core import IndexValue, DATAFRAME_TYPE, SERIES_TYPE, INDEX_CHUNK_TYPE
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index, validate_axis
class DataFrameDrop(DataFrameOperandMixin, DataFrameOperand):
    """Mars operand implementing ``drop`` for DataFrame, Series and Index."""
    _op_type_ = opcodes.DATAFRAME_DROP
    # Labels to drop along the index axis; may itself be a Mars entity.
    _index = AnyField('index')
    # Labels to drop along the columns axis (plain values only).
    _columns = AnyField('columns')
    _level = AnyField('level')
    _errors = StringField('errors')
    def __init__(self, index=None, columns=None, level=None, errors=None, **kw):
        super().__init__(_index=index, _columns=columns, _level=level, _errors=errors,
                         **kw)
    @property
    def index(self):
        return self._index
    @property
    def columns(self):
        return self._columns
    @property
    def level(self):
        return self._level
    @property
    def errors(self):
        return self._errors
    def _filter_dtypes(self, dtypes, ignore_errors=False):
        """Return *dtypes* with the dropped columns removed (no-op when no columns given)."""
        if self._columns:
            return dtypes.drop(index=self._columns, level=self._level,
                               errors='ignore' if ignore_errors else self._errors)
        else:
            return dtypes
    def _set_inputs(self, inputs):
        # When the index labels are a Mars entity, it travels as a second input.
        super()._set_inputs(inputs)
        inputs_iter = iter(self._inputs[1:])
        if len(self._inputs) > 1:
            self._index = next(inputs_iter)
    def __call__(self, df_or_series):
        """Build the output tileable with the post-drop metadata (shape, dtypes, index)."""
        params = df_or_series.params.copy()
        shape_list = list(df_or_series.shape)
        if self._index is not None:
            if isinstance(df_or_series.index_value.value, IndexValue.RangeIndex):
                params['index_value'] = parse_index(None, (df_or_series.key, df_or_series.index_value.key))
            # Dropping rows makes the resulting length unknown until execution.
            shape_list[0] = np.nan
        if isinstance(df_or_series, DATAFRAME_TYPE):
            new_dtypes = self._filter_dtypes(df_or_series.dtypes)
            params['columns_value'] = parse_index(new_dtypes.index, store_data=True)
            params['dtypes'] = new_dtypes
            shape_list[1] = len(new_dtypes)
            self.output_types = [OutputType.dataframe]
        elif isinstance(df_or_series, SERIES_TYPE):
            self.output_types = [OutputType.series]
        else:
            self.output_types = [OutputType.index]
        params['shape'] = tuple(shape_list)
        inputs = [df_or_series]
        if isinstance(self._index, Entity):
            inputs.append(self._index)
        return self.new_tileable(inputs, **params)
    @classmethod
    def tile(cls, op: 'DataFrameDrop'):
        """Emit one drop chunk per input chunk; column-less chunks are elided."""
        inp = op.inputs[0]
        out = op.outputs[0]
        if len(op.inputs) > 1:
            # Index labels live in a Mars entity: collapse it to a single chunk
            # so every drop chunk can take it as input.
            rechunked = yield from recursive_tile(
                op.index.rechunk({0: (op.index.shape[0],)}))
            index_chunk = rechunked.chunks[0]
        else:
            index_chunk = op.index
        # Maps original column-chunk index -> (filtered dtypes, new column index).
        col_to_args = OrderedDict()
        chunks = []
        for c in inp.chunks:
            params = c.params.copy()
            if isinstance(inp, DATAFRAME_TYPE):
                new_dtypes, new_col_id = col_to_args.get(c.index[1], (None, None))
                if new_dtypes is None:
                    new_col_id = len(col_to_args)
                    new_dtypes = op._filter_dtypes(c.dtypes, ignore_errors=True)
                    if len(new_dtypes) == 0:
                        # All columns of this chunk were dropped: skip it entirely.
                        continue
                    col_to_args[c.index[1]] = (new_dtypes, new_col_id)
                params.update(dict(dtypes=new_dtypes, index=(c.index[0], new_col_id),
                                   index_value=c.index_value,
                                   columns_value=parse_index(new_dtypes.index, store_data=True)))
                if op.index is not None:
                    params.update(dict(shape=(np.nan, len(new_dtypes)),
                                       index_value=parse_index(None, (c.key, c.index_value.key))))
                else:
                    params['shape'] = (c.shape[0], len(new_dtypes))
            elif op.index is not None:
                params.update(dict(shape=(np.nan,), index_value=parse_index(None, (c.key, c.index_value.key))))
            chunk_inputs = [c]
            if isinstance(index_chunk, Chunk):
                chunk_inputs.append(index_chunk)
            new_op = op.copy().reset_key()
            new_op._index = index_chunk
            chunks.append(new_op.new_chunk(chunk_inputs, **params))
        new_op = op.copy().reset_key()
        params = out.params.copy()
        if op.index is not None:
            # Row counts unknown after dropping by index labels.
            nsplits_list = [(np.nan,) * inp.chunk_shape[0]]
        else:
            nsplits_list = [inp.nsplits[0]]
        if isinstance(inp, DATAFRAME_TYPE):
            nsplits_list.append(tuple(len(dt) for dt, _ in col_to_args.values()))
        params.update(dict(chunks=chunks, nsplits=tuple(nsplits_list)))
        return new_op.new_tileables(op.inputs, **params)
    @classmethod
    def execute(cls, ctx, op: 'DataFrameDrop'):
        """Run pandas ``drop`` on one chunk; per-chunk errors are always ignored."""
        inp = op.inputs[0]
        if isinstance(op.index, CHUNK_TYPE):
            index_val = ctx[op.index.key]
        else:
            index_val = op.index
        if isinstance(inp, INDEX_CHUNK_TYPE):
            ctx[op.outputs[0].key] = ctx[inp.key].drop(index_val, errors='ignore')
        else:
            ctx[op.outputs[0].key] = ctx[inp.key].drop(
                index=index_val, columns=op.columns, level=op.level, errors='ignore')
def _drop(df_or_series, labels=None, axis=0, index=None, columns=None, level=None,
          inplace=False, errors='raise'):
    """Shared implementation behind DataFrame/Series/Index ``drop``."""
    axis = validate_axis(axis, df_or_series)
    # ``labels`` is shorthand for index= (axis 0) or columns= (axis 1).
    if labels is not None:
        if axis == 0:
            index = labels
        else:
            columns = labels
    if errors == 'raise' and index is not None:
        warnings.warn('Errors will not raise for non-existing indices')
    if isinstance(columns, Entity):
        raise NotImplementedError('Columns cannot be Mars objects')
    op = DataFrameDrop(index=index, columns=columns, level=level, errors=errors)
    result = op(df_or_series)
    if not inplace:
        return result
    # In-place: swap the underlying data of the caller's object.
    df_or_series.data = result.data
def df_drop(df, labels=None, axis=0, index=None, columns=None, level=None,
            inplace=False, errors='raise'):
    """
    Drop specified labels from rows or columns.

    Rows or columns can be removed either by naming ``labels`` together with
    the ``axis``, or directly through ``index``/``columns``.  With a
    MultiIndex, ``level`` selects the level the labels are removed from.

    Parameters
    ----------
    labels : single label or list-like
        Index or column labels to drop.
    axis : {0 or 'index', 1 or 'columns'}, default 0
        Whether ``labels`` refers to the index (0) or the columns (1).
    index : single label or list-like
        Equivalent to ``labels, axis=0``.
    columns : single label or list-like
        Equivalent to ``labels, axis=1``.
    level : int or level name, optional
        For MultiIndex, level from which the labels will be removed.
    inplace : bool, default False
        If True, do operation inplace and return None.
    errors : {'ignore', 'raise'}, default 'raise'
        If 'ignore', suppress error and only existing labels are dropped.
        Note that errors for missing row indices will not raise.

    Returns
    -------
    DataFrame
        DataFrame without the removed index or column labels.

    Raises
    ------
    KeyError
        If any of the labels is not found in the selected axis.

    See Also
    --------
    DataFrame.loc : Label-location based indexer for selection by label.
    DataFrame.dropna : Return DataFrame with missing-value labels omitted.
    DataFrame.drop_duplicates : Return DataFrame with duplicate rows removed.
    Series.drop : Return Series with specified index labels removed.

    Examples
    --------
    >>> import numpy as np
    >>> import mars.dataframe as md
    >>> df = md.DataFrame(np.arange(12).reshape(3, 4),
    ...                   columns=['A', 'B', 'C', 'D'])
    >>> df.drop(columns=['B', 'C']).execute()
       A   D
    0  0   3
    1  4   7
    2  8  11
    >>> df.drop([0, 1]).execute()
       A  B   C   D
    2  8  9  10  11
    """
    drop_kwargs = dict(labels=labels, axis=axis, index=index, columns=columns,
                       level=level, inplace=inplace, errors=errors)
    return _drop(df, **drop_kwargs)
def df_pop(df, item):
    """
    Return item and drop from frame. Raise KeyError if not found.

    Parameters
    ----------
    item : str
        Label of column to be popped.

    Returns
    -------
    Series
        The removed column.

    Examples
    --------
    >>> import numpy as np
    >>> import mars.dataframe as md
    >>> df = md.DataFrame([('falcon', 'bird', 389.0),
    ...                    ('parrot', 'bird', 24.0)],
    ...                   columns=('name', 'class', 'max_speed'))
    >>> df.pop('class').execute()
    0    bird
    1    bird
    Name: class, dtype: object
    """
    popped = df.data[item]
    # Remove the column from the frame in place before handing it back.
    df_drop(df, item, axis=1, inplace=True)
    return popped
def series_drop(series, labels=None, axis=0, index=None, columns=None, level=None,
                inplace=False, errors='raise'):
    """
    Return Series with specified index labels removed.

    Removes elements of a Series by index label; with a MultiIndex,
    ``level`` selects the level the labels are removed from.

    Parameters
    ----------
    labels : single label or list-like
        Index labels to drop.
    axis : 0, default 0
        Redundant for application on Series.
    index : single label or list-like
        Redundant for application on Series, but 'index' can be used instead
        of 'labels'.
    columns : single label or list-like
        No change is made to the Series; use 'index' or 'labels' instead.
    level : int or level name, optional
        For MultiIndex, level for which the labels will be removed.
    inplace : bool, default False
        If True, do operation inplace and return None.
    errors : {'ignore', 'raise'}, default 'raise'
        Kept only for compatibility; errors will not raise even when
        ``errors=='raise'``.

    Returns
    -------
    Series
        Series with specified index labels removed.

    Raises
    ------
    KeyError
        If none of the labels are found in the index.

    See Also
    --------
    Series.reindex : Return only specified index labels of Series.
    Series.dropna : Return series without null values.
    Series.drop_duplicates : Return Series with duplicate values removed.
    DataFrame.drop : Drop specified labels from rows or columns.

    Examples
    --------
    >>> import numpy as np
    >>> import mars.dataframe as md
    >>> s = md.Series(data=np.arange(3), index=['A', 'B', 'C'])
    >>> s.drop(labels=['B', 'C']).execute()
    A    0
    dtype: int64
    """
    drop_kwargs = dict(labels=labels, axis=axis, index=index, columns=columns,
                       level=level, inplace=inplace, errors=errors)
    return _drop(series, **drop_kwargs)
def index_drop(index, labels, errors='raise'):
    """
    Make new Index with passed list of labels deleted.

    Parameters
    ----------
    labels : array-like
        Labels to remove from the index.
    errors : {'ignore', 'raise'}, default 'raise'
        Kept only for compatibility; errors will not raise even when
        ``errors=='raise'``.

    Returns
    -------
    dropped : Index

    Raises
    ------
    KeyError
        If not all of the labels are found in the selected axis.
    """
    drop_kwargs = dict(labels=labels, errors=errors)
    return _drop(index, **drop_kwargs)
| 32.93595
| 111
| 0.56753
|
import warnings
from collections import OrderedDict
import numpy as np
from ... import opcodes
from ...core import Entity, Chunk, CHUNK_TYPE, OutputType, recursive_tile
from ...serialization.serializables import AnyField, StringField
from ..core import IndexValue, DATAFRAME_TYPE, SERIES_TYPE, INDEX_CHUNK_TYPE
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import parse_index, validate_axis
class DataFrameDrop(DataFrameOperandMixin, DataFrameOperand):
    """Operand implementing ``drop`` for Mars DataFrame, Series and Index."""
    _op_type_ = opcodes.DATAFRAME_DROP
    # row labels to drop; may be a Mars Entity resolved at execution time
    _index = AnyField('index')
    # column labels to drop
    _columns = AnyField('columns')
    # MultiIndex level the labels are removed from
    _level = AnyField('level')
    # 'raise' or 'ignore', as in pandas
    _errors = StringField('errors')
    def __init__(self, index=None, columns=None, level=None, errors=None, **kw):
        super().__init__(_index=index, _columns=columns, _level=level, _errors=errors,
                         **kw)
    @property
    def index(self):
        return self._index
    @property
    def columns(self):
        return self._columns
    @property
    def level(self):
        return self._level
    @property
    def errors(self):
        return self._errors
    def _filter_dtypes(self, dtypes, ignore_errors=False):
        """Return ``dtypes`` with the to-be-dropped columns removed."""
        if self._columns:
            return dtypes.drop(index=self._columns, level=self._level,
                               errors='ignore' if ignore_errors else self._errors)
        else:
            return dtypes
    def _set_inputs(self, inputs):
        # Re-link self._index when it is a lazily-computed input entity.
        super()._set_inputs(inputs)
        inputs_iter = iter(self._inputs[1:])
        if len(self._inputs) > 1:
            self._index = next(inputs_iter)
    def __call__(self, df_or_series):
        """Build the output tileable with shape/dtypes adjusted for the drop."""
        params = df_or_series.params.copy()
        shape_list = list(df_or_series.shape)
        if self._index is not None:
            # Row count becomes unknown once labels are dropped.
            if isinstance(df_or_series.index_value.value, IndexValue.RangeIndex):
                params['index_value'] = parse_index(None, (df_or_series.key, df_or_series.index_value.key))
            shape_list[0] = np.nan
        if isinstance(df_or_series, DATAFRAME_TYPE):
            new_dtypes = self._filter_dtypes(df_or_series.dtypes)
            params['columns_value'] = parse_index(new_dtypes.index, store_data=True)
            params['dtypes'] = new_dtypes
            shape_list[1] = len(new_dtypes)
            self.output_types = [OutputType.dataframe]
        elif isinstance(df_or_series, SERIES_TYPE):
            self.output_types = [OutputType.series]
        else:
            self.output_types = [OutputType.index]
        params['shape'] = tuple(shape_list)
        inputs = [df_or_series]
        if isinstance(self._index, Entity):
            inputs.append(self._index)
        return self.new_tileable(inputs, **params)
    @classmethod
    def tile(cls, op: 'DataFrameDrop'):
        """Split the drop into one chunk-level op per input chunk."""
        inp = op.inputs[0]
        out = op.outputs[0]
        if len(op.inputs) > 1:
            # Lazily-computed row labels: collapse them into a single chunk
            # so every data chunk can receive the full label set.
            rechunked = yield from recursive_tile(
                op.index.rechunk({0: (op.index.shape[0],)}))
            index_chunk = rechunked.chunks[0]
        else:
            index_chunk = op.index
        col_to_args = OrderedDict()
        chunks = []
        for c in inp.chunks:
            params = c.params.copy()
            if isinstance(inp, DATAFRAME_TYPE):
                # Compute (and cache per column-split) the surviving dtypes.
                new_dtypes, new_col_id = col_to_args.get(c.index[1], (None, None))
                if new_dtypes is None:
                    new_col_id = len(col_to_args)
                    new_dtypes = op._filter_dtypes(c.dtypes, ignore_errors=True)
                    if len(new_dtypes) == 0:
                        # All columns of this split dropped: emit no chunk.
                        continue
                    col_to_args[c.index[1]] = (new_dtypes, new_col_id)
                params.update(dict(dtypes=new_dtypes, index=(c.index[0], new_col_id),
                                   index_value=c.index_value,
                                   columns_value=parse_index(new_dtypes.index, store_data=True)))
                if op.index is not None:
                    params.update(dict(shape=(np.nan, len(new_dtypes)),
                                       index_value=parse_index(None, (c.key, c.index_value.key))))
                else:
                    params['shape'] = (c.shape[0], len(new_dtypes))
            elif op.index is not None:
                params.update(dict(shape=(np.nan,), index_value=parse_index(None, (c.key, c.index_value.key))))
            chunk_inputs = [c]
            if isinstance(index_chunk, Chunk):
                chunk_inputs.append(index_chunk)
            new_op = op.copy().reset_key()
            new_op._index = index_chunk
            chunks.append(new_op.new_chunk(chunk_inputs, **params))
        new_op = op.copy().reset_key()
        params = out.params.copy()
        if op.index is not None:
            # Row drops make every chunk's length unknown.
            nsplits_list = [(np.nan,) * inp.chunk_shape[0]]
        else:
            nsplits_list = [inp.nsplits[0]]
        if isinstance(inp, DATAFRAME_TYPE):
            nsplits_list.append(tuple(len(dt) for dt, _ in col_to_args.values()))
        params.update(dict(chunks=chunks, nsplits=tuple(nsplits_list)))
        return new_op.new_tileables(op.inputs, **params)
    @classmethod
    def execute(cls, ctx, op: 'DataFrameDrop'):
        """Perform the drop on one chunk's concrete data stored in ``ctx``."""
        inp = op.inputs[0]
        if isinstance(op.index, CHUNK_TYPE):
            # Labels were computed at runtime; read them from the context.
            index_val = ctx[op.index.key]
        else:
            index_val = op.index
        if isinstance(inp, INDEX_CHUNK_TYPE):
            # pandas Index.drop takes labels positionally (no index=/columns=).
            ctx[op.outputs[0].key] = ctx[inp.key].drop(index_val, errors='ignore')
        else:
            ctx[op.outputs[0].key] = ctx[inp.key].drop(
                index=index_val, columns=op.columns, level=op.level, errors='ignore')
def _drop(df_or_series, labels=None, axis=0, index=None, columns=None, level=None,
          inplace=False, errors='raise'):
    """Shared backend for the DataFrame/Series/Index ``drop`` entry points."""
    axis = validate_axis(axis, df_or_series)
    if labels is not None:
        # ``labels`` fills whichever of index/columns the axis selects.
        if axis == 0:
            index = labels
        else:
            columns = labels
    if index is not None and errors == 'raise':
        warnings.warn('Errors will not raise for non-existing indices')
    if isinstance(columns, Entity):
        raise NotImplementedError('Columns cannot be Mars objects')
    op = DataFrameDrop(index=index, columns=columns, level=level, errors=errors)
    dropped = op(df_or_series)
    if inplace:
        df_or_series.data = dropped.data
        return None
    return dropped
def df_drop(df, labels=None, axis=0, index=None, columns=None, level=None,
            inplace=False, errors='raise'):
    """Drop labels from rows or columns of *df*; pandas-compatible signature.

    With ``inplace=True`` the frame's data is replaced and ``None`` returned.
    Note: errors for missing row indices will not raise.
    """
    return _drop(df, labels=labels, axis=axis, index=index, columns=columns,
                 level=level, inplace=inplace, errors=errors)
def df_pop(df, item):
    """Return column *item* as a Series and remove it from *df* in place."""
    series = df.data[item]
    # Drop the column from the frame before handing the Series back.
    df_drop(df, item, axis=1, inplace=True)
    return series
def series_drop(series, labels=None, axis=0, index=None, columns=None, level=None,
                inplace=False, errors='raise'):
    """Return Series with the given index labels removed.

    ``errors`` is kept only for compatibility; errors never raise.
    """
    return _drop(series, labels=labels, axis=axis, index=index, columns=columns,
                 level=level, inplace=inplace, errors=errors)
def index_drop(index, labels, errors='raise'):
    """Make a new Index with the passed labels deleted (errors never raise)."""
    return _drop(index, labels=labels, errors=errors)
| true
| true
|
f70892be43edd841758689d19686bf38cf8dd5a0
| 626
|
py
|
Python
|
app/data.py
|
tjdaley/publicdataws
|
1aa4a98cf47fae10cc0f59a8d01168df806b4919
|
[
"MIT"
] | null | null | null |
app/data.py
|
tjdaley/publicdataws
|
1aa4a98cf47fae10cc0f59a8d01168df806b4919
|
[
"MIT"
] | null | null | null |
app/data.py
|
tjdaley/publicdataws
|
1aa4a98cf47fae10cc0f59a8d01168df806b4919
|
[
"MIT"
] | null | null | null |
def Articles():
    """Return a static list of sample article records."""
    rows = [
        (1, 'Article 1', 'Body of first article', 'Tom Daley'),
        (2, 'Article 2', 'Body of second article', 'Ava Daley'),
        (3, 'Article 3', 'Body of third article', 'Marissa Daley'),
    ]
    # All demo articles share the same creation date.
    return [
        {
            'id': art_id,
            'title': title,
            'body': body,
            'author': author,
            'create_date': '07-28-2019',
        }
        for art_id, title, body, author in rows
    ]
| 26.083333
| 45
| 0.375399
|
def Articles():
    """Return the hard-coded list of demo articles."""
    fields = ('id', 'title', 'body', 'author', 'create_date')
    data = [
        (1, 'Article 1', 'Body of first article', 'Tom Daley', '07-28-2019'),
        (2, 'Article 2', 'Body of second article', 'Ava Daley', '07-28-2019'),
        (3, 'Article 3', 'Body of third article', 'Marissa Daley', '07-28-2019'),
    ]
    return [dict(zip(fields, row)) for row in data]
| true
| true
|
f70895bc8cf25fe899a820773083413febdc7000
| 1,033
|
py
|
Python
|
SidToS3/aws/s3.py
|
brian-nelson/SupersidUtilities
|
0bdd24dc424d7b67d6a72de575487a31f0cb4565
|
[
"MIT"
] | null | null | null |
SidToS3/aws/s3.py
|
brian-nelson/SupersidUtilities
|
0bdd24dc424d7b67d6a72de575487a31f0cb4565
|
[
"MIT"
] | null | null | null |
SidToS3/aws/s3.py
|
brian-nelson/SupersidUtilities
|
0bdd24dc424d7b67d6a72de575487a31f0cb4565
|
[
"MIT"
] | null | null | null |
import boto3
import botocore
class S3:
    """Thin wrapper around an S3 bucket for uploading/downloading files."""

    def __init__(self, key, secret, bucket):
        # Credentials and target bucket, used for every request.
        self.Key = key
        self.Secret = secret
        self.Bucket = bucket

    def _resource(self):
        """Return a boto3 S3 resource authenticated with the stored credentials."""
        return boto3.resource(
            's3',
            aws_access_key_id=self.Key,
            aws_secret_access_key=self.Secret)

    def upload_file(self, local_file, remote_file):
        """Upload *local_file* to key *remote_file* in the configured bucket."""
        try:
            self._resource().Bucket(self.Bucket).upload_file(local_file, remote_file)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise

    def download_file(self, remote_file, local_file):
        """Download key *remote_file* from the configured bucket to *local_file*.

        Fix: previously this built ``boto3.resource('s3')`` without the stored
        credentials (unlike upload_file), silently relying on ambient AWS
        configuration; it now uses the same credentialed resource.
        """
        try:
            self._resource().Bucket(self.Bucket).download_file(remote_file, local_file)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise
| 25.195122
| 73
| 0.560503
|
import boto3
import botocore
class S3:
    """Thin wrapper around an S3 bucket for uploading/downloading files."""

    def __init__(self, key, secret, bucket):
        # Credentials and target bucket, used for every request.
        self.Key = key
        self.Secret = secret
        self.Bucket = bucket

    def _resource(self):
        """Return a boto3 S3 resource authenticated with the stored credentials."""
        return boto3.resource(
            's3',
            aws_access_key_id=self.Key,
            aws_secret_access_key=self.Secret)

    def upload_file(self, local_file, remote_file):
        """Upload *local_file* to key *remote_file* in the configured bucket."""
        try:
            self._resource().Bucket(self.Bucket).upload_file(local_file, remote_file)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise

    def download_file(self, remote_file, local_file):
        """Download key *remote_file* from the configured bucket to *local_file*.

        Fix: previously this built ``boto3.resource('s3')`` without the stored
        credentials (unlike upload_file), silently relying on ambient AWS
        configuration; it now uses the same credentialed resource.
        """
        try:
            self._resource().Bucket(self.Bucket).download_file(remote_file, local_file)
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                print("The object does not exist.")
            else:
                raise
| true
| true
|
f70895bfda8c4b1ae18bdbfc9932d3a9e1ad00b9
| 11,278
|
py
|
Python
|
var/spack/repos/builtin/packages/octave/package.py
|
alkino/spack
|
b87ff60c7e23d7b50fac620ad60c8e2537312ebd
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/octave/package.py
|
alkino/spack
|
b87ff60c7e23d7b50fac620ad60c8e2537312ebd
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/octave/package.py
|
alkino/spack
|
b87ff60c7e23d7b50fac620ad60c8e2537312ebd
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os.path
import shutil
import sys
import tempfile
import spack.util.environment
class Octave(AutotoolsPackage, GNUMirrorPackage):
    """GNU Octave is a high-level language, primarily intended for numerical
    computations.
    It provides a convenient command line interface for solving linear and
    nonlinear problems numerically, and for performing other numerical
    experiments using a language that is mostly compatible with Matlab.
    It may also be used as a batch-oriented language.
    """
    homepage = "https://www.gnu.org/software/octave/"
    gnu_mirror_path = "octave/octave-4.0.0.tar.gz"
    maintainers = ['mtmiller']
    # Octave extension packages can build against this package.
    extendable = True
    version('5.1.0', sha256='e36b1124cac27c7caa51cc57de408c31676d5f0096349b4d50b57bfe1bcd7495')
    version('4.4.1', sha256='09fbd0f212f4ef21e53f1d9c41cf30ce3d7f9450fb44911601e21ed64c67ae97')
    version('4.4.0', sha256='72f846379fcec7e813d46adcbacd069d72c4f4d8f6003bcd92c3513aafcd6e96')
    version('4.2.2', sha256='77b84395d8e7728a1ab223058fe5e92dc38c03bc13f7358e6533aab36f76726e')
    version('4.2.1', sha256='80c28f6398576b50faca0e602defb9598d6f7308b0903724442c2a35a605333b')
    version('4.2.0', sha256='443ba73782f3531c94bcf016f2f0362a58e186ddb8269af7dcce973562795567')
    version('4.0.2', sha256='39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1')
    version('4.0.0', sha256='4c7ee0957f5dd877e3feb9dfe07ad5f39b311f9373932f0d2a289dc97cca3280')
    # patches
    # see https://savannah.gnu.org/bugs/?50234
    patch('patch_4.2.1_inline.diff', when='@4.2.1')
    # Variants — each enables an optional feature / external dependency below.
    variant('readline', default=True)
    variant('arpack', default=False)
    variant('curl', default=False)
    variant('fftw', default=False)
    variant('fltk', default=False)
    variant('fontconfig', default=False)
    variant('freetype', default=False)
    variant('glpk', default=False)
    variant('gl2ps', default=False)
    variant('gnuplot', default=False)
    variant('magick', default=False)
    variant('hdf5', default=False)
    variant('jdk', default=False)
    variant('llvm', default=False)
    variant('opengl', default=False)
    variant('qhull', default=False)
    variant('qrupdate', default=False)
    variant('qscintilla', default=False)
    variant('qt', default=False)
    variant('suitesparse', default=False)
    variant('zlib', default=False)
    # Required dependencies
    depends_on('blas')
    depends_on('lapack')
    # Octave does not configure with sed from darwin:
    depends_on('sed', when=sys.platform == 'darwin', type='build')
    depends_on('pcre')
    depends_on('pkgconfig', type='build')
    # Strongly recommended dependencies
    depends_on('readline', when='+readline')
    # Optional dependencies
    depends_on('arpack-ng', when='+arpack')
    depends_on('curl', when='+curl')
    depends_on('fftw', when='+fftw')
    depends_on('fltk', when='+fltk')
    depends_on('fontconfig', when='+fontconfig')
    depends_on('freetype', when='+freetype')
    depends_on('glpk', when='+glpk')
    depends_on('gl2ps', when='+gl2ps')
    depends_on('gnuplot', when='+gnuplot')
    depends_on('imagemagick', when='+magick')
    depends_on('hdf5', when='+hdf5')
    depends_on('java', when='+jdk') # TODO: requires Java 6 ?
    depends_on('llvm', when='+llvm')
    # depends_on('opengl', when='+opengl') # TODO: add package
    depends_on('qhull', when='+qhull')
    depends_on('qrupdate', when='+qrupdate')
    # depends_on('qscintilla', when='+qscintilla) # TODO: add package
    depends_on('qt+opengl', when='+qt')
    depends_on('suite-sparse', when='+suitesparse')
    depends_on('zlib', when='+zlib')
    def patch(self):
        """Point mkoctfile at the real compilers instead of Spack's wrappers."""
        # Filter mkoctfile.in.cc to use underlying compilers and not
        # Spack compiler wrappers. We are patching the template file
        # and not mkoctfile.cc since the latter is generated as part
        # of the build.
        mkoctfile_in = os.path.join(
            self.stage.source_path, 'src', 'mkoctfile.in.cc'
        )
        # Configure placeholders expect shell-quoted compiler paths.
        quote = lambda s: '"' + s + '"'
        entries_to_patch = {
            r'%OCTAVE_CONF_MKOCTFILE_CC%': quote(self.compiler.cc),
            r'%OCTAVE_CONF_MKOCTFILE_CXX%': quote(self.compiler.cxx),
            r'%OCTAVE_CONF_MKOCTFILE_F77%': quote(self.compiler.f77),
            r'%OCTAVE_CONF_MKOCTFILE_DL_LD%': quote(self.compiler.cxx),
            r'%OCTAVE_CONF_MKOCTFILE_LD_CXX%': quote(self.compiler.cxx)
        }
        for pattern, subst in entries_to_patch.items():
            filter_file(pattern, subst, mkoctfile_in)
    @run_after('install')
    @on_package_attributes(run_tests=True)
    def check_mkoctfile_works_outside_of_build_env(self):
        """Post-install test: compile a sample .oct outside the build env."""
        # Check that mkoctfile is properly configured and can compile
        # Octave extensions outside of the build env
        mkoctfile = Executable(os.path.join(self.prefix, 'bin', 'mkoctfile'))
        helloworld_cc = os.path.join(
            os.path.dirname(__file__), 'helloworld.cc'
        )
        tmp_dir = tempfile.mkdtemp()
        shutil.copy(helloworld_cc, tmp_dir)
        # We need to unset these variables since we are still within
        # Spack's build environment when running tests
        vars_to_unset = ['CC', 'CXX', 'F77', 'FC']
        with spack.util.environment.preserve_environment(*vars_to_unset):
            # Delete temporarily the environment variables that point
            # to Spack compiler wrappers
            for v in vars_to_unset:
                del os.environ[v]
            # Check that mkoctfile outputs the expected value for CC
            cc = mkoctfile('-p', 'CC', output=str)
            msg = "mkoctfile didn't output the expected CC compiler"
            assert self.compiler.cc in cc, msg
            # Try to compile an Octave extension
            shutil.copy(helloworld_cc, tmp_dir)
            with working_dir(tmp_dir):
                mkoctfile('helloworld.cc')
    def configure_args(self):
        """Assemble ./configure flags from the enabled variants."""
        # See
        # https://github.com/macports/macports-ports/blob/master/math/octave/
        # https://github.com/Homebrew/homebrew-science/blob/master/octave.rb
        spec = self.spec
        config_args = []
        # Required dependencies
        config_args.extend([
            "--with-blas=%s" % spec['blas'].libs.ld_flags,
            "--with-lapack=%s" % spec['lapack'].libs.ld_flags
        ])
        # Strongly recommended dependencies
        if '+readline' in spec:
            config_args.append('--enable-readline')
        else:
            config_args.append('--disable-readline')
        # Optional dependencies
        if '+arpack' in spec:
            sa = spec['arpack-ng']
            config_args.extend([
                "--with-arpack-includedir=%s" % sa.prefix.include,
                "--with-arpack-libdir=%s" % sa.prefix.lib
            ])
        else:
            config_args.append("--without-arpack")
        if '+curl' in spec:
            config_args.extend([
                "--with-curl-includedir=%s" % spec['curl'].prefix.include,
                "--with-curl-libdir=%s" % spec['curl'].prefix.lib
            ])
        else:
            config_args.append("--without-curl")
        if '+fftw' in spec:
            # Octave configures double- and single-precision FFTW separately.
            config_args.extend([
                "--with-fftw3-includedir=%s" % spec['fftw'].prefix.include,
                "--with-fftw3-libdir=%s" % spec['fftw'].prefix.lib,
                "--with-fftw3f-includedir=%s" % spec['fftw'].prefix.include,
                "--with-fftw3f-libdir=%s" % spec['fftw'].prefix.lib
            ])
        else:
            config_args.extend([
                "--without-fftw3",
                "--without-fftw3f"
            ])
        if '+fltk' in spec:
            config_args.extend([
                "--with-fltk-prefix=%s" % spec['fltk'].prefix,
                "--with-fltk-exec-prefix=%s" % spec['fltk'].prefix
            ])
        else:
            config_args.append("--without-fltk")
        if '+glpk' in spec:
            config_args.extend([
                "--with-glpk-includedir=%s" % spec['glpk'].prefix.include,
                "--with-glpk-libdir=%s" % spec['glpk'].prefix.lib
            ])
        else:
            config_args.append("--without-glpk")
        if '+magick' in spec:
            config_args.append("--with-magick=%s"
                               % spec['imagemagick'].prefix.lib)
        else:
            config_args.append("--without-magick")
        if '+hdf5' in spec:
            config_args.extend([
                "--with-hdf5-includedir=%s" % spec['hdf5'].prefix.include,
                "--with-hdf5-libdir=%s" % spec['hdf5'].prefix.lib
            ])
        else:
            config_args.append("--without-hdf5")
        if '+jdk' in spec:
            config_args.extend([
                "--with-java-homedir=%s" % spec['java'].home,
                "--with-java-includedir=%s" % spec['java'].home.include,
                "--with-java-libdir=%s" % spec['java'].libs.directories[0]
            ])
        else:
            config_args.append("--disable-java")
        if '~opengl' in spec:
            config_args.extend([
                "--without-opengl",
                "--without-framework-opengl"
            ])
        # TODO: opengl dependency and package is missing?
        if '+qhull' in spec:
            config_args.extend([
                "--with-qhull-includedir=%s" % spec['qhull'].prefix.include,
                "--with-qhull-libdir=%s" % spec['qhull'].prefix.lib
            ])
        else:
            config_args.append("--without-qhull")
        if '+qrupdate' in spec:
            config_args.extend([
                "--with-qrupdate-includedir=%s"
                % spec['qrupdate'].prefix.include,
                "--with-qrupdate-libdir=%s" % spec['qrupdate'].prefix.lib
            ])
        else:
            config_args.append("--without-qrupdate")
        if '+zlib' in spec:
            config_args.extend([
                "--with-z-includedir=%s" % spec['zlib'].prefix.include,
                "--with-z-libdir=%s" % spec['zlib'].prefix.lib
            ])
        else:
            config_args.append("--without-z")
        return config_args
    # ========================================================================
    # Set up environment to make install easy for Octave extensions.
    # ========================================================================
    def setup_dependent_package(self, module, dependent_spec):
        """Called before Octave modules' install() methods.
        In most cases, extensions will only need to have one line:
        octave('--eval', 'pkg install %s' % self.stage.archive_file)
        """
        # Octave extension builds can have a global Octave executable function
        module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
| 39.57193
| 95
| 0.582373
|
import os.path
import shutil
import sys
import tempfile
import spack.util.environment
class Octave(AutotoolsPackage, GNUMirrorPackage):
homepage = "https://www.gnu.org/software/octave/"
gnu_mirror_path = "octave/octave-4.0.0.tar.gz"
maintainers = ['mtmiller']
extendable = True
version('5.1.0', sha256='e36b1124cac27c7caa51cc57de408c31676d5f0096349b4d50b57bfe1bcd7495')
version('4.4.1', sha256='09fbd0f212f4ef21e53f1d9c41cf30ce3d7f9450fb44911601e21ed64c67ae97')
version('4.4.0', sha256='72f846379fcec7e813d46adcbacd069d72c4f4d8f6003bcd92c3513aafcd6e96')
version('4.2.2', sha256='77b84395d8e7728a1ab223058fe5e92dc38c03bc13f7358e6533aab36f76726e')
version('4.2.1', sha256='80c28f6398576b50faca0e602defb9598d6f7308b0903724442c2a35a605333b')
version('4.2.0', sha256='443ba73782f3531c94bcf016f2f0362a58e186ddb8269af7dcce973562795567')
version('4.0.2', sha256='39cd8fd36c218fc00adace28d74a6c7c9c6faab7113a5ba3c4372324c755bdc1')
version('4.0.0', sha256='4c7ee0957f5dd877e3feb9dfe07ad5f39b311f9373932f0d2a289dc97cca3280')
patch('patch_4.2.1_inline.diff', when='@4.2.1')
variant('readline', default=True)
variant('arpack', default=False)
variant('curl', default=False)
variant('fftw', default=False)
variant('fltk', default=False)
variant('fontconfig', default=False)
variant('freetype', default=False)
variant('glpk', default=False)
variant('gl2ps', default=False)
variant('gnuplot', default=False)
variant('magick', default=False)
variant('hdf5', default=False)
variant('jdk', default=False)
variant('llvm', default=False)
variant('opengl', default=False)
variant('qhull', default=False)
variant('qrupdate', default=False)
variant('qscintilla', default=False)
variant('qt', default=False)
variant('suitesparse', default=False)
variant('zlib', default=False)
depends_on('blas')
depends_on('lapack')
depends_on('sed', when=sys.platform == 'darwin', type='build')
depends_on('pcre')
depends_on('pkgconfig', type='build')
depends_on('readline', when='+readline')
depends_on('arpack-ng', when='+arpack')
depends_on('curl', when='+curl')
depends_on('fftw', when='+fftw')
depends_on('fltk', when='+fltk')
depends_on('fontconfig', when='+fontconfig')
depends_on('freetype', when='+freetype')
depends_on('glpk', when='+glpk')
depends_on('gl2ps', when='+gl2ps')
depends_on('gnuplot', when='+gnuplot')
depends_on('imagemagick', when='+magick')
depends_on('hdf5', when='+hdf5')
depends_on('java', when='+jdk')
depends_on('llvm', when='+llvm')
    depends_on('qhull', when='+qhull')
depends_on('qrupdate', when='+qrupdate')
depends_on('qt+opengl', when='+qt')
depends_on('suite-sparse', when='+suitesparse')
depends_on('zlib', when='+zlib')
def patch(self):
# Filter mkoctfile.in.cc to use underlying compilers and not
# Spack compiler wrappers. We are patching the template file
# and not mkoctfile.cc since the latter is generated as part
# of the build.
mkoctfile_in = os.path.join(
self.stage.source_path, 'src', 'mkoctfile.in.cc'
)
quote = lambda s: '"' + s + '"'
entries_to_patch = {
r'%OCTAVE_CONF_MKOCTFILE_CC%': quote(self.compiler.cc),
r'%OCTAVE_CONF_MKOCTFILE_CXX%': quote(self.compiler.cxx),
r'%OCTAVE_CONF_MKOCTFILE_F77%': quote(self.compiler.f77),
r'%OCTAVE_CONF_MKOCTFILE_DL_LD%': quote(self.compiler.cxx),
r'%OCTAVE_CONF_MKOCTFILE_LD_CXX%': quote(self.compiler.cxx)
}
for pattern, subst in entries_to_patch.items():
filter_file(pattern, subst, mkoctfile_in)
@run_after('install')
@on_package_attributes(run_tests=True)
def check_mkoctfile_works_outside_of_build_env(self):
# Check that mkoctfile is properly configured and can compile
# Octave extensions outside of the build env
mkoctfile = Executable(os.path.join(self.prefix, 'bin', 'mkoctfile'))
helloworld_cc = os.path.join(
os.path.dirname(__file__), 'helloworld.cc'
)
tmp_dir = tempfile.mkdtemp()
shutil.copy(helloworld_cc, tmp_dir)
# We need to unset these variables since we are still within
# Spack's build environment when running tests
vars_to_unset = ['CC', 'CXX', 'F77', 'FC']
with spack.util.environment.preserve_environment(*vars_to_unset):
for v in vars_to_unset:
del os.environ[v]
cc = mkoctfile('-p', 'CC', output=str)
msg = "mkoctfile didn't output the expected CC compiler"
assert self.compiler.cc in cc, msg
# Try to compile an Octave extension
shutil.copy(helloworld_cc, tmp_dir)
with working_dir(tmp_dir):
mkoctfile('helloworld.cc')
def configure_args(self):
# See
# https://github.com/macports/macports-ports/blob/master/math/octave/
# https://github.com/Homebrew/homebrew-science/blob/master/octave.rb
spec = self.spec
config_args = []
# Required dependencies
config_args.extend([
"--with-blas=%s" % spec['blas'].libs.ld_flags,
"--with-lapack=%s" % spec['lapack'].libs.ld_flags
])
# Strongly recommended dependencies
if '+readline' in spec:
config_args.append('--enable-readline')
else:
config_args.append('--disable-readline')
# Optional dependencies
if '+arpack' in spec:
sa = spec['arpack-ng']
config_args.extend([
"--with-arpack-includedir=%s" % sa.prefix.include,
"--with-arpack-libdir=%s" % sa.prefix.lib
])
else:
config_args.append("--without-arpack")
if '+curl' in spec:
config_args.extend([
"--with-curl-includedir=%s" % spec['curl'].prefix.include,
"--with-curl-libdir=%s" % spec['curl'].prefix.lib
])
else:
config_args.append("--without-curl")
if '+fftw' in spec:
config_args.extend([
"--with-fftw3-includedir=%s" % spec['fftw'].prefix.include,
"--with-fftw3-libdir=%s" % spec['fftw'].prefix.lib,
"--with-fftw3f-includedir=%s" % spec['fftw'].prefix.include,
"--with-fftw3f-libdir=%s" % spec['fftw'].prefix.lib
])
else:
config_args.extend([
"--without-fftw3",
"--without-fftw3f"
])
if '+fltk' in spec:
config_args.extend([
"--with-fltk-prefix=%s" % spec['fltk'].prefix,
"--with-fltk-exec-prefix=%s" % spec['fltk'].prefix
])
else:
config_args.append("--without-fltk")
if '+glpk' in spec:
config_args.extend([
"--with-glpk-includedir=%s" % spec['glpk'].prefix.include,
"--with-glpk-libdir=%s" % spec['glpk'].prefix.lib
])
else:
config_args.append("--without-glpk")
if '+magick' in spec:
config_args.append("--with-magick=%s"
% spec['imagemagick'].prefix.lib)
else:
config_args.append("--without-magick")
if '+hdf5' in spec:
config_args.extend([
"--with-hdf5-includedir=%s" % spec['hdf5'].prefix.include,
"--with-hdf5-libdir=%s" % spec['hdf5'].prefix.lib
])
else:
config_args.append("--without-hdf5")
if '+jdk' in spec:
config_args.extend([
"--with-java-homedir=%s" % spec['java'].home,
"--with-java-includedir=%s" % spec['java'].home.include,
"--with-java-libdir=%s" % spec['java'].libs.directories[0]
])
else:
config_args.append("--disable-java")
if '~opengl' in spec:
config_args.extend([
"--without-opengl",
"--without-framework-opengl"
])
# TODO: opengl dependency and package is missing?
if '+qhull' in spec:
config_args.extend([
"--with-qhull-includedir=%s" % spec['qhull'].prefix.include,
"--with-qhull-libdir=%s" % spec['qhull'].prefix.lib
])
else:
config_args.append("--without-qhull")
if '+qrupdate' in spec:
config_args.extend([
"--with-qrupdate-includedir=%s"
% spec['qrupdate'].prefix.include,
"--with-qrupdate-libdir=%s" % spec['qrupdate'].prefix.lib
])
else:
config_args.append("--without-qrupdate")
if '+zlib' in spec:
config_args.extend([
"--with-z-includedir=%s" % spec['zlib'].prefix.include,
"--with-z-libdir=%s" % spec['zlib'].prefix.lib
])
else:
config_args.append("--without-z")
return config_args
# ========================================================================
# Set up environment to make install easy for Octave extensions.
# ========================================================================
def setup_dependent_package(self, module, dependent_spec):
# Octave extension builds can have a global Octave executable function
module.octave = Executable(join_path(self.spec.prefix.bin, 'octave'))
| true
| true
|
f708960fdb8e9cb12cb0f17da5644933690a1490
| 395
|
py
|
Python
|
travellog/wsgi.py
|
jacobian/django-travellog
|
1712cbeebb8ccceb620757ada3927c545793b7b8
|
[
"BSD-3-Clause"
] | 9
|
2018-04-17T18:39:47.000Z
|
2021-08-18T06:27:57.000Z
|
travellog/wsgi.py
|
jacobian/django-travellog
|
1712cbeebb8ccceb620757ada3927c545793b7b8
|
[
"BSD-3-Clause"
] | null | null | null |
travellog/wsgi.py
|
jacobian/django-travellog
|
1712cbeebb8ccceb620757ada3927c545793b7b8
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for travellog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travellog.settings")
application = get_wsgi_application()
| 23.235294
| 78
| 0.787342
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travellog.settings")
application = get_wsgi_application()
| true
| true
|
f70896259323c12a6a8a8989b79dfbdb5e1f2dfa
| 1,080
|
py
|
Python
|
libp2p/network/stream/net_stream.py
|
ChihChengLiang/py-libp2p
|
f0046fa3e0952b492837a698b1988e05c9821f47
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
libp2p/network/stream/net_stream.py
|
ChihChengLiang/py-libp2p
|
f0046fa3e0952b492837a698b1988e05c9821f47
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
libp2p/network/stream/net_stream.py
|
ChihChengLiang/py-libp2p
|
f0046fa3e0952b492837a698b1988e05c9821f47
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
from .net_stream_interface import INetStream
class NetStream(INetStream):
    """Thin INetStream adapter over a muxed stream.

    Delegates all I/O to the wrapped muxed stream and tracks which
    protocol id the stream has been negotiated for.
    """

    def __init__(self, muxed_stream):
        # Keep handles to the stream and its parent muxed connection.
        self.muxed_stream = muxed_stream
        self.mplex_conn = muxed_stream.mplex_conn
        self.protocol_id = None

    def get_protocol(self):
        """Return the protocol id the stream runs on (None until set)."""
        return self.protocol_id

    def set_protocol(self, protocol_id):
        """Record the protocol id the stream runs on."""
        self.protocol_id = protocol_id

    async def read(self):
        """Read from the stream.

        :return: bytes of input until EOF
        """
        data = await self.muxed_stream.read()
        return data

    async def write(self, data):
        """Write ``data`` to the stream.

        :return: number of bytes written
        """
        written = await self.muxed_stream.write(data)
        return written

    async def close(self):
        """Close the underlying muxed stream.

        :return: True once the close completes
        """
        await self.muxed_stream.close()
        return True
| 24
| 59
| 0.587963
|
from .net_stream_interface import INetStream
class NetStream(INetStream):
def __init__(self, muxed_stream):
self.muxed_stream = muxed_stream
self.mplex_conn = muxed_stream.mplex_conn
self.protocol_id = None
def get_protocol(self):
return self.protocol_id
def set_protocol(self, protocol_id):
self.protocol_id = protocol_id
async def read(self):
return await self.muxed_stream.read()
async def write(self, data):
return await self.muxed_stream.write(data)
async def close(self):
await self.muxed_stream.close()
return True
| true
| true
|
f70897147c867957abfe46861db62aa36828a286
| 5,274
|
py
|
Python
|
pandas/tests/indexing/common.py
|
oricou/pandas
|
9405e58d9268041f5416711c051cf5429a19bf49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-05-07T04:58:36.000Z
|
2021-05-07T04:58:59.000Z
|
pandas/tests/indexing/common.py
|
oricou/pandas
|
9405e58d9268041f5416711c051cf5429a19bf49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/tests/indexing/common.py
|
oricou/pandas
|
9405e58d9268041f5416711c051cf5429a19bf49
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2021-06-16T07:19:12.000Z
|
2021-12-16T10:24:44.000Z
|
""" common utilities """
import itertools
import numpy as np
from pandas import (
DataFrame,
Float64Index,
MultiIndex,
Series,
UInt64Index,
date_range,
)
import pandas._testing as tm
def _mklbl(prefix, n):
return [f"{prefix}{i}" for i in range(n)]
def _axify(obj, key, axis):
# create a tuple accessor
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
""" indexing comprehensive base class """
_kinds = {"series", "frame"}
_typs = {
"ints",
"uints",
"labels",
"mixed",
"ts",
"floats",
"empty",
"ts_rev",
"multi",
}
def setup_method(self, method):
self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
self.frame_ints = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
self.series_uints = Series(
np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
)
self.frame_uints = DataFrame(
np.random.randn(4, 4),
index=UInt64Index(range(0, 8, 2)),
columns=UInt64Index(range(0, 12, 3)),
)
self.series_floats = Series(
np.random.rand(4), index=Float64Index(range(0, 8, 2))
)
self.frame_floats = DataFrame(
np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)),
)
m_idces = [
MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
MultiIndex.from_product([[9, 10], [11, 12]]),
]
self.series_multi = Series(np.random.rand(4), index=m_idces[0])
self.frame_multi = DataFrame(
np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
)
self.series_labels = Series(np.random.randn(4), index=list("abcd"))
self.frame_labels = DataFrame(
np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
)
self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])
self.series_ts = Series(
np.random.randn(4), index=date_range("20130101", periods=4)
)
self.frame_ts = DataFrame(
np.random.randn(4, 4), index=date_range("20130101", periods=4)
)
dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
self.frame_empty = DataFrame()
self.series_empty = Series(dtype=object)
# form agglomerates
for kind in self._kinds:
d = {}
for typ in self._typs:
d[typ] = getattr(self, f"{kind}_{typ}")
setattr(self, kind, d)
def generate_indices(self, f, values=False):
"""
generate the indices
if values is True , use the axis values
is False, use the range
"""
axes = f.axes
if values:
axes = (list(range(len(ax))) for ax in axes)
return itertools.product(*axes)
def get_value(self, name, f, i, values=False):
""" return the value for the location i """
# check against values
if values:
return f.values[i]
elif name == "iat":
return f.iloc[i]
else:
assert name == "at"
return f.loc[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
# check against values
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, method, key, typs=None, axes=None, fails=None):
def _eq(axis, obj, key):
""" compare equal for these 2 keys """
axified = _axify(obj, key, axis)
try:
getattr(obj, method).__getitem__(axified)
except (IndexError, TypeError, KeyError) as detail:
# if we are in fails, the ok, otherwise raise it
if fails is not None:
if isinstance(detail, fails):
return
raise
if typs is None:
typs = self._typs
if axes is None:
axes = [0, 1]
else:
assert axes in [0, 1]
axes = [axes]
# check
for kind in self._kinds:
d = getattr(self, kind)
for ax in axes:
for typ in typs:
assert typ in self._typs
obj = d[typ]
if ax < obj.ndim:
_eq(axis=ax, obj=obj, key=key)
| 27.904762
| 88
| 0.520288
|
import itertools
import numpy as np
from pandas import (
DataFrame,
Float64Index,
MultiIndex,
Series,
UInt64Index,
date_range,
)
import pandas._testing as tm
def _mklbl(prefix, n):
return [f"{prefix}{i}" for i in range(n)]
def _axify(obj, key, axis):
axes = [slice(None)] * obj.ndim
axes[axis] = key
return tuple(axes)
class Base:
_kinds = {"series", "frame"}
_typs = {
"ints",
"uints",
"labels",
"mixed",
"ts",
"floats",
"empty",
"ts_rev",
"multi",
}
def setup_method(self, method):
self.series_ints = Series(np.random.rand(4), index=np.arange(0, 8, 2))
self.frame_ints = DataFrame(
np.random.randn(4, 4), index=np.arange(0, 8, 2), columns=np.arange(0, 12, 3)
)
self.series_uints = Series(
np.random.rand(4), index=UInt64Index(np.arange(0, 8, 2))
)
self.frame_uints = DataFrame(
np.random.randn(4, 4),
index=UInt64Index(range(0, 8, 2)),
columns=UInt64Index(range(0, 12, 3)),
)
self.series_floats = Series(
np.random.rand(4), index=Float64Index(range(0, 8, 2))
)
self.frame_floats = DataFrame(
np.random.randn(4, 4),
index=Float64Index(range(0, 8, 2)),
columns=Float64Index(range(0, 12, 3)),
)
m_idces = [
MultiIndex.from_product([[1, 2], [3, 4]]),
MultiIndex.from_product([[5, 6], [7, 8]]),
MultiIndex.from_product([[9, 10], [11, 12]]),
]
self.series_multi = Series(np.random.rand(4), index=m_idces[0])
self.frame_multi = DataFrame(
np.random.randn(4, 4), index=m_idces[0], columns=m_idces[1]
)
self.series_labels = Series(np.random.randn(4), index=list("abcd"))
self.frame_labels = DataFrame(
np.random.randn(4, 4), index=list("abcd"), columns=list("ABCD")
)
self.series_mixed = Series(np.random.randn(4), index=[2, 4, "null", 8])
self.frame_mixed = DataFrame(np.random.randn(4, 4), index=[2, 4, "null", 8])
self.series_ts = Series(
np.random.randn(4), index=date_range("20130101", periods=4)
)
self.frame_ts = DataFrame(
np.random.randn(4, 4), index=date_range("20130101", periods=4)
)
dates_rev = date_range("20130101", periods=4).sort_values(ascending=False)
self.series_ts_rev = Series(np.random.randn(4), index=dates_rev)
self.frame_ts_rev = DataFrame(np.random.randn(4, 4), index=dates_rev)
self.frame_empty = DataFrame()
self.series_empty = Series(dtype=object)
for kind in self._kinds:
d = {}
for typ in self._typs:
d[typ] = getattr(self, f"{kind}_{typ}")
setattr(self, kind, d)
def generate_indices(self, f, values=False):
axes = f.axes
if values:
axes = (list(range(len(ax))) for ax in axes)
return itertools.product(*axes)
def get_value(self, name, f, i, values=False):
if values:
return f.values[i]
elif name == "iat":
return f.iloc[i]
else:
assert name == "at"
return f.loc[i]
def check_values(self, f, func, values=False):
if f is None:
return
axes = f.axes
indicies = itertools.product(*axes)
for i in indicies:
result = getattr(f, func)[i]
if values:
expected = f.values[i]
else:
expected = f
for a in reversed(i):
expected = expected.__getitem__(a)
tm.assert_almost_equal(result, expected)
def check_result(self, method, key, typs=None, axes=None, fails=None):
def _eq(axis, obj, key):
axified = _axify(obj, key, axis)
try:
getattr(obj, method).__getitem__(axified)
except (IndexError, TypeError, KeyError) as detail:
if fails is not None:
if isinstance(detail, fails):
return
raise
if typs is None:
typs = self._typs
if axes is None:
axes = [0, 1]
else:
assert axes in [0, 1]
axes = [axes]
for kind in self._kinds:
d = getattr(self, kind)
for ax in axes:
for typ in typs:
assert typ in self._typs
obj = d[typ]
if ax < obj.ndim:
_eq(axis=ax, obj=obj, key=key)
| true
| true
|
f708974711d14541ae6b980973337a70daf55ace
| 420
|
py
|
Python
|
website/example_problem_graders/simpleai.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
website/example_problem_graders/simpleai.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
website/example_problem_graders/simpleai.py
|
pshen24/cmimc-online
|
7d2435e506381fa19f3512635eb615f7a02e5f03
|
[
"MIT"
] | null | null | null |
from .base import BaseGrader
class SimpleAI(BaseGrader):
    """Grader that reads the submission text as a literal integer score."""

    def grade(self, submission, score):
        """Award int(submission.text) points; non-numeric text scores 0.

        Persists the points on both the submission and the score record,
        marks the submission graded, and refreshes the competitor total.
        """
        try:
            awarded = int(submission.text)
        except ValueError:
            # Unparseable answers are worth nothing rather than an error.
            awarded = 0
        submission.points = awarded
        submission.is_graded = True
        submission.save()
        score.points = awarded
        score.save()
        submission.competitor.update_total_score()
| 24.705882
| 51
| 0.585714
|
from .base import BaseGrader
class SimpleAI(BaseGrader):
def grade(self, submission, score):
try:
points = int(submission.text)
except ValueError:
points = 0
submission.points = points
submission.is_graded = True
submission.save()
score.points = points
score.save()
submission.competitor.update_total_score()
| true
| true
|
f70897b4f50c3d304da65ac9f86af36b1c7be865
| 2,183
|
py
|
Python
|
Examples/SHMExample.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
Examples/SHMExample.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
Examples/SHMExample.py
|
vd1371/GIAMS
|
dd6551f344b8d0377131d4496846eb5d03b6189c
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------- #
# This example was designed to show the project-level optimization
# option in GIAMS. This example was used in the original paper as well
# -------------------------------------------------------------------- #
import time
import ast
from Network import IndianaNetwork
from LifeCycleAnalyzer.Simulators import MainSimulator
from LifeCycleAnalyzer import LCA
from Optimizer import HillClimbing
from Optimizer import BruteForce
from Optimizer import GA
from Optimizer import IUC
from Optimizer import PSO
from utils.PredictiveModels.Linear import Linear
from utils.AwesomeTimeIt import timeit
from utils.GeneralSettings import *
class GeneralSettings:
    """Shared simulation constants for the SHM example."""

    n_elements = 1           # structural elements per asset
    n_states = 8             # discrete condition states per element
    dt = 2                   # analysis time step (years)
    horizon = 20             # planning horizon (years)
    discount_rate = 0.03     # annual discount rate
    init_year = 0            # simulation start year
    n_steps = int(horizon / dt)  # number of analysis steps over the horizon
def lca_instance():
    """Build and return the LCA object used by the optimizer tests.

    Assembles settings, the bridge network, and the simulator, then wraps
    them in an LCA configured for 10 randomized hazard-aware simulations.
    """
    # Creating the settings instance
    settings = GeneralSettings()
    # Creating the network
    # NOTE(review): DummySHMNetwork is not imported anywhere visible in this
    # module (the import at the top brings in IndianaNetwork); unless the
    # star-import of utils.GeneralSettings supplies it, this raises
    # NameError at call time -- confirm the intended network class.
    session_name = 'IndianaSHM'
    mynetwork = DummySHMNetwork(file_name = "INDIANA2019",
                settings = settings,
                n_assets = 1,
                is_deck = False,
                is_superstructure = True,
                is_substructure = False)
    mynetwork.load_network()
    # Budget: fixed current-year limit, a flat (zero-drift) projection
    # model, and an NPV cap over the horizon.
    mynetwork.set_current_budget_limit(100000)
    mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0, settings = settings))
    mynetwork.set_npv_budget_limit(10000)
    # Creating the simulator
    simulator = MainSimulator(settings = settings)
    # shaping the main LCA
    lca = LCA(lca_name = session_name,
        settings = settings,
        network = mynetwork,
        simulator = simulator,
        random = True,
        is_hazard = True,
        n_simulations = 10,
        should_report = True)
    return lca
def obj_func(**kwargs):
    """Objective value: utility damped by a mild user-cost penalty.

    :return: Utility / UserCost ** 0.2
    """
    utility = kwargs['Utility']
    user_cost = kwargs['UserCost']
    return utility / user_cost ** 0.2
def GA_test():
    """Validate the genetic-algorithm optimizer on a freshly built LCA."""
    # NOTE: the 'optimzition_type' spelling matches the GA API as used here.
    hyperparams = dict(
        crossver_prob=0.75,
        mutation_prob=0.03,
        population_size=200,
        n_generations=200,
        n_elites=5,
        optimzition_type='max',
        n_jobs=1,
    )
    optimizer = GA(lca_instance)
    optimizer.set_hyperparameters(**hyperparams)
    optimizer.validate()
if __name__ == "__main__":
example1()
GA_test(lca_instance)
| 23.223404
| 86
| 0.674301
|
import time
import ast
from Network import IndianaNetwork
from LifeCycleAnalyzer.Simulators import MainSimulator
from LifeCycleAnalyzer import LCA
from Optimizer import HillClimbing
from Optimizer import BruteForce
from Optimizer import GA
from Optimizer import IUC
from Optimizer import PSO
from utils.PredictiveModels.Linear import Linear
from utils.AwesomeTimeIt import timeit
from utils.GeneralSettings import *
class GeneralSettings:
n_elements = 1
n_states = 8
dt = 2
horizon = 20
discount_rate = 0.03
init_year = 0
n_steps = int(horizon/dt)
def lca_instance():
settings = GeneralSettings()
session_name = 'IndianaSHM'
mynetwork = DummySHMNetwork(file_name = "INDIANA2019",
settings = settings,
n_assets = 1,
is_deck = False,
is_superstructure = True,
is_substructure = False)
mynetwork.load_network()
mynetwork.set_current_budget_limit(100000)
mynetwork.set_budget_limit_model(Linear(X0 = 100000, drift = 0, settings = settings))
mynetwork.set_npv_budget_limit(10000)
simulator = MainSimulator(settings = settings)
lca = LCA(lca_name = session_name,
settings = settings,
network = mynetwork,
simulator = simulator,
random = True,
is_hazard = True,
n_simulations = 10,
should_report = True)
return lca
def obj_func(**kwargs):
return kwargs['Utility'] / kwargs['UserCost'] ** 0.2
def GA_test():
optimizer = GA(lca_instance)
optimizer.set_hyperparameters(crossver_prob = 0.75,
mutation_prob = 0.03,
population_size = 200,
n_generations = 200,
n_elites = 5,
optimzition_type = 'max',
n_jobs = 1)
optimizer.validate()
if __name__ == "__main__":
example1()
GA_test(lca_instance)
| true
| true
|
f70897f9eb8aeca2cd474fecd27ec7d2df2c1157
| 5,421
|
py
|
Python
|
addons/stock_dropshipping/tests/test_dropship.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/stock_dropshipping/tests/test_dropship.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
addons/stock_dropshipping/tests/test_dropship.py
|
jjiege/odoo
|
fd5b8ad387c1881f349d125cbd56433f4d49398f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.tests import common, Form
from odoo.tools import mute_logger
class TestDropship(common.TransactionCase):
    """Exercise the dropshipping route end-to-end: SO -> PO -> delivery.

    Uses ``assertEqual`` rather than the deprecated ``assertEquals`` alias,
    which was removed from unittest in Python 3.12.
    """

    def test_change_qty(self):
        """Quantity changes on the SO must propagate to the generated PO."""
        # enable the dropship and MTO route on the product
        prod = self.env.ref('product.product_product_8')
        dropshipping_route = self.env.ref('stock_dropshipping.route_drop_shipping')
        mto_route = self.env.ref('stock.route_warehouse0_mto')
        prod.write({'route_ids': [(6, 0, [dropshipping_route.id, mto_route.id])]})
        # add a vendor
        vendor1 = self.env['res.partner'].create({'name': 'vendor1'})
        seller1 = self.env['product.supplierinfo'].create({
            'name': vendor1.id,
            'price': 8,
        })
        prod.write({'seller_ids': [(6, 0, [seller1.id])]})
        # sell one unit of this product
        cust = self.env['res.partner'].create({'name': 'customer1'})
        so = self.env['sale.order'].create({
            'partner_id': cust.id,
            'partner_invoice_id': cust.id,
            'partner_shipping_id': cust.id,
            'order_line': [(0, 0, {
                'name': prod.name,
                'product_id': prod.id,
                'product_uom_qty': 1.00,
                'product_uom': prod.uom_id.id,
                'price_unit': 12,
            })],
            'pricelist_id': self.env.ref('product.list0').id,
            'picking_policy': 'direct',
        })
        so.action_confirm()
        po = self.env['purchase.order'].search([('group_id', '=', so.procurement_group_id.id)])
        po_line = po.order_line
        # Check the qty on the P0
        self.assertAlmostEqual(po_line.product_qty, 1.00)
        # Update qty on SO and check PO
        so.order_line.product_uom_qty = 2.00
        self.assertAlmostEqual(po_line.product_qty, 2.00)
        # Create a new so line
        sol2 = self.env['sale.order.line'].create({
            'order_id': so.id,
            'name': prod.name,
            'product_id': prod.id,
            'product_uom_qty': 3.00,
            'product_uom': prod.uom_id.id,
            'price_unit': 12,
        })
        # there is a new line
        pol2 = po.order_line - po_line
        # the first line is unchanged
        self.assertAlmostEqual(po_line.product_qty, 2.00)
        # the new line matches the new line on the so
        self.assertAlmostEqual(pol2.product_qty, sol2.product_uom_qty)

    def test_00_dropship(self):
        """Full dropship flow: SO triggers an RFQ, whose receipt ships to the customer."""
        # Create a vendor
        supplier_dropship = self.env['res.partner'].create({'name': 'Vendor of Dropshipping test'})
        # Create new product without any routes
        drop_shop_product = self.env['product.product'].create({
            'name': "Pen drive",
            'type': "product",
            'categ_id': self.env.ref('product.product_category_1').id,
            'lst_price': 100.0,
            'standard_price': 0.0,
            'uom_id': self.env.ref('uom.product_uom_unit').id,
            'uom_po_id': self.env.ref('uom.product_uom_unit').id,
            'seller_ids': [(0, 0, {
                'delay': 1,
                'name': supplier_dropship.id,
                'min_qty': 2.0
            })]
        })
        # Create a sales order with a line of 200 PCE incoming shipment, with route_id drop shipping
        so_form = Form(self.env['sale.order'])
        so_form.partner_id = self.env.ref('base.res_partner_2')
        so_form.payment_term_id = self.env.ref('account.account_payment_term')
        with mute_logger('odoo.tests.common.onchange'):
            # otherwise complains that there's not enough inventory and
            # apparently that's normal according to @jco and @sle
            with so_form.order_line.new() as line:
                line.product_id = drop_shop_product
                line.product_uom_qty = 200
                line.price_unit = 1.00
                line.route_id = self.env.ref('stock_dropshipping.route_drop_shipping')
            sale_order_drp_shpng = so_form.save()
        # Confirm sales order
        sale_order_drp_shpng.action_confirm()
        # Check the sales order created a procurement group which has a procurement of 200 pieces
        self.assertTrue(sale_order_drp_shpng.procurement_group_id, 'SO should have procurement group')
        # Check a quotation was created to a certain vendor and confirm so it becomes a confirmed purchase order
        purchase = self.env['purchase.order'].search([('partner_id', '=', supplier_dropship.id)])
        self.assertTrue(purchase, "an RFQ should have been created by the scheduler")
        purchase.button_confirm()
        self.assertEqual(purchase.state, 'purchase', 'Purchase order should be in the approved state')
        self.assertEqual(len(purchase.ids), 1, 'There should be one picking')
        # Send the 200 pieces
        purchase.picking_ids.move_lines.quantity_done = purchase.picking_ids.move_lines.product_qty
        purchase.picking_ids.button_validate()
        # Check one move line was created in Customers location with 200 pieces
        move_line = self.env['stock.move.line'].search([
            ('location_dest_id', '=', self.env.ref('stock.stock_location_customers').id),
            ('product_id', '=', drop_shop_product.id)])
        self.assertEqual(len(move_line.ids), 1, 'There should be exactly one move line')
| 43.717742
| 112
| 0.611695
|
from odoo.tests import common, Form
from odoo.tools import mute_logger
class TestDropship(common.TransactionCase):
def test_change_qty(self):
prod = self.env.ref('product.product_product_8')
dropshipping_route = self.env.ref('stock_dropshipping.route_drop_shipping')
mto_route = self.env.ref('stock.route_warehouse0_mto')
prod.write({'route_ids': [(6, 0, [dropshipping_route.id, mto_route.id])]})
vendor1 = self.env['res.partner'].create({'name': 'vendor1'})
seller1 = self.env['product.supplierinfo'].create({
'name': vendor1.id,
'price': 8,
})
prod.write({'seller_ids': [(6, 0, [seller1.id])]})
cust = self.env['res.partner'].create({'name': 'customer1'})
so = self.env['sale.order'].create({
'partner_id': cust.id,
'partner_invoice_id': cust.id,
'partner_shipping_id': cust.id,
'order_line': [(0, 0, {
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 1.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
so.action_confirm()
po = self.env['purchase.order'].search([('group_id', '=', so.procurement_group_id.id)])
po_line = po.order_line
self.assertAlmostEqual(po_line.product_qty, 1.00)
so.order_line.product_uom_qty = 2.00
self.assertAlmostEqual(po_line.product_qty, 2.00)
sol2 = self.env['sale.order.line'].create({
'order_id': so.id,
'name': prod.name,
'product_id': prod.id,
'product_uom_qty': 3.00,
'product_uom': prod.uom_id.id,
'price_unit': 12,
})
pol2 = po.order_line - po_line
self.assertAlmostEqual(po_line.product_qty, 2.00)
self.assertAlmostEqual(pol2.product_qty, sol2.product_uom_qty)
def test_00_dropship(self):
supplier_dropship = self.env['res.partner'].create({'name': 'Vendor of Dropshipping test'})
drop_shop_product = self.env['product.product'].create({
'name': "Pen drive",
'type': "product",
'categ_id': self.env.ref('product.product_category_1').id,
'lst_price': 100.0,
'standard_price': 0.0,
'uom_id': self.env.ref('uom.product_uom_unit').id,
'uom_po_id': self.env.ref('uom.product_uom_unit').id,
'seller_ids': [(0, 0, {
'delay': 1,
'name': supplier_dropship.id,
'min_qty': 2.0
})]
})
so_form = Form(self.env['sale.order'])
so_form.partner_id = self.env.ref('base.res_partner_2')
so_form.payment_term_id = self.env.ref('account.account_payment_term')
with mute_logger('odoo.tests.common.onchange'):
# apparently that's normal according to @jco and @sle
with so_form.order_line.new() as line:
line.product_id = drop_shop_product
line.product_uom_qty = 200
line.price_unit = 1.00
line.route_id = self.env.ref('stock_dropshipping.route_drop_shipping')
sale_order_drp_shpng = so_form.save()
sale_order_drp_shpng.action_confirm()
self.assertTrue(sale_order_drp_shpng.procurement_group_id, 'SO should have procurement group')
purchase = self.env['purchase.order'].search([('partner_id', '=', supplier_dropship.id)])
self.assertTrue(purchase, "an RFQ should have been created by the scheduler")
purchase.button_confirm()
self.assertEquals(purchase.state, 'purchase', 'Purchase order should be in the approved state')
self.assertEquals(len(purchase.ids), 1, 'There should be one picking')
purchase.picking_ids.move_lines.quantity_done = purchase.picking_ids.move_lines.product_qty
purchase.picking_ids.button_validate()
move_line = self.env['stock.move.line'].search([
('location_dest_id', '=', self.env.ref('stock.stock_location_customers').id),
('product_id', '=', drop_shop_product.id)])
self.assertEquals(len(move_line.ids), 1, 'There should be exactly one move line')
| true
| true
|
f7089a3189e9200c44cfa339fb72e02b96aba0ef
| 241
|
py
|
Python
|
imagr_site/urls.py
|
cewing/cfpydev-imagr
|
423ec9d9b38be990ab7dca027877e1c12f3d07fe
|
[
"MIT"
] | null | null | null |
imagr_site/urls.py
|
cewing/cfpydev-imagr
|
423ec9d9b38be990ab7dca027877e1c12f3d07fe
|
[
"MIT"
] | null | null | null |
imagr_site/urls.py
|
cewing/cfpydev-imagr
|
423ec9d9b38be990ab7dca027877e1c12f3d07fe
|
[
"MIT"
] | null | null | null |
from django.conf.urls import patterns, include, url
from django.contrib import admin
# Discover admin.py modules in installed apps and register their models.
admin.autodiscover()
# Route /admin/ to the Django admin and /accounts/ to the imagr_users app.
# NOTE(review): patterns() was deprecated in Django 1.8 and removed in 1.10;
# on modern Django, urlpatterns would be a plain list of url()/path() entries.
urlpatterns = patterns(
    '',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/', include('imagr_users.urls'))
)
| 21.909091
| 51
| 0.701245
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/', include('imagr_users.urls'))
)
| true
| true
|
f7089a89f944e9cd852c6eb3d6019ff7857667e6
| 12,486
|
py
|
Python
|
.history/implementations/pixelda/pixelda_20190101201505.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
.history/implementations/pixelda/pixelda_20190101201505.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
.history/implementations/pixelda/pixelda_20190101201505.py
|
Napkin-DL/PyTorch-GAN
|
4668fb434a74a4e4771631944e4abfb0ec1c8795
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for generated image samples.
os.makedirs('images', exist_ok=True)

# Command-line hyperparameters for the PixelDA training run.
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
# FIX: corrected "betwen" -> "between" in the user-facing help string.
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)

# Calculate output of image discriminator (PatchGAN): four stride-2 convs
# each halve the spatial size, so the validity map is img_size / 2**4 a side.
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)

cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
    """DCGAN-style initializer, intended for use via ``model.apply(...)``.

    Conv layers (incl. ConvTranspose, whose class name also contains
    'Conv') get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias.  Other modules are left untouched.
    """
    # FIX: removed a leftover debug print of every module's class name.
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
    """Classic two-conv residual block: forward returns x + F(x)."""

    def __init__(self, in_features=64, out_features=64):
        # FIX: the original called super(ResidualBlock, self), naming the
        # *other* residual class defined below, which raises TypeError when
        # this class is instantiated.
        super(ResidualBlock_back, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_features, in_features, 3, 1, 1),
            nn.BatchNorm2d(in_features),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_features, in_features, 3, 1, 1),
            nn.BatchNorm2d(in_features)
        )

    def forward(self, x):
        """Residual sum; spatial shape and channel count are preserved."""
        return x + self.block(x)
class ResidualBlock(nn.Module):
    """Residual autoencoder block: forward returns x + decode(encode(x)).

    NOTE(review): with these kernel/stride/padding choices the decoder only
    reproduces the input's spatial size for some inputs (e.g. 31x31 round-
    trips; 32x32 comes back 31x31, so the residual add fails).  Presumably
    output_padding was meant to compensate -- confirm against the caller.
    """

    def __init__(self, in_features=64, out_features=64):
        # FIX: the original contained a bare expression `(2(128-1)-64 +3)/2`
        # (a leftover padding calculation not commented out) which calls the
        # int literal 2 and raises TypeError on construction.  It, and two
        # debug prints of the sub-networks, have been removed.
        super(ResidualBlock, self).__init__()
        # Encoder: two strided convs, doubling the channel count each time.
        self.encode_block = nn.Sequential(
            nn.Conv2d(in_channels=1*in_features, out_channels=2*in_features, kernel_size=(3, 3), stride=(2, 2), padding=0),
            nn.BatchNorm2d(2*in_features),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(in_channels=2*in_features, out_channels=4*in_features, kernel_size=(3, 3), stride=(2, 2), padding=2),
            nn.BatchNorm2d(4*in_features),
            nn.LeakyReLU(inplace=True)
        )
        # Decoder: mirror of the encoder using transposed convolutions.
        self.decode_block = nn.Sequential(
            nn.ConvTranspose2d(in_channels=4*in_features, out_channels=2*in_features, kernel_size=(3, 3), stride=(2, 2), padding=2),
            nn.BatchNorm2d(2*in_features),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=2*in_features, out_channels=1*in_features, kernel_size=(3, 3), stride=(2, 2), padding=0),
            nn.BatchNorm2d(1*in_features),
            nn.LeakyReLU(inplace=True)
        )

    def forward(self, x):
        encode_x = self.encode_block(x)
        decode_x = self.decode_block(encode_x)
        return x + decode_x
class Generator(nn.Module):
    """PixelDA generator: maps a source image plus noise to a translated image."""

    def __init__(self):
        super(Generator, self).__init__()
        # Project the noise vector into an image-shaped tensor so it can be
        # concatenated with the input image along the channel axis.
        self.fc = nn.Linear(opt.latent_dim, opt.channels * opt.img_size ** 2)
        self.l1 = nn.Sequential(nn.Conv2d(opt.channels * 2, 64, 3, 1, 1), nn.ReLU(inplace=True))
        self.resblocks = nn.Sequential(
            *[ResidualBlock() for _ in range(opt.n_residual_blocks)]
        )
        self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())

    def forward(self, img, z):
        """Translate *img* conditioned on noise *z*; output has img's shape."""
        noise_img = self.fc(z).view(*img.shape)
        features = self.l1(torch.cat((img, noise_img), 1))
        features = self.resblocks(features)
        return self.l2(features)
class Discriminator(nn.Module):
    """PatchGAN discriminator producing a grid of real/fake scores."""

    def __init__(self):
        super(Discriminator, self).__init__()

        def block(in_features, out_features, normalization=True):
            """One downsampling step: strided conv + LeakyReLU (+ InstanceNorm)."""
            layers = [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                      nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_features))
            return layers

        # Four halving stages, then a 1-channel conv for the validity map.
        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512),
            nn.Conv2d(512, 1, 3, 1, 1)
        )

    def forward(self, img):
        """Score *img*; returns a patch map of validity values."""
        return self.model(img)
class Classifier(nn.Module):
    """Task classifier: conv trunk mirroring the discriminator, then a
    linear head emitting class probabilities."""

    def __init__(self):
        super(Classifier, self).__init__()

        def block(in_features, out_features, normalization=True):
            """Classifier block: strided conv + LeakyReLU (+ InstanceNorm)."""
            layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                        nn.LeakyReLU(0.2, inplace=True) ]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_features))
            return layers

        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512)
        )

        # Four stride-2 convs shrink the spatial size by 2**4.
        input_size = opt.img_size // 2**4
        # FIX: nn.Softmax() with no dim is deprecated; dim=1 is what the
        # implicit behavior resolves to for (batch, classes) input.
        # NOTE(review): these probabilities are later fed to
        # CrossEntropyLoss, which applies log-softmax itself -- double
        # softmax; confirm this is intended.
        self.output_layer = nn.Sequential(
            nn.Linear(512*input_size**2, opt.n_classes),
            nn.Softmax(dim=1)
        )

    def forward(self, img):
        """Return per-class probabilities for *img*, shape (batch, n_classes)."""
        feature_repr = self.model(img)
        feature_repr = feature_repr.view(feature_repr.size(0), -1)
        label = self.output_layer(feature_repr)
        return label
# Loss functions: LSGAN-style MSE for the adversarial game, cross-entropy for
# the auxiliary classification task.
adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights: adversarial term dominates, task term is auxiliary.
lambda_adv = 1
lambda_task = 0.1
# Initialize generator, discriminator and classifier
generator = Generator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
    generator.cuda()
    discriminator.cuda()
    classifier.cuda()
    adversarial_loss.cuda()
    task_loss.cuda()
# Initialize weights (DCGAN-style normal init)
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Configure data loader -- domain A: MNIST
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
    datasets.MNIST('../../data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                        transforms.Resize(opt.img_size),
                        transforms.ToTensor(),
                        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True)
# Domain B: MNIST-M
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
    MNISTM('../../data/mnistm', train=True, download=True,
           transform=transforms.Compose([
                transforms.Resize(opt.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
           ])),
    batch_size=opt.batch_size, shuffle=True)
# Optimizers -- generator and classifier are optimised jointly
optimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),
                                lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# ----------
#  Training
# ----------
# Rolling windows of the last 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
    for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
        batch_size = imgs_A.size(0)
        # Adversarial ground truths (PatchGAN-shaped)
        valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
        # Configure input -- MNIST is grayscale, so expand to 3 channels
        imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
        labels_A = Variable(labels_A.type(LongTensor))
        imgs_B = Variable(imgs_B.type(FloatTensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample noise
        z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
        # Generate a batch of images
        fake_B = generator(imgs_A, z)
        # Perform task on translated source image
        label_pred = classifier(fake_B)
        # Calculate the task loss (on both translated and original A images)
        task_loss_ = (task_loss(label_pred, labels_A) + \
                        task_loss(classifier(imgs_A), labels_A)) / 2
        # Loss measures generator's ability to fool the discriminator
        g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \
                    lambda_task * task_loss_
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Measure discriminator's ability to classify real from generated samples
        real_loss = adversarial_loss(discriminator(imgs_B), valid)
        fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()
        # ---------------------------------------
        #  Evaluate Performance on target domain
        # ---------------------------------------
        # Evaluate performance on translated Domain A
        acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
        task_performance.append(acc)
        if len(task_performance) > 100:
            task_performance.pop(0)
        # Evaluate performance on Domain B
        pred_B = classifier(imgs_B)
        target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
        target_performance.append(target_acc)
        if len(target_performance) > 100:
            target_performance.pop(0)
        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
                                            (epoch, opt.n_epochs,
                                            i, len(dataloader_A),
                                            d_loss.item(), g_loss.item(),
                                            100*acc, 100*np.mean(task_performance),
                                            100*target_acc, 100*np.mean(target_performance)))
        batches_done = len(dataloader_A) * epoch + i
        if batches_done % opt.sample_interval == 0:
            # Rows: source A / translated A / target B
            sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)
            save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| 37.271642
| 129
| 0.610764
|
import argparse
import os
import numpy as np
import math
import itertools
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from mnistm import MNISTM
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for periodic image sample grids.
os.makedirs('images', exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
# Fix: --b2 controls the SECOND-moment decay; its help text previously
# duplicated --b1's "first order momentum" description.
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator')
parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input')
parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset')
# Fix: typo 'betwen' -> 'between' in user-visible help text.
parser.add_argument('--sample_interval', type=int, default=300, help='interval between image samples')
opt = parser.parse_args()
print(opt)
# PatchGAN output shape: the discriminator downsamples by 2**4.
patch = int(opt.img_size / 2**4)
patch = (1, patch, patch)
cuda = True if torch.cuda.is_available() else False
def weights_init_normal(m):
    """DCGAN-style init: N(0, 0.02) for conv weights, N(1, 0.02)/0 for BatchNorm."""
    layer_type = m.__class__.__name__
    print("classname : {}".format(layer_type))
    if 'Conv' in layer_type:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
class ResidualBlock_back(nn.Module):
    """Plain residual block: two 3x3 convs with BatchNorm and an identity skip."""

    def __init__(self, in_features=64, out_features=64):
        # Fix: super() previously named ResidualBlock, raising TypeError
        # because self is not an instance of that class.
        super(ResidualBlock_back, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_features, in_features, 3, 1, 1),
            nn.BatchNorm2d(in_features),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_features, in_features, 3, 1, 1),
            nn.BatchNorm2d(in_features)
        )

    def forward(self, x):
        """Return x + F(x); stride 1 / padding 1 preserves the spatial size."""
        return x + self.block(x)
class ResidualBlock(nn.Module):
    """Residual block with a strided-conv encoder and transposed-conv decoder.

    NOTE(review): for a 32x32 input the decoder emits 31x31 (conv/deconv
    padding is not symmetric), so the residual addition in forward() would
    fail -- confirm the intended spatial sizes before use.
    """

    def __init__(self, in_features=64, out_features=64):
        super(ResidualBlock, self).__init__()
        # Fix: removed the stray expression '(2(128-1)-64 +3)/2' which raised
        # TypeError ('int' object is not callable) at construction time.
        # Fix: the encoder was assigned to a local 'e_block' and never stored,
        # so both the print below and forward() raised AttributeError.
        # Encoder: two stride-2 convolutions, widening channels 1x -> 4x.
        self.encode_block = nn.Sequential(
            nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
            nn.BatchNorm2d(2*in_features),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2),
            nn.BatchNorm2d(4*in_features),
            nn.LeakyReLU(inplace=True)
        )
        print("self.encode_block : {}".format(self.encode_block))
        # Decoder: mirror of the encoder using transposed convolutions.
        self.decode_block = nn.Sequential(
            nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2),
            nn.BatchNorm2d(2*in_features),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0),
            nn.BatchNorm2d(1*in_features),
            nn.LeakyReLU(inplace=True)
        )
        print("self.decode_block : {}".format(self.decode_block))

    def forward(self, x):
        encode_x = self.encode_block(x)
        decode_x = self.decode_block(encode_x)
        return x + decode_x
class Generator(nn.Module):
    """Noise-conditioned translator: projects latent noise into an image-shaped
    map, stacks it onto the input image, and refines through residual blocks."""

    def __init__(self):
        super(Generator, self).__init__()
        # Project the latent vector to a full image-channel-shaped tensor.
        self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2)
        # Entry convolution over the concatenated image/noise channels.
        self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True))
        self.resblocks = nn.Sequential(
            *[ResidualBlock() for _ in range(opt.n_residual_blocks)]
        )
        # Map features back to image channels, squashed into [-1, 1].
        self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh())

    def forward(self, img, z):
        noise_map = self.fc(z).view(*img.shape)
        features = self.l1(torch.cat((img, noise_map), 1))
        return self.l2(self.resblocks(features))
class Discriminator(nn.Module):
    """PatchGAN-style discriminator: four stride-2 stages then a 1-channel
    conv producing a grid of validity scores."""

    def __init__(self):
        super(Discriminator, self).__init__()

        def block(in_features, out_features, normalization=True):
            """One downsampling stage: strided conv -> LeakyReLU (-> InstanceNorm)."""
            stage = [nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                     nn.LeakyReLU(0.2, inplace=True)]
            if normalization:
                stage.append(nn.InstanceNorm2d(out_features))
            return stage

        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512),
            nn.Conv2d(512, 1, 3, 1, 1),
        )

    def forward(self, img):
        return self.model(img)
class Classifier(nn.Module):
    """Task classifier: conv feature extractor plus a softmax head.

    Returns class probabilities of shape (batch, opt.n_classes).
    NOTE(review): the training loop feeds this output to CrossEntropyLoss,
    which expects raw logits; the extra softmax weakens gradients -- confirm
    this matches the intended reference implementation.
    """

    def __init__(self):
        super(Classifier, self).__init__()

        def block(in_features, out_features, normalization=True):
            """One downsampling stage of the feature extractor."""
            layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1),
                        nn.LeakyReLU(0.2, inplace=True) ]
            if normalization:
                layers.append(nn.InstanceNorm2d(out_features))
            return layers

        self.model = nn.Sequential(
            *block(opt.channels, 64, normalization=False),
            *block(64, 128),
            *block(128, 256),
            *block(256, 512)
        )

        # Four stride-2 blocks halve the spatial size four times.
        input_size = opt.img_size // 2**4
        self.output_layer = nn.Sequential(
            nn.Linear(512*input_size**2, opt.n_classes),
            # Fix: explicit dim pins the softmax to the class axis and avoids
            # the implicit-dimension deprecation warning (same result for 2D input).
            nn.Softmax(dim=1)
        )

    def forward(self, img):
        feature_repr = self.model(img)
        feature_repr = feature_repr.view(feature_repr.size(0), -1)
        label = self.output_layer(feature_repr)
        return label
# Loss functions: LSGAN-style MSE for the adversarial game, cross-entropy for
# the auxiliary classification task.
adversarial_loss = torch.nn.MSELoss()
task_loss = torch.nn.CrossEntropyLoss()
# Loss weights: adversarial term dominates, task term is auxiliary.
lambda_adv = 1
lambda_task = 0.1
# Initialize generator, discriminator and classifier
generator = Generator()
discriminator = Discriminator()
classifier = Classifier()
if cuda:
    generator.cuda()
    discriminator.cuda()
    classifier.cuda()
    adversarial_loss.cuda()
    task_loss.cuda()
# Initialize weights (DCGAN-style normal init)
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
classifier.apply(weights_init_normal)
# Data loader -- domain A: MNIST
os.makedirs('../../data/mnist', exist_ok=True)
dataloader_A = torch.utils.data.DataLoader(
    datasets.MNIST('../../data/mnist', train=True, download=True,
                   transform=transforms.Compose([
                        transforms.Resize(opt.img_size),
                        transforms.ToTensor(),
                        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
                   ])),
    batch_size=opt.batch_size, shuffle=True)
# Data loader -- domain B: MNIST-M
os.makedirs('../../data/mnistm', exist_ok=True)
dataloader_B = torch.utils.data.DataLoader(
    MNISTM('../../data/mnistm', train=True, download=True,
           transform=transforms.Compose([
                transforms.Resize(opt.img_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
           ])),
    batch_size=opt.batch_size, shuffle=True)
# Optimizers -- generator and classifier are optimised jointly
optimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()),
                                lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
# Rolling windows of the last 100 accuracy measurements
task_performance = []
target_performance = []
for epoch in range(opt.n_epochs):
    for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)):
        batch_size = imgs_A.size(0)
        # Adversarial ground truths (PatchGAN-shaped)
        valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False)
        fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False)
        # Configure input -- MNIST is grayscale, so expand to 3 channels
        imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size))
        labels_A = Variable(labels_A.type(LongTensor))
        imgs_B = Variable(imgs_B.type(FloatTensor))
        # --- Train Generator (jointly with classifier) ---
        optimizer_G.zero_grad()
        # Sample noise for the generator's latent input
        z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim))))
        # Translate source images to the target style
        fake_B = generator(imgs_A, z)
        # Classify the translated source images
        label_pred = classifier(fake_B)
        # Task loss averaged over translated and original A images
        task_loss_ = (task_loss(label_pred, labels_A) + \
                        task_loss(classifier(imgs_A), labels_A)) / 2
        # Adversarial loss: how well the generator fools the discriminator
        g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \
                    lambda_task * task_loss_
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Measure discriminator's ability to classify real from generated samples
        real_loss = adversarial_loss(discriminator(imgs_B), valid)
        fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        d_loss.backward()
        optimizer_D.step()
        # --- Evaluate accuracy on translated domain A ---
        acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy())
        task_performance.append(acc)
        if len(task_performance) > 100:
            task_performance.pop(0)
        # --- Evaluate accuracy on target domain B ---
        pred_B = classifier(imgs_B)
        target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy())
        target_performance.append(target_acc)
        if len(target_performance) > 100:
            target_performance.pop(0)
        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" %
                                            (epoch, opt.n_epochs,
                                            i, len(dataloader_A),
                                            d_loss.item(), g_loss.item(),
                                            100*acc, 100*np.mean(task_performance),
                                            100*target_acc, 100*np.mean(target_performance)))
        batches_done = len(dataloader_A) * epoch + i
        if batches_done % opt.sample_interval == 0:
            # Rows: source A / translated A / target B
            sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2)
            save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
| true
| true
|
f7089ba093dc0bbb1c054cb1d3da1322b5b180f0
| 346
|
py
|
Python
|
plotting-beginner-plotting-cookbook/pltcp.py
|
hrayatnia/SciPy
|
a50dcbb6b8adffbc526eec93f5009f09943786e3
|
[
"BSD-3-Clause"
] | null | null | null |
plotting-beginner-plotting-cookbook/pltcp.py
|
hrayatnia/SciPy
|
a50dcbb6b8adffbc526eec93f5009f09943786e3
|
[
"BSD-3-Clause"
] | null | null | null |
plotting-beginner-plotting-cookbook/pltcp.py
|
hrayatnia/SciPy
|
a50dcbb6b8adffbc526eec93f5009f09943786e3
|
[
"BSD-3-Clause"
] | 1
|
2021-08-14T23:05:03.000Z
|
2021-08-14T23:05:03.000Z
|
# Draw a random octagonal polygon on polar axes.
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
# Polar axes: polygon vertices below are (theta, radius) pairs.
ax = plt.axes(polar = True)
# Eight evenly spaced angles around the circle.
theta = np.linspace(0, 2 * np.pi, 8, endpoint = False)
# Random radius per vertex in [0.25, 1.0).
radius = .25 + .75 * np.random.random(size = len(theta))
# Stack into an (8, 2) array of (theta, radius) vertices.
points = np.vstack((theta, radius)).transpose()
# Light-gray filled polygon on the current (polar) axes.
plt.gca().add_patch(patches.Polygon(points, color = '.75'))
plt.show()
| 38.444444
| 59
| 0.710983
|
# Draw a random octagonal polygon on polar axes.
import numpy as np
import matplotlib.patches as patches
import matplotlib.pyplot as plt
# Polar axes: polygon vertices below are (theta, radius) pairs.
ax = plt.axes(polar = True)
# Eight evenly spaced angles around the circle.
theta = np.linspace(0, 2 * np.pi, 8, endpoint = False)
# Random radius per vertex in [0.25, 1.0).
radius = .25 + .75 * np.random.random(size = len(theta))
# Stack into an (8, 2) array of (theta, radius) vertices.
points = np.vstack((theta, radius)).transpose()
# Light-gray filled polygon on the current (polar) axes.
plt.gca().add_patch(patches.Polygon(points, color = '.75'))
plt.show()
| true
| true
|
f7089c9da38300abd4358f58c0aaa203dffd7c0e
| 710
|
py
|
Python
|
weatherterm/core/parser_loader.py
|
eustone/weather-app
|
06b85178cf9e8a195c69d3622af73cc2d15ed7a8
|
[
"MIT"
] | null | null | null |
weatherterm/core/parser_loader.py
|
eustone/weather-app
|
06b85178cf9e8a195c69d3622af73cc2d15ed7a8
|
[
"MIT"
] | null | null | null |
weatherterm/core/parser_loader.py
|
eustone/weather-app
|
06b85178cf9e8a195c69d3622af73cc2d15ed7a8
|
[
"MIT"
] | null | null | null |
import os
import re
import inspect
def _get_parser_list(dirname):
    """Return module names (``.py`` stripped) in *dirname*, skipping dunder files."""
    return [
        entry.replace('.py', '')
        for entry in os.listdir(dirname)
        if not entry.startswith('__')
    ]
def _import_parsers(parserfiles):
    """Import ``weatherterm.parsers`` and return {class_name: class} for every
    parser class found there.

    Matching is by name: modules/classes whose name contains 'parsers'
    (case-insensitive) are collected.
    """
    m = re.compile('.+parsers',re.I)
    _modules = __import__('weatherterm.parsers',globals(),locals(),parserfiles,0)
    # Sub-modules of weatherterm.parsers whose name matches the pattern.
    _parsers = [(k,v) for k,v in inspect.getmembers(_modules) if inspect.ismodule(v) and m.match(k)]
    _classes = dict()
    for k,v in _parsers:
        # Collect matching classes from each module; note k, v are rebound
        # inside the comprehension to the member name/value.
        _classes.update({k:v for k,v in inspect.getmembers(v) if inspect.isclass(v) and m.match(k)})
    return _classes
def load(dirname):
    """Discover parser modules under *dirname* and return their classes."""
    return _import_parsers(_get_parser_list(dirname))
| 32.272727
| 100
| 0.690141
|
import os
import re
import inspect
def _get_parser_list(dirname):
    """Return module names (``.py`` stripped) in *dirname*, skipping dunder files."""
    files = [ f.replace('.py','') for f in os.listdir(dirname) if not f.startswith('__')
    ]
    return files
def _import_parsers(parserfiles):
    """Import ``weatherterm.parsers`` and return {class_name: class} for every
    parser class found there (name must contain 'parsers', case-insensitive).
    """
    m = re.compile('.+parsers',re.I)
    _modules = __import__('weatherterm.parsers',globals(),locals(),parserfiles,0)
    # Sub-modules of weatherterm.parsers whose name matches the pattern.
    _parsers = [(k,v) for k,v in inspect.getmembers(_modules) if inspect.ismodule(v) and m.match(k)]
    _classes = dict()
    for k,v in _parsers:
        # Collect matching classes from each module (k, v rebound inside the
        # comprehension to the member name/value).
        _classes.update({k:v for k,v in inspect.getmembers(v) if inspect.isclass(v) and m.match(k)})
    return _classes
def load(dirname):
    """Discover parser modules under *dirname* and return their classes."""
    parserfiles = _get_parser_list(dirname)
    return _import_parsers(parserfiles)
| true
| true
|
f7089df20096a6a03930477e8e401ffe4dc43232
| 1,115
|
py
|
Python
|
setup.py
|
ajgomez529/google-maps-services-python
|
9c38623cdd2400caade224d9968abd1bce610daa
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ajgomez529/google-maps-services-python
|
9c38623cdd2400caade224d9968abd1bce610daa
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
ajgomez529/google-maps-services-python
|
9c38623cdd2400caade224d9968abd1bce610daa
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
# Single runtime dependency; capped below the next major release of requests.
requirements = ["requests>=2.20.0,<3.0"]
# Long description shown on PyPI: README followed by the changelog.
with open("README.md") as f:
    readme = f.read()
with open("CHANGELOG.md") as f:
    changelog = f.read()
setup(
    name="googlemaps",
    version="4.4.4",
    description="Python client library for Google Maps Platform",
    long_description=readme + changelog,
    long_description_content_type="text/markdown",
    scripts=[],
    url="https://github.com/googlemaps/google-maps-services-python",
    packages=["googlemaps"],
    license="Apache 2.0",
    platforms="Posix; MacOS X; Windows",
    setup_requires=requirements,
    install_requires=requirements,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Internet",
    ],
    python_requires='>=3.5'
)
| 28.589744
| 68
| 0.632287
|
from setuptools import setup
# Single runtime dependency; capped below the next major release of requests.
requirements = ["requests>=2.20.0,<3.0"]
# Long description shown on PyPI: README followed by the changelog.
with open("README.md") as f:
    readme = f.read()
with open("CHANGELOG.md") as f:
    changelog = f.read()
setup(
    name="googlemaps",
    version="4.4.4",
    description="Python client library for Google Maps Platform",
    long_description=readme + changelog,
    long_description_content_type="text/markdown",
    scripts=[],
    url="https://github.com/googlemaps/google-maps-services-python",
    packages=["googlemaps"],
    license="Apache 2.0",
    platforms="Posix; MacOS X; Windows",
    setup_requires=requirements,
    install_requires=requirements,
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Topic :: Internet",
    ],
    python_requires='>=3.5'
)
| true
| true
|
f7089dfe6fe74007cb8636beea498cb478d8602a
| 21,442
|
py
|
Python
|
auto_enc.py
|
jobcpf/auto_encrypted
|
39efd7b76e5efa9035654fd5cf9877a24a7caa08
|
[
"BSD-3-Clause"
] | null | null | null |
auto_enc.py
|
jobcpf/auto_encrypted
|
39efd7b76e5efa9035654fd5cf9877a24a7caa08
|
[
"BSD-3-Clause"
] | null | null | null |
auto_enc.py
|
jobcpf/auto_encrypted
|
39efd7b76e5efa9035654fd5cf9877a24a7caa08
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
"""
Auto Mount Encrypted Drives on external Credentials
@Author: oliver.blakeman@carbonprojectfinance.co.uk
@Date: 2018-07-25
Shebangs: (amend #!/ path at top based on env and app)
ferret: #!/usr/bin/python
"""
# Standard import
import sys
import os
import pwd
import time
# other
from subprocess import call, STDOUT, PIPE, Popen
FNULL = open(os.devnull, 'w') # write to /dev/null
import Tkinter as tk
# logging
import logging
logfile = "/tmp/auto_enc_test.log"
logging.basicConfig(filename=logfile,level=logging.DEBUG)
#logging.basicConfig(filename=logfile,level=logging.ERROR)
################## env #################################### env #################################### env ##################
# path
current_env = os.environ['HOME']
base_dir = os.path.join(current_env, 'dev','auto_encrypted')
sys.path.append(base_dir)
# Resolve the invoking user's name/UID/GID (used for mount ownership options).
user_details = pwd.getpwuid(os.getuid())
user_name = user_details[0]
UID = user_details[2]
GID = user_details[3]
logging.debug('%s:%s: Script run as: %s (UID %s, GID %s)' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config', user_name, UID, GID))
# CLI arguments: argv[1] = action (optional), argv[2] = base device name
# (optional); both fall back to False when absent.
try:
    action = os.path.basename(sys.argv[1])
    try:
        device = os.path.basename(sys.argv[2])
        logging.debug('%s:%s: Search for volumes on device: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config', device))
    except IndexError as e: # no second arg passed
        device = False
        logging.debug('%s:%s: Search for volumes on ALL external devices.' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config'))
except IndexError as e:
    logging.debug('%s:%s: No arguments passed to script' % (time.strftime('%Y-%m-%d %H:%M:%S'), 'config'))
    action = False
################## modules #################################### modules #################################### modules ##################
from crypt.secure import test_keys, secure_config, get_config
################## vars #################################### vars #################################### vars ##################
import config as config
mnt_ids = config.MNT_IDS.format(uid=UID,gid=GID) # format mount ids for user
################## functions #################################### functions #################################### functions ##################
def getpwd():
    """Modal Tkinter dialogue; returns the entered password, or False on Cancel.

    Uses the module-level ``password`` global so the Tk callbacks can hand the
    value back after the window is destroyed.
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running password dialogue script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    global password
    password = True
    # main window, centred on screen
    root = tk.Tk()
    root.title("Mount Encrypted")
    root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
    # prompt label
    tk.Label(root, text = 'Enter Password').pack(side = 'top', padx=60, pady=10)
    # masked password entry box
    pwdbox = tk.Entry(root, show = '*')
    pwdbox.pack(side = 'top', padx=60, pady=10)
    pwdbox.focus_set() # put cursor in pw box
    def onpwdentry(evt):
        # <Return> pressed inside the entry box
        global password
        pw_retrieve = pwdbox.get()
        if pw_retrieve:
            password = pw_retrieve
            root.destroy()
    def onokclick():
        # OK button: accept only a non-empty entry
        global password
        pw_retrieve = pwdbox.get()
        if pw_retrieve:
            password = pw_retrieve
            root.destroy()
    def oncancelclick():
        # Cancel button: signal the caller with False
        global password
        password = False
        root.destroy()
    # wire up key binding and buttons
    pwdbox.bind('<Return>', onpwdentry)
    tk.Button(root, command=onokclick, text = 'OK').pack(side = 'left', padx=20, pady=10)
    tk.Button(root, command=oncancelclick, text = 'Cancel').pack(side = 'right', padx=20, pady=10)
    root.mainloop()
    return password
def confirm_mount(header,message):
    """Modal Tkinter confirmation box; blocks until OK is clicked, returns True.

    > header: window title
    > message: body text shown to the user
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running confirmation dialogue script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    # main window, centred on screen
    root = tk.Tk()
    root.title(header)
    root.eval('tk::PlaceWindow %s center' % root.winfo_pathname(root.winfo_id()))
    # message label
    tk.Label(root, text = message).pack(side = 'top', padx=60, pady=10)
    def onokclick():
        root.destroy()
    # single OK button
    tk.Button(root, command=onokclick, text = 'OK').pack(side = 'top', padx=60, pady=10)
    root.mainloop()
    return True
def auth_device(private_key):
    """Walk the mounted device for a public key that pairs with *private_key*.

    Returns True on the first successful key-pair test, False otherwise.
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to find and auth device private key.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR, topdown=True):
        # Candidate files: any name containing the configured public-key filename.
        for candidate in (name for name in file_names if config.PUB_KF in name):
            with open(os.path.join(dir_name, candidate), "r") as pub_file:
                public_key = pub_file.read()
            if test_keys(private_key, public_key):
                return True
    return False
def get_mnt_devs():
    """List device nodes eligible for mounting, excluding config.MNT_EXC bases."""
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to find ALL available device volumes.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    eligible = []
    for dir_name, subdirs_name, file_names in os.walk(config.DEV_DIR):
        eligible.extend(
            os.path.join(dir_name, name)
            for name in file_names
            # Partition nodes only: 4-char name (e.g. 'sda1') whose 3-char
            # base device is not on the exclusion list.
            if config.DEV in name and name[:3] not in config.MNT_EXC and len(name) == 4
        )
    return eligible
def get_base_mnt_devs():
    """List partition nodes belonging to the globally selected base *device*."""
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to find volumes for device from base device: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, device))
    volumes = []
    for dir_name, subdirs_name, file_names in os.walk(config.DEV_DIR):
        for name in file_names:
            # Name longer than the base => a partition of it (e.g. sdb -> sdb1).
            if device in name and len(name) > len(device):
                volumes.append(os.path.join(dir_name, name))
    return volumes
def usb_unmount():
    """Unmount whatever is mounted at config.MNT_DIR via ``sudo umount``.

    Returns the subprocess exit status, so 0 means the umount succeeded.
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to unmount device.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    u_command = "sudo umount %s" % (config.MNT_DIR) # unmount command using mount dir
    success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
    # NOTE: 'success' is the raw return code (0 = ok), despite the name.
    logging.debug('%s:%s: Device %s unmounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, config.MNT_DIR, success))
    return success
def usb_mount(private_key):
    """Mount and verify external devices.

    1. Mount candidate volumes read-only under config.MNT_DIR.
    2. Authorise each via the public/private key pair (auth_device).
    3. Dismount any volume that fails authorisation.

    > private_key: private key string used by auth_device()
    < True once an authorised volume is mounted, False if none authorise
    """
    func_name = sys._getframe().f_code.co_name
    ## mount and auth
    logging.debug('%s:%s: Running script to mount & auth device.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    if device : # get volumes from device
        mount_list = get_base_mnt_devs()
    else: # get all device volumes
        mount_list = get_mnt_devs()
    ## iterate devices
    for dev in mount_list:
        logging.debug('%s:%s: Testing device volume: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev))
        # define mount commands (read-only, with the user's uid/gid options)
        u_command = "sudo umount %s" % (dev)
        m_command = "sudo mount -r -o %s --source %s --target %s" % (mnt_ids, dev, config.MNT_DIR)
        # call unmount - in case already mounted
        success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
        logging.debug('%s:%s: %s dismounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
        time.sleep(config.SYS_SLEEP)
        # call mount
        success = call(m_command, stdout=FNULL, stderr=STDOUT, shell=True)
        logging.debug('%s:%s: %s mounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
        # Auth device against the supplied private key
        authed = auth_device(private_key)
        # check if authed
        if authed :
            return True
        else:
            # not authorised: unmount again before trying the next volume
            success = call(u_command, stdout=FNULL, stderr=STDOUT, shell=True)
            logging.debug('%s:%s: %s dismounted %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, dev, success))
    return False
def get_configs(private_key):
    """Decrypt and return the mount-configuration dicts found on the device.

    Walks config.MNT_DIR for files named in config.ENC_VOL_CFE, decrypts each
    with *private_key* via get_config(), and returns the list of decoded
    configurations. Any expected config file not found is logged as an error.
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to decrypt encrypted mount configs.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    enc_list = []
    # Fix: iterate over a local copy. The original removed entries from
    # config.ENC_VOL_CFE while looping over it, which skips elements and
    # permanently mutates shared config state.
    remaining = list(config.ENC_VOL_CFE)
    for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR):
        for file_name in file_names :
            if file_name in remaining :
                # prevent duplicates
                remaining.remove(file_name)
                # decrypt config
                enc_config = get_config(private_key, os.path.join(dir_name, file_name))
                if enc_config:
                    enc_list.append(enc_config)
    if remaining :
        logging.error('%s:%s: Could not retrieve all configs, remaining: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, remaining))
    return enc_list
def get_keyfiles(keyfiles):
    """Locate the named keyfiles under config.MNT_DIR and return their paths.

    > keyfiles: list of keyfile basenames to look for
    < list of full paths for each keyfile found; any missing names are logged
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running script to identify and return keyfiles.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    kf_list = []
    # Fix: work on a copy. The original removed entries from 'keyfiles' while
    # iterating over it, which skips subsequent keys and mutates the caller's
    # list in place.
    remaining = list(keyfiles)
    for dir_name, subdirs_name, file_names in os.walk(config.MNT_DIR):
        for file_name in file_names :
            if file_name in remaining :
                # prevent duplicates
                remaining.remove(file_name)
                kf_list.append(os.path.join(dir_name, file_name))
    if remaining :
        logging.error('%s:%s: Could not retrieve all keyfiles, remaining: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, remaining))
    return kf_list
def dismount_encrypted():
    """Force-dismount ALL veracrypt volumes; always returns True.

    Streams veracrypt's combined stdout/stderr into the debug log line by line.
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running encrypted volume dismount ALL script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    denc_command = "sudo {vc} --force --dismount".format(vc=config.VC)
    proc = Popen(denc_command, stdout=PIPE, stderr=STDOUT, shell=True)
    for line in proc.stdout:
        logging.debug('%s:%s: veracrypt report: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line))
    proc.wait()
    logging.debug('%s:%s: Veracrypt dismount ALL, reported: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, proc.returncode))
    return True
def mount_encrypted():
    """Mount encrypted VeraCrypt volumes described by encrypted configs.

    Sequence: read the private key from disk, mount the USB device,
    decrypt the volume configuration files, then mount each configured
    volume in turn (building the veracrypt command from the keyfiles /
    password / interactive settings of each config).

    :returns: True on success or deliberate abort (e.g. user cancelled the
        password dialogue, or a volume is already mounted); False on any
        hard failure (missing key, no device, no configs, mount error).
    """
    func_name = sys._getframe().f_code.co_name
    logging.debug('%s:%s: Running encrypted volume mount script.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    ## get private_key
    try:
        pkf = os.path.join(config.PRV_KEY_DIR.format(home=current_env), config.PRV_KF)
        with open(pkf, "r") as prv_file:
            private_key = prv_file.read()
    except IOError as e:
        logging.error('%s:%s: Private key not present: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, pkf))
        return False
    ## mount and ID device (pb/pk)
    mounted = usb_mount(private_key)
    if not mounted:
        logging.error('%s:%s: No device mounted.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
        return False
    ## get configuration files
    enc_cfg_list = get_configs(private_key)
    if not enc_cfg_list:
        logging.error('%s:%s: No configurations present.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
        return False
    # veracrypt slot counter, incremented per mounted volume
    slot = 10
    ## iterate configured volumes
    for enc_vol in enc_cfg_list:
        ## Get keyfiles
        keyfiles = get_keyfiles(enc_vol.get('keyfiles', []))
        logging.debug('%s:%s: Retrieved keyfiles' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
        ## password
        # pw semantics: True/absent -> prompt; False/None -> no password;
        # any other value -> use it as the literal password
        pw = enc_vol.get('pw', True)
        if isinstance(pw, (bool, type(None))):
            if pw:
                # get password from dialogue
                password = getpwd()
                if not password:
                    logging.debug('%s:%s: Dialogue yielded no password - abort.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
                    unmounted = usb_unmount()
                    return True  # prevent encrypt dismount
            else:
                password = None
        else:
            password = pw
        logging.debug('%s:%s: Retrieved password: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, '****'))
        ## get volume data
        try:
            volume = enc_vol['volume']
            mount_point = enc_vol['mount_point']
        except KeyError as e:
            # BUGFIX: dict subscripting raises KeyError; the original
            # caught IndexError, so a missing key crashed the script
            logging.error('%s:%s: Could not retrieve volume information: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, e))
            return False
        # get interactive mode (empty flag string == interactive)
        interactive = enc_vol.get('interactive', False)
        if interactive:
            interactive = ''
        else:
            interactive = '-t --non-interactive'
        ## check if volume is mounted on mount_point
        mount_point_taken = os.path.ismount(mount_point)  # returns boolean
        if mount_point_taken:
            ## unmount usb
            logging.debug('%s:%s: Calling unmount for device' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
            unmounted = usb_unmount()
            return True
        ## build veracrypt command
        # NOTE(review): the password is interpolated into a shell=True
        # command line, so it is visible in the process list -- consider
        # veracrypt's stdin password entry instead
        # keyfiles and password
        if keyfiles and password:
            kf_string = ','.join(keyfiles)
            enc_command = "{vc} {ia} --keyfiles={kf} --password='{pw}' --slot={sl} {vo} {mt}".format(vc=config.VC,
                                                                                                     ia=interactive,
                                                                                                     kf=kf_string,
                                                                                                     pw=password,
                                                                                                     sl=slot,
                                                                                                     vo=volume,
                                                                                                     mt=mount_point)
        # keyfiles only
        elif keyfiles:
            kf_string = ','.join(keyfiles)
            enc_command = "{vc} {ia} --keyfiles={kf} --slot={sl} {vo} {mt}".format(vc=config.VC,
                                                                                   ia=interactive,
                                                                                   kf=kf_string,
                                                                                   sl=slot,
                                                                                   vo=volume,
                                                                                   mt=mount_point)
        # password only
        elif password:
            enc_command = """{vc} {ia} --password='{pw}' --slot={sl} {vo} {mt}""".format(vc=config.VC,
                                                                                         ia=interactive,
                                                                                         pw=password,
                                                                                         sl=slot,
                                                                                         vo=volume,
                                                                                         mt=mount_point)
        # no password or keyfiles ??
        else:
            enc_command = """{vc} {ia} --slot={sl} {vo} {mt}""".format(vc=config.VC,
                                                                       ia=interactive,
                                                                       sl=slot,
                                                                       vo=volume,
                                                                       mt=mount_point)
        ## make veracrypt call
        logging.debug('%s:%s: Calling veracrypt mount: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, enc_command))
        proc = Popen(enc_command, stdout=PIPE, stderr=STDOUT, shell=True)
        for line in proc.stdout:
            logging.debug('%s:%s: veracrypt mount output: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line))
        proc.wait()
        logging.debug('%s:%s: veracrypt mount success: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, proc.returncode))
        # attempt dismount volume if reported error on mount, e.g. already mounted
        if proc.returncode > 0:
            enc_command = "{vc} -t --non-interactive --dismount {vo}".format(vc=config.VC, vo=volume)
            success = call(enc_command, stdout=FNULL, stderr=STDOUT, shell=True)
            logging.debug('%s:%s: Veracrypt attempted dismount of volume %s, reported: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, volume, success))
            return False
        slot += 1
    ## unmount usb
    logging.debug('%s:%s: Calling unmount for device' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    unmounted = usb_unmount()
    # report mounted volumes
    enc_list = "{vc} -t -lv".format(vc=config.VC)  # verbose list
    proc = Popen(enc_list, stdout=PIPE, stderr=STDOUT, shell=True)
    for line in proc.stdout:
        logging.debug('%s:%s: veracrypt report: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, line.rstrip()))
    proc.wait()
    return True
################## script #################################### script #################################### script ##################
# run script if called directly
if __name__ == "__main__":
    func_name = 'auto_encrypted.__main__'
    logging.debug('%s:%s: Running script as main.' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name))
    if action == 'mount':  # mount encrypted files
        # sleep to avoid mount conflicts
        time.sleep(config.SYS_SLEEP)
        # perform mount
        mounted = mount_encrypted()
        logging.debug('%s:%s: Mounted encrypted volumes: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, mounted))
        # attempt to dismount all
        if not mounted:
            dismount_encrypted()
            usb_unmount()
            # dialogue
            confirmed = confirm_mount('No Mounted Volumes','No credentials available. \nAll encrypted volumes have been dismounted.')
        exit(0)
    elif action == 'config':  # generate encrypted configs
        config_secured = secure_config(current_env)
        logging.debug('%s:%s: Secured config files: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, config_secured))
        # dialogue
        confirmed = confirm_mount('Config Encrypted','Config file successfully encrypted.')
        # BUGFIX: the original fell through to the "Argument not recognised"
        # error exit(1) below even after a successful config encryption
        exit(0)
    elif not action:  # dismount all encrypted drives
        dismount_encrypted()
        usb_unmount()
        # dialogue
        confirmed = confirm_mount('Dismounted','All encrypted volumes have been dismounted.')
        exit(0)
    # any other action value is an unrecognised argument
    logging.debug('%s:%s: Argument not recognised: %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), func_name, action))
    exit(1)
| 39.634011
| 158
| 0.523878
|
import sys
import os
import pwd
import time
from subprocess import call, STDOUT, PIPE, Popen
FNULL = open(os.devnull, 'w')
import Tkinter as tk
import logging
logfile = "/tmp/auto_enc_test.log"
logging.basicConfig(filename=logfile,level=logging.DEBUG)
| true
| true
|
f7089e12767a1d19a0a295981ad26d0728e640fe
| 139
|
py
|
Python
|
1170.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | 3
|
2021-09-21T18:50:20.000Z
|
2021-12-14T13:07:31.000Z
|
1170.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
1170.py
|
gabzin/uri
|
177bdf3f87bacfd924bd031a973b8db877379fe5
|
[
"MIT"
] | null | null | null |
# For each of T test cases, read an amount and count how many times it
# must be halved before it is no greater than 1; print that count in days.
for _case in range(int(input())):
    amount = float(input())
    days = 0
    while amount > 1:
        amount /= 2
        days += 1
    print(f"{days} dias")
| 15.444444
| 29
| 0.460432
|
for n in range(int(input())):
c=float(input())
aux=0
while c>1:
c/=2
aux+=1
print(f"{aux} dias")
aux=0
| true
| true
|
f7089e39411df691557a32c6561b3e8f84d6cb00
| 17,037
|
py
|
Python
|
lib/python3.8/site-packages/ansible/plugins/callback/__init__.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | 4
|
2021-09-16T01:32:29.000Z
|
2022-03-24T07:32:10.000Z
|
lib/python3.8/site-packages/ansible/plugins/callback/__init__.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
lib/python3.8/site-packages/ansible/plugins/callback/__init__.py
|
cjsteel/python3-venv-ansible-2.10.5
|
c95395c4cae844dc66fddde9b4343966f4b2ecd5
|
[
"Apache-1.1"
] | null | null | null |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import difflib
import json
import os
import sys
import warnings
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins import AnsiblePlugin, get_plugin_class
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
if PY3:
# OrderedDict is needed for a backwards compat shim on Python3.x only
# https://github.com/ansible/ansible/pull/49512
from collections import OrderedDict
else:
OrderedDict = None
global_display = Display()
__all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
class CallbackBase(AnsiblePlugin):

    '''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods they wish to execute
    custom actions.
    '''

    def __init__(self, display=None, options=None):
        # fall back to the module-level Display when none is injected
        if display:
            self._display = display
        else:
            self._display = global_display

        if self._display.verbosity >= 4:
            name = getattr(self, 'CALLBACK_NAME', 'unnamed')
            ctype = getattr(self, 'CALLBACK_TYPE', 'old')
            version = getattr(self, 'CALLBACK_VERSION', '1.0')
            self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))

        self.disabled = False

        self._plugin_options = {}
        if options is not None:
            self.set_options(options)

        # result keys that _clean_results() strips from debug output when no 'msg' is present
        self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')

    ''' helper for callbacks, so they don't all have to include deepcopy '''
    _copy_result = deepcopy

    def set_option(self, k, v):
        """Set a single plugin option."""
        self._plugin_options[k] = v

    def get_option(self, k):
        """Return a single plugin option (raises KeyError if unset)."""
        return self._plugin_options[k]

    def set_options(self, task_keys=None, var_options=None, direct=None):
        ''' This is different than the normal plugin method as callbacks get called early and really don't accept keywords.
            Also _options was already taken for CLI args and callbacks use _plugin_options instead.
        '''

        # load from config
        self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)

    def _run_is_verbose(self, result, verbosity=0):
        """Return True when this result should be displayed verbosely."""
        return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
                and result._result.get('_ansible_verbose_override', False) is False)

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        """Return a JSON string of *result*, pruned of internal/diff/exception data for display."""
        if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
            indent = 4

        # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
        abridged_result = strip_internal_keys(module_response_deepcopy(result))

        # remove invocation unless specifically wanting it
        if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
            del abridged_result['invocation']

        # remove diff information from screen output
        if self._display.verbosity < 3 and 'diff' in result:
            del abridged_result['diff']

        # remove exception from screen output
        if 'exception' in abridged_result:
            del abridged_result['exception']

        try:
            jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
        except TypeError:
            # Python3 bug: throws an exception when keys are non-homogenous types:
            # https://bugs.python.org/issue25457
            # sort into an OrderedDict and then json.dumps() that instead
            if not OrderedDict:
                raise
            jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
                                           cls=AnsibleJSONEncoder, indent=indent,
                                           ensure_ascii=False, sort_keys=False)

        return jsonified_results

    def _handle_warnings(self, res):
        ''' display warnings, if enabled and any exist in the result '''
        if C.ACTION_WARNINGS:
            if 'warnings' in res and res['warnings']:
                for warning in res['warnings']:
                    self._display.warning(warning)
                # consumed: remove so they are not shown again elsewhere
                del res['warnings']
            if 'deprecations' in res and res['deprecations']:
                for warning in res['deprecations']:
                    self._display.deprecated(**warning)
                del res['deprecations']

    def _handle_exception(self, result, use_stderr=False):
        """Display (and remove) any traceback carried in *result*."""
        if 'exception' in result:
            msg = "An exception occurred during task execution. "
            if self._display.verbosity < 3:
                # extract just the actual error message from the exception text
                error = result['exception'].strip().split('\n')[-1]
                msg += "To see the full traceback, use -vvv. The error was: %s" % error
            else:
                msg = "The full traceback is:\n" + result['exception']
                del result['exception']

        self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)

    def _serialize_diff(self, diff):
        """Serialize a structured diff value to pretty-printed JSON text."""
        return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'

    def _get_diff(self, difflist):
        """Render one diff dict (or a list of them) into colorized unified-diff text."""
        if not isinstance(difflist, list):
            difflist = [difflist]

        ret = []
        for diff in difflist:
            if 'dst_binary' in diff:
                ret.append(u"diff skipped: destination file appears to be binary\n")
            if 'src_binary' in diff:
                ret.append(u"diff skipped: source file appears to be binary\n")
            if 'dst_larger' in diff:
                ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
            if 'src_larger' in diff:
                ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
            if 'before' in diff and 'after' in diff:
                # format complex structures into 'files'
                for x in ['before', 'after']:
                    if isinstance(diff[x], MutableMapping):
                        diff[x] = self._serialize_diff(diff[x])
                    elif diff[x] is None:
                        diff[x] = ''
                if 'before_header' in diff:
                    before_header = u"before: %s" % diff['before_header']
                else:
                    before_header = u'before'
                if 'after_header' in diff:
                    after_header = u"after: %s" % diff['after_header']
                else:
                    after_header = u'after'
                before_lines = diff['before'].splitlines(True)
                after_lines = diff['after'].splitlines(True)
                # annotate missing trailing newlines, like GNU diff does
                if before_lines and not before_lines[-1].endswith(u'\n'):
                    before_lines[-1] += u'\n\\ No newline at end of file\n'
                if after_lines and not after_lines[-1].endswith('\n'):
                    after_lines[-1] += u'\n\\ No newline at end of file\n'
                differ = difflib.unified_diff(before_lines,
                                              after_lines,
                                              fromfile=before_header,
                                              tofile=after_header,
                                              fromfiledate=u'',
                                              tofiledate=u'',
                                              n=C.DIFF_CONTEXT)
                difflines = list(differ)
                if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
                    # difflib in Python 2.6 adds trailing spaces after
                    # filenames in the -- before/++ after headers.
                    difflines[0] = difflines[0].replace(u' \n', u'\n')
                    difflines[1] = difflines[1].replace(u' \n', u'\n')
                    # it also treats empty files differently
                    difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
                has_diff = False
                for line in difflines:
                    has_diff = True
                    if line.startswith(u'+'):
                        line = stringc(line, C.COLOR_DIFF_ADD)
                    elif line.startswith(u'-'):
                        line = stringc(line, C.COLOR_DIFF_REMOVE)
                    elif line.startswith(u'@@'):
                        line = stringc(line, C.COLOR_DIFF_LINES)
                    ret.append(line)
                if has_diff:
                    ret.append('\n')
            if 'prepared' in diff:
                ret.append(diff['prepared'])
        return u''.join(ret)

    def _get_item_label(self, result):
        ''' retrieves the value to be displayed as a label for an item entry from a result object'''
        if result.get('_ansible_no_log', False):
            item = "(censored due to no_log)"
        else:
            item = result.get('_ansible_item_label', result.get('item'))
        return item

    def _get_item(self, result):
        ''' here for backwards compat, really should have always been named: _get_item_label'''
        cback = getattr(self, 'NAME', os.path.basename(__file__))
        self._display.deprecated("The %s callback plugin should be updated to use the _get_item_label method instead" % cback,
                                 version="2.11", collection_name='ansible.builtin')
        return self._get_item_label(result)

    def _process_items(self, result):
        # just remove them as now they get handled by individual callbacks
        del result._result['results']

    def _clean_results(self, result, task_name):
        ''' removes data from results for display '''

        # mostly controls that debug only outputs what it was meant to
        if task_name in C._ACTION_DEBUG:
            if 'msg' in result:
                # msg should be alone
                for key in list(result.keys()):
                    if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
                        result.pop(key)
            else:
                # 'var' value as field, so eliminate others and what is left should be varname
                for hidme in self._hide_in_debug:
                    result.pop(hidme, None)

    # v1 callback hooks: no-op stubs for subclasses to override
    def set_play_context(self, play_context):
        pass

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_ok(self, host, res):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        pass

    def on_file_diff(self, host, diff):
        pass

    # V2 METHODS, by default they call v1 counterparts if possible
    def v2_on_any(self, *args, **kwargs):
        self.on_any(args, kwargs)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        host = result._host.get_name()
        self.runner_on_failed(host, result._result, ignore_errors)

    def v2_runner_on_ok(self, result):
        host = result._host.get_name()
        self.runner_on_ok(host, result._result)

    def v2_runner_on_skipped(self, result):
        if C.DISPLAY_SKIPPED_HOSTS:
            host = result._host.get_name()
            # NOTE(review): getattr on a dict (_result) always yields the {} default here -- verify intent
            self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))

    def v2_runner_on_unreachable(self, result):
        host = result._host.get_name()
        self.runner_on_unreachable(host, result._result)

    # FIXME: not called
    def v2_runner_on_async_poll(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        # FIXME, get real clock
        clock = 0
        self.runner_on_async_poll(host, result._result, jid, clock)

    # FIXME: not called
    def v2_runner_on_async_ok(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_ok(host, result._result, jid)

    # FIXME: not called
    def v2_runner_on_async_failed(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_failed(host, result._result, jid)

    def v2_playbook_on_start(self, playbook):
        self.playbook_on_start()

    def v2_playbook_on_notify(self, handler, host):
        self.playbook_on_notify(host, handler)

    def v2_playbook_on_no_hosts_matched(self):
        self.playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        self.playbook_on_no_hosts_remaining()

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.playbook_on_task_start(task.name, is_conditional)

    # FIXME: not called
    def v2_playbook_on_cleanup_task_start(self, task):
        pass  # no v1 correspondence

    def v2_playbook_on_handler_task_start(self, task):
        pass  # no v1 correspondence

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
        self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)

    # FIXME: not called
    def v2_playbook_on_import_for_host(self, result, imported_file):
        host = result._host.get_name()
        self.playbook_on_import_for_host(host, imported_file)

    # FIXME: not called
    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        host = result._host.get_name()
        self.playbook_on_not_import_for_host(host, missing_file)

    def v2_playbook_on_play_start(self, play):
        self.playbook_on_play_start(play.name)

    def v2_playbook_on_stats(self, stats):
        self.playbook_on_stats(stats)

    def v2_on_file_diff(self, result):
        if 'diff' in result._result:
            host = result._host.get_name()
            self.on_file_diff(host, result._result['diff'])

    def v2_playbook_on_include(self, included_file):
        pass  # no v1 correspondence

    def v2_runner_item_on_ok(self, result):
        pass

    def v2_runner_item_on_failed(self, result):
        pass

    def v2_runner_item_on_skipped(self, result):
        pass

    def v2_runner_retry(self, result):
        pass

    def v2_runner_on_start(self, host, task):
        """Event used when host begins execution of a task

        .. versionadded:: 2.8
        """
        pass
| 38.545249
| 160
| 0.624523
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import difflib
import json
import os
import sys
import warnings
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils.common._collections_compat import MutableMapping
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_text
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins import AnsiblePlugin, get_plugin_class
from ansible.utils.color import stringc
from ansible.utils.display import Display
from ansible.vars.clean import strip_internal_keys, module_response_deepcopy
if PY3:
from collections import OrderedDict
else:
OrderedDict = None
global_display = Display()
__all__ = ["CallbackBase"]
_DEBUG_ALLOWED_KEYS = frozenset(('msg', 'exception', 'warnings', 'deprecations'))
class CallbackBase(AnsiblePlugin):
def __init__(self, display=None, options=None):
if display:
self._display = display
else:
self._display = global_display
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, sys.modules[self.__module__].__file__))
self.disabled = False
self._plugin_options = {}
if options is not None:
self.set_options(options)
self._hide_in_debug = ('changed', 'failed', 'skipped', 'invocation', 'skip_reason')
_copy_result = deepcopy
def set_option(self, k, v):
self._plugin_options[k] = v
def get_option(self, k):
return self._plugin_options[k]
def set_options(self, task_keys=None, var_options=None, direct=None):
self._plugin_options = C.config.get_plugin_options(get_plugin_class(self), self._load_name, keys=task_keys, variables=var_options, direct=direct)
def _run_is_verbose(self, result, verbosity=0):
return ((self._display.verbosity > verbosity or result._result.get('_ansible_verbose_always', False) is True)
and result._result.get('_ansible_verbose_override', False) is False)
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
indent = 4
abridged_result = strip_internal_keys(module_response_deepcopy(result))
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
if 'exception' in abridged_result:
del abridged_result['exception']
try:
jsonified_results = json.dumps(abridged_result, cls=AnsibleJSONEncoder, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
except TypeError:
if not OrderedDict:
raise
jsonified_results = json.dumps(OrderedDict(sorted(abridged_result.items(), key=to_text)),
cls=AnsibleJSONEncoder, indent=indent,
ensure_ascii=False, sort_keys=False)
return jsonified_results
def _handle_warnings(self, res):
if C.ACTION_WARNINGS:
if 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
del res['warnings']
if 'deprecations' in res and res['deprecations']:
for warning in res['deprecations']:
self._display.deprecated(**warning)
del res['deprecations']
def _handle_exception(self, result, use_stderr=False):
if 'exception' in result:
msg = "An exception occurred during task execution. "
if self._display.verbosity < 3:
error = result['exception'].strip().split('\n')[-1]
msg += "To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "The full traceback is:\n" + result['exception']
del result['exception']
self._display.display(msg, color=C.COLOR_ERROR, stderr=use_stderr)
def _serialize_diff(self, diff):
return json.dumps(diff, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
def _get_diff(self, difflist):
if not isinstance(difflist, list):
difflist = [difflist]
ret = []
for diff in difflist:
if 'dst_binary' in diff:
ret.append(u"diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append(u"diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append(u"diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append(u"diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
for x in ['before', 'after']:
if isinstance(diff[x], MutableMapping):
diff[x] = self._serialize_diff(diff[x])
elif diff[x] is None:
diff[x] = ''
if 'before_header' in diff:
before_header = u"before: %s" % diff['before_header']
else:
before_header = u'before'
if 'after_header' in diff:
after_header = u"after: %s" % diff['after_header']
else:
after_header = u'after'
before_lines = diff['before'].splitlines(True)
after_lines = diff['after'].splitlines(True)
if before_lines and not before_lines[-1].endswith(u'\n'):
before_lines[-1] += u'\n\\ No newline at end of file\n'
if after_lines and not after_lines[-1].endswith('\n'):
after_lines[-1] += u'\n\\ No newline at end of file\n'
differ = difflib.unified_diff(before_lines,
after_lines,
fromfile=before_header,
tofile=after_header,
fromfiledate=u'',
tofiledate=u'',
n=C.DIFF_CONTEXT)
difflines = list(differ)
if len(difflines) >= 3 and sys.version_info[:2] == (2, 6):
difflines[0] = difflines[0].replace(u' \n', u'\n')
difflines[1] = difflines[1].replace(u' \n', u'\n')
difflines[2] = difflines[2].replace(u'-1,0', u'-0,0').replace(u'+1,0', u'+0,0')
has_diff = False
for line in difflines:
has_diff = True
if line.startswith(u'+'):
line = stringc(line, C.COLOR_DIFF_ADD)
elif line.startswith(u'-'):
line = stringc(line, C.COLOR_DIFF_REMOVE)
elif line.startswith(u'@@'):
line = stringc(line, C.COLOR_DIFF_LINES)
ret.append(line)
if has_diff:
ret.append('\n')
if 'prepared' in diff:
ret.append(diff['prepared'])
return u''.join(ret)
def _get_item_label(self, result):
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
else:
item = result.get('_ansible_item_label', result.get('item'))
return item
def _get_item(self, result):
cback = getattr(self, 'NAME', os.path.basename(__file__))
self._display.deprecated("The %s callback plugin should be updated to use the _get_item_label method instead" % cback,
version="2.11", collection_name='ansible.builtin')
return self._get_item_label(result)
def _process_items(self, result):
del result._result['results']
def _clean_results(self, result, task_name):
if task_name in C._ACTION_DEBUG:
if 'msg' in result:
for key in list(result.keys()):
if key not in _DEBUG_ALLOWED_KEYS and not key.startswith('_'):
result.pop(key)
else:
for hidme in self._hide_in_debug:
result.pop(hidme, None)
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item_label(getattr(result._result, 'results', {})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, handler, host):
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
pass
def v2_playbook_on_handler_task_start(self, task):
pass
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default, unsafe)
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
pass
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
    # Retry events have no v1 counterpart; deliberately ignored.
    pass
def v2_runner_on_start(self, host, task):
    # Task-start-per-host events have no v1 counterpart; deliberately ignored.
    pass
| true
| true
|
f7089ef67286d4bd93b7bbb603e0c655b36ca314
| 3,090
|
py
|
Python
|
docs/source/conf.py
|
swiftycloud/swifty
|
5167df9c1ce213fa14887d3bda28beb02f688e33
|
[
"MIT"
] | 33
|
2019-04-16T06:28:16.000Z
|
2021-07-30T11:11:05.000Z
|
docs/source/conf.py
|
swiftycloud/swifty
|
5167df9c1ce213fa14887d3bda28beb02f688e33
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
swiftycloud/swifty
|
5167df9c1ce213fa14887d3bda28beb02f688e33
|
[
"MIT"
] | 2
|
2019-04-23T15:09:40.000Z
|
2020-11-22T00:19:24.000Z
|
# -*- coding: utf-8 -*-
#
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser

# Parse Markdown sources alongside reStructuredText.
# NOTE(review): recommonmark is deprecated upstream (superseded by
# myst-parser) and `source_parsers` was removed in newer Sphinx releases;
# confirm before upgrading Sphinx.
source_parsers = {
    '.md': CommonMarkParser,
}

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = ['sphinx.ext.doctest','rst2pdf.pdfbuilder']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Swifty'
copyright = u'2017 The Swifty Authors'
author = u'The Swifty Authors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'classic'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
#html_theme = 'pyramid'
#html_theme = 'bizstyle'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 31.212121
| 79
| 0.721036
|
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Swifty'
copyright = u'2017 The Swifty Authors'
author = u'The Swifty Authors'
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'classic'
#html_theme = 'sphinxdoc'
#html_theme = 'scrolls'
#html_theme = 'agogo'
#html_theme = 'traditional'
#html_theme = 'nature'
#html_theme = 'haiku'
#html_theme = 'pyramid'
#html_theme = 'bizstyle'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| true
| true
|
f7089f47db2e2b77a6377a7ce32ab1734102e04b
| 12,276
|
py
|
Python
|
src/fuzzysearch/substitutions_only.py
|
klauer/fuzzysearch
|
55fc21e469495bc84fe6f81b0c148e105765182d
|
[
"MIT"
] | null | null | null |
src/fuzzysearch/substitutions_only.py
|
klauer/fuzzysearch
|
55fc21e469495bc84fe6f81b0c148e105765182d
|
[
"MIT"
] | null | null | null |
src/fuzzysearch/substitutions_only.py
|
klauer/fuzzysearch
|
55fc21e469495bc84fe6f81b0c148e105765182d
|
[
"MIT"
] | null | null | null |
from collections import deque, defaultdict
from itertools import islice
from functools import wraps
from fuzzysearch.common import FuzzySearchBase, Match, \
count_differences_with_maximum, get_best_match_in_group, group_matches
from fuzzysearch.compat import text_type
from fuzzysearch.search_exact import search_exact
def _check_arguments(subsequence, sequence, max_substitutions):
if not subsequence:
raise ValueError('Given subsequence is empty!')
if max_substitutions is None or max_substitutions < 0:
raise ValueError('Maximum number of substitutions must be >= 0!')
def has_near_match_substitutions(subsequence, sequence, max_substitutions):
    """Return True iff ``sequence`` contains a substitutions-only
    near-match of ``subsequence``.

    Dispatches to an exact search, the ngram-based search, or the
    "less memory" (lp) search depending on the parameters.
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    if max_substitutions == 0:
        return any(True for _ in search_exact(subsequence, sequence))
    # ngram search requires ngrams of length >= 3 to be worthwhile.
    use_ngrams = len(subsequence) // (max_substitutions + 1) >= 3
    implementation = (has_near_match_substitutions_ngrams if use_ngrams
                      else has_near_match_substitutions_lp)
    return implementation(subsequence, sequence, max_substitutions)
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
    """Find near-matches of the subsequence in the sequence.

    This chooses a suitable fuzzy search implementation according to the
    given parameters.

    Returns a list of fuzzysearch.Match objects describing the matching
    parts of the sequence.
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    if max_substitutions == 0:
        subseq_len = len(subsequence)
        exact_matches = []
        for start in search_exact(subsequence, sequence):
            end = start + subseq_len
            exact_matches.append(Match(start, end, 0, sequence[start:end]))
        return exact_matches
    if len(subsequence) // (max_substitutions + 1) >= 3:
        return find_near_matches_substitutions_ngrams(
            subsequence, sequence, max_substitutions,
        )
    return find_near_matches_substitutions_lp(
        subsequence, sequence, max_substitutions,
    )
def find_near_matches_substitutions_lp(subsequence, sequence,
                                       max_substitutions):
    """Search for near-matches of subsequence in sequence.

    The nearly-matching parts of the sequence must meet the following
    limitations (relative to the subsequence):

    * the number of character substitutions must be less than
      max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    match_gen = _find_near_matches_substitutions_lp(subsequence, sequence,
                                                    max_substitutions)
    return [match for match in match_gen]
def _find_near_matches_substitutions_lp(subsequence, sequence,
                                        max_substitutions):
    """Generator yielding substitutions-only Match objects.

    Scans the sequence once, tracking match counts for all N possible
    alignments of the N-character subsequence in a rotating ring buffer.
    Memory use is O(len(subsequence)); runtime is roughly
    O(len(sequence) * occurrences-per-character).
    """
    # simple optimization: prepare some often used things in advance
    _SUBSEQ_LEN = len(subsequence)
    _SUBSEQ_LEN_MINUS_ONE = _SUBSEQ_LEN - 1

    def make_match(start, end, dist):
        return Match(start, end, dist, matched=sequence[start:end])

    # prepare quick lookup of where a character appears in the subsequence
    char_indexes_in_subsequence = defaultdict(list)
    for (index, char) in enumerate(subsequence):
        char_indexes_in_subsequence[char].append(index)

    # we'll iterate over the sequence once, but the iteration is split into two
    # for loops; therefore we prepare an iterator in advance which will be used
    # in both of the loops
    sequence_enum_iter = enumerate(sequence)

    # We'll count the number of matching characters assuming various attempted
    # alignments of the subsequence to the sequence. At any point in the
    # sequence there will be N such alignments to update. We'll keep
    # these in a "circular array" (a.k.a. a ring) which we'll rotate after each
    # iteration to re-align the indexing.

    # Initialize the candidate counts by iterating over the first N-1 items in
    # the sequence. No possible matches in this step!
    candidates = deque([0], maxlen=_SUBSEQ_LEN)
    for (index, char) in islice(sequence_enum_iter, _SUBSEQ_LEN_MINUS_ONE):
        # only alignments which started at or before this index are live yet
        for subseq_index in [idx for idx in char_indexes_in_subsequence[char] if idx <= index]:
            candidates[subseq_index] += 1
        candidates.appendleft(0)

    # From the N-th item onwards, we'll update the candidate counts exactly as
    # above, and additionally check if the part of the sequence which began N-1
    # items before the current index was a near enough match to the given
    # sub-sequence.
    for (index, char) in sequence_enum_iter:
        for subseq_index in char_indexes_in_subsequence[char]:
            candidates[subseq_index] += 1

        # rotate the ring of candidate counts
        candidates.rotate(1)

        # fetch the count for the candidate which started N-1 items ago
        n_substitutions = _SUBSEQ_LEN - candidates[0]

        # set the count for the next index to zero
        candidates[0] = 0

        # if the candidate had few enough mismatches, yield a match
        if n_substitutions <= max_substitutions:
            yield make_match(
                start=index - _SUBSEQ_LEN_MINUS_ONE,
                end=index + 1,
                dist=n_substitutions,
            )
def has_near_match_substitutions_lp(subsequence, sequence, max_substitutions):
    """Return True iff the "less memory" search yields at least one match."""
    _check_arguments(subsequence, sequence, max_substitutions)
    match_gen = _find_near_matches_substitutions_lp(subsequence, sequence,
                                                    max_substitutions)
    return any(True for _ in match_gen)
def find_near_matches_substitutions_ngrams(subsequence, sequence,
                                           max_substitutions):
    """Search for near-matches of subsequence in sequence.

    The nearly-matching parts of the sequence must meet the following
    limitations (relative to the subsequence):

    * the number of character substitutions must be less than
      max_substitutions
    * no deletions or insertions are allowed
    """
    _check_arguments(subsequence, sequence, max_substitutions)
    # The underlying generator may yield duplicates for the same start
    # index; keep only the first match per start position.
    first_match_by_start = {}
    for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
                                                         max_substitutions):
        first_match_by_start.setdefault(match.start, match)
    return sorted(first_match_by_start.values(), key=lambda m: m.start)
def _find_near_matches_substitutions_ngrams(subsequence, sequence,
                                            max_substitutions):
    """Generator yielding substitutions-only matches via the ngram method.

    Pigeonhole principle: split the subsequence into max_substitutions + 1
    ngrams; any match with at most max_substitutions substitutions must
    contain at least one ngram verbatim. Each exact ngram hit is then
    verified by counting differences on either side.

    May yield several matches for the same start position (one per ngram);
    the public wrapper de-duplicates.
    """
    subseq_len = len(subsequence)
    seq_len = len(sequence)

    def make_match(start, end, dist):
        return Match(start, end, dist, matched=sequence[start:end])

    ngram_len = subseq_len // (max_substitutions + 1)
    if ngram_len == 0:
        raise ValueError(
            "The subsequence's length must be greater than max_substitutions!"
        )
    for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
        ngram_end = ngram_start + ngram_len
        subseq_before = subsequence[:ngram_start]
        subseq_after = subsequence[ngram_end:]
        # Only positions where the full candidate fits inside the sequence.
        for index in search_exact(
                subsequence[ngram_start:ngram_end], sequence,
                ngram_start, seq_len - (subseq_len - ngram_end),
        ):
            n_substitutions = 0
            seq_before = sequence[index - ngram_start:index]
            if subseq_before != seq_before:
                n_substitutions += count_differences_with_maximum(
                    seq_before, subseq_before,
                    max_substitutions - n_substitutions + 1)
                if n_substitutions > max_substitutions:
                    continue
            seq_after = sequence[index + ngram_len:index - ngram_start + subseq_len]
            if subseq_after != seq_after:
                # Even a single extra difference would exceed the budget.
                if n_substitutions == max_substitutions:
                    continue
                n_substitutions += count_differences_with_maximum(
                    seq_after, subseq_after,
                    max_substitutions - n_substitutions + 1)
                if n_substitutions > max_substitutions:
                    continue
            yield make_match(
                start=index - ngram_start,
                end=index - ngram_start + subseq_len,
                dist=n_substitutions,
            )
def has_near_match_substitutions_ngrams(subsequence, sequence,
max_substitutions):
"""search for near-matches of subsequence in sequence
This searches for near-matches, where the nearly-matching parts of the
sequence must meet the following limitations (relative to the subsequence):
* the number of character substitutions must be less than max_substitutions
* no deletions or insertions are allowed
"""
_check_arguments(subsequence, sequence, max_substitutions)
for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
return True
return False
# Prefer the C-extension implementations when the compiled module is
# available; fall back silently to the pure-Python versions otherwise.
try:
    from fuzzysearch._substitutions_only import \
        substitutions_only_has_near_matches_ngrams_byteslike, \
        substitutions_only_find_near_matches_ngrams_byteslike as \
        _subs_only_fnm_ngram_byteslike
except ImportError:
    pass
else:
    # Keep a reference to the pure-Python version for text (non-bytes) inputs.
    py_has_near_match_substitutions_ngrams = has_near_match_substitutions_ngrams

    @wraps(py_has_near_match_substitutions_ngrams)
    def has_near_match_substitutions_ngrams(subsequence, sequence,
                                            max_substitutions):
        # The C extension only supports bytes-like arguments; TypeError
        # signals an unsupported type, in which case we fall through.
        if not (
            isinstance(subsequence, text_type) or
            isinstance(sequence, text_type)
        ):
            try:
                return substitutions_only_has_near_matches_ngrams_byteslike(
                    subsequence, sequence, max_substitutions)
            except TypeError:
                pass
        return py_has_near_match_substitutions_ngrams(
            subsequence, sequence, max_substitutions)

    py_find_near_matches_substitutions_ngrams = \
        find_near_matches_substitutions_ngrams

    @wraps(py_find_near_matches_substitutions_ngrams)
    def find_near_matches_substitutions_ngrams(subsequence, sequence,
                                               max_substitutions):
        if not (
            isinstance(subsequence, text_type) or
            isinstance(sequence, text_type)
        ):
            try:
                results = _subs_only_fnm_ngram_byteslike(
                    subsequence, sequence, max_substitutions)
            except TypeError:
                pass
            else:
                # The extension returns raw start indexes; rebuild full
                # Match objects and keep only the best match per group.
                matches = [
                    Match(
                        index,
                        index + len(subsequence),
                        count_differences_with_maximum(
                            sequence[index:index+len(subsequence)],
                            subsequence,
                            max_substitutions + 1,
                        ),
                        matched=sequence[index:index + len(subsequence)],
                    )
                    for index in results
                ]
                return [
                    get_best_match_in_group(group)
                    for group in group_matches(matches)
                ]
        return py_find_near_matches_substitutions_ngrams(
            subsequence, sequence, max_substitutions)
class SubstitutionsOnlySearch(FuzzySearchBase):
    """FuzzySearchBase adapter for substitutions-only searching."""

    @classmethod
    def search(cls, subsequence, sequence, search_params):
        # The effective limit is the tightest of the configured bounds.
        limits = [search_params.max_l_dist, search_params.max_substitutions]
        actual_max_subs = min(limit for limit in limits if limit is not None)
        return find_near_matches_substitutions(subsequence, sequence,
                                               actual_max_subs)

    @classmethod
    def extra_items_for_chunked_search(cls, subsequence, search_params):
        # Substitutions never change the match length, so chunked search
        # needs no extra overlap items.
        return 0
| 39.220447
| 95
| 0.646546
|
from collections import deque, defaultdict
from itertools import islice
from functools import wraps
from fuzzysearch.common import FuzzySearchBase, Match, \
count_differences_with_maximum, get_best_match_in_group, group_matches
from fuzzysearch.compat import text_type
from fuzzysearch.search_exact import search_exact
def _check_arguments(subsequence, sequence, max_substitutions):
if not subsequence:
raise ValueError('Given subsequence is empty!')
if max_substitutions is None or max_substitutions < 0:
raise ValueError('Maximum number of substitutions must be >= 0!')
def has_near_match_substitutions(subsequence, sequence, max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
for start_index in search_exact(subsequence, sequence):
return True
return False
elif len(subsequence) // (max_substitutions + 1) >= 3:
return has_near_match_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return has_near_match_substitutions_lp(
subsequence, sequence, max_substitutions,
)
def find_near_matches_substitutions(subsequence, sequence, max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
if max_substitutions == 0:
return [
Match(start_index, start_index + len(subsequence), 0,
sequence[start_index:start_index + len(subsequence)])
for start_index in search_exact(subsequence, sequence)
]
elif len(subsequence) // (max_substitutions + 1) >= 3:
return find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions,
)
else:
return find_near_matches_substitutions_lp(
subsequence, sequence, max_substitutions,
)
def find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
return list(_find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions))
def _find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
_SUBSEQ_LEN = len(subsequence)
_SUBSEQ_LEN_MINUS_ONE = _SUBSEQ_LEN - 1
def make_match(start, end, dist):
return Match(start, end, dist, matched=sequence[start:end])
char_indexes_in_subsequence = defaultdict(list)
for (index, char) in enumerate(subsequence):
char_indexes_in_subsequence[char].append(index)
# for loops; therefore we prepare an iterator in advance which will be used
# in both of the loops
sequence_enum_iter = enumerate(sequence)
# We'll count the number of matching characters assuming various attempted
# these in a "circular array" (a.k.a. a ring) which we'll rotate after each
candidates = deque([0], maxlen=_SUBSEQ_LEN)
for (index, char) in islice(sequence_enum_iter, _SUBSEQ_LEN_MINUS_ONE):
for subseq_index in [idx for idx in char_indexes_in_subsequence[char] if idx <= index]:
candidates[subseq_index] += 1
candidates.appendleft(0)
# above, and additionally check if the part of the sequence whic began N-1
# items before the current index was a near enough match to the given
# sub-sequence.
for (index, char) in sequence_enum_iter:
for subseq_index in char_indexes_in_subsequence[char]:
candidates[subseq_index] += 1
# rotate the ring of candidate counts
candidates.rotate(1)
# fetch the count for the candidate which started N-1 items ago
n_substitutions = _SUBSEQ_LEN - candidates[0]
# set the count for the next index to zero
candidates[0] = 0
# if the candidate had few enough mismatches, yield a match
if n_substitutions <= max_substitutions:
yield make_match(
start=index - _SUBSEQ_LEN_MINUS_ONE,
end=index + 1,
dist=n_substitutions,
)
def has_near_match_substitutions_lp(subsequence, sequence, max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
for match in _find_near_matches_substitutions_lp(subsequence, sequence,
max_substitutions):
return True
return False
def find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
match_starts = set()
matches = []
for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if match.start not in match_starts:
match_starts.add(match.start)
matches.append(match)
return sorted(matches, key=lambda match: match.start)
def _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
subseq_len = len(subsequence)
seq_len = len(sequence)
def make_match(start, end, dist):
return Match(start, end, dist, matched=sequence[start:end])
ngram_len = subseq_len // (max_substitutions + 1)
if ngram_len == 0:
raise ValueError(
"The subsequence's length must be greater than max_substitutions!"
)
for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len):
ngram_end = ngram_start + ngram_len
subseq_before = subsequence[:ngram_start]
subseq_after = subsequence[ngram_end:]
for index in search_exact(
subsequence[ngram_start:ngram_end], sequence,
ngram_start, seq_len - (subseq_len - ngram_end),
):
n_substitutions = 0
seq_before = sequence[index - ngram_start:index]
if subseq_before != seq_before:
n_substitutions += count_differences_with_maximum(
seq_before, subseq_before,
max_substitutions - n_substitutions + 1)
if n_substitutions > max_substitutions:
continue
seq_after = sequence[index + ngram_len:index - ngram_start + subseq_len]
if subseq_after != seq_after:
if n_substitutions == max_substitutions:
continue
n_substitutions += count_differences_with_maximum(
seq_after, subseq_after,
max_substitutions - n_substitutions + 1)
if n_substitutions > max_substitutions:
continue
yield make_match(
start=index - ngram_start,
end=index - ngram_start + subseq_len,
dist=n_substitutions,
)
def has_near_match_substitutions_ngrams(subsequence, sequence,
max_substitutions):
_check_arguments(subsequence, sequence, max_substitutions)
for match in _find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
return True
return False
try:
from fuzzysearch._substitutions_only import \
substitutions_only_has_near_matches_ngrams_byteslike, \
substitutions_only_find_near_matches_ngrams_byteslike as \
_subs_only_fnm_ngram_byteslike
except ImportError:
pass
else:
py_has_near_match_substitutions_ngrams = has_near_match_substitutions_ngrams
@wraps(py_has_near_match_substitutions_ngrams)
def has_near_match_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if not (
isinstance(subsequence, text_type) or
isinstance(sequence, text_type)
):
try:
return substitutions_only_has_near_matches_ngrams_byteslike(
subsequence, sequence, max_substitutions)
except TypeError:
pass
return py_has_near_match_substitutions_ngrams(
subsequence, sequence, max_substitutions)
py_find_near_matches_substitutions_ngrams = \
find_near_matches_substitutions_ngrams
@wraps(py_find_near_matches_substitutions_ngrams)
def find_near_matches_substitutions_ngrams(subsequence, sequence,
max_substitutions):
if not (
isinstance(subsequence, text_type) or
isinstance(sequence, text_type)
):
try:
results = _subs_only_fnm_ngram_byteslike(
subsequence, sequence, max_substitutions)
except TypeError:
pass
else:
matches = [
Match(
index,
index + len(subsequence),
count_differences_with_maximum(
sequence[index:index+len(subsequence)],
subsequence,
max_substitutions + 1,
),
matched=sequence[index:index + len(subsequence)],
)
for index in results
]
return [
get_best_match_in_group(group)
for group in group_matches(matches)
]
return py_find_near_matches_substitutions_ngrams(
subsequence, sequence, max_substitutions)
class SubstitutionsOnlySearch(FuzzySearchBase):
@classmethod
def search(cls, subsequence, sequence, search_params):
actual_max_subs = min(
x for x in [search_params.max_l_dist,
search_params.max_substitutions]
if x is not None
)
return find_near_matches_substitutions(subsequence, sequence,
actual_max_subs)
@classmethod
def extra_items_for_chunked_search(cls, subsequence, search_params):
return 0
| true
| true
|
f7089fc1652a59fe4bfd3a036437c82422a909ff
| 4,551
|
py
|
Python
|
opensanctions/crawlers/us_trade_csl.py
|
opensanctions/opensanctions
|
7dff9597f982d8918699b2cde3c7c337a941622d
|
[
"MIT"
] | 23
|
2022-02-09T12:50:36.000Z
|
2022-03-30T16:04:19.000Z
|
opensanctions/crawlers/us_trade_csl.py
|
opensanctions/opennames
|
39675797b0e70e71f54edff2b8e623e23aef9c15
|
[
"MIT"
] | 10
|
2022-02-03T08:44:03.000Z
|
2022-03-21T15:27:40.000Z
|
opensanctions/crawlers/us_trade_csl.py
|
opensanctions/opennames
|
39675797b0e70e71f54edff2b8e623e23aef9c15
|
[
"MIT"
] | 2
|
2022-02-16T11:51:05.000Z
|
2022-03-02T16:55:08.000Z
|
import json
from banal import ensure_list
from functools import cache
from pantomime.types import JSON
from requests.exceptions import RequestException
from opensanctions.core import Dataset, Context
from opensanctions import helpers as h
# Date formats seen in CSL date fields, e.g. "12 Jan 2020", "January 2020", "2020".
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
@cache
def deref_url(context: Context, url):
    """Resolve *url* to its final (post-redirect) location.

    Falls back to the original URL when the request fails. Results are
    memoized per (context, url) pair.
    """
    try:
        response = context.fetch_response(url)
    except RequestException:
        return url
    return str(response.url)
def parse_result(context: Context, result):
    """Convert one CSL result dict into an entity plus a Sanction and emit both.

    Every consumed field is popped from ``result`` so that the trailing
    ``h.audit_data`` call can flag unexpected leftovers.
    """
    type_ = result.pop("type", None)
    schema = context.lookup_value("type", type_)
    if schema is None:
        context.log.error("Unknown result type", type=type_)
        return
    entity = context.make(schema)
    entity.id = context.make_slug(result.pop("id"))
    entity_number = result.pop("entity_number", None)
    if entity_number is not None:
        assert int(entity_number)
        # OFAC-derived records share IDs with the us_ofac_sdn dataset.
        entity.id = context.make_slug(entity_number, dataset="us_ofac_sdn")
    name = result.pop("name", None)
    if name is not None:
        # Fix: previously this called .replace() unconditionally, raising
        # AttributeError for records without a "name" field.
        name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
    entity.add("name", name)
    for alias in ensure_list(result.pop("alt_names", "")):
        entity.add("alias", alias.split("; "))
    entity.add("notes", result.pop("remarks", None))
    entity.add("country", result.pop("country", None))
    if entity.schema.is_a("Person"):
        entity.add("position", result.pop("title", None))
        entity.add("nationality", result.pop("nationalities", None))
        entity.add("nationality", result.pop("citizenships", None))
        for dob in result.pop("dates_of_birth", []):
            entity.add("birthDate", h.parse_date(dob, FORMATS))
        entity.add("birthPlace", result.pop("places_of_birth", None))
    elif entity.schema.is_a("Vessel"):
        entity.add("flag", result.pop("vessel_flag", None))
        entity.add("callSign", result.pop("call_sign", None))
        entity.add("type", result.pop("vessel_type", None))
        grt = result.pop("gross_registered_tonnage", None)
        entity.add("grossRegisteredTonnage", grt)
        gt = result.pop("gross_tonnage", None)
        entity.add("tonnage", gt)

        # TODO: make adjacent owner entity
        result.pop("vessel_owner", None)

    # Person-only fields must be absent (or already popped) for other schemata.
    assert result.pop("title", None) is None
    assert not len(result.pop("nationalities", []))
    assert not len(result.pop("citizenships", []))
    assert not len(result.pop("dates_of_birth", []))
    assert not len(result.pop("places_of_birth", []))

    for address in result.pop("addresses", []):
        obj = h.make_address(
            context,
            street=address.get("address"),
            city=address.get("city"),
            postal_code=address.get("postal_code"),
            region=address.get("state"),
            country=address.get("country"),
        )
        h.apply_address(context, entity, obj)
    for ident in result.pop("ids", []):
        country = ident.pop("country")
        entity.add("country", country)
        h.apply_feature(
            context,
            entity,
            ident.pop("type"),
            ident.pop("number"),
            country=country,
            date_formats=FORMATS,
            start_date=ident.pop("issue_date", None),
            end_date=ident.pop("expiration_date", None),
        )

    sanction = context.make("Sanction")
    sanction.id = context.make_id(entity.id, "Sanction")
    sanction.add("entity", entity)
    sanction.add("program", result.pop("programs", []))
    sanction.add("provisions", result.pop("license_policy", []))
    sanction.add("reason", result.pop("license_requirement", []))
    sanction.add("authorityId", result.pop("federal_register_notice", None))
    sanction.add("startDate", result.pop("start_date", None))
    sanction.add("endDate", result.pop("end_date", None))
    sanction.add("country", "us")
    sanction.add("authority", result.pop("source", None))
    # Resolve the (often redirecting) source URL once; deref_url is cached.
    source_url = deref_url(context, result.pop("source_information_url"))
    sanction.add("sourceUrl", source_url)
    result.pop("source_list_url")

    context.emit(sanction)
    context.emit(entity, target=True)
    h.audit_data(result, ignore=["standard_order"])
def crawl(context: Context):
    """Download the CSL JSON export and parse every result it contains."""
    source_path = context.fetch_resource("source.json", context.dataset.data.url)
    context.export_resource(source_path, JSON, title=context.SOURCE_TITLE)
    with open(source_path, "r") as handle:
        payload = json.load(handle)
    for entry in payload.get("results"):
        parse_result(context, entry)
| 37
| 81
| 0.63964
|
import json
from banal import ensure_list
from functools import cache
from pantomime.types import JSON
from requests.exceptions import RequestException
from opensanctions.core import Dataset, Context
from opensanctions import helpers as h
FORMATS = ["%d %b %Y", "%d %B %Y", "%Y", "%b %Y", "%B %Y"]
@cache
def deref_url(context: Context, url):
try:
res = context.fetch_response(url)
return str(res.url)
except RequestException:
return url
def parse_result(context: Context, result):
type_ = result.pop("type", None)
schema = context.lookup_value("type", type_)
if schema is None:
context.log.error("Unknown result type", type=type_)
return
entity = context.make(schema)
entity.id = context.make_slug(result.pop("id"))
entity_number = result.pop("entity_number", None)
if entity_number is not None:
assert int(entity_number)
entity.id = context.make_slug(entity_number, dataset="us_ofac_sdn")
name = result.pop("name", None)
name = name.replace("and any successor, sub-unit, or subsidiary thereof", "")
entity.add("name", name)
for alias in ensure_list(result.pop("alt_names", "")):
entity.add("alias", alias.split("; "))
entity.add("notes", result.pop("remarks", None))
entity.add("country", result.pop("country", None))
if entity.schema.is_a("Person"):
entity.add("position", result.pop("title", None))
entity.add("nationality", result.pop("nationalities", None))
entity.add("nationality", result.pop("citizenships", None))
for dob in result.pop("dates_of_birth", []):
entity.add("birthDate", h.parse_date(dob, FORMATS))
entity.add("birthPlace", result.pop("places_of_birth", None))
elif entity.schema.is_a("Vessel"):
entity.add("flag", result.pop("vessel_flag", None))
entity.add("callSign", result.pop("call_sign", None))
entity.add("type", result.pop("vessel_type", None))
grt = result.pop("gross_registered_tonnage", None)
entity.add("grossRegisteredTonnage", grt)
gt = result.pop("gross_tonnage", None)
entity.add("tonnage", gt)
result.pop("vessel_owner", None)
assert result.pop("title", None) is None
assert not len(result.pop("nationalities", []))
assert not len(result.pop("citizenships", []))
assert not len(result.pop("dates_of_birth", []))
assert not len(result.pop("places_of_birth", []))
for address in result.pop("addresses", []):
obj = h.make_address(
context,
street=address.get("address"),
city=address.get("city"),
postal_code=address.get("postal_code"),
region=address.get("state"),
country=address.get("country"),
)
h.apply_address(context, entity, obj)
for ident in result.pop("ids", []):
country = ident.pop("country")
entity.add("country", country)
h.apply_feature(
context,
entity,
ident.pop("type"),
ident.pop("number"),
country=country,
date_formats=FORMATS,
start_date=ident.pop("issue_date", None),
end_date=ident.pop("expiration_date", None),
)
sanction = context.make("Sanction")
sanction.id = context.make_id(entity.id, "Sanction")
sanction.add("entity", entity)
sanction.add("program", result.pop("programs", []))
sanction.add("provisions", result.pop("license_policy", []))
sanction.add("reason", result.pop("license_requirement", []))
sanction.add("authorityId", result.pop("federal_register_notice", None))
sanction.add("startDate", result.pop("start_date", None))
sanction.add("endDate", result.pop("end_date", None))
sanction.add("country", "us")
sanction.add("authority", result.pop("source", None))
source_url = deref_url(context, result.pop("source_information_url"))
sanction.add("sourceUrl", source_url)
result.pop("source_list_url")
context.emit(sanction)
context.emit(entity, target=True)
h.audit_data(result, ignore=["standard_order"])
def crawl(context: Context):
    """Fetch the source JSON, then emit one entity/sanction per result.

    Args:
        context: The crawler run context (provides fetching, resource
            export and entity emission).
    """
    path = context.fetch_resource("source.json", context.dataset.data.url)
    context.export_resource(path, JSON, title=context.SOURCE_TITLE)
    with open(path, "r") as file:
        data = json.load(file)
    # Default to an empty list so a payload without a "results" key (or with
    # "results": null) means "nothing to do" instead of raising TypeError
    # when iterating over None.
    for result in data.get("results") or []:
        parse_result(context, result)
| true
| true
|
f7089fd57cec358e4874c6bc3d56e045107f7023
| 6,383
|
py
|
Python
|
lottery_ticket/foundations/trainer.py
|
mitchellgordon95/lottery-ticket-hypothesis
|
3b2abee4b1e9ba00fe8501ac86652e2604736405
|
[
"Apache-2.0"
] | 1
|
2019-06-05T03:13:48.000Z
|
2019-06-05T03:13:48.000Z
|
lottery_ticket/foundations/trainer.py
|
mitchellgordon95/lottery-ticket-hypothesis
|
3b2abee4b1e9ba00fe8501ac86652e2604736405
|
[
"Apache-2.0"
] | null | null | null |
lottery_ticket/foundations/trainer.py
|
mitchellgordon95/lottery-ticket-hypothesis
|
3b2abee4b1e9ba00fe8501ac86652e2604736405
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A function that trains a network on a dataset."""
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import save_restore
import tensorflow as tf
def train(sess, dataset, model, optimizer_fn, training_len, output_dir,
          **params):
  """Train a model on a dataset.

  Training continues until training_len iterations or epochs have taken place.

  Args:
    sess: A tensorflow session
    dataset: The dataset on which to train (a child of dataset_base.DatasetBase)
    model: The model to train (a child of model_base.ModelBase)
    optimizer_fn: A function that, when called, returns an instance of an
      optimizer object to be used to optimize the network.
    training_len: A tuple whose first value is the unit of measure
      ("epochs" or "iterations") and whose second value is the number of
      units for which the network should be trained.
    output_dir: The directory to which any output should be saved.
    **params: Other parameters.
      save_summaries is whether to save summary data.
      save_network is whether to save the network before and after training.
      test_interval is None if the test set should not be evaluated; otherwise,
        frequency (in iterations) at which the test set should be run.
      validate_interval is analogous to test_interval.

  Returns:
    A tuple of (initial_weights, final_weights, final_train_acc): the network
    weights before training, the weights after training, and the accuracy on
    the last training batch that was run.
  """
  # Create initial session parameters.
  optimize = optimizer_fn().minimize(model.loss)
  sess.run(tf.global_variables_initializer())
  initial_weights = model.get_current_weights(sess)
  train_handle = dataset.get_train_handle(sess)
  test_handle = dataset.get_test_handle(sess)
  validate_handle = dataset.get_validate_handle(sess)

  # Optional operations to perform before training.
  if params.get('save_summaries', False):
    writer = tf.summary.FileWriter(paths.summaries(output_dir))
    train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')
    test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')
    validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')
  if params.get('save_network', False):
    save_restore.save_network(paths.initial(output_dir), initial_weights)
    save_restore.save_network(paths.masks(output_dir), model.masks)

  # Helper functions to collect and record summaries.
  def record_summaries(iteration, records, fp):
    """Records summaries obtained from evaluating the network.

    Args:
      iteration: The current training iteration as an integer.
      records: A list of records to be written.
      fp: A file to which the records should be logged in an easier-to-parse
        format than the tensorflow summary files.
    """
    if params.get('save_summaries', False):
      log = ['iteration', str(iteration)]
      for record in records:
        # Log to tensorflow summaries for tensorboard.
        writer.add_summary(record, iteration)
        # Log to text file for convenience.
        summary_proto = tf.Summary()
        summary_proto.ParseFromString(record)
        value = summary_proto.value[0]
        log += [value.tag, str(value.simple_value)]
      fp.write(','.join(log) + '\n')

  def collect_test_summaries(iteration):
    # Evaluate the test set every test_interval iterations (if requested).
    if (params.get('save_summaries', False) and
        'test_interval' in params and
        iteration % params['test_interval'] == 0):
      sess.run(dataset.test_initializer)
      records = sess.run(model.test_summaries, {dataset.handle: test_handle})
      record_summaries(iteration, records, test_file)

  def collect_validate_summaries(iteration):
    # Evaluate the validation set every validate_interval iterations.
    if (params.get('save_summaries', False) and
        'validate_interval' in params and
        iteration % params['validate_interval'] == 0):
      sess.run(dataset.validate_initializer)
      records = sess.run(model.validate_summaries,
                         {dataset.handle: validate_handle})
      record_summaries(iteration, records, validate_file)

  # Train for the specified number of epochs. This behavior is encapsulated
  # in a function so that it is possible to break out of multiple loops
  # simultaneously.
  def training_loop():
    """The main training loop encapsulated in a function."""
    iteration = 0
    epoch = 0
    last_train_acc = None
    while True:
      sess.run(dataset.train_initializer)
      epoch += 1

      # End training if we have passed the epoch limit.
      if training_len[0] == 'epochs' and epoch > training_len[1]:
        return last_train_acc

      # One training epoch.
      while True:
        try:
          iteration += 1

          # End training if we have passed the iteration limit.
          if training_len[0] == 'iterations' and iteration > training_len[1]:
            return last_train_acc

          # Train.
          results = sess.run([optimize, model.accuracy] + model.train_summaries,
                             {dataset.handle: train_handle})
          last_train_acc = results[1]
          records = results[2:]
          record_summaries(iteration, records, train_file)

          # Collect test and validation data if applicable.
          collect_test_summaries(iteration)
          collect_validate_summaries(iteration)

        # End of epoch handling.
        except tf.errors.OutOfRangeError:
          break

  # Run the training loop.
  final_train_acc = training_loop()

  # Clean up.
  if params.get('save_summaries', False):
    # Close the FileWriter as well as the text logs: previously only the
    # GFiles were closed, which could leave buffered summary events unflushed.
    writer.close()
    train_file.close()
    test_file.close()
    validate_file.close()

  # Retrieve the final weights of the model.
  final_weights = model.get_current_weights(sess)
  if params.get('save_network', False):
    save_restore.save_network(paths.final(output_dir), final_weights)

  return initial_weights, final_weights, final_train_acc
| 38.920732
| 80
| 0.704841
|
from lottery_ticket.foundations import paths
from lottery_ticket.foundations import save_restore
import tensorflow as tf
def train(sess, dataset, model, optimizer_fn, training_len, output_dir,
          **params):
  """Train a model on a dataset.

  Training continues until training_len iterations or epochs have taken place.

  Args:
    sess: A tensorflow session.
    dataset: The dataset on which to train.
    model: The model to train.
    optimizer_fn: A function that, when called, returns an optimizer instance
      used to minimize model.loss.
    training_len: A ("epochs"|"iterations", count) tuple bounding training.
    output_dir: The directory to which any output should be saved.
    **params: Optional flags: save_summaries, save_network, test_interval,
      validate_interval.

  Returns:
    A tuple (initial_weights, final_weights, final_train_acc).
  """
  # Build the train op and capture the freshly-initialized weights.
  optimize = optimizer_fn().minimize(model.loss)
  sess.run(tf.global_variables_initializer())
  initial_weights = model.get_current_weights(sess)
  train_handle = dataset.get_train_handle(sess)
  test_handle = dataset.get_test_handle(sess)
  validate_handle = dataset.get_validate_handle(sess)
  # Optional setup: summary writer/log files and initial network snapshot.
  if params.get('save_summaries', False):
    writer = tf.summary.FileWriter(paths.summaries(output_dir))
    train_file = tf.gfile.GFile(paths.log(output_dir, 'train'), 'w')
    test_file = tf.gfile.GFile(paths.log(output_dir, 'test'), 'w')
    validate_file = tf.gfile.GFile(paths.log(output_dir, 'validate'), 'w')
  if params.get('save_network', False):
    save_restore.save_network(paths.initial(output_dir), initial_weights)
    save_restore.save_network(paths.masks(output_dir), model.masks)
  def record_summaries(iteration, records, fp):
    """Writes summary records to tensorboard and to a CSV-ish text log."""
    if params.get('save_summaries', False):
      log = ['iteration', str(iteration)]
      for record in records:
        writer.add_summary(record, iteration)
        # Re-parse the serialized proto so the value can be text-logged.
        summary_proto = tf.Summary()
        summary_proto.ParseFromString(record)
        value = summary_proto.value[0]
        log += [value.tag, str(value.simple_value)]
      fp.write(','.join(log) + '\n')
  def collect_test_summaries(iteration):
    # Evaluate the test set every test_interval iterations (if configured).
    if (params.get('save_summaries', False) and
        'test_interval' in params and
        iteration % params['test_interval'] == 0):
      sess.run(dataset.test_initializer)
      records = sess.run(model.test_summaries, {dataset.handle: test_handle})
      record_summaries(iteration, records, test_file)
  def collect_validate_summaries(iteration):
    # Evaluate the validation set every validate_interval iterations.
    if (params.get('save_summaries', False) and
        'validate_interval' in params and
        iteration % params['validate_interval'] == 0):
      sess.run(dataset.validate_initializer)
      records = sess.run(model.validate_summaries,
                         {dataset.handle: validate_handle})
      record_summaries(iteration, records, validate_file)
  def training_loop():
    """Runs training; returns the accuracy of the last training batch."""
    iteration = 0
    epoch = 0
    last_train_acc = None
    while True:
      sess.run(dataset.train_initializer)
      epoch += 1
      # Stop when the epoch budget is exhausted.
      if training_len[0] == 'epochs' and epoch > training_len[1]:
        return last_train_acc
      # One epoch: iterate until the dataset iterator is exhausted.
      while True:
        try:
          iteration += 1
          # Stop when the iteration budget is exhausted.
          if training_len[0] == 'iterations' and iteration > training_len[1]:
            return last_train_acc
          results = sess.run([optimize, model.accuracy] + model.train_summaries,
                             {dataset.handle: train_handle})
          last_train_acc = results[1]
          records = results[2:]
          record_summaries(iteration, records, train_file)
          collect_test_summaries(iteration)
          collect_validate_summaries(iteration)
        except tf.errors.OutOfRangeError:
          # Iterator exhausted: end of epoch.
          break
  final_train_acc = training_loop()
  # Close text logs. NOTE(review): the FileWriter is not closed here —
  # confirm whether buffered summary events can be lost.
  if params.get('save_summaries', False):
    train_file.close()
    test_file.close()
    validate_file.close()
  # Snapshot the trained weights (and optionally persist them).
  final_weights = model.get_current_weights(sess)
  if params.get('save_network', False):
    save_restore.save_network(paths.final(output_dir), final_weights)
  return initial_weights, final_weights, final_train_acc
| true
| true
|
f708a0ea971eb7efea305a6a5c363b305b1237e7
| 30,838
|
py
|
Python
|
tensorflow_probability/python/experimental/mcmc/windowed_sampling_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 3,670
|
2018-02-14T03:29:40.000Z
|
2022-03-30T01:19:52.000Z
|
tensorflow_probability/python/experimental/mcmc/windowed_sampling_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,395
|
2018-02-24T02:28:49.000Z
|
2022-03-31T16:12:06.000Z
|
tensorflow_probability/python/experimental/mcmc/windowed_sampling_test.py
|
jakee417/probability-1
|
ae7117f37ac441bc7a888167ea23e5e620c5bcde
|
[
"Apache-2.0"
] | 1,135
|
2018-02-14T01:51:10.000Z
|
2022-03-28T02:24:11.000Z
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for windowed sampling."""
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.experimental import distribute
from tensorflow_probability.python.experimental.mcmc import windowed_sampling
from tensorflow_probability.python.internal import callable_util
from tensorflow_probability.python.internal import distribute_test_lib
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal import unnest
JAX_MODE = False
tfb = tfp.bijectors
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root
NUM_SCHOOLS = 8 # number of schools
TREATMENT_EFFECTS = [28., 8, -3, 7, -1, 1, 18, 12]
TREATMENT_STDDEVS = [15., 10, 16, 11, 9, 11, 10, 18]
def eight_schools_coroutine():
  """Returns the eight-schools model as a JointDistributionCoroutine."""

  @tfd.JointDistributionCoroutine
  def model():
    avg_effect = yield Root(tfd.Normal(0., 5., name='avg_effect'))
    avg_stddev = yield Root(tfd.HalfNormal(5., name='avg_stddev'))
    school_effects_std = yield Root(
        tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'))
    # Non-centered parameterization: effect = avg + stddev * std_normal.
    yield tfd.Independent(
        tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
                        avg_stddev[..., tf.newaxis] * school_effects_std),
                   scale=tf.constant(TREATMENT_STDDEVS)),
        reinterpreted_batch_ndims=1,
        name='treatment_effects')
  return model
def eight_schools_sequential():
  """Returns the eight-schools model as a JointDistributionSequential."""
  model = tfd.JointDistributionSequential([
      tfd.Normal(0., 5., name='avg_effect'),
      tfd.HalfNormal(5., name='avg_stddev'),
      tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
      # pylint: disable=g-long-lambda
      lambda school_effects_std, avg_stddev, avg_effect: tfd.Independent(
          tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
                          avg_stddev[..., tf.newaxis] * school_effects_std),
                     scale=tf.constant(TREATMENT_STDDEVS)),
          reinterpreted_batch_ndims=1,
          name='treatment_effects')])
  # pylint: enable=g-long-lambda
  return model
def eight_schools_named():
  """Returns the eight-schools model as a JointDistributionNamed."""
  model = tfd.JointDistributionNamed(
      dict(
          avg_effect=tfd.Normal(0., 5., name='avg_effect'),
          avg_stddev=tfd.HalfNormal(5., name='avg_stddev'),
          school_effects_std=tfd.Sample(
              tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
          # pylint: disable=g-long-lambda
          treatment_effects=lambda school_effects_std, avg_stddev, avg_effect:
          tfd.Independent(
              tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
                              avg_stddev[..., tf.newaxis] * school_effects_std),
                         scale=tf.constant(TREATMENT_STDDEVS)),
              reinterpreted_batch_ndims=1,
              name='treatment_effects')))
  # pylint: enable=g-long-lambda
  return model
def eight_schools_nested():
  """Eight-schools model with avg effect/stddev nested in a sub-joint."""
  model = tfd.JointDistributionNamed(
      dict(
          effect_and_stddev=tfd.JointDistributionSequential([
              tfd.Normal(0., 5., name='avg_effect'),
              tfd.HalfNormal(5., name='avg_stddev')], name='effect_and_stddev'),
          school_effects_std=tfd.Sample(
              tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
          # pylint: disable=g-long-lambda
          treatment_effects=lambda school_effects_std, effect_and_stddev:
          tfd.Independent(
              tfd.Normal(loc=(effect_and_stddev[0][..., tf.newaxis] +
                              effect_and_stddev[1][..., tf.newaxis] *
                              school_effects_std),
                         scale=tf.constant(TREATMENT_STDDEVS)),
              reinterpreted_batch_ndims=1,
              name='treatment_effects')))
  # pylint: enable=g-long-lambda
  return model
def _gen_gaussian_updating_example(x_dim, y_dim, seed):
  """An implementation of section 2.3.3 from [1].

  We initialize a joint distribution

  x ~ N(mu, Lambda^{-1})
  y ~ N(Ax, L^{-1})

  Then condition the model on an observation for y. We can test to confirm that
  Cov(p(x | y_obs)) is near to

  Sigma = (Lambda + A^T L A)^{-1}

  This test can actually check whether the posterior samples have the proper
  covariance, and whether the windowed tuning recovers 1 / diag(Sigma) as the
  diagonal scaling factor.

  References:
  [1] Bishop, Christopher M. Pattern Recognition and Machine Learning.
      Springer, 2006.

  Args:
    x_dim: int
    y_dim: int
    seed: PRNG seed; see `tfp.random.sanitize_seed` for details.

  Returns:
    (tfd.JointDistribution, tf.Tensor), representing the joint distribution
    above, and the posterior variance.
  """
  # One sub-seed per random draw so the example is fully deterministic.
  seeds = samplers.split_seed(seed, 6)
  x_mean = samplers.normal((x_dim,), seed=seeds[0])
  x_scale_diag = samplers.normal((x_dim,), seed=seeds[1])
  y_scale_diag = samplers.normal((y_dim,), seed=seeds[2])
  scale_mat = samplers.normal((y_dim, x_dim), seed=seeds[3])
  y_shift = samplers.normal((y_dim,), seed=seeds[4])

  @tfd.JointDistributionCoroutine
  def model():
    x = yield Root(tfd.MultivariateNormalDiag(
        x_mean, scale_diag=x_scale_diag, name='x'))
    yield tfd.MultivariateNormalDiag(
        tf.linalg.matvec(scale_mat, x) + y_shift,
        scale_diag=y_scale_diag,
        name='y')

  dists, _ = model.sample_distributions(seed=seeds[5])
  precision_x = tf.linalg.inv(dists.x.covariance())
  precision_y = tf.linalg.inv(dists.y.covariance())
  # Posterior precision Lambda + A^T L A, per the docstring formula.
  true_cov = tf.linalg.inv(precision_x +
                           tf.linalg.matmul(
                               tf.linalg.matmul(scale_mat, precision_y,
                                                transpose_a=True),
                               scale_mat))
  return model, tf.linalg.diag_part(true_cov)
@test_util.test_graph_and_eager_modes
class WindowedSamplingTest(test_util.TestCase):
  """End-to-end tests for windowed adaptive HMC/NUTS sampling."""

  @parameterized.named_parameters(
      dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
      [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
       eight_schools_nested])
  def test_hmc_type_checks(self, model_fn):
    """Windowed HMC runs on every JointDistribution flavor."""
    model = model_fn()
    pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}

    @tf.function(autograph=False)
    def do_sample(seed):
      return tfp.experimental.mcmc.windowed_adaptive_hmc(
          3, model, num_leapfrog_steps=2, num_adaptation_steps=21,
          seed=seed, **pins)

    draws, _ = do_sample(test_util.test_seed())
    self.evaluate(draws)

  @parameterized.named_parameters(
      dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
      [eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
       eight_schools_nested])
  def test_nuts_type_checks(self, model_fn):
    """Windowed NUTS runs on every JointDistribution flavor."""
    model = model_fn()
    pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}

    @tf.function
    def do_sample(seed):
      return tfp.experimental.mcmc.windowed_adaptive_nuts(
          3, model, max_tree_depth=2, num_adaptation_steps=50,
          seed=seed, **pins)

    draws, _ = do_sample(test_util.test_seed())
    self.evaluate(draws)

  def test_hmc_samples_well(self):
    """Checks R-hat of HMC draws on eight schools stays below 1.5."""
    model = eight_schools_named()
    pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}

    @tf.function
    def do_sample(seed):
      return tfp.experimental.mcmc.windowed_adaptive_hmc(
          400, model, num_leapfrog_steps=12, seed=seed,
          **pins)

    draws, _ = do_sample(test_util.test_seed())
    flat_draws = tf.nest.flatten(
        model.experimental_pin(**pins)._model_flatten(draws))
    max_scale_reduction = tf.reduce_max(
        tf.nest.map_structure(tf.reduce_max,
                              tfp.mcmc.potential_scale_reduction(flat_draws)))
    self.assertLess(self.evaluate(max_scale_reduction), 1.5)

  def test_nuts_samples_well(self):
    """Checks R-hat of NUTS draws on eight schools stays below 1.05."""
    model = eight_schools_named()
    pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}

    @tf.function
    def do_sample():
      return tfp.experimental.mcmc.windowed_adaptive_nuts(
          200, model, max_tree_depth=5, seed=test_util.test_seed(),
          **pins)

    draws, _ = do_sample()
    flat_draws = tf.nest.flatten(
        model.experimental_pin(**pins)._model_flatten(draws))
    max_scale_reduction = tf.reduce_max(
        tf.nest.map_structure(tf.reduce_max,
                              tfp.mcmc.potential_scale_reduction(flat_draws)))
    self.assertLess(self.evaluate(max_scale_reduction), 1.05)

  @parameterized.named_parameters(
      dict(testcase_name=f'_{num_draws}', num_draws=num_draws)
      for num_draws in [0, 1, 500, 499, 100, 10000])
  def test_get_window_sizes(self, num_draws):
    """Window sizes partition the draws: first + (1+2+4+8)*slow + last."""
    [first_window,
     slow_window,
     last_window] = windowed_sampling._get_window_sizes(num_draws)
    self.assertEqual(first_window +
                     slow_window +
                     2 * slow_window +
                     4 * slow_window +
                     8 * slow_window +
                     last_window, num_draws)
    if num_draws == 500:
      self.assertEqual(slow_window, 25)
      self.assertEqual(first_window, 75)
      self.assertEqual(last_window, 50)

  def test_explicit_init(self):
    """Explicitly supplied init positions pass through _setup_mcmc."""
    sample_dist = tfd.JointDistributionSequential(
        [tfd.HalfNormal(1., name=f'dist_{idx}') for idx in range(4)])
    explicit_init = [tf.ones(20) for _ in range(3)]
    _, init, bijector, _, _, _ = windowed_sampling._setup_mcmc(
        model=sample_dist,
        n_chains=[20],
        init_position=explicit_init,
        seed=test_util.test_seed(),
        dist_3=1.)
    self.assertAllEqual(self.evaluate(init),
                        tf.convert_to_tensor(bijector(explicit_init)))

  def test_explicit_init_samples(self):
    """Sampling works with a partial explicit `current_state`."""
    stream = test_util.test_seed_stream()
    # Compute everything in a function so it is consistent in graph mode
    @tf.function
    def do_sample():
      jd_model = tfd.JointDistributionNamed({
          'x': tfd.HalfNormal(1.),
          'y': lambda x: tfd.Normal(0., x)})
      init = {'x': tf.ones(64)}
      return tfp.experimental.mcmc.windowed_adaptive_hmc(
          10,
          jd_model,
          num_adaptation_steps=200,
          current_state=init,
          num_leapfrog_steps=5,
          discard_tuning=False,
          y=tf.constant(1.),
          seed=stream(),
          trace_fn=None)

    self.evaluate(do_sample())

  def test_valid_init(self):
    """Initialization retries until a finite log-prob position is found."""

    class _HalfNormal(tfd.HalfNormal):

      def _default_event_space_bijector(self):
        # This bijector is intentionally mis-specified so that ~50% of
        # initialiations will fail.
        return tfb.Identity(validate_args=self.validate_args)

    tough_dist = tfd.JointDistributionSequential(
        [_HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])

    # Twenty chains with three parameters gives a 1 / 2^60 chance of
    # initializing with a finite log probability by chance.
    _, init, _, _, _, _ = windowed_sampling._setup_mcmc(
        model=tough_dist,
        n_chains=[20],
        seed=test_util.test_seed(),
        dist_3=1.)
    self.assertAllGreater(self.evaluate(init), 0.)

  def test_extra_pins_not_required(self):
    """An already-pinned model needs no extra pins in _setup_mcmc."""
    model = tfd.JointDistributionSequential([
        tfd.Normal(0., 1., name='x'),
        lambda x: tfd.Normal(x, 1., name='y')
    ])
    pinned = model.experimental_pin(y=4.2)

    # No explicit pins are passed, since the model is already pinned.
    _, init, _, _, _, _ = windowed_sampling._setup_mcmc(
        model=pinned, n_chains=[20],
        seed=test_util.test_seed())
    self.assertLen(init, 1)

  def test_hmc_fitting_gaussian(self):
    """HMC windowed adaptation recovers the true posterior variance."""
    # See docstring to _gen_gaussian_updating_example
    x_dim = 3
    y_dim = 12

    stream = test_util.test_seed_stream()

    # Compute everything in a function so it is consistent in graph mode
    @tf.function
    def do_sample():
      jd_model, true_var = _gen_gaussian_updating_example(
          x_dim, y_dim, stream())
      y_val = jd_model.sample(seed=stream()).y
      _, trace = tfp.experimental.mcmc.windowed_adaptive_hmc(
          1,
          jd_model,
          n_chains=1,
          num_adaptation_steps=10000,
          num_leapfrog_steps=16,
          discard_tuning=False,
          y=y_val,
          seed=stream())

      # Get the final scaling used for the mass matrix - this is a measure
      # of how well the windowed adaptation recovered the true variance
      final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
      return final_scaling, true_var
    final_scaling, true_var = do_sample()
    self.assertAllClose(true_var, final_scaling, rtol=0.15)

  def test_nuts_fitting_gaussian(self):
    """NUTS windowed adaptation recovers the true posterior variance."""
    # See docstring to _gen_gaussian_updating_example
    x_dim = 3
    y_dim = 12

    stream = test_util.test_seed_stream()

    # Compute everything in a function so it is consistent in graph mode
    @tf.function
    def do_sample():
      jd_model, true_var = _gen_gaussian_updating_example(
          x_dim, y_dim, stream())
      y_val = jd_model.sample(seed=stream()).y
      _, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(
          1,
          jd_model,
          n_chains=1,
          num_adaptation_steps=10000,
          max_tree_depth=5,
          discard_tuning=False,
          y=y_val,
          seed=stream())

      # Get the final scaling used for the mass matrix - this is a measure
      # of how well the windowed adaptation recovered the true variance
      final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
      return final_scaling, true_var
    final_scaling, true_var = do_sample()
    self.assertAllClose(true_var, final_scaling, rtol=0.1, atol=1e-3)

  def test_f64_step_size(self):
    """The initial step size inherits float64 from a float64 model."""
    dist = tfd.JointDistributionSequential([
        tfd.Normal(
            tf.constant(0., dtype=tf.float64),
            tf.constant(1., dtype=tf.float64))
    ])
    (target_log_prob_fn, initial_transformed_position, _, _, _, _
     ) = windowed_sampling._setup_mcmc(
         dist, n_chains=[5], init_position=None, seed=test_util.test_seed())
    init_step_size = windowed_sampling._get_step_size(
        initial_transformed_position, target_log_prob_fn)
    self.assertDTypeEqual(init_step_size, np.float64)
    self.assertAllFinite(init_step_size)

  def test_batch_of_problems_autobatched(self):
    """Batched (10 independent) problems sample with per-batch step sizes."""

    def model_fn():
      x = yield tfd.MultivariateNormalDiag(
          tf.zeros([10, 3]), tf.ones(3), name='x')
      yield tfd.Multinomial(
          logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')

    model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
    samp = model.sample(seed=test_util.test_seed())
    self.assertEqual((10, 3), samp.x.shape)
    self.assertEqual((10, 4), samp.y.shape)

    states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
        2, model.experimental_pin(y=samp.y), num_leapfrog_steps=3,
        num_adaptation_steps=100, init_step_size=tf.ones([10, 1]),
        seed=test_util.test_seed()))
    self.assertEqual((2, 64, 10, 3), states.x.shape)
    self.assertEqual((2, 10, 1), trace['step_size'].shape)

  def test_batch_of_problems_named(self):
    """Same as the autobatched case, via JointDistributionNamed."""

    def mk_y(x):
      return tfd.Multinomial(logits=tfb.Pad([(0, 1)])(x), total_count=10)

    model = tfd.JointDistributionNamed(dict(
        x=tfd.MultivariateNormalDiag(tf.zeros([10, 3]), tf.ones(3)),
        y=mk_y))
    samp = model.sample(seed=test_util.test_seed())
    self.assertEqual((10, 3), samp['x'].shape)
    self.assertEqual((10, 4), samp['y'].shape)

    states, trace = self.evaluate(
        tfp.experimental.mcmc.windowed_adaptive_hmc(
            2,
            model.experimental_pin(y=samp['y']),
            num_leapfrog_steps=3,
            num_adaptation_steps=100,
            init_step_size=tf.ones([10, 1]),
            seed=test_util.test_seed()))
    self.assertEqual((2, 64, 10, 3), states['x'].shape)
    self.assertEqual((2, 10, 1), trace['step_size'].shape)

  def test_bijector(self):
    """The flat unconstraining bijector round-trips a constrained draw."""
    dist = tfd.JointDistributionSequential([tfd.Dirichlet(tf.ones(2))])
    bij, _ = windowed_sampling._get_flat_unconstraining_bijector(dist)
    draw = dist.sample(seed=test_util.test_seed())
    self.assertAllCloseNested(bij.inverse(bij(draw)), draw)

  @parameterized.named_parameters(*(
      (f'{kind}_{n_chains}', kind, n_chains)  # pylint: disable=g-complex-comprehension
      for kind in ('hmc', 'nuts') for n_chains in ([], 3, [2, 1], [2, 2, 2])))
  def test_batches_of_chains(self, kind, n_chains):
    """Scalar / list / multi-axis n_chains all produce correct shapes."""
    # NOTE(review): `kind` is not used below — windowed_adaptive_hmc is run
    # for both the 'hmc' and 'nuts' cases. Confirm whether NUTS was intended
    # for kind == 'nuts'.

    def model_fn():
      x = yield tfd.MultivariateNormalDiag(
          tf.zeros(3), tf.ones(3), name='x')
      yield tfd.Multinomial(
          logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')

    model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
    samp = model.sample(seed=test_util.test_seed())
    states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
        5, model.experimental_pin(y=samp.y), n_chains=n_chains,
        num_leapfrog_steps=3, num_adaptation_steps=100,
        seed=test_util.test_seed()))
    if isinstance(n_chains, int):
      n_chains = [n_chains]
    self.assertEqual((5, *n_chains, 3), states.x.shape)
    self.assertEqual((5,), trace['step_size'].shape)

  def test_dynamic_batch_shape(self):
    """Test correct handling of `TensorShape(None)`."""
    if JAX_MODE:
      self.skipTest('b/203858802')
    n_features = 5
    n_timepoints = 100
    features = tfd.Normal(0., 1.).sample([100, n_features],
                                         test_util.test_seed())
    ar_sigma = 1.
    rho = .25

    @tfd.JointDistributionCoroutine
    def jd_model():
      beta = yield Root(tfd.Sample(tfd.Normal(0., 1.), n_features))
      yhat = tf.einsum('ij,...j->...i', features, beta)

      def ar_fun(y):
        loc = tf.concat([tf.zeros_like(y[..., :1]), y[..., :-1]], axis=-1)
        return tfd.Independent(
            tfd.Normal(loc=loc * rho, scale=ar_sigma),
            reinterpreted_batch_ndims=1)
      # Autoregressive distribution defined as below introduce a batch shape:
      # TensorShape(None)
      yield tfd.Autoregressive(
          distribution_fn=ar_fun,
          sample0=tf.zeros_like(yhat),
          num_steps=yhat.shape[-1],
          name='y')

    states, _ = self.evaluate(
        tfp.experimental.mcmc.windowed_adaptive_nuts(
            2,
            jd_model,
            num_adaptation_steps=25,
            n_chains=3,
            seed=test_util.test_seed()))
    self.assertEqual((2, 3, n_timepoints), states.y.shape)

  @parameterized.named_parameters(
      ('_nuts', tfp.experimental.mcmc.windowed_adaptive_nuts, {}),
      ('_hmc', tfp.experimental.mcmc.windowed_adaptive_hmc, {
          'num_leapfrog_steps': 1
      }),
  )
  def test_f64_state(self, method, method_kwargs):
    """Output state dtype matches a float64 target distribution."""
    states, _ = callable_util.get_output_spec(lambda: method(  # pylint: disable=g-long-lambda
        5,
        tfd.Normal(tf.constant(0., tf.float64), 1.),
        n_chains=2,
        num_adaptation_steps=100,
        seed=test_util.test_seed(),
        **method_kwargs))
    self.assertEqual(tf.float64, states.dtype)
@test_util.test_graph_and_eager_modes
class WindowedSamplingStepSizeTest(test_util.TestCase):
  """Tests for how `init_step_size` is interpreted and broadcast."""

  def test_supply_full_step_size(self):
    """A full per-variable, per-chain step size structure is used verbatim."""
    stream = test_util.test_seed_stream()

    jd_model = tfd.JointDistributionNamed({
        'a': tfd.Normal(0., 1.),
        'b': tfd.MultivariateNormalDiag(
            loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
    })

    init_step_size = {'a': tf.reshape(tf.linspace(1., 2., 3), (3, 1)),
                      'b': tf.reshape(tf.linspace(1., 2., 9), (3, 3))}
    _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
        1,
        jd_model,
        num_adaptation_steps=25,
        n_chains=3,
        init_step_size=init_step_size,
        num_leapfrog_steps=5,
        discard_tuning=False,
        trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
        seed=stream(),
    )

    # Gets a newaxis because step size needs to have an event dimension.
    self.assertAllCloseNested([init_step_size['a'],
                               init_step_size['b']],
                              [j[0] for j in actual_step_size])

  def test_supply_partial_step_size(self):
    """Scalar step sizes per variable broadcast to all chains."""
    stream = test_util.test_seed_stream()

    jd_model = tfd.JointDistributionNamed({
        'a': tfd.Normal(0., 1.),
        'b': tfd.MultivariateNormalDiag(
            loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
    })

    init_step_size = {'a': 1., 'b': 2.}
    _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
        1,
        jd_model,
        num_adaptation_steps=25,
        n_chains=3,
        init_step_size=init_step_size,
        num_leapfrog_steps=5,
        discard_tuning=False,
        trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
        seed=stream(),
    )

    actual_step = [j[0] for j in actual_step_size]
    expected_step = [1., 2.]
    self.assertAllCloseNested(expected_step, actual_step)

  def test_supply_single_step_size(self):
    """A single scalar step size is shared and traced per adaptation step."""
    stream = test_util.test_seed_stream()

    jd_model = tfd.JointDistributionNamed({
        'a': tfd.Normal(0., 1.),
        'b': tfd.MultivariateNormalDiag(
            loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
    })

    init_step_size = 1.
    _, traced_step_size = self.evaluate(
        tfp.experimental.mcmc.windowed_adaptive_hmc(
            1,
            jd_model,
            num_adaptation_steps=25,
            n_chains=20,
            init_step_size=init_step_size,
            num_leapfrog_steps=5,
            discard_tuning=False,
            trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
            seed=stream()))

    self.assertEqual((25 + 1,), traced_step_size.shape)
    self.assertAllClose(1., traced_step_size[0])

  def test_sequential_step_size(self):
    """A list-valued step size lines up with a sequential model's parts."""
    stream = test_util.test_seed_stream()

    jd_model = tfd.JointDistributionSequential(
        [tfd.HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])
    init_step_size = [1., 2., 3.]
    _, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_nuts(
        1,
        jd_model,
        num_adaptation_steps=25,
        n_chains=3,
        init_step_size=init_step_size,
        discard_tuning=False,
        trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
        dist_3=tf.constant(1.),
        seed=stream(),
    )
    self.assertAllCloseNested(init_step_size,
                              [j[0] for j in actual_step_size])
def _beta_binomial(trials):
  """Returns a function that constructs a beta binomial distribution.

  Args:
    trials: Tensor of total counts, one per (arm, day).

  Returns:
    A callable (mean, inverse_concentration) -> tfd.Independent(BetaBinomial)
    suitable for use as a JointDistributionNamed node.
  """

  def _beta_binomial_distribution(mean, inverse_concentration):
    """Returns a beta binomial distribution with the given parameters."""
    # Mean and inverse concentration are broadcast across days.
    mean = mean[..., tf.newaxis]
    inverse_concentration = inverse_concentration[..., tf.newaxis]

    beta_binomial = tfd.BetaBinomial(
        total_count=trials,
        concentration0=(1 - mean) / inverse_concentration,
        concentration1=mean / inverse_concentration)
    return tfd.Independent(beta_binomial, reinterpreted_batch_ndims=2)

  return _beta_binomial_distribution
def get_joint_distribution(
    trials,
    mean_prior=lambda: tfd.Uniform(0., 1.),
    inverse_concentration_prior=lambda: tfd.HalfNormal(5.)):
  """Returns a joint distribution over parameters and successes.

  Args:
    trials: Tensor of total counts; its leading dimension determines how many
      independent (mean, inverse_concentration) parameter pairs are sampled.
    mean_prior: Zero-arg callable returning the prior for the mean.
    inverse_concentration_prior: Zero-arg callable returning the prior for
      the inverse concentration.
  """
  param_shape = ps.shape(trials)[:1]
  mean = tfd.Sample(mean_prior(), param_shape)
  inverse_concentration = tfd.Sample(inverse_concentration_prior(), param_shape)
  return tfd.JointDistributionNamed(
      dict(mean=mean,
           inverse_concentration=inverse_concentration,
           successes=_beta_binomial(trials)),
      name='jd')
class PrecompiledTest(test_util.TestCase):
  """Tests that windowed sampling can be XLA-compiled with dynamic shapes."""

  def setUp(self):
    super().setUp()
    arms = 2
    days = 3

    seed = test_util.test_seed()
    trial_seed, value_seed = tfp.random.split_seed(seed)
    # Random trial counts and a ground-truth draw to condition on.
    self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
    dist = get_joint_distribution(self.trials)
    self.true_values = dist.sample(seed=value_seed)

  def nuts_kwargs(self):
    # Keep the tree shallow so the compiled test is fast.
    return {'max_tree_depth': 2}

  def hmc_kwargs(self):
    return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}

  @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
                                  ('nuts_jit_sig', 'nuts'))
  def test_base_kernel(self, kind):
    self.skip_if_no_xla()
    self.skipTest('b/195070752')  # Test is broken by cl/393807414.

    # JAX jit-compiles without an explicit signature; TF needs TensorSpecs
    # with unknown dims to exercise dynamic-shape compilation.
    if JAX_MODE:
      input_signature = None
    else:
      input_signature = (
          tf.TensorSpec(
              shape=[None, None], dtype=tf.float32, name='trials'),
          tf.TensorSpec(
              shape=[None, None], dtype=tf.float32, name='successes'),
          tf.TensorSpec(
              shape=[2], dtype=tf.int32, name='seed'))
    @tf.function(jit_compile=True, input_signature=input_signature)
    def do(trials, successes, seed):
      if kind == 'hmc':
        proposal_kernel_kwargs = self.hmc_kwargs()
      else:
        proposal_kernel_kwargs = self.nuts_kwargs()
      return windowed_sampling._windowed_adaptive_impl(
          n_draws=9,
          joint_dist=get_joint_distribution(trials),
          kind=kind,
          n_chains=11,
          proposal_kernel_kwargs=proposal_kernel_kwargs,
          num_adaptation_steps=50,
          current_state=None,
          dual_averaging_kwargs={'target_accept_prob': 0.76},
          trace_fn=None,
          return_final_kernel_results=False,
          discard_tuning=True,
          chain_axis_names=None,
          seed=seed,
          successes=successes)

    self.evaluate(do(self.trials + 0., self.true_values['successes'],
                     test_util.test_seed(sampler_type='stateless')))
if JAX_MODE:
  # TF runs into the `merge_call` error here (b/181800108).
  @test_util.disable_test_for_backend(
      disable_numpy=True,
      reason='Sharding not available for NumPy backend.')
  class DistributedTest(distribute_test_lib.DistributedTest):
    """Tests windowed sampling with sharded data and sharded chains (JAX only)."""
    def setUp(self):
      super().setUp()
      arms = 2
      days = 3
      seed = test_util.test_seed()
      trial_seed, value_seed = tfp.random.split_seed(seed)
      # Synthetic beta-binomial problem shared by the tests below.
      self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
      dist = get_joint_distribution(self.trials)
      self.true_values = dist.sample(seed=value_seed)
    def nuts_kwargs(self):
      # Shallow trees keep the distributed runs cheap.
      return {'max_tree_depth': 2}
    def hmc_kwargs(self):
      return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}
    def test_can_extract_shard_axis_names_from_model(self):
      # `y` and `z` are sharded over the test mesh axis; `x` is replicated.
      joint_dist = distribute.JointDistributionNamed(dict(
          x=tfd.Normal(0., 1.),
          y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
          z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
      ))
      def do():
        _, _, _, _, _, shard_axis_names = windowed_sampling._setup_mcmc(
            model=joint_dist,
            n_chains=[20],
            seed=test_util.test_seed(), z=1.)
        # _setup_mcmc will flatten the distribution
        self.assertListEqual(shard_axis_names, [[], ['i']])
      self.strategy_run(do, args=(), in_axes=None)
    @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
                                    ('nuts_jit_sig', 'nuts'))
    def test_data_sharding(self, kind):
      self.skip_if_no_xla()
      # The pinned observation `z` is sharded across devices; sampling must
      # run under the strategy without reshaping or gathering it.
      joint_dist = distribute.JointDistributionNamed(dict(
          x=tfd.Normal(0., 1.),
          y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
          z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
      ))
      def do(seed, z):
        if kind == 'hmc':
          proposal_kernel_kwargs = self.hmc_kwargs()
        else:
          proposal_kernel_kwargs = self.nuts_kwargs()
        return windowed_sampling._windowed_adaptive_impl(
            n_draws=10,
            joint_dist=joint_dist,
            kind=kind,
            n_chains=2,
            proposal_kernel_kwargs=proposal_kernel_kwargs,
            num_adaptation_steps=21,
            current_state=None,
            dual_averaging_kwargs={'target_accept_prob': 0.76},
            trace_fn=None,
            return_final_kernel_results=False,
            discard_tuning=True,
            seed=seed,
            chain_axis_names=None,
            z=z)
      self.evaluate(self.strategy_run(
          do,
          in_axes=(None, 0),
          args=(samplers.zeros_seed(), self.shard_values(
              tf.ones(distribute_test_lib.NUM_DEVICES)))))
    @parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
                                    ('nuts_jit_sig', 'nuts'))
    def test_chain_sharding(self, kind):
      self.skip_if_no_xla()
      # Here the model itself is unsharded; the MCMC chains are spread over
      # devices via `chain_axis_names`.
      joint_dist = tfd.JointDistributionNamed(dict(
          x=tfd.Normal(0., 1.),
          y=lambda x: tfd.Sample(tfd.Normal(x, 1.), 4),
          z=lambda y: tfd.Independent(tfd.Normal(y, 1.), 1)
      ))
      def do(seed, z):
        if kind == 'hmc':
          proposal_kernel_kwargs = self.hmc_kwargs()
        else:
          proposal_kernel_kwargs = self.nuts_kwargs()
        return windowed_sampling._windowed_adaptive_impl(
            n_draws=10,
            joint_dist=joint_dist,
            kind=kind,
            n_chains=2,
            proposal_kernel_kwargs=proposal_kernel_kwargs,
            num_adaptation_steps=21,
            current_state=None,
            dual_averaging_kwargs={'target_accept_prob': 0.76},
            trace_fn=None,
            return_final_kernel_results=False,
            discard_tuning=True,
            seed=seed,
            chain_axis_names=self.axis_name,
            z=z)
      self.evaluate(self.strategy_run(
          do,
          in_axes=None,
          args=(samplers.zeros_seed(),
                tf.ones(distribute_test_lib.NUM_DEVICES))))
# Standard TFP test entry point: dispatches to the right test runner per backend.
if __name__ == '__main__':
  test_util.main()
| 35.983664
| 94
| 0.646994
|
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.experimental import distribute
from tensorflow_probability.python.experimental.mcmc import windowed_sampling
from tensorflow_probability.python.internal import callable_util
from tensorflow_probability.python.internal import distribute_test_lib
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.internal import unnest
JAX_MODE = False
tfb = tfp.bijectors
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root
NUM_SCHOOLS = 8
TREATMENT_EFFECTS = [28., 8, -3, 7, -1, 1, 18, 12]
TREATMENT_STDDEVS = [15., 10, 16, 11, 9, 11, 10, 18]
def eight_schools_coroutine():
@tfd.JointDistributionCoroutine
def model():
avg_effect = yield Root(tfd.Normal(0., 5., name='avg_effect'))
avg_stddev = yield Root(tfd.HalfNormal(5., name='avg_stddev'))
school_effects_std = yield Root(
tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'))
yield tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')
return model
def eight_schools_sequential():
model = tfd.JointDistributionSequential([
tfd.Normal(0., 5., name='avg_effect'),
tfd.HalfNormal(5., name='avg_stddev'),
tfd.Sample(tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
lambda school_effects_std, avg_stddev, avg_effect: tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')])
return model
def eight_schools_named():
model = tfd.JointDistributionNamed(
dict(
avg_effect=tfd.Normal(0., 5., name='avg_effect'),
avg_stddev=tfd.HalfNormal(5., name='avg_stddev'),
school_effects_std=tfd.Sample(
tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
treatment_effects=lambda school_effects_std, avg_stddev, avg_effect:
tfd.Independent(
tfd.Normal(loc=(avg_effect[..., tf.newaxis] +
avg_stddev[..., tf.newaxis] * school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')))
return model
def eight_schools_nested():
model = tfd.JointDistributionNamed(
dict(
effect_and_stddev=tfd.JointDistributionSequential([
tfd.Normal(0., 5., name='avg_effect'),
tfd.HalfNormal(5., name='avg_stddev')], name='effect_and_stddev'),
school_effects_std=tfd.Sample(
tfd.Normal(0., 1.), NUM_SCHOOLS, name='school_effects_std'),
treatment_effects=lambda school_effects_std, effect_and_stddev:
tfd.Independent(
tfd.Normal(loc=(effect_and_stddev[0][..., tf.newaxis] +
effect_and_stddev[1][..., tf.newaxis] *
school_effects_std),
scale=tf.constant(TREATMENT_STDDEVS)),
reinterpreted_batch_ndims=1,
name='treatment_effects')))
return model
def _gen_gaussian_updating_example(x_dim, y_dim, seed):
seeds = samplers.split_seed(seed, 6)
x_mean = samplers.normal((x_dim,), seed=seeds[0])
x_scale_diag = samplers.normal((x_dim,), seed=seeds[1])
y_scale_diag = samplers.normal((y_dim,), seed=seeds[2])
scale_mat = samplers.normal((y_dim, x_dim), seed=seeds[3])
y_shift = samplers.normal((y_dim,), seed=seeds[4])
@tfd.JointDistributionCoroutine
def model():
x = yield Root(tfd.MultivariateNormalDiag(
x_mean, scale_diag=x_scale_diag, name='x'))
yield tfd.MultivariateNormalDiag(
tf.linalg.matvec(scale_mat, x) + y_shift,
scale_diag=y_scale_diag,
name='y')
dists, _ = model.sample_distributions(seed=seeds[5])
precision_x = tf.linalg.inv(dists.x.covariance())
precision_y = tf.linalg.inv(dists.y.covariance())
true_cov = tf.linalg.inv(precision_x +
tf.linalg.matmul(
tf.linalg.matmul(scale_mat, precision_y,
transpose_a=True),
scale_mat))
return model, tf.linalg.diag_part(true_cov)
@test_util.test_graph_and_eager_modes
class WindowedSamplingTest(test_util.TestCase):
@parameterized.named_parameters(
dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
[eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
eight_schools_nested])
def test_hmc_type_checks(self, model_fn):
model = model_fn()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function(autograph=False)
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_hmc(
3, model, num_leapfrog_steps=2, num_adaptation_steps=21,
seed=seed, **pins)
draws, _ = do_sample(test_util.test_seed())
self.evaluate(draws)
@parameterized.named_parameters(
dict(testcase_name='_' + fn.__name__, model_fn=fn) for fn in
[eight_schools_coroutine, eight_schools_named, eight_schools_sequential,
eight_schools_nested])
def test_nuts_type_checks(self, model_fn):
model = model_fn()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_nuts(
3, model, max_tree_depth=2, num_adaptation_steps=50,
seed=seed, **pins)
draws, _ = do_sample(test_util.test_seed())
self.evaluate(draws)
def test_hmc_samples_well(self):
model = eight_schools_named()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample(seed):
return tfp.experimental.mcmc.windowed_adaptive_hmc(
400, model, num_leapfrog_steps=12, seed=seed,
**pins)
draws, _ = do_sample(test_util.test_seed())
flat_draws = tf.nest.flatten(
model.experimental_pin(**pins)._model_flatten(draws))
max_scale_reduction = tf.reduce_max(
tf.nest.map_structure(tf.reduce_max,
tfp.mcmc.potential_scale_reduction(flat_draws)))
self.assertLess(self.evaluate(max_scale_reduction), 1.5)
def test_nuts_samples_well(self):
model = eight_schools_named()
pins = {'treatment_effects': tf.constant(TREATMENT_EFFECTS)}
@tf.function
def do_sample():
return tfp.experimental.mcmc.windowed_adaptive_nuts(
200, model, max_tree_depth=5, seed=test_util.test_seed(),
**pins)
draws, _ = do_sample()
flat_draws = tf.nest.flatten(
model.experimental_pin(**pins)._model_flatten(draws))
max_scale_reduction = tf.reduce_max(
tf.nest.map_structure(tf.reduce_max,
tfp.mcmc.potential_scale_reduction(flat_draws)))
self.assertLess(self.evaluate(max_scale_reduction), 1.05)
@parameterized.named_parameters(
dict(testcase_name=f'_{num_draws}', num_draws=num_draws)
for num_draws in [0, 1, 500, 499, 100, 10000])
def test_get_window_sizes(self, num_draws):
[first_window,
slow_window,
last_window] = windowed_sampling._get_window_sizes(num_draws)
self.assertEqual(first_window +
slow_window +
2 * slow_window +
4 * slow_window +
8 * slow_window +
last_window, num_draws)
if num_draws == 500:
self.assertEqual(slow_window, 25)
self.assertEqual(first_window, 75)
self.assertEqual(last_window, 50)
def test_explicit_init(self):
sample_dist = tfd.JointDistributionSequential(
[tfd.HalfNormal(1., name=f'dist_{idx}') for idx in range(4)])
explicit_init = [tf.ones(20) for _ in range(3)]
_, init, bijector, _, _, _ = windowed_sampling._setup_mcmc(
model=sample_dist,
n_chains=[20],
init_position=explicit_init,
seed=test_util.test_seed(),
dist_3=1.)
self.assertAllEqual(self.evaluate(init),
tf.convert_to_tensor(bijector(explicit_init)))
def test_explicit_init_samples(self):
stream = test_util.test_seed_stream()
@tf.function
def do_sample():
jd_model = tfd.JointDistributionNamed({
'x': tfd.HalfNormal(1.),
'y': lambda x: tfd.Normal(0., x)})
init = {'x': tf.ones(64)}
return tfp.experimental.mcmc.windowed_adaptive_hmc(
10,
jd_model,
num_adaptation_steps=200,
current_state=init,
num_leapfrog_steps=5,
discard_tuning=False,
y=tf.constant(1.),
seed=stream(),
trace_fn=None)
self.evaluate(do_sample())
def test_valid_init(self):
class _HalfNormal(tfd.HalfNormal):
def _default_event_space_bijector(self):
return tfb.Identity(validate_args=self.validate_args)
tough_dist = tfd.JointDistributionSequential(
[_HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])
_, init, _, _, _, _ = windowed_sampling._setup_mcmc(
model=tough_dist,
n_chains=[20],
seed=test_util.test_seed(),
dist_3=1.)
self.assertAllGreater(self.evaluate(init), 0.)
def test_extra_pins_not_required(self):
model = tfd.JointDistributionSequential([
tfd.Normal(0., 1., name='x'),
lambda x: tfd.Normal(x, 1., name='y')
])
pinned = model.experimental_pin(y=4.2)
_, init, _, _, _, _ = windowed_sampling._setup_mcmc(
model=pinned, n_chains=[20],
seed=test_util.test_seed())
self.assertLen(init, 1)
def test_hmc_fitting_gaussian(self):
x_dim = 3
y_dim = 12
stream = test_util.test_seed_stream()
@tf.function
def do_sample():
jd_model, true_var = _gen_gaussian_updating_example(
x_dim, y_dim, stream())
y_val = jd_model.sample(seed=stream()).y
_, trace = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
n_chains=1,
num_adaptation_steps=10000,
num_leapfrog_steps=16,
discard_tuning=False,
y=y_val,
seed=stream())
final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
return final_scaling, true_var
final_scaling, true_var = do_sample()
self.assertAllClose(true_var, final_scaling, rtol=0.15)
def test_nuts_fitting_gaussian(self):
x_dim = 3
y_dim = 12
stream = test_util.test_seed_stream()
@tf.function
def do_sample():
jd_model, true_var = _gen_gaussian_updating_example(
x_dim, y_dim, stream())
y_val = jd_model.sample(seed=stream()).y
_, trace = tfp.experimental.mcmc.windowed_adaptive_nuts(
1,
jd_model,
n_chains=1,
num_adaptation_steps=10000,
max_tree_depth=5,
discard_tuning=False,
y=y_val,
seed=stream())
final_scaling = 1. / trace['variance_scaling'][0][-1, 0, :]
return final_scaling, true_var
final_scaling, true_var = do_sample()
self.assertAllClose(true_var, final_scaling, rtol=0.1, atol=1e-3)
def test_f64_step_size(self):
dist = tfd.JointDistributionSequential([
tfd.Normal(
tf.constant(0., dtype=tf.float64),
tf.constant(1., dtype=tf.float64))
])
(target_log_prob_fn, initial_transformed_position, _, _, _, _
) = windowed_sampling._setup_mcmc(
dist, n_chains=[5], init_position=None, seed=test_util.test_seed())
init_step_size = windowed_sampling._get_step_size(
initial_transformed_position, target_log_prob_fn)
self.assertDTypeEqual(init_step_size, np.float64)
self.assertAllFinite(init_step_size)
def test_batch_of_problems_autobatched(self):
def model_fn():
x = yield tfd.MultivariateNormalDiag(
tf.zeros([10, 3]), tf.ones(3), name='x')
yield tfd.Multinomial(
logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
samp = model.sample(seed=test_util.test_seed())
self.assertEqual((10, 3), samp.x.shape)
self.assertEqual((10, 4), samp.y.shape)
states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
2, model.experimental_pin(y=samp.y), num_leapfrog_steps=3,
num_adaptation_steps=100, init_step_size=tf.ones([10, 1]),
seed=test_util.test_seed()))
self.assertEqual((2, 64, 10, 3), states.x.shape)
self.assertEqual((2, 10, 1), trace['step_size'].shape)
def test_batch_of_problems_named(self):
def mk_y(x):
return tfd.Multinomial(logits=tfb.Pad([(0, 1)])(x), total_count=10)
model = tfd.JointDistributionNamed(dict(
x=tfd.MultivariateNormalDiag(tf.zeros([10, 3]), tf.ones(3)),
y=mk_y))
samp = model.sample(seed=test_util.test_seed())
self.assertEqual((10, 3), samp['x'].shape)
self.assertEqual((10, 4), samp['y'].shape)
states, trace = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_hmc(
2,
model.experimental_pin(y=samp['y']),
num_leapfrog_steps=3,
num_adaptation_steps=100,
init_step_size=tf.ones([10, 1]),
seed=test_util.test_seed()))
self.assertEqual((2, 64, 10, 3), states['x'].shape)
self.assertEqual((2, 10, 1), trace['step_size'].shape)
def test_bijector(self):
dist = tfd.JointDistributionSequential([tfd.Dirichlet(tf.ones(2))])
bij, _ = windowed_sampling._get_flat_unconstraining_bijector(dist)
draw = dist.sample(seed=test_util.test_seed())
self.assertAllCloseNested(bij.inverse(bij(draw)), draw)
@parameterized.named_parameters(*(
(f'{kind}_{n_chains}', kind, n_chains)
for kind in ('hmc', 'nuts') for n_chains in ([], 3, [2, 1], [2, 2, 2])))
def test_batches_of_chains(self, kind, n_chains):
def model_fn():
x = yield tfd.MultivariateNormalDiag(
tf.zeros(3), tf.ones(3), name='x')
yield tfd.Multinomial(
logits=tfb.Pad([(0, 1)])(x), total_count=10, name='y')
model = tfd.JointDistributionCoroutineAutoBatched(model_fn, batch_ndims=1)
samp = model.sample(seed=test_util.test_seed())
states, trace = self.evaluate(tfp.experimental.mcmc.windowed_adaptive_hmc(
5, model.experimental_pin(y=samp.y), n_chains=n_chains,
num_leapfrog_steps=3, num_adaptation_steps=100,
seed=test_util.test_seed()))
if isinstance(n_chains, int):
n_chains = [n_chains]
self.assertEqual((5, *n_chains, 3), states.x.shape)
self.assertEqual((5,), trace['step_size'].shape)
def test_dynamic_batch_shape(self):
if JAX_MODE:
self.skipTest('b/203858802')
n_features = 5
n_timepoints = 100
features = tfd.Normal(0., 1.).sample([100, n_features],
test_util.test_seed())
ar_sigma = 1.
rho = .25
@tfd.JointDistributionCoroutine
def jd_model():
beta = yield Root(tfd.Sample(tfd.Normal(0., 1.), n_features))
yhat = tf.einsum('ij,...j->...i', features, beta)
def ar_fun(y):
loc = tf.concat([tf.zeros_like(y[..., :1]), y[..., :-1]], axis=-1)
return tfd.Independent(
tfd.Normal(loc=loc * rho, scale=ar_sigma),
reinterpreted_batch_ndims=1)
yield tfd.Autoregressive(
distribution_fn=ar_fun,
sample0=tf.zeros_like(yhat),
num_steps=yhat.shape[-1],
name='y')
states, _ = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_nuts(
2,
jd_model,
num_adaptation_steps=25,
n_chains=3,
seed=test_util.test_seed()))
self.assertEqual((2, 3, n_timepoints), states.y.shape)
@parameterized.named_parameters(
('_nuts', tfp.experimental.mcmc.windowed_adaptive_nuts, {}),
('_hmc', tfp.experimental.mcmc.windowed_adaptive_hmc, {
'num_leapfrog_steps': 1
}),
)
def test_f64_state(self, method, method_kwargs):
states, _ = callable_util.get_output_spec(lambda: method(
5,
tfd.Normal(tf.constant(0., tf.float64), 1.),
n_chains=2,
num_adaptation_steps=100,
seed=test_util.test_seed(),
**method_kwargs))
self.assertEqual(tf.float64, states.dtype)
@test_util.test_graph_and_eager_modes
class WindowedSamplingStepSizeTest(test_util.TestCase):
def test_supply_full_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = {'a': tf.reshape(tf.linspace(1., 2., 3), (3, 1)),
'b': tf.reshape(tf.linspace(1., 2., 9), (3, 3))}
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream(),
)
self.assertAllCloseNested([init_step_size['a'],
init_step_size['b']],
[j[0] for j in actual_step_size])
def test_supply_partial_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = {'a': 1., 'b': 2.}
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream(),
)
actual_step = [j[0] for j in actual_step_size]
expected_step = [1., 2.]
self.assertAllCloseNested(expected_step, actual_step)
def test_supply_single_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionNamed({
'a': tfd.Normal(0., 1.),
'b': tfd.MultivariateNormalDiag(
loc=tf.zeros(3), scale_diag=tf.constant([1., 2., 3.]))
})
init_step_size = 1.
_, traced_step_size = self.evaluate(
tfp.experimental.mcmc.windowed_adaptive_hmc(
1,
jd_model,
num_adaptation_steps=25,
n_chains=20,
init_step_size=init_step_size,
num_leapfrog_steps=5,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
seed=stream()))
self.assertEqual((25 + 1,), traced_step_size.shape)
self.assertAllClose(1., traced_step_size[0])
def test_sequential_step_size(self):
stream = test_util.test_seed_stream()
jd_model = tfd.JointDistributionSequential(
[tfd.HalfNormal(scale=1., name=f'dist_{idx}') for idx in range(4)])
init_step_size = [1., 2., 3.]
_, actual_step_size = tfp.experimental.mcmc.windowed_adaptive_nuts(
1,
jd_model,
num_adaptation_steps=25,
n_chains=3,
init_step_size=init_step_size,
discard_tuning=False,
trace_fn=lambda *args: unnest.get_innermost(args[-1], 'step_size'),
dist_3=tf.constant(1.),
seed=stream(),
)
self.assertAllCloseNested(init_step_size,
[j[0] for j in actual_step_size])
def _beta_binomial(trials):
def _beta_binomial_distribution(mean, inverse_concentration):
mean = mean[..., tf.newaxis]
inverse_concentration = inverse_concentration[..., tf.newaxis]
beta_binomial = tfd.BetaBinomial(
total_count=trials,
concentration0=(1 - mean) / inverse_concentration,
concentration1=mean / inverse_concentration)
return tfd.Independent(beta_binomial, reinterpreted_batch_ndims=2)
return _beta_binomial_distribution
def get_joint_distribution(
trials,
mean_prior=lambda: tfd.Uniform(0., 1.),
inverse_concentration_prior=lambda: tfd.HalfNormal(5.)):
param_shape = ps.shape(trials)[:1]
mean = tfd.Sample(mean_prior(), param_shape)
inverse_concentration = tfd.Sample(inverse_concentration_prior(), param_shape)
return tfd.JointDistributionNamed(
dict(mean=mean,
inverse_concentration=inverse_concentration,
successes=_beta_binomial(trials)),
name='jd')
class PrecompiledTest(test_util.TestCase):
def setUp(self):
super().setUp()
arms = 2
days = 3
seed = test_util.test_seed()
trial_seed, value_seed = tfp.random.split_seed(seed)
self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
dist = get_joint_distribution(self.trials)
self.true_values = dist.sample(seed=value_seed)
def nuts_kwargs(self):
return {'max_tree_depth': 2}
def hmc_kwargs(self):
return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_base_kernel(self, kind):
self.skip_if_no_xla()
self.skipTest('b/195070752')
if JAX_MODE:
input_signature = None
else:
input_signature = (
tf.TensorSpec(
shape=[None, None], dtype=tf.float32, name='trials'),
tf.TensorSpec(
shape=[None, None], dtype=tf.float32, name='successes'),
tf.TensorSpec(
shape=[2], dtype=tf.int32, name='seed'))
@tf.function(jit_compile=True, input_signature=input_signature)
def do(trials, successes, seed):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=9,
joint_dist=get_joint_distribution(trials),
kind=kind,
n_chains=11,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=50,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
chain_axis_names=None,
seed=seed,
successes=successes)
self.evaluate(do(self.trials + 0., self.true_values['successes'],
test_util.test_seed(sampler_type='stateless')))
if JAX_MODE:
@test_util.disable_test_for_backend(
disable_numpy=True,
reason='Sharding not available for NumPy backend.')
class DistributedTest(distribute_test_lib.DistributedTest):
def setUp(self):
super().setUp()
arms = 2
days = 3
seed = test_util.test_seed()
trial_seed, value_seed = tfp.random.split_seed(seed)
self.trials = tfd.Poisson(100.).sample([arms, days], seed=trial_seed)
dist = get_joint_distribution(self.trials)
self.true_values = dist.sample(seed=value_seed)
def nuts_kwargs(self):
return {'max_tree_depth': 2}
def hmc_kwargs(self):
return {'num_leapfrog_steps': 3, 'store_parameters_in_results': True}
def test_can_extract_shard_axis_names_from_model(self):
joint_dist = distribute.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
))
def do():
_, _, _, _, _, shard_axis_names = windowed_sampling._setup_mcmc(
model=joint_dist,
n_chains=[20],
seed=test_util.test_seed(), z=1.)
self.assertListEqual(shard_axis_names, [[], ['i']])
self.strategy_run(do, args=(), in_axes=None)
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_data_sharding(self, kind):
self.skip_if_no_xla()
joint_dist = distribute.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: distribute.Sharded(tfd.Normal(x, 1.), self.axis_name),
z=lambda y: distribute.Sharded(tfd.Normal(y, 1.), self.axis_name)
))
def do(seed, z):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=10,
joint_dist=joint_dist,
kind=kind,
n_chains=2,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=21,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
seed=seed,
chain_axis_names=None,
z=z)
self.evaluate(self.strategy_run(
do,
in_axes=(None, 0),
args=(samplers.zeros_seed(), self.shard_values(
tf.ones(distribute_test_lib.NUM_DEVICES)))))
@parameterized.named_parameters(('hmc_jit_sig', 'hmc'),
('nuts_jit_sig', 'nuts'))
def test_chain_sharding(self, kind):
self.skip_if_no_xla()
joint_dist = tfd.JointDistributionNamed(dict(
x=tfd.Normal(0., 1.),
y=lambda x: tfd.Sample(tfd.Normal(x, 1.), 4),
z=lambda y: tfd.Independent(tfd.Normal(y, 1.), 1)
))
def do(seed, z):
if kind == 'hmc':
proposal_kernel_kwargs = self.hmc_kwargs()
else:
proposal_kernel_kwargs = self.nuts_kwargs()
return windowed_sampling._windowed_adaptive_impl(
n_draws=10,
joint_dist=joint_dist,
kind=kind,
n_chains=2,
proposal_kernel_kwargs=proposal_kernel_kwargs,
num_adaptation_steps=21,
current_state=None,
dual_averaging_kwargs={'target_accept_prob': 0.76},
trace_fn=None,
return_final_kernel_results=False,
discard_tuning=True,
seed=seed,
chain_axis_names=self.axis_name,
z=z)
self.evaluate(self.strategy_run(
do,
in_axes=None,
args=(samplers.zeros_seed(),
tf.ones(distribute_test_lib.NUM_DEVICES))))
if __name__ == '__main__':
test_util.main()
| true
| true
|
f708a2f9c97d4edc9089d4d6c7b978043dd53de3
| 4,813
|
py
|
Python
|
s3prl/downstream/voxceleb1/expert.py
|
andybi7676/s3prl
|
0e5acc5d499a629f946d561d87e8924ba3eb004b
|
[
"MIT"
] | 3
|
2021-08-07T19:12:56.000Z
|
2022-03-29T15:16:31.000Z
|
s3prl/downstream/voxceleb1/expert.py
|
andybi7676/s3prl
|
0e5acc5d499a629f946d561d87e8924ba3eb004b
|
[
"MIT"
] | 2
|
2021-07-28T20:35:59.000Z
|
2021-07-30T16:01:53.000Z
|
s3prl/downstream/voxceleb1/expert.py
|
andybi7676/s3prl
|
0e5acc5d499a629f946d561d87e8924ba3eb004b
|
[
"MIT"
] | 2
|
2021-07-21T11:05:26.000Z
|
2021-07-22T09:46:38.000Z
|
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ expert.py ]
# Synopsis [ the phone linear downstream wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import math
import torch
import random
import pathlib
#-------------#
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, DistributedSampler
from torch.distributed import is_initialized
from torch.nn.utils.rnn import pad_sequence
#-------------#
from ..model import *
from .dataset import SpeakerClassifiDataset
from argparse import Namespace
from pathlib import Path
class DownstreamExpert(nn.Module):
    """
    Used to handle downstream-specific operations
    eg. downstream forward, metric computation, contents to log

    Args:
        upstream_dim: feature dimension produced by the upstream model.
        downstream_expert: config dict with 'datarc' (data settings) and
            'modelrc' (model settings) sub-dicts.
        expdir: experiment directory; text metrics are appended to
            `<expdir>/log.log`.
    """

    def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
        super().__init__()
        self.upstream_dim = upstream_dim
        self.downstream = downstream_expert
        self.datarc = downstream_expert['datarc']
        self.modelrc = downstream_expert['modelrc']

        root_dir = Path(self.datarc['file_path'])
        # Only the training split applies max_timestep cropping; dev/test use
        # full-length utterances.
        self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
        self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
        self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])

        # SECURITY NOTE: `modelrc['select']` is eval'ed to resolve a class
        # brought in by `from ..model import *`. This executes arbitrary
        # config-derived code — never feed untrusted configs through here.
        model_cls = eval(self.modelrc['select'])
        # Per-model config lives under a key named after the selected class.
        model_conf = self.modelrc.get(self.modelrc['select'], {})
        self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
        self.model = model_cls(
            input_dim = self.modelrc['projector_dim'],
            output_dim = self.train_dataset.speaker_num,
            **model_conf,
        )
        self.objective = nn.CrossEntropyLoss()
        self.logging = os.path.join(expdir, 'log.log')
        # Registered as a buffer so the best dev accuracy survives checkpoint
        # save/load.
        self.register_buffer('best_score', torch.zeros(1))

    def _get_train_dataloader(self, dataset):
        """Shuffled loader; switches to a DistributedSampler under DDP."""
        sampler = DistributedSampler(dataset) if is_initialized() else None
        return DataLoader(
            dataset, batch_size=self.datarc['train_batch_size'],
            shuffle=(sampler is None), sampler=sampler,
            num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn
        )

    def _get_eval_dataloader(self, dataset):
        """Sequential (unshuffled) loader for dev/test evaluation."""
        return DataLoader(
            dataset, batch_size=self.datarc['eval_batch_size'],
            shuffle=False, num_workers=self.datarc['num_workers'],
            collate_fn=dataset.collate_fn
        )

    def get_train_dataloader(self):
        return self._get_train_dataloader(self.train_dataset)

    def get_dev_dataloader(self):
        return self._get_eval_dataloader(self.dev_dataset)

    def get_test_dataloader(self):
        return self._get_eval_dataloader(self.test_dataset)

    # Interface
    def get_dataloader(self, mode):
        """Dispatch to get_{train,dev,test}_dataloader based on `mode`.

        Uses getattr instead of the previous eval-based dispatch: identical
        behavior for valid modes, without executing an arbitrary string.
        """
        return getattr(self, f'get_{mode}_dataloader')()

    # Interface
    def forward(self, mode, features, labels, records, **kwargs):
        """Compute the speaker-classification loss for one batch.

        Args:
            mode: split name ('train'/'dev'/'test'); unused here, kept for the
                runner interface.
            features: list of variable-length upstream feature tensors, each
                of shape (seq_len, upstream_dim) — assumed; confirm upstream.
            labels: one integer speaker id per utterance.
            records: dict of lists; 'acc' and 'loss' are extended in place for
                later aggregation in log_records.

        Returns:
            Scalar cross-entropy loss tensor.
        """
        device = features[0].device
        features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
        features = pad_sequence(features, batch_first=True)
        features = self.projector(features)
        predicted, _ = self.model(features, features_len)

        labels = torch.LongTensor(labels).to(features.device)
        loss = self.objective(predicted, labels)

        predicted_classid = predicted.max(dim=-1).indices
        records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
        records['loss'].append(loss.item())
        return loss

    # interface
    def log_records(self, mode, records, logger, global_step, **kwargs):
        """Average each record, log to tensorboard and log file.

        On a new best dev accuracy, updates `best_score` and requests a
        checkpoint save.

        Returns:
            List of checkpoint names to save (only '{mode}-best.ckpt' when a
            new best dev accuracy is reached, otherwise empty).
        """
        save_names = []
        for key, values in records.items():
            average = torch.FloatTensor(values).mean().item()
            logger.add_scalar(
                f'voxceleb1/{mode}-{key}',
                average,
                global_step=global_step
            )
            with open(self.logging, 'a') as f:
                if key == 'acc':
                    f.write(f'{mode} at step {global_step}: {average}\n')
                    if mode == 'dev' and average > self.best_score:
                        self.best_score = torch.ones(1) * average
                        f.write(f'New best on {mode} at step {global_step}: {average}\n')
                        save_names.append(f'{mode}-best.ckpt')
        return save_names
| 37.897638
| 125
| 0.606898
|
torch.nn.utils.rnn import pad_sequence
from ..model import *
from .dataset import SpeakerClassifiDataset
from argparse import Namespace
from pathlib import Path
class DownstreamExpert(nn.Module):
def __init__(self, upstream_dim, downstream_expert, expdir, **kwargs):
super(DownstreamExpert, self).__init__()
self.upstream_dim = upstream_dim
self.downstream = downstream_expert
self.datarc = downstream_expert['datarc']
self.modelrc = downstream_expert['modelrc']
root_dir = Path(self.datarc['file_path'])
self.train_dataset = SpeakerClassifiDataset('train', root_dir, self.datarc['meta_data'], self.datarc['max_timestep'])
self.dev_dataset = SpeakerClassifiDataset('dev', root_dir, self.datarc['meta_data'])
self.test_dataset = SpeakerClassifiDataset('test', root_dir, self.datarc['meta_data'])
model_cls = eval(self.modelrc['select'])
model_conf = self.modelrc.get(self.modelrc['select'], {})
self.projector = nn.Linear(upstream_dim, self.modelrc['projector_dim'])
self.model = model_cls(
input_dim = self.modelrc['projector_dim'],
output_dim = self.train_dataset.speaker_num,
**model_conf,
)
self.objective = nn.CrossEntropyLoss()
self.logging = os.path.join(expdir, 'log.log')
self.register_buffer('best_score', torch.zeros(1))
def _get_train_dataloader(self, dataset):
sampler = DistributedSampler(dataset) if is_initialized() else None
return DataLoader(
dataset, batch_size=self.datarc['train_batch_size'],
shuffle=(sampler is None), sampler=sampler,
num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def _get_eval_dataloader(self, dataset):
return DataLoader(
dataset, batch_size=self.datarc['eval_batch_size'],
shuffle=False, num_workers=self.datarc['num_workers'],
collate_fn=dataset.collate_fn
)
def get_train_dataloader(self):
return self._get_train_dataloader(self.train_dataset)
def get_dev_dataloader(self):
return self._get_eval_dataloader(self.dev_dataset)
def get_test_dataloader(self):
return self._get_eval_dataloader(self.test_dataset)
def get_dataloader(self, mode):
return eval(f'self.get_{mode}_dataloader')()
def forward(self, mode, features, labels, records, **kwargs):
device = features[0].device
features_len = torch.IntTensor([len(feat) for feat in features]).to(device=device)
features = pad_sequence(features, batch_first=True)
features = self.projector(features)
predicted, _ = self.model(features, features_len)
labels = torch.LongTensor(labels).to(features.device)
loss = self.objective(predicted, labels)
predicted_classid = predicted.max(dim=-1).indices
records['acc'] += (predicted_classid == labels).view(-1).cpu().float().tolist()
records['loss'].append(loss.item())
return loss
def log_records(self, mode, records, logger, global_step, **kwargs):
save_names = []
for key, values in records.items():
average = torch.FloatTensor(values).mean().item()
logger.add_scalar(
f'voxceleb1/{mode}-{key}',
average,
global_step=global_step
)
with open(self.logging, 'a') as f:
if key == 'acc':
f.write(f'{mode} at step {global_step}: {average}\n')
if mode == 'dev' and average > self.best_score:
self.best_score = torch.ones(1) * average
f.write(f'New best on {mode} at step {global_step}: {average}\n')
save_names.append(f'{mode}-best.ckpt')
return save_names
| true
| true
|
f708a3ecd84ccc0740adfb8ca7ccc4b6f5725a25
| 1,031
|
gyp
|
Python
|
Dependencies/gyp-master/test/win/shard/shard_ref.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/win/shard/shard_ref.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
Dependencies/gyp-master/test/win/shard/shard_ref.gyp
|
knight666/exlibris
|
b21b46e0c84e5c4f81f8048022cda88e7bb3dca2
|
[
"MIT"
] | null | null | null |
# Copyright 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'refs_to_shard_external_lib',
'type': 'static_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_exe',
'type': 'executable',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_dll',
'type': 'shared_library',
'dependencies': [
# Make sure references in other files are updated correctly.
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
]
}
| 24.547619
| 73
| 0.531523
|
{
'targets': [
{
'target_name': 'refs_to_shard_external_lib',
'type': 'static_library',
'dependencies': [
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_exe',
'type': 'executable',
'dependencies': [
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
{
'target_name': 'refs_to_shard_external_dll',
'type': 'shared_library',
'dependencies': [
'shard.gyp:shard',
],
'sources': [
'hello.cc',
],
},
]
}
| true
| true
|
f708a40a4eec1b5655f85df4fa2c76a70a93f433
| 2,231
|
py
|
Python
|
singa_easy/modules/mod_modelslicing/utils/lr_scheduler.py
|
arielclj/singa-easy
|
fd4bc601a5501062936f874df14711a3cefa1346
|
[
"Apache-2.0"
] | 6
|
2020-04-28T16:57:15.000Z
|
2021-08-07T13:06:28.000Z
|
singa_easy/modules/mod_modelslicing/utils/lr_scheduler.py
|
arielclj/singa-easy
|
fd4bc601a5501062936f874df14711a3cefa1346
|
[
"Apache-2.0"
] | 41
|
2020-04-06T13:18:40.000Z
|
2021-01-20T04:29:50.000Z
|
singa_easy/modules/mod_modelslicing/utils/lr_scheduler.py
|
arielclj/singa-easy
|
fd4bc601a5501062936f874df14711a3cefa1346
|
[
"Apache-2.0"
] | 10
|
2020-04-06T09:56:20.000Z
|
2022-03-21T09:18:51.000Z
|
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import CosineAnnealingLR
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier
warmup_epoch: target learning rate is linearly reached at the warmup_epoch
scheduler: scheduler used after warmup_epoch (eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):
assert multiplier > 1., 'multiplier should be greater than 1.'
self.multiplier = multiplier
self.warmup_epoch = warmup_epoch
self.scheduler = scheduler
self.finish_warmup = False
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.warmup_epoch:
if self.scheduler:
if not self.finish_warmup:
self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finish_warmup = True
return self.scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]
def step(self, epoch=None, metrics=None):
if self.finish_warmup and self.scheduler:
if epoch is None:
self.scheduler.step(None)
else:
self.scheduler.step(epoch - self.warmup_epoch)
else:
return super(GradualWarmupScheduler, self).step(epoch)
if __name__ == '__main__':
import torch
v = torch.zeros(10, requires_grad=True)
optim = torch.optim.SGD([v], lr=0.01)
scheduler = CosineAnnealingLR(optim, 95)
scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)
for epoch in range(0, 100):
scheduler.step(epoch)
print(epoch, optim.param_groups[0]['lr'])
| 39.839286
| 113
| 0.671896
|
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.optim.lr_scheduler import CosineAnnealingLR
class GradualWarmupScheduler(_LRScheduler):
def __init__(self, optimizer, warmup_epoch, multiplier=1.0, scheduler=None):
assert multiplier > 1., 'multiplier should be greater than 1.'
self.multiplier = multiplier
self.warmup_epoch = warmup_epoch
self.scheduler = scheduler
self.finish_warmup = False
super().__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.warmup_epoch:
if self.scheduler:
if not self.finish_warmup:
self.scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finish_warmup = True
return self.scheduler.get_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
return [base_lr*((self.multiplier-1.)*self.last_epoch/self.warmup_epoch+1.) for base_lr in self.base_lrs]
def step(self, epoch=None, metrics=None):
if self.finish_warmup and self.scheduler:
if epoch is None:
self.scheduler.step(None)
else:
self.scheduler.step(epoch - self.warmup_epoch)
else:
return super(GradualWarmupScheduler, self).step(epoch)
if __name__ == '__main__':
import torch
v = torch.zeros(10, requires_grad=True)
optim = torch.optim.SGD([v], lr=0.01)
scheduler = CosineAnnealingLR(optim, 95)
scheduler = GradualWarmupScheduler(optim, multiplier=10, warmup_epoch=5, scheduler=scheduler)
for epoch in range(0, 100):
scheduler.step(epoch)
print(epoch, optim.param_groups[0]['lr'])
| true
| true
|
f708a43e245048f8b85402378a032274e63bb224
| 13,612
|
py
|
Python
|
atomate/vasp/firetasks/glue_tasks.py
|
dongsenfo/atomate
|
01558e8c3e38470c02bc8b50c0ee3aa6198e5206
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
atomate/vasp/firetasks/glue_tasks.py
|
dongsenfo/atomate
|
01558e8c3e38470c02bc8b50c0ee3aa6198e5206
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-04-09T20:55:30.000Z
|
2019-04-09T21:30:24.000Z
|
atomate/vasp/firetasks/glue_tasks.py
|
dongsenfo/atomate
|
01558e8c3e38470c02bc8b50c0ee3aa6198e5206
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2018-09-01T00:08:51.000Z
|
2021-11-17T01:32:14.000Z
|
# coding: utf-8
from __future__ import division, print_function, unicode_literals, \
absolute_import
import glob
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.io.vasp import Vasprun, zpath
"""
This module defines tasks that acts as a glue between other vasp Firetasks to allow communication
between different Firetasks and Fireworks. This module also contains tasks that affect the control
flow of the workflow, e.g. tasks to check stability or the gap is within a certain range.
"""
import gzip
import os
import re
from pymatgen import MPRester
from pymatgen.io.vasp.sets import get_vasprun_outcar
from pymatgen.core.structure import Structure
from fireworks import explicit_serialize, FiretaskBase, FWAction
from atomate.utils.utils import env_chk, get_logger
from atomate.common.firetasks.glue_tasks import get_calc_loc, PassResult, \
CopyFiles, CopyFilesFromCalcLoc
logger = get_logger(__name__)
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = 'ajain@lbl.gov, kmathew@lbl.gov'
@explicit_serialize
class CopyVaspOutputs(CopyFiles):
"""
Copy files from a previous VASP run directory to the current directory.
By default, copies 'INCAR', 'POSCAR' (default: via 'CONTCAR'), 'KPOINTS',
'POTCAR', 'OUTCAR', and 'vasprun.xml'. Additional files, e.g. 'CHGCAR',
can also be specified. Automatically handles files that have a ".gz"
extension (copies and unzips).
Note that you must specify either "calc_loc" or "calc_dir" to indicate
the directory containing the previous VASP run.
Required params:
(none) - but you must specify either "calc_loc" OR "calc_dir"
Optional params:
calc_loc (str OR bool): if True will set most recent calc_loc. If str
search for the most recent calc_loc with the matching name
calc_dir (str): path to dir that contains VASP output files.
filesystem (str): remote filesystem. e.g. username@host
additional_files ([str]): additional files to copy,
e.g. ["CHGCAR", "WAVECAR"]. Use $ALL if you just want to copy
everything
contcar_to_poscar(bool): If True (default), will move CONTCAR to
POSCAR (original POSCAR is not copied).
"""
optional_params = ["calc_loc", "calc_dir", "filesystem", "additional_files",
"contcar_to_poscar"]
def run_task(self, fw_spec):
calc_loc = get_calc_loc(self["calc_loc"],
fw_spec["calc_locs"]) if self.get(
"calc_loc") else {}
# determine what files need to be copied
files_to_copy = None
if not "$ALL" in self.get("additional_files", []):
files_to_copy = ['INCAR', 'POSCAR', 'KPOINTS', 'POTCAR', 'OUTCAR',
'vasprun.xml']
if self.get("additional_files"):
files_to_copy.extend(self["additional_files"])
# decide between poscar and contcar
contcar_to_poscar = self.get("contcar_to_poscar", True)
if contcar_to_poscar and "CONTCAR" not in files_to_copy:
files_to_copy.append("CONTCAR")
files_to_copy = [f for f in files_to_copy if
f != 'POSCAR'] # remove POSCAR
# setup the copy
self.setup_copy(self.get("calc_dir", None),
filesystem=self.get("filesystem", None),
files_to_copy=files_to_copy, from_path_dict=calc_loc)
# do the copying
self.copy_files()
def copy_files(self):
all_files = self.fileclient.listdir(self.from_dir)
# start file copy
for f in self.files_to_copy:
prev_path_full = os.path.join(self.from_dir, f)
dest_fname = 'POSCAR' if f == 'CONTCAR' and self.get(
"contcar_to_poscar", True) else f
dest_path = os.path.join(self.to_dir, dest_fname)
relax_ext = ""
relax_paths = sorted(
self.fileclient.glob(prev_path_full + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CopyVaspOutputs doesn't properly handle >9 relaxations!")
m = re.search('\.relax\d*', relax_paths[-1])
relax_ext = m.group(0)
# detect .gz extension if needed - note that monty zpath() did not seem useful here
gz_ext = ""
if not (f + relax_ext) in all_files:
for possible_ext in [".gz", ".GZ"]:
if (f + relax_ext + possible_ext) in all_files:
gz_ext = possible_ext
if not (f + relax_ext + gz_ext) in all_files:
raise ValueError("Cannot find file: {}".format(f))
# copy the file (minus the relaxation extension)
self.fileclient.copy(prev_path_full + relax_ext + gz_ext,
dest_path + gz_ext)
# unzip the .gz if needed
if gz_ext in ['.gz', ".GZ"]:
# unzip dest file
f = gzip.open(dest_path + gz_ext, 'rt')
file_content = f.read()
with open(dest_path, 'w') as f_out:
f_out.writelines(file_content)
f.close()
os.remove(dest_path + gz_ext)
@explicit_serialize
class CheckStability(FiretaskBase):
"""
Checks the stability of the entry against the Materials Project database.
If the stability is less than the cutoff (default is 0.1 eV/atom), then
the task will return a FWAction that will defuse all remaining tasks.
Required params:
(none) - but your MAPI key must be set as an environ var in this case
Optional params:
ehull_cutoff: (float) energy in eV/atom to use as ehull cutoff. Default
is 0.05 eV/atom.
MAPI_KEY: (str) set MAPI key directly. Supports env_chk.
calc_dir: (str) string to path containing vasprun.xml (default currdir)
"""
required_params = []
optional_params = ["ehull_cutoff", "MAPI_KEY", "calc_dir"]
def run_task(self, fw_spec):
mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."),
parse_dos=False,
parse_eigen=False)
my_entry = vasprun.get_computed_entry(inc_structure=False)
stored_data = mpr.get_stability([my_entry])[0]
if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
logger.info("CheckStability: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
else:
return FWAction(stored_data=stored_data)
@explicit_serialize
class CheckBandgap(FiretaskBase):
"""
Checks the band gap of an entry. If band gap is >min_gap or <max_gap, then
the task will return a FWAction that will defuse all remaining tasks.
Required params:
(none) - but you should set either min_gap or max_gap
Optional params:
min_gap: (float) minimum gap energy in eV to proceed
max_gap: (float) maximum gap energy in eV to proceed
vasprun_path: (str) path to vasprun.xml file
"""
required_params = []
optional_params = ["min_gap", "max_gap", "vasprun_path"]
def run_task(self, fw_spec):
vr_path = zpath(self.get("vasprun_path", "vasprun.xml"))
min_gap = self.get("min_gap", None)
max_gap = self.get("max_gap", None)
if not os.path.exists(vr_path):
relax_paths = sorted(glob.glob(vr_path + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CheckBandgap doesn't properly handle >9 relaxations!")
vr_path = relax_paths[-1]
logger.info("Checking the gap of file: {}".format(vr_path))
vr = Vasprun(vr_path)
gap = vr.get_band_structure().get_band_gap()["energy"]
stored_data = {"band_gap": gap}
logger.info(
"The gap is: {}. Min gap: {}. Max gap: {}".format(gap, min_gap,
max_gap))
if (min_gap and gap < min_gap) or (max_gap and gap > max_gap):
logger.info("CheckBandgap: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
return FWAction(stored_data=stored_data)
@explicit_serialize
class GetInterpolatedPOSCAR(FiretaskBase):
"""
Grabs CONTCARS from two previous calculations to create interpolated
structure.
The code gets the CONTCAR locations using get_calc_loc of two calculations
indicated by the start and end params, creates a folder named "interpolate"
in the current FireWork directory, and copies the two CONTCARs to this folder.
The two CONTCARs are then used to create nimages interpolated structures using
pymatgen.core.structure.Structure.interpolate. Finally, the structure indicated
by this_image is written as a POSCAR file.
Required params:
start (str): name of fw for start of interpolation.
end (str): name of fw for end of interpolation.
this_image (int): which interpolation this is.
nimages (int) : number of interpolations.
Optional params:
autosort_tol (float): parameter used by Structure.interpolate.
a distance tolerance in angstrom in which to automatically
sort end_structure to match to the closest
points in this particular structure. Default is 0.0.
"""
required_params = ["start", "end", "this_image", "nimages"]
optional_params = ["autosort_tol"]
def run_task(self, fw_spec):
structure = self.interpolate_poscar(fw_spec)
structure.to(fmt="POSCAR", filename=os.path.join(os.getcwd(), "POSCAR"))
def interpolate_poscar(self, fw_spec):
# make folder for poscar interpolation start and end structure files.
interpolate_folder = 'interpolate'
if not os.path.exists(os.path.join(os.getcwd(), interpolate_folder)):
os.makedirs(os.path.join(os.getcwd(), interpolate_folder))
# use method of GrabFilesFromCalcLoc to grab files from previous locations.
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["start"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_0").run_task(fw_spec=fw_spec)
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["end"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_1").run_task(fw_spec=fw_spec)
# assuming first calc_dir is polar structure for ferroelectric search
s1 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_0"))
s2 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_1"))
structs = s1.interpolate(s2, self["nimages"], interpolate_lattices=True,
autosort_tol=self.get("autosort_tol", 0.0))
# save only the interpolation needed for this run
i = self.get("this_image")
return structs[i]
def pass_vasp_result(pass_dict=None, calc_dir='.', filename="vasprun.xml.gz",
parse_eigen=False,
parse_dos=False, **kwargs):
"""
Function that gets a PassResult firework corresponding to output from a Vasprun. Covers
most use cases in which user needs to pass results from a vasp run to child FWs
(e. g. analysis FWs)
pass_vasp_result(pass_dict={'stress': ">>ionic_steps.-1.stress"})
Args:
pass_dict (dict): dictionary designating keys and values to pass
to child fireworks. If value is a string beginning with '>>',
the firework will search the parsed VASP output dictionary
for the designated property by following the sequence of keys
separated with periods, e. g. ">>ionic_steps.-1.stress" is used
to designate the stress from the last ionic_step. If the value
is not a string or does not begin with ">>" or "a>>" (for an
object attribute, rather than nested key of .as_dict() conversion),
it is passed as is. Defaults to pass the computed entry of
the Vasprun.
calc_dir (str): path to dir that contains VASP output files, defaults
to '.', e. g. current directory
filename (str): filename for vasp xml file to parse, defaults to
"vasprun.xml.gz"
parse_eigen (bool): flag on whether or not to parse eigenvalues,
defaults to false
parse_eigen (bool): flag on whether or not to parse dos,
defaults to false
**kwargs (keyword args): other keyword arguments passed to PassResult
e.g. mod_spec_key or mod_spec_cmd
"""
pass_dict = pass_dict or {"computed_entry": "a>>get_computed_entry"}
parse_kwargs = {"filename": filename, "parse_eigen": parse_eigen,
"parse_dos": parse_dos}
return PassResult(pass_dict=pass_dict, calc_dir=calc_dir,
parse_kwargs=parse_kwargs,
parse_class="pymatgen.io.vasp.outputs.Vasprun", **kwargs)
| 41.754601
| 98
| 0.624082
|
from __future__ import division, print_function, unicode_literals, \
absolute_import
import glob
from pymatgen.analysis.elasticity.strain import Strain
from pymatgen.io.vasp import Vasprun, zpath
import gzip
import os
import re
from pymatgen import MPRester
from pymatgen.io.vasp.sets import get_vasprun_outcar
from pymatgen.core.structure import Structure
from fireworks import explicit_serialize, FiretaskBase, FWAction
from atomate.utils.utils import env_chk, get_logger
from atomate.common.firetasks.glue_tasks import get_calc_loc, PassResult, \
CopyFiles, CopyFilesFromCalcLoc
logger = get_logger(__name__)
__author__ = 'Anubhav Jain, Kiran Mathew'
__email__ = 'ajain@lbl.gov, kmathew@lbl.gov'
@explicit_serialize
class CopyVaspOutputs(CopyFiles):
optional_params = ["calc_loc", "calc_dir", "filesystem", "additional_files",
"contcar_to_poscar"]
def run_task(self, fw_spec):
calc_loc = get_calc_loc(self["calc_loc"],
fw_spec["calc_locs"]) if self.get(
"calc_loc") else {}
files_to_copy = None
if not "$ALL" in self.get("additional_files", []):
files_to_copy = ['INCAR', 'POSCAR', 'KPOINTS', 'POTCAR', 'OUTCAR',
'vasprun.xml']
if self.get("additional_files"):
files_to_copy.extend(self["additional_files"])
contcar_to_poscar = self.get("contcar_to_poscar", True)
if contcar_to_poscar and "CONTCAR" not in files_to_copy:
files_to_copy.append("CONTCAR")
files_to_copy = [f for f in files_to_copy if
f != 'POSCAR']
self.setup_copy(self.get("calc_dir", None),
filesystem=self.get("filesystem", None),
files_to_copy=files_to_copy, from_path_dict=calc_loc)
self.copy_files()
def copy_files(self):
all_files = self.fileclient.listdir(self.from_dir)
for f in self.files_to_copy:
prev_path_full = os.path.join(self.from_dir, f)
dest_fname = 'POSCAR' if f == 'CONTCAR' and self.get(
"contcar_to_poscar", True) else f
dest_path = os.path.join(self.to_dir, dest_fname)
relax_ext = ""
relax_paths = sorted(
self.fileclient.glob(prev_path_full + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CopyVaspOutputs doesn't properly handle >9 relaxations!")
m = re.search('\.relax\d*', relax_paths[-1])
relax_ext = m.group(0)
# detect .gz extension if needed - note that monty zpath() did not seem useful here
gz_ext = ""
if not (f + relax_ext) in all_files:
for possible_ext in [".gz", ".GZ"]:
if (f + relax_ext + possible_ext) in all_files:
gz_ext = possible_ext
if not (f + relax_ext + gz_ext) in all_files:
raise ValueError("Cannot find file: {}".format(f))
# copy the file (minus the relaxation extension)
self.fileclient.copy(prev_path_full + relax_ext + gz_ext,
dest_path + gz_ext)
# unzip the .gz if needed
if gz_ext in ['.gz', ".GZ"]:
# unzip dest file
f = gzip.open(dest_path + gz_ext, 'rt')
file_content = f.read()
with open(dest_path, 'w') as f_out:
f_out.writelines(file_content)
f.close()
os.remove(dest_path + gz_ext)
@explicit_serialize
class CheckStability(FiretaskBase):
required_params = []
optional_params = ["ehull_cutoff", "MAPI_KEY", "calc_dir"]
def run_task(self, fw_spec):
mpr = MPRester(env_chk(self.get("MAPI_KEY"), fw_spec))
vasprun, outcar = get_vasprun_outcar(self.get("calc_dir", "."),
parse_dos=False,
parse_eigen=False)
my_entry = vasprun.get_computed_entry(inc_structure=False)
stored_data = mpr.get_stability([my_entry])[0]
if stored_data["e_above_hull"] > self.get("ehull_cutoff", 0.05):
logger.info("CheckStability: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
else:
return FWAction(stored_data=stored_data)
@explicit_serialize
class CheckBandgap(FiretaskBase):
required_params = []
optional_params = ["min_gap", "max_gap", "vasprun_path"]
def run_task(self, fw_spec):
vr_path = zpath(self.get("vasprun_path", "vasprun.xml"))
min_gap = self.get("min_gap", None)
max_gap = self.get("max_gap", None)
if not os.path.exists(vr_path):
relax_paths = sorted(glob.glob(vr_path + ".relax*"))
if relax_paths:
if len(relax_paths) > 9:
raise ValueError(
"CheckBandgap doesn't properly handle >9 relaxations!")
vr_path = relax_paths[-1]
logger.info("Checking the gap of file: {}".format(vr_path))
vr = Vasprun(vr_path)
gap = vr.get_band_structure().get_band_gap()["energy"]
stored_data = {"band_gap": gap}
logger.info(
"The gap is: {}. Min gap: {}. Max gap: {}".format(gap, min_gap,
max_gap))
if (min_gap and gap < min_gap) or (max_gap and gap > max_gap):
logger.info("CheckBandgap: failed test!")
return FWAction(stored_data=stored_data, exit=True,
defuse_workflow=True)
return FWAction(stored_data=stored_data)
@explicit_serialize
class GetInterpolatedPOSCAR(FiretaskBase):
required_params = ["start", "end", "this_image", "nimages"]
optional_params = ["autosort_tol"]
def run_task(self, fw_spec):
structure = self.interpolate_poscar(fw_spec)
structure.to(fmt="POSCAR", filename=os.path.join(os.getcwd(), "POSCAR"))
def interpolate_poscar(self, fw_spec):
interpolate_folder = 'interpolate'
if not os.path.exists(os.path.join(os.getcwd(), interpolate_folder)):
os.makedirs(os.path.join(os.getcwd(), interpolate_folder))
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["start"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_0").run_task(fw_spec=fw_spec)
CopyFilesFromCalcLoc(calc_dir=None, calc_loc=self["end"],
filenames=["CONTCAR"],
name_prepend=interpolate_folder + os.sep,
name_append="_1").run_task(fw_spec=fw_spec)
s1 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_0"))
s2 = Structure.from_file(os.path.join(interpolate_folder, "CONTCAR_1"))
structs = s1.interpolate(s2, self["nimages"], interpolate_lattices=True,
autosort_tol=self.get("autosort_tol", 0.0))
i = self.get("this_image")
return structs[i]
def pass_vasp_result(pass_dict=None, calc_dir='.', filename="vasprun.xml.gz",
parse_eigen=False,
parse_dos=False, **kwargs):
pass_dict = pass_dict or {"computed_entry": "a>>get_computed_entry"}
parse_kwargs = {"filename": filename, "parse_eigen": parse_eigen,
"parse_dos": parse_dos}
return PassResult(pass_dict=pass_dict, calc_dir=calc_dir,
parse_kwargs=parse_kwargs,
parse_class="pymatgen.io.vasp.outputs.Vasprun", **kwargs)
| true
| true
|
f708a4b5c1c05df7983e5a7b61b02ba89c31901e
| 550
|
py
|
Python
|
experiments/issue488/issue488.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2019-04-23T10:41:35.000Z
|
2019-10-27T05:14:42.000Z
|
experiments/issue488/issue488.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | null | null | null |
experiments/issue488/issue488.py
|
nitinkaveriappa/downward
|
5c9a1b5111d667bb96f94da61ca2a45b1b70bb83
|
[
"MIT"
] | 4
|
2018-01-16T00:00:22.000Z
|
2019-11-01T23:35:01.000Z
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from downward import suites
import common_setup
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue488-base", "issue488-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| 18.965517
| 54
| 0.54
|
from downward import suites
import common_setup
CONFIGS = {
'astar_ipdb': [
'--search',
'astar(ipdb())'],
'astar_pdb': [
'--search',
'astar(pdb())'],
'astar_gapdb': [
'--search',
'astar(gapdb())'],
}
exp = common_setup.IssueExperiment(
search_revisions=["issue488-base", "issue488-v1"],
configs=CONFIGS,
suite=suites.suite_optimal_with_ipc11(),
)
exp.add_comparison_table_step()
exp()
| true
| true
|
f708a57b69d36dd8f2b7f8ddd643b86675efe433
| 15,355
|
py
|
Python
|
comment/tests/test_utils.py
|
abhiabhi94/Comment
|
0956fb395399328ada5d35263307e452567b36aa
|
[
"MIT"
] | null | null | null |
comment/tests/test_utils.py
|
abhiabhi94/Comment
|
0956fb395399328ada5d35263307e452567b36aa
|
[
"MIT"
] | null | null | null |
comment/tests/test_utils.py
|
abhiabhi94/Comment
|
0956fb395399328ada5d35263307e452567b36aa
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from unittest.mock import patch
from django.utils import timezone
from django.core import signing, mail
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import reverse
from comment.conf import settings
from comment.messages import EmailInfo
from comment.utils import (
get_model_obj, has_valid_profile, get_comment_context_data, id_generator, get_comment_from_key,
get_user_for_request, send_email_confirmation_request, process_anonymous_commenting, CommentFailReason,
get_gravatar_img, get_profile_instance)
from comment.tests.base import BaseCommentUtilsTest, Comment, RequestFactory
class CommentUtilsTest(BaseCommentUtilsTest):
def test_get_model_object(self):
data = {
'app_name': 'post',
'model_name': 'Post',
'model_id': self.post_1.id
}
model_object = get_model_obj(**data)
self.assertIsNotNone(model_object)
self.assertIsInstance(model_object, self.post_1.__class__)
@patch.object(settings, 'COMMENT_USE_GRAVATAR', True)
def test_get_gravatar_img(self):
# email is not provided
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
# email is provided
self.assertTrue(get_gravatar_img('test').startswith('https://www.gravatar.com/avatar/'))
# gravatar is disabled
patch.object(settings, 'COMMENT_USE_GRAVATAR', True).start()
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
def test_get_profile_instance(self):
# wrong content type
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong').start()
self.assertIsNone(get_profile_instance(self.user_1))
# correct data
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertIsNotNone(get_profile_instance(self.user_1))
# profile model has no user related model
patch.object(settings, 'PROFILE_MODEL_NAME', None).start()
self.assertIsNone(get_profile_instance(self.user_1))
@patch.object(settings, 'COMMENT_USE_GRAVATAR', False)
def test_has_valid_profile(self):
patch.object(settings, 'PROFILE_APP_NAME', 'user_profile').start()
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertTrue(has_valid_profile())
# one of settings attribute is missing
patch.object(settings, 'PROFILE_MODEL_NAME', '').start()
self.assertFalse(has_valid_profile())
# settings attr provided with wrong value
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong_value').start()
self.assertFalse(has_valid_profile())
# settings attr provided, profile model has no image
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
mocked_hasattr = patch('comment.utils.hasattr').start()
mocked_hasattr.return_value = False
self.assertFalse(has_valid_profile())
patch.object(settings, 'COMMENT_USE_GRAVATAR', True).start()
self.assertTrue(has_valid_profile())
def test_get_comment_context_data(self):
comment_per_page = 'COMMENT_PER_PAGE'
login_url = 'LOGIN_URL'
current_login_url = getattr(settings, login_url, '/profile/login/')
comment_allow_anonymous = 'COMMENT_ALLOW_ANONYMOUS'
comment_allow_translation = 'COMMENT_ALLOW_TRANSLATION'
oauth = 'oauth'
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, False).start()
patch.object(settings, comment_per_page, 0).start()
data = {
'model_object': self.post_1,
'model_name': 'post',
'model_id': self.post_1.id,
'app_name': 'post',
'user': self.post_1.author,
'page': 10,
oauth: 'True'
}
request = self.factory.post('/', data=data)
request.user = self.post_1.author
if current_login_url.startswith('/'):
patch.object(settings, login_url, current_login_url[1:]).start()
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].count(), self.increment)
# test inserting '/' to the beginning of login url
self.assertEqual(comment_context_data['login_url'], '/' + settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
self.assertEqual(comment_context_data['oauth'], True)
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, True).start()
patch.object(settings, comment_allow_translation, False).start()
patch.object(settings, comment_per_page, 2).start()
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_previous())
self.assertEqual(comment_context_data['login_url'], settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
data.update({'page': 'not integer', oauth: 'False'})
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_next())
self.assertEqual(comment_context_data[oauth], False)
def test_user_for_request(self):
request = self.factory.get('/')
request.user = AnonymousUser()
# test unauthenticated user
self.assertIsNone(get_user_for_request(request))
# test authenticated user
request.user = self.user_1
self.assertEqual(get_user_for_request(request), self.user_1)
class BaseAnonymousCommentTest(BaseCommentUtilsTest):
    """Shared fixture for tests exercising the anonymous-comment workflow."""

    def setUp(self):
        super().setUp()
        request_factory = RequestFactory()
        self.time_posted = timezone.now()
        # Build an unsaved anonymous comment (user=None) attached to post_1.
        self.comment_obj = Comment(
            content_object=self.post_1,
            content='posting anonymous comment',
            user=None,
            parent=None,
            email='test-1@acme.edu',
            posted=self.time_posted
        )
        # Signed key mirrors what the confirmation email would embed.
        self.key = signing.dumps(self.comment_obj.to_dict(), compress=True)
        self.request = request_factory.get('/')
        self.site = get_current_site(self.request)
class TestGetCommentFromKey(BaseAnonymousCommentTest, BaseCommentUtilsTest):
    """Tests for get_comment_from_key covering every failure mode and success."""

    def _assert_invalid(self, response, reason):
        """Assert *response* is invalid for *reason* and carries no comment object."""
        self.assertEqual(response.is_valid, False)
        self.assertEqual(response.why_invalid, reason)
        self.assertIsNone(response.obj)

    def test_bad_signature(self):
        # Tampering with the signed key must be rejected as BAD.
        key = self.key + 'invalid'
        self._assert_invalid(get_comment_from_key(key), CommentFailReason.BAD)

    def test_key_error(self):
        # A payload missing a required field ('model_name') fails as BAD.
        comment_dict = self.comment_obj.to_dict().copy()
        comment_dict.pop('model_name')
        key = signing.dumps(comment_dict)
        self._assert_invalid(get_comment_from_key(key), CommentFailReason.BAD)

    def test_attribute_error(self):
        # A non-string model name raises AttributeError internally -> BAD.
        comment_dict = self.comment_obj.to_dict().copy()
        comment_dict['model_name'] = 1
        key = signing.dumps(comment_dict)
        self._assert_invalid(get_comment_from_key(key), CommentFailReason.BAD)

    def test_value_error(self):
        # A bogus user value raises ValueError internally -> BAD.
        comment_dict = self.comment_obj.to_dict().copy()
        comment_dict['user'] = 1
        key = signing.dumps(comment_dict)
        self._assert_invalid(get_comment_from_key(key), CommentFailReason.BAD)

    def test_comment_exists(self):
        # A key matching an already-saved comment is rejected with EXISTS.
        comment_dict = self.comment_obj.to_dict().copy()
        comment = self.create_anonymous_comment(posted=timezone.now(), email='a@a.com')
        comment_dict.update({
            'posted': str(comment.posted),
            'email': comment.email
        })
        key = signing.dumps(comment_dict)
        self._assert_invalid(get_comment_from_key(key), CommentFailReason.EXISTS)

    def test_success(self):
        response = get_comment_from_key(self.key)
        self.assertEqual(response.is_valid, True)
        self.assertEqual(response.why_invalid, None)
        self.assertIsInstance(response.obj, Comment)
        # A valid key persists the comment, so it must have gained a primary key.
        self.assertIsNotNone(response.obj.id)
        self.assertEqual(response.obj.posted, self.time_posted)
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', True)
class TestSendEmailConfirmationRequest(BaseAnonymousCommentTest, BaseCommentUtilsTest):
    """
    Tests for send_email_confirmation_request: the confirmation email must be
    sent with the expected sender/receivers/subject and its body must contain
    the comment text, the confirmation URL (django or DRF flavour), the
    contact email, the site name and the commented object's absolute URL.
    """

    def setUp(self):
        super().setUp()
        settings.COMMENT_CONTACT_EMAIL = 'contact@domain'
        settings.COMMENT_FROM_EMAIL = 'no-reply@domain'
        self.len_mailbox = len(mail.outbox)
        self.confirmation_url = reverse('comment:confirm-comment', args=[self.key])
        self.confirmation_url_drf = f'/api/comments/confirm/{self.key}/'
        self.contact_email = settings.COMMENT_CONTACT_EMAIL
        self.receivers = [self.comment_obj.to_dict()['email']]
        self.sender = settings.COMMENT_FROM_EMAIL
        self.subject = EmailInfo.SUBJECT
        self.content_object_url = f'http://{self.site.domain}{self.comment_obj.content_object.get_absolute_url()}'

    def email_contents_test(self, contents, api=False):
        """Assert the message body contains every expected fragment."""
        if not api:
            confirmation_url = self.confirmation_url
        else:
            confirmation_url = self.confirmation_url_drf
        # message context contains comment content, confirmation url, contact
        # email, site name and the content object's absolute url.
        self.assertEqual(True, self.comment_obj.content in contents)
        self.assertEqual(True, confirmation_url in contents)
        self.assertEqual(True, self.contact_email in contents)
        self.assertEqual(True, self.site.name in contents)
        self.assertEqual(True, self.content_object_url in contents)

    def email_metadata_test(self, email, html=False):
        """Assert sender, receivers, subject and (optionally) the HTML alternative."""
        self.assertEqual(email.from_email, self.sender)
        self.assertEqual(email.to, self.receivers)
        self.assertEqual(email.subject, self.subject)
        if html:
            self.assertEqual(email.alternatives[0][1], 'text/html')
        else:
            self.assertEqual(email.alternatives, [])

    def _send_and_verify(self, api=False, html=False):
        """Send one confirmation email, then check mailbox growth, metadata and body.

        NOTE(review): the original django-flavour tests omit ``api``; passing
        ``api=False`` explicitly assumes that is the function's default.
        """
        receiver = self.comment_obj.to_dict()['email']
        len_mailbox = self.len_mailbox
        response = send_email_confirmation_request(
            self.comment_obj, receiver, self.key, self.site, api=api)
        self.assertIsNone(response)
        self.assertEqual(len(mail.outbox), len_mailbox + 1)
        sent_email = mail.outbox[0]
        self.email_metadata_test(sent_email, html=html)
        self.email_contents_test(sent_email.body, api=api)

    @patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
    def test_sending_only_text_template_with_django(self):
        self._send_and_verify(api=False, html=False)

    @patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
    def test_sending_only_text_template_with_drf(self):
        self._send_and_verify(api=True, html=False)

    @patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
    def test_sending_both_text_and_html_template_with_django(self):
        self._send_and_verify(api=False, html=True)

    @patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
    def test_sending_both_text_and_html_template_with_drf(self):
        self._send_and_verify(api=True, html=True)
class TestProcessAnonymousCommenting(BaseAnonymousCommentTest, BaseCommentUtilsTest):
    """process_anonymous_commenting must report that a confirmation was sent."""

    def setUp(self):
        super().setUp()
        self.request.user = AnonymousUser()

    def test_for_django(self):
        result = process_anonymous_commenting(self.request, self.comment_obj)
        self.assertEqual(result, EmailInfo.CONFIRMATION_SENT)

    def test_for_drf(self):
        result = process_anonymous_commenting(self.request, self.comment_obj, api=True)
        self.assertEqual(result, EmailInfo.CONFIRMATION_SENT)
class UtilsTest(TestCase):
    """Test general purpose utilities that aren't necessarily related to a comment"""

    def setUp(self):
        # Default length produced by id_generator() when len_id is not given.
        self.len_id = 6

    def test_id_generator_length(self):
        self.assertEqual(self.len_id, len(id_generator()))

    def test_id_generator_generates_different_ids(self):
        # Two consecutive calls should (with overwhelming probability) differ.
        self.assertNotEqual(id_generator(), id_generator())

    def test_id_generator_prefix(self):
        prefix = 'comment'
        output = id_generator(prefix=prefix)
        # assertTrue is the idiomatic form of assertEqual(True, ...).
        self.assertTrue(output.startswith(prefix))
        self.assertEqual(self.len_id + len(prefix), len(output))

    def test_id_generator_suffix(self):
        suffix = 'comment'
        output = id_generator(suffix=suffix)
        self.assertTrue(output.endswith(suffix))
        self.assertEqual(self.len_id + len(suffix), len(output))

    def test_id_generator_chars(self):
        import string  # noqa
        # Restricting the alphabet to uppercase must yield an uppercase id.
        output = id_generator(chars=string.ascii_uppercase)
        self.assertEqual(output, output.upper())

    def test_id_generator_len(self):
        len_id = 8
        self.assertEqual(len_id, len(id_generator(len_id=len_id)))
| 42.068493
| 114
| 0.696711
|
from unittest import TestCase
from unittest.mock import patch
from django.utils import timezone
from django.core import signing, mail
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import reverse
from comment.conf import settings
from comment.messages import EmailInfo
from comment.utils import (
get_model_obj, has_valid_profile, get_comment_context_data, id_generator, get_comment_from_key,
get_user_for_request, send_email_confirmation_request, process_anonymous_commenting, CommentFailReason,
get_gravatar_img, get_profile_instance)
from comment.tests.base import BaseCommentUtilsTest, Comment, RequestFactory
class CommentUtilsTest(BaseCommentUtilsTest):
def test_get_model_object(self):
data = {
'app_name': 'post',
'model_name': 'Post',
'model_id': self.post_1.id
}
model_object = get_model_obj(**data)
self.assertIsNotNone(model_object)
self.assertIsInstance(model_object, self.post_1.__class__)
@patch.object(settings, 'COMMENT_USE_GRAVATAR', True)
def test_get_gravatar_img(self):
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
self.assertTrue(get_gravatar_img('test').startswith('https://www.gravatar.com/avatar/'))
patch.object(settings, 'COMMENT_USE_GRAVATAR', True).start()
self.assertEqual(get_gravatar_img(''), '/static/img/default.png')
def test_get_profile_instance(self):
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong').start()
self.assertIsNone(get_profile_instance(self.user_1))
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertIsNotNone(get_profile_instance(self.user_1))
patch.object(settings, 'PROFILE_MODEL_NAME', None).start()
self.assertIsNone(get_profile_instance(self.user_1))
@patch.object(settings, 'COMMENT_USE_GRAVATAR', False)
def test_has_valid_profile(self):
patch.object(settings, 'PROFILE_APP_NAME', 'user_profile').start()
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
self.assertTrue(has_valid_profile())
patch.object(settings, 'PROFILE_MODEL_NAME', '').start()
self.assertFalse(has_valid_profile())
patch.object(settings, 'PROFILE_MODEL_NAME', 'wrong_value').start()
self.assertFalse(has_valid_profile())
patch.object(settings, 'PROFILE_MODEL_NAME', 'userprofile').start()
mocked_hasattr = patch('comment.utils.hasattr').start()
mocked_hasattr.return_value = False
self.assertFalse(has_valid_profile())
patch.object(settings, 'COMMENT_USE_GRAVATAR', True).start()
self.assertTrue(has_valid_profile())
def test_get_comment_context_data(self):
comment_per_page = 'COMMENT_PER_PAGE'
login_url = 'LOGIN_URL'
current_login_url = getattr(settings, login_url, '/profile/login/')
comment_allow_anonymous = 'COMMENT_ALLOW_ANONYMOUS'
comment_allow_translation = 'COMMENT_ALLOW_TRANSLATION'
oauth = 'oauth'
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, False).start()
patch.object(settings, comment_per_page, 0).start()
data = {
'model_object': self.post_1,
'model_name': 'post',
'model_id': self.post_1.id,
'app_name': 'post',
'user': self.post_1.author,
'page': 10,
oauth: 'True'
}
request = self.factory.post('/', data=data)
request.user = self.post_1.author
if current_login_url.startswith('/'):
patch.object(settings, login_url, current_login_url[1:]).start()
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].count(), self.increment)
self.assertEqual(comment_context_data['login_url'], '/' + settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
self.assertEqual(comment_context_data['oauth'], True)
patch.object(settings, login_url, current_login_url).start()
patch.object(settings, comment_allow_anonymous, True).start()
patch.object(settings, comment_allow_translation, False).start()
patch.object(settings, comment_per_page, 2).start()
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_previous())
self.assertEqual(comment_context_data['login_url'], settings.LOGIN_URL)
self.assertEqual(comment_context_data['is_anonymous_allowed'], settings.COMMENT_ALLOW_ANONYMOUS)
self.assertEqual(comment_context_data['is_translation_allowed'], settings.COMMENT_ALLOW_TRANSLATION)
data.update({'page': 'not integer', oauth: 'False'})
request = self.factory.post('/', data=data)
request.user = self.post_1.author
comment_context_data = get_comment_context_data(request)
self.assertEqual(comment_context_data['comments'].paginator.per_page, 2)
self.assertTrue(comment_context_data['comments'].has_next())
self.assertEqual(comment_context_data[oauth], False)
def test_user_for_request(self):
request = self.factory.get('/')
request.user = AnonymousUser()
self.assertIsNone(get_user_for_request(request))
request.user = self.user_1
self.assertEqual(get_user_for_request(request), self.user_1)
class BaseAnonymousCommentTest(BaseCommentUtilsTest):
def setUp(self):
super().setUp()
self.time_posted = timezone.now()
_email = 'test-1@acme.edu'
_content = 'posting anonymous comment'
_parent = None
_factory = RequestFactory()
self.comment_obj = Comment(
content_object=self.post_1,
content=_content,
user=None,
parent=_parent,
email=_email,
posted=self.time_posted
)
self.key = signing.dumps(self.comment_obj.to_dict(), compress=True)
self.request = _factory.get('/')
self.site = get_current_site(self.request)
class TestGetCommentFromKey(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def test_bad_signature(self):
key = self.key + 'invalid'
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_key_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict.pop('model_name')
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_attribute_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict['model_name'] = 1
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_value_error(self):
comment_dict = self.comment_obj.to_dict().copy()
comment_dict['user'] = 1
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.BAD)
self.assertIsNone(response.obj)
def test_comment_exists(self):
comment_dict = self.comment_obj.to_dict().copy()
comment = self.create_anonymous_comment(posted=timezone.now(), email='a@a.com')
comment_dict.update({
'posted': str(comment.posted),
'email': comment.email
})
key = signing.dumps(comment_dict)
response = get_comment_from_key(key)
self.assertEqual(response.is_valid, False)
self.assertEqual(response.why_invalid, CommentFailReason.EXISTS)
self.assertIsNone(response.obj)
def test_success(self):
response = get_comment_from_key(self.key)
self.assertEqual(response.is_valid, True)
self.assertEqual(response.why_invalid, None)
self.assertIsInstance(response.obj, Comment)
self.assertIsNotNone(response.obj.id)
self.assertEqual(response.obj.posted, self.time_posted)
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', True)
class TestSendEmailConfirmationRequest(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def setUp(self):
super().setUp()
settings.COMMENT_CONTACT_EMAIL = 'contact@domain'
settings.COMMENT_FROM_EMAIL = 'no-reply@domain'
self.len_mailbox = len(mail.outbox)
self.confirmation_url = reverse('comment:confirm-comment', args=[self.key])
self.confirmation_url_drf = f'/api/comments/confirm/{self.key}/'
self.contact_email = settings.COMMENT_CONTACT_EMAIL
self.receivers = [self.comment_obj.to_dict()['email']]
self.sender = settings.COMMENT_FROM_EMAIL
self.subject = EmailInfo.SUBJECT
self.content_object_url = f'http://{self.site.domain}{self.comment_obj.content_object.get_absolute_url()}'
def email_contents_test(self, contents, api=False):
if not api:
confirmation_url = self.confirmation_url
else:
confirmation_url = self.confirmation_url_drf
self.assertEqual(True, self.comment_obj.content in contents)
self.assertEqual(True, confirmation_url in contents)
self.assertEqual(True, self.contact_email in contents)
self.assertEqual(True, self.site.name in contents)
self.assertEqual(True, self.content_object_url in contents)
def email_metadata_test(self, email, html=False):
self.assertEqual(email.from_email, self.sender)
self.assertEqual(email.to, self.receivers)
self.assertEqual(email.subject, self.subject)
if html:
self.assertEqual(email.alternatives[0][1], 'text/html')
else:
self.assertEqual(email.alternatives, [])
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
def test_sending_only_text_template_with_django(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email)
self.email_contents_test(sent_email.body)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', False)
def test_sending_only_text_template_with_drf(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site, api=True)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email)
self.email_contents_test(sent_email.body, api=True)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
def test_sending_both_text_and_html_template_with_django(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email, html=True)
self.email_contents_test(sent_email.body)
@patch.object(settings, 'COMMENT_SEND_HTML_EMAIL', True)
def test_sending_both_text_and_html_template_with_drf(self):
receiver = self.comment_obj.to_dict()['email']
len_mailbox = self.len_mailbox
response = send_email_confirmation_request(self.comment_obj, receiver, self.key, self.site, api=True)
self.assertIsNone(response)
self.assertEqual(len(mail.outbox), len_mailbox + 1)
sent_email = mail.outbox[0]
self.email_metadata_test(sent_email, html=True)
self.email_contents_test(sent_email.body, api=True)
class TestProcessAnonymousCommenting(BaseAnonymousCommentTest, BaseCommentUtilsTest):
def setUp(self):
super().setUp()
self.request.user = AnonymousUser()
def test_for_django(self):
response = process_anonymous_commenting(self.request, self.comment_obj)
self.assertEqual(EmailInfo.CONFIRMATION_SENT, response)
def test_for_drf(self):
response = process_anonymous_commenting(self.request, self.comment_obj, api=True)
self.assertEqual(EmailInfo.CONFIRMATION_SENT, response)
class UtilsTest(TestCase):
def setUp(self):
self.len_id = 6
def test_id_generator_length(self):
self.assertEqual(self.len_id, len(id_generator()))
def test_id_generator_generates_different_ids(self):
self.assertNotEqual(id_generator(), id_generator())
def test_id_generator_prefix(self):
prefix = 'comment'
output = id_generator(prefix=prefix)
self.assertEqual(True, output.startswith(prefix))
self.assertEqual(self.len_id + len(prefix), len(output))
def test_id_generator_suffix(self):
suffix = 'comment'
output = id_generator(suffix=suffix)
self.assertEqual(True, output.endswith(suffix))
self.assertEqual(self.len_id + len(suffix), len(output))
def test_id_generator_chars(self):
import string # flake8:no qa
chars = string.ascii_uppercase
output = id_generator(chars=chars)
self.assertEqual(output, output.upper())
def test_id_generator_len(self):
len_id = 8
self.assertEqual(len_id, len(id_generator(len_id=len_id)))
| true
| true
|
f708a65e39503b03d6e8b8b84ad95a20f948f77c
| 224
|
py
|
Python
|
example_project/carts/admin.py
|
aino/django-nimda
|
334709c64cb253c0d1b5676850bd2d8ff9b8bea4
|
[
"BSD-3-Clause"
] | null | null | null |
example_project/carts/admin.py
|
aino/django-nimda
|
334709c64cb253c0d1b5676850bd2d8ff9b8bea4
|
[
"BSD-3-Clause"
] | 7
|
2020-06-05T17:01:18.000Z
|
2022-03-11T23:12:34.000Z
|
example_project/carts/admin.py
|
aino/django-nimda
|
334709c64cb253c0d1b5676850bd2d8ff9b8bea4
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin

from .models import Cart, CartItem


class CartItemInline(admin.TabularInline):
    """Edit CartItem rows inline on the Cart change page."""
    model = CartItem


class CartAdmin(admin.ModelAdmin):
    """Admin for Cart, with its items editable inline."""
    inlines = [CartItemInline]


# Explicit registration; equivalent to decorating CartAdmin with
# @admin.register(Cart).
admin.site.register(Cart, CartAdmin)
| 18.666667
| 42
| 0.772321
|
from django.contrib import admin
from .models import Cart, CartItem
class CartItemInline(admin.TabularInline):
model = CartItem
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
inlines = [CartItemInline]
| true
| true
|
f708a70e28dc3d5e7d2e9aa1e23cc76bac08c8a2
| 8,167
|
py
|
Python
|
pyflux/arma/tests/test_arima_laplace.py
|
ThomasHoppe/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 2,091
|
2016-04-01T02:52:10.000Z
|
2022-03-29T11:38:15.000Z
|
pyflux/arma/tests/test_arima_laplace.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 160
|
2016-04-26T14:52:18.000Z
|
2022-03-15T02:09:07.000Z
|
pyflux/arma/tests/test_arima_laplace.py
|
EricSchles/pyflux
|
297f2afc2095acd97c12e827dd500e8ea5da0c0f
|
[
"BSD-3-Clause"
] | 264
|
2016-05-02T14:03:31.000Z
|
2022-03-29T07:48:20.000Z
|
import numpy as np

from pyflux.arma import ARIMA
from pyflux.families import Laplace

# Synthetic AR(1) fixture: data[i] = 0.9 * data[i-1] + noise[i].
# NOTE(review): the RNG is unseeded, so the series differs between runs;
# consider np.random.seed(...) for reproducible test failures.
noise = np.random.normal(0, 1, 100)
data = np.zeros(100)
for i in range(1, len(data)):
    data[i] = 0.9 * data[i - 1] + noise[i]


def _assert_interval_ordering(predictions, model):
    """Assert 99% > 95% > point forecast > 5% > 1% at every forecast step."""
    assert np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values)
    assert np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values)
    assert np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values)
    assert np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values)


def _assert_latent_variables(model, expected_count):
    """Assert the latent variable list has *expected_count* entries, none nan."""
    assert len(model.latent_variables.z_list) == expected_count
    lvs = np.array([i.value for i in model.latent_variables.z_list])
    assert len(lvs[np.isnan(lvs)]) == 0


def test_no_terms():
    """
    Tests an ARIMA model with no AR or MA terms, and that
    the latent variable list length is correct, and that the estimated
    latent variables are not nan
    """
    model = ARIMA(data=data, ar=0, ma=0, family=Laplace())
    model.fit()
    _assert_latent_variables(model, 2)


def test_couple_terms():
    """
    Tests an ARIMA model with 1 AR and 1 MA term and that
    the latent variable list length is correct, and that the estimated
    latent variables are not nan
    """
    model = ARIMA(data=data, ar=1, ma=1, family=Laplace())
    model.fit()
    _assert_latent_variables(model, 4)


def test_couple_terms_integ():
    """
    Tests an ARIMA model with 1 AR and 1 MA term, integrated once, and that
    the latent variable list length is correct, and that the estimated
    latent variables are not nan
    """
    model = ARIMA(data=data, ar=1, ma=1, integ=1, family=Laplace())
    model.fit()
    _assert_latent_variables(model, 4)


def test_predict_length():
    """
    Tests that the prediction dataframe length is equal to the number of steps h
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    assert model.predict(h=5).shape[0] == 5


def test_predict_is_length():
    """
    Tests that the prediction IS dataframe length is equal to the number of steps h
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    assert model.predict_is(h=5).shape[0] == 5


def test_predict_nans():
    """
    Tests that the predictions are not nans
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    # Predict once and reuse the values (the original predicted twice).
    values = model.predict(h=5).values
    assert len(values[np.isnan(values)]) == 0


def test_predict_is_nans():
    """
    Tests that the in-sample predictions are not nans
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    values = model.predict_is(h=5).values
    assert len(values[np.isnan(values)]) == 0


def test_predict_nonconstant():
    """
    We should not really have predictions that are constant (should be some difference)...
    This captures bugs with the predict function not iterating forward
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    predictions = model.predict(h=10, intervals=False)
    assert not np.all(predictions.values == predictions.values[0])


def test_predict_is_nonconstant():
    """
    We should not really have predictions that are constant (should be some difference)...
    This captures bugs with the predict function not iterating forward
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    predictions = model.predict_is(h=10, intervals=False)
    assert not np.all(predictions.values == predictions.values[0])


def test_predict_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    _assert_interval_ordering(model.predict(h=10, intervals=True), model)


def test_predict_is_intervals():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit()
    _assert_interval_ordering(model.predict_is(h=10, intervals=True), model)


def test_predict_intervals_bbvi():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    _assert_interval_ordering(model.predict(h=10, intervals=True), model)


def test_predict_is_intervals_bbvi():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    _assert_interval_ordering(model.predict_is(h=10, intervals=True), model)


def test_predict_intervals_mh():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('M-H', nsims=200, quiet_progress=True)
    _assert_interval_ordering(model.predict(h=10, intervals=True), model)


def test_predict_is_intervals_mh():
    """
    Tests prediction intervals are ordered correctly
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('M-H', nsims=200, quiet_progress=True)
    _assert_interval_ordering(model.predict_is(h=10, intervals=True), model)


def test_sample_model():
    """
    Tests sampling function
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    sample = model.sample(nsims=100)
    assert sample.shape[0] == 100
    # Two observations are consumed by the AR/MA lags.
    assert sample.shape[1] == len(data) - 2


def test_ppc():
    """
    Tests PPC value
    """
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    p_value = model.ppc()
    # A posterior predictive p-value is a probability.
    assert 0.0 <= p_value <= 1.0
| 42.536458
| 113
| 0.691564
|
import numpy as np
from pyflux.arma import ARIMA
from pyflux.families import Laplace
noise = np.random.normal(0,1,100)
data = np.zeros(100)
for i in range(1,len(data)):
data[i] = 0.9*data[i-1] + noise[i]
def test_no_terms():
model = ARIMA(data=data, ar=0, ma=0, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 2)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_couple_terms():
model = ARIMA(data=data, ar=1, ma=1, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_couple_terms_integ():
model = ARIMA(data=data, ar=1, ma=1, integ=1, family=Laplace())
x = model.fit()
assert(len(model.latent_variables.z_list) == 4)
lvs = np.array([i.value for i in model.latent_variables.z_list])
assert(len(lvs[np.isnan(lvs)]) == 0)
def test_predict_length():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(model.predict(h=5).shape[0] == 5)
def test_predict_is_length():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(model.predict_is(h=5).shape[0] == 5)
def test_predict_nans():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(len(model.predict(h=5).values[np.isnan(model.predict(h=5).values)]) == 0)
def test_predict_is_nans():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
assert(len(model.predict_is(h=5).values[np.isnan(model.predict_is(h=5).values)]) == 0)
def test_predict_nonconstant():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict(h=10, intervals=False)
assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_is_nonconstant():
model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
x = model.fit()
predictions = model.predict_is(h=10, intervals=False)
assert(not np.all(predictions.values==predictions.values[0]))
def test_predict_intervals():
    """Out-of-sample prediction intervals nest correctly around the point forecast."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit()
    predictions = model.predict(h=10, intervals=True)
    # Intervals must be strictly ordered: 99% > 95% > point forecast > 5% > 1%.
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals():
    """In-sample rolling prediction intervals nest correctly."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit()
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_bbvi():
    """Interval ordering also holds under a BBVI fit."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit('BBVI', iterations=100, quiet_progress=True)
    predictions = model.predict(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_bbvi():
    """In-sample interval ordering also holds under a BBVI fit."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit('BBVI', iterations=100, quiet_progress=True)
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_intervals_mh():
    """Interval ordering also holds under a Metropolis-Hastings fit."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit('M-H', nsims=200, quiet_progress=True)
    predictions = model.predict(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_predict_is_intervals_mh():
    """In-sample interval ordering also holds under a Metropolis-Hastings fit."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    x = model.fit('M-H', nsims=200, quiet_progress=True)
    predictions = model.predict_is(h=10, intervals=True)
    assert(np.all(predictions['99% Prediction Interval'].values > predictions['95% Prediction Interval'].values))
    assert(np.all(predictions['95% Prediction Interval'].values > predictions[model.data_name].values))
    assert(np.all(predictions[model.data_name].values > predictions['5% Prediction Interval'].values))
    assert(np.all(predictions['5% Prediction Interval'].values > predictions['1% Prediction Interval'].values))
def test_sample_model():
    """Posterior samples come back with shape (nsims, len(data) - 2)."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    draws = model.sample(nsims=100)
    assert(draws.shape[0] == 100)
    assert(draws.shape[1] == len(data)-2)
def test_ppc():
    """The posterior predictive p-value lies in the closed interval [0, 1]."""
    model = ARIMA(data=data, ar=2, ma=2, family=Laplace())
    model.fit('BBVI', iterations=100, quiet_progress=True)
    assert(0.0 <= model.ppc() <= 1.0)
| true
| true
|
f708a81d6a696ba3b827ec15b66d87f5d15958c5
| 2,048
|
py
|
Python
|
kernel/examples/handler/component/vert_secureboost.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 39
|
2021-10-12T01:43:27.000Z
|
2022-03-28T04:46:35.000Z
|
kernel/examples/handler/component/vert_secureboost.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 6
|
2021-10-14T02:11:47.000Z
|
2022-03-23T02:41:50.000Z
|
kernel/examples/handler/component/vert_secureboost.py
|
rinceyuan/WeFe
|
8482cb737cb7ba37b2856d184cd42c1bd35a6318
|
[
"Apache-2.0"
] | 10
|
2021-10-14T09:36:03.000Z
|
2022-02-10T11:05:12.000Z
|
# Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.python.utils import log_utils
from kernel.components.boosting.param import BoostingTreeParam
from kernel.examples.handler.component.component_base import Component
from kernel.examples.handler.interface import Input
from kernel.examples.handler.interface import Output
LOGGER = log_utils.get_logger()
class VertSecureBoost(Component, BoostingTreeParam):
    """Pipeline-handler wrapper for the vertical SecureBoost component.

    Combines the generic Component plumbing with BoostingTreeParam's
    hyper-parameters; the module/param names tell the job builder which
    backend component and parameter class to instantiate.
    """

    def __init__(self, **kwargs):
        Component.__init__(self, **kwargs)
        LOGGER.debug(f"{self.name} component created")
        # Strip Component-level keys before forwarding to the param class,
        # which only accepts boosting-tree hyper-parameters.
        new_kwargs = self.erase_component_base_param(**kwargs)
        BoostingTreeParam.__init__(self, **new_kwargs)
        self.input = Input(self.name, data_type="multi")
        self.output = Output(self.name)
        self._module_name = "VertSecureBoost"
        self._param_name = "BoostingTreeParam"
| 39.384615
| 74
| 0.751953
|
from common.python.utils import log_utils
from kernel.components.boosting.param import BoostingTreeParam
from kernel.examples.handler.component.component_base import Component
from kernel.examples.handler.interface import Input
from kernel.examples.handler.interface import Output
LOGGER = log_utils.get_logger()
class VertSecureBoost(Component, BoostingTreeParam):
def __init__(self, **kwargs):
Component.__init__(self, **kwargs)
LOGGER.debug(f"{self.name} component created")
new_kwargs = self.erase_component_base_param(**kwargs)
BoostingTreeParam.__init__(self, **new_kwargs)
self.input = Input(self.name, data_type="multi")
self.output = Output(self.name)
self._module_name = "VertSecureBoost"
self._param_name = "BoostingTreeParam"
| true
| true
|
f708a978e4ac5b441e8bee7b68aea9849977d69b
| 2,239
|
py
|
Python
|
src/attendance.py
|
Subdue0/pyqt5-demo
|
aae13e1ab2ffcb2383303028a9c0dd3e3e153d38
|
[
"MIT"
] | null | null | null |
src/attendance.py
|
Subdue0/pyqt5-demo
|
aae13e1ab2ffcb2383303028a9c0dd3e3e153d38
|
[
"MIT"
] | null | null | null |
src/attendance.py
|
Subdue0/pyqt5-demo
|
aae13e1ab2ffcb2383303028a9c0dd3e3e153d38
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys

from PyQt5.QtWidgets import QApplication

from Page.page import Page
class Attendance(Page):
    """Attendance page: loads employee records from the database into a
    paginated, editable table widget.

    Fix: the original defined ``initFormDate`` twice with identical bodies;
    the second definition silently shadowed the first, so one copy was dead
    code and has been removed.
    """

    def __init__(self, parent=None):
        # NOTE(review): super(Page, self) skips Page.__init__ and calls
        # Page's own base class directly -- presumably intentional since
        # setupUi() is invoked manually here; confirm against Page.
        super(Page, self).__init__(parent)
        self.setupUi(self)
        self.getDataFromDB()
        self.setRowHeader(self.row_sum)
        self.field = ['编号', '姓名', '迟到', '早退', '病假', '事假', '旷工']
        self.setColumnHeader(self.field)
        self.col_sum = self.tableWidget.columnCount()
        self.setItemColorAlignment()
        self.initFormDate()
        self.initSearchField()
        self.setNumNameUneditable()
        self.setFormStyleSheet()
        self.createContextMenu()
        # Pending edits, keyed by operation, flushed on submit.
        self.history_record = {'add': [], 'del': [], 'update': {}}
        self.submit.setEnabled(False)
        # Guard flag for cellChanged handling while the form is (re)filled.
        self.cell_changed_flag = False
        # Pagination state: current page (1-based) and total page count,
        # at 10 rows per page.
        self.form_cur_page_num = 1
        row_sum = self.tableWidget.rowCount()
        if row_sum % 10:
            self.form_page_total = row_sum // 10 + 1
        else:
            self.form_page_total = row_sum // 10
        self.initFormPageBar()
        self.pageBlockDisplay()
        self.signalConnection()

    def getDataFromDB(self):
        """Query employee rows and cache them on self.row / self.row_sum.

        Exits the process on any DB error (requires a module-level
        ``import sys``).
        """
        try:
            self.connectDB()
            self.cursor.execute('''
            select Eno,Ename,Esex,Eage,Etel,Eedu,Dname,Pname,Eid,Intime,Gradu,Eaddr,Resume
            from Employee,Department,Post
            where Employee.Dno=Department.Dno
            and Employee.Pno=Post.Pno
            ''')
            self.row = self.cursor.fetchall()
            self.row_sum = len(self.row)
        except Exception as e:
            print('getDataFromDB():\n'+repr(e))
            sys.exit(-1)

    def initFormDate(self):
        """Copy the cached DB rows into the visible table cells.

        Only the first col_sum columns of each fetched row are shown;
        falsy values (None, empty string, 0) leave the cell untouched.
        """
        for each_row in range(self.row_sum):
            for each_col in range(self.col_sum):
                if self.row[each_row][each_col]:
                    item_text = str(self.row[each_row][each_col])
                    self.tableWidget.item(each_row, each_col).setText(item_text)
if __name__ == '__main__':
    # Manual smoke test: show the attendance page standalone.
    import sys
    app = QApplication(sys.argv)
    page = Attendance()
    page.show()
    sys.exit(app.exec_())
| 22.846939
| 86
| 0.678428
|
from PyQt5.QtWidgets import QApplication
from Page.page import Page
class Attendance(Page):
def __init__(self, parent=None):
super(Page, self).__init__(parent)
self.setupUi(self)
self.getDataFromDB()
self.setRowHeader(self.row_sum)
self.field = ['编号', '姓名', '迟到', '早退', '病假', '事假', '旷工']
self.setColumnHeader(self.field)
self.col_sum = self.tableWidget.columnCount()
self.setItemColorAlignment()
self.initFormDate()
self.initSearchField()
self.setNumNameUneditable()
self.setFormStyleSheet()
self.createContextMenu()
self.history_record = {'add': [], 'del': [], 'update': {}}
self.submit.setEnabled(False)
self.cell_changed_flag = False
self.form_cur_page_num = 1
row_sum = self.tableWidget.rowCount()
if row_sum%10:
self.form_page_total = int(row_sum/10) + 1
else:
self.form_page_total = int(row_sum/10)
self.initFormPageBar()
self.pageBlockDisplay()
self.signalConnection()
def getDataFromDB(self):
try:
self.connectDB()
self.cursor.execute('''
select Eno,Ename,Esex,Eage,Etel,Eedu,Dname,Pname,Eid,Intime,Gradu,Eaddr,Resume
from Employee,Department,Post
where Employee.Dno=Department.Dno
and Employee.Pno=Post.Pno
''')
self.row = self.cursor.fetchall()
self.row_sum = len(self.row)
except Exception as e:
print('getDataFromDB():\n'+repr(e))
sys.exit(-1)
def initFormDate(self):
for each_row in range(self.row_sum):
for each_col in range(self.col_sum):
if self.row[each_row][each_col]:
item_text = str(self.row[each_row][each_col])
self.tableWidget.item(each_row, each_col).setText(item_text)
def initFormDate(self):
for each_row in range(self.row_sum):
for each_col in range(self.col_sum):
if self.row[each_row][each_col]:
item_text = str(self.row[each_row][each_col])
self.tableWidget.item(each_row, each_col).setText(item_text)
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
attendance = Attendance()
attendance.show()
sys.exit(app.exec_())
| true
| true
|
f708a9b201c27dea1e1329bdb0da07f50d3fea38
| 22
|
py
|
Python
|
battleship/version.py
|
nickknudsen/battleship-python
|
788cf76c3349200f8a1e15f49ee2eee74fdb6e86
|
[
"MIT"
] | 315
|
2016-12-29T17:42:39.000Z
|
2022-03-24T03:57:41.000Z
|
battleship/version.py
|
nickknudsen/battleship-python
|
788cf76c3349200f8a1e15f49ee2eee74fdb6e86
|
[
"MIT"
] | 147
|
2017-01-19T17:45:08.000Z
|
2022-03-31T15:00:29.000Z
|
battleship/version.py
|
nickknudsen/battleship-python
|
788cf76c3349200f8a1e15f49ee2eee74fdb6e86
|
[
"MIT"
] | 112
|
2016-12-29T12:56:52.000Z
|
2022-03-16T08:05:49.000Z
|
__version__ = "1.3.2"
| 11
| 21
| 0.636364
|
__version__ = "1.3.2"
| true
| true
|
f708aa27c15636ec86e4c678122b23f286f52bb0
| 3,204
|
py
|
Python
|
day15/day15.py
|
Daggy1234/AOC2020
|
4ee5cebb6640540f8a3b20e7c7ea37196d4c4ce9
|
[
"MIT"
] | 1
|
2021-01-17T17:59:19.000Z
|
2021-01-17T17:59:19.000Z
|
day15/day15.py
|
Daggy1234/AOC2020
|
4ee5cebb6640540f8a3b20e7c7ea37196d4c4ce9
|
[
"MIT"
] | null | null | null |
day15/day15.py
|
Daggy1234/AOC2020
|
4ee5cebb6640540f8a3b20e7c7ea37196d4c4ce9
|
[
"MIT"
] | null | null | null |
import functools
import time
def timer(function):
    """Decorator: run *function* 10 times and report the mean duration.

    The wrapped call returns ``[mean_ms, result]`` where ``mean_ms`` is the
    average wall-clock time in milliseconds (rounded to 2 decimals) and
    ``result`` is the value from the final invocation.
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs) -> list:
        timings = []
        result = None
        for _ in range(10):
            start = time.perf_counter()
            result = function(*args, **kwargs)
            timings.append(time.perf_counter() - start)
        return [round(sum(timings) / len(timings) * 1000, 2), result]
    return wrapper
def sol_a(input: list,num: int):
    """Memory game (AoC 2020 day 15), naive version.

    Returns the number spoken on turn ``num``.  Each turn rescans every
    value spoken so far, so this is O(num^2) overall.
    NOTE(review): unlike sol_b this is NOT decorated with @timer, so it
    returns a bare int -- callers that subscript the result must wrap it
    (e.g. ``timer(sol_a)``).
    """
    m = num
    last =input[len(input)-1]
    # turns maps str(turn index) -> number spoken on that turn, in
    # insertion order (relied on by get_turn_diff below).
    turns = dict()
    for i, val in enumerate(input):
        turns[str(i)]= val
    def get_turn_diff(val) -> int:
        # Scan the spoken numbers in reverse insertion order and return the
        # gap (in turns) between the two most recent occurrences of
        # val["val"].  Assumes at least two occurrences exist.
        rev = [val for val in val["data"].values()][::-1]
        ans = []
        for i,ansa in enumerate(rev):
            if ansa == val["val"]:
                ans.append(len(rev)-i)
                if len(ans) == 2:
                    break
        return ans[0] - ans[1]
    for i in range(len(input),m):
        prev = last
        # Full scan: how many times has the previous number been spoken?
        cc = [val for val in turns.values()].count(prev)
        if 1 == cc:
            # First occurrence -> speak 0.
            turns[str(i)] = 0
            last = 0
        else:
            # Seen before -> speak the gap between its last two occurrences.
            dtp = {"val": prev, "data": turns}
            out = get_turn_diff(dtp)
            turns[str(i)] = out
            last = out
    return turns[str(m-1)]
@timer
def sol_b(input: list,times: int):
    """Memory game, indexed version: per-number occurrence lists.

    Via the @timer decorator the call returns ``[mean_ms, answer]`` where
    *answer* is the number spoken on turn ``times``.
    """
    # indexer maps str(number) -> {"index": [turns spoken], "count": n}.
    indexer = dict()
    last = input[len(input)-1]
    def set_index(n,d):
        """Record that number *n* (stringified) was spoken on turn *d*."""
        try:
            indexer[n]["count"] += 1
            indexer[n]["index"].append(d)
        except KeyError:
            indexer[n] = {"index": [d], "count": 1}
    for i,val in enumerate(input):
        # Seed the starting numbers; a repeated seed keeps its first entry
        # (matches the original try/continue behaviour).
        if str(val) not in indexer:
            indexer[str(val)] = {"index": [i], "count": 1}
    for i in range(len(input),times):
        try:
            if indexer[str(last)]["count"] == 1:
                # Previous number was new -> speak 0.
                set_index(str(0),i)
                last = 0
            else:
                # Speak the gap between its two most recent occurrences.
                indexes = indexer[str(last)]["index"][::-1]
                last = indexes[0] - indexes[1]
                set_index(str(last),i)
        except KeyError:
            set_index(str(last),i)
    return last
def sol_c(input: list,times: int):
    """Memory game, O(times) version: keep only the last two turns per number.

    Returns the number spoken on turn ``times`` (a bare int -- this function
    is not @timer-decorated).
    """
    # indexer maps str(number) -> (previous turn or None, most recent turn).
    indexer = dict()
    last = input[len(input)-1]
    def set_index(n,d):
        """Shift the recency pair for number *n* and record turn *d*."""
        try:
            indexer[n] = (indexer[n][1],d)
        except KeyError:
            indexer[n] = (None,d)
    for i,val in enumerate(input):
        indexer[str(val)] = (None,i)
    for i in range(len(input),times):
        try:
            if indexer[str(last)][0] is None:
                # Previous number had been spoken only once -> speak 0.
                set_index(str(0),i)
                last = 0
            else:
                # Speak the gap between its last two occurrences.
                prev_turn, latest_turn = indexer[str(last)]
                last = latest_turn - prev_turn
                set_index(str(last),i)
        except KeyError:
            set_index(str(last),i)
    return last
# Puzzle input; renamed from `input`, which shadowed the builtin.
seed = [0, 13, 1, 8, 6, 15]
# sol_a and sol_c are NOT decorated with @timer, so wrap them at the call
# site: the original subscripted their bare int returns ([0]/[1]), which
# raised TypeError.  The third line was also mislabeled "Sol A".
print(f"Sol A: {timer(sol_a)(seed, 2020)[0]}ms")
print(f"Sol B: {sol_b(seed, 2020)[0]}ms")
print(f"Sol C: {timer(sol_c)(seed, 2020)[0]}ms")
print(f"2020 answer: {sol_c(seed, 2020)}")
print(f"30000000 answer: {sol_c(seed, 30000000)}")
| 26.7
| 71
| 0.485955
|
import functools
import time
def timer(function):
@functools.wraps(function)
def wrapper(*args, **kwargs) -> float:
time_list = []
ans = None
for i in range(0, 10):
start = time.perf_counter()
ans = function(*args, **kwargs)
end = time.perf_counter()
time_list.append(end - start)
i += 0
return [round((sum(time_list) / len(time_list)) * 1000, 2),ans]
return wrapper
def sol_a(input: list,num: int):
m = num
last =input[len(input)-1]
turns = dict()
for i, val in enumerate(input):
turns[str(i)]= val
def get_turn_diff(val) -> int:
rev = [val for val in val["data"].values()][::-1]
ans = []
for i,ansa in enumerate(rev):
if ansa == val["val"]:
ans.append(len(rev)-i)
if len(ans) == 2:
break
return ans[0] - ans[1]
for i in range(len(input),m):
prev = last
cc = [val for val in turns.values()].count(prev)
if 1 == cc:
turns[str(i)] = 0
last = 0
else:
dtp = {"val": prev, "data": turns}
out = get_turn_diff(dtp)
turns[str(i)] = out
last = out
return turns[str(m-1)]
@timer
def sol_b(input: list,times: int):
indexer = dict()
last = input[len(input)-1]
def set_index(n,d):
try:
indexer[n]["count"] += 1
indexer[n]["index"].append(d)
except KeyError:
indexer[n] = {"index": [d], "count": 1}
for i,val in enumerate(input):
try:
l = indexer[str(val)]
continue
except KeyError:
indexer[str(val)] = {"index": [i], "count": 1}
for i in range(len(input),times):
try:
if indexer[str(last)]["count"] == 1:
set_index(str(0),i)
last = 0
else:
indexes = indexer[str(last)]["index"][::-1]
last = indexes[0] - indexes[1]
set_index(str(last),i)
except KeyError:
set_index(str(last),i)
return last
def sol_c(input: list,times: int):
indexer = dict()
last = input[len(input)-1]
def set_index(n,d):
try:
indexer[n] = (indexer[n][1],d)
except KeyError:
indexer[n] = (None,d)
for i,val in enumerate(input):
indexer[str(val)] = (None,i)
for i in range(len(input),times):
try:
if indexer[str(last)][0] == None:
set_index(str(0),i)
last = 0
else:
indexes = indexer[str(last)]
last = indexes[1] - indexes[0]
set_index(str(last),i)
except KeyError:
set_index(str(last),i)
return last
input = [0, 13, 1, 8, 6, 15]
print(f"Sol A: {sol_a(input, 2020)[0]}ms")
print(f"Sol B: {sol_b(input, 2020)[0]}ms")
print(f"Sol A: {sol_c(input, 2020)[0]}ms")
print(f"2020 answer: {sol_c(input,2020)[1]}")
print(f"2020 answer: {sol_c(input,30000000)[1]}")
| true
| true
|
f708aaad810d4f1c0c9891020fc2450dbb9ec8db
| 1,849
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/imaging/scripts/plot.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/imaging/scripts/plot.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/imaging/scripts/plot.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple script to plot waveforms in one or more files.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
from argparse import ArgumentParser
from obspy import Stream, __version__, read
from obspy.core.util.base import ENTRY_POINTS
from obspy.core.util.misc import MatplotlibBackend
def main(argv=None):
    """Entry point for obspy-plot: parse options, read waveforms, plot them."""
    cli = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
    cli.add_argument('-V', '--version', action='version',
                     version='%(prog)s ' + __version__)
    cli.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
                     help='Waveform format.')
    cli.add_argument('-o', '--outfile',
                     help='Output filename.')
    cli.add_argument('-n', '--no-automerge', dest='automerge',
                     action='store_false',
                     help='Disable automatic merging of matching channels.')
    cli.add_argument('--full', dest='full', action='store_true',
                     help='Disable min/max-plot, i.e. always plot every '
                          'single sample (Stream.plot(..., method="full"), '
                          'for interactive zooming).')
    cli.add_argument('files', nargs='+',
                     help='Files to plot.')
    options = cli.parse_args(argv)

    if options.outfile is not None:
        # Rendering to a file: switch to the non-interactive Agg backend.
        MatplotlibBackend.switch_backend("AGG", sloppy=False)

    # Accumulate all requested files into a single stream.
    stream = Stream()
    for filename in options.files:
        stream += read(filename, format=options.format)

    plot_kwargs = {"outfile": options.outfile,
                   "automerge": options.automerge}
    if options.full:
        plot_kwargs['method'] = "full"
    stream.plot(**plot_kwargs)


if __name__ == "__main__":
    main()
| 36.254902
| 79
| 0.592212
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import *
from argparse import ArgumentParser
from obspy import Stream, __version__, read
from obspy.core.util.base import ENTRY_POINTS
from obspy.core.util.misc import MatplotlibBackend
def main(argv=None):
parser = ArgumentParser(prog='obspy-plot', description=__doc__.strip())
parser.add_argument('-V', '--version', action='version',
version='%(prog)s ' + __version__)
parser.add_argument('-f', '--format', choices=ENTRY_POINTS['waveform'],
help='Waveform format.')
parser.add_argument('-o', '--outfile',
help='Output filename.')
parser.add_argument('-n', '--no-automerge', dest='automerge',
action='store_false',
help='Disable automatic merging of matching channels.')
parser.add_argument('--full', dest='full', action='store_true',
help='Disable min/max-plot, i.e. always plot every '
'single sample (Stream.plot(..., method="full"), '
'for interactive zooming).')
parser.add_argument('files', nargs='+',
help='Files to plot.')
args = parser.parse_args(argv)
if args.outfile is not None:
MatplotlibBackend.switch_backend("AGG", sloppy=False)
st = Stream()
for f in args.files:
st += read(f, format=args.format)
kwargs = {"outfile": args.outfile,
"automerge": args.automerge}
if args.full:
kwargs['method'] = "full"
st.plot(**kwargs)
if __name__ == "__main__":
main()
| true
| true
|
f708aaf2d466e75e865e8659b2df3dd9d95762c0
| 7,025
|
py
|
Python
|
code/plotting/plot_evalrep.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | 1
|
2019-08-27T10:05:41.000Z
|
2019-08-27T10:05:41.000Z
|
code/plotting/plot_evalrep.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | null | null | null |
code/plotting/plot_evalrep.py
|
modichirag/21cm_cleaning
|
1615fea4e2d617bb6ef00770a49698901227daa8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Plots the power spectra and Fourier-space biases for the HI.
#
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from lab import mapbias as mapp
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
#
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
print(font)
#
import argparse
# Command-line configuration: scale factor, box/mesh size, and the
# geometry of the foreground wedge being cleaned.
parser = argparse.ArgumentParser()
#parser.add_argument('-m', '--model', help='model name to use')
parser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=128, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.01, type=float)
args = parser.parse_args()
figpath = './figs/'
bs, nc, aa = args.bs, args.nmesh, args.aa
# Redshift corresponding to scale factor aa.
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
# MPI rank; only rank 0 writes the output figure.
rank = pm.comm.rank
# Reconstruction output directory for this scale factor / wedge setup.
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, kmin, ang)
dpath += 'L%04d-N%04d/'%(bs, nc)
################
def make_rep_plot():
    """Make the cross-correlation r_cc(k) and transfer-function figure.

    Loads the reconstructed maps for six fit configurations (fiducial,
    two upsampled variants, and their RSD counterparts), evaluates each
    best-fit against the data, and plots r_cc on the left panel and
    sqrt(P_mod / P_hh) on the right.  Each configuration is best-effort:
    a missing or unreadable run is reported and skipped.

    Fixes vs. the original: `noise` is initialised so a missing redshift
    row cannot raise NameError; the six copy-pasted try-blocks are driven
    by a single configuration table; the upsampled observables default to
    None instead of being referenced while possibly undefined.
    """
    # Look up the thermal-noise level tabulated for this redshift.
    noises = np.loadtxt('/global/u1/c/chmodi/Programs/21cm/21cm_cleaning/data/summaryHI.txt').T
    noise = None
    for i in range(noises[0].size):
        if noises[0][i] == np.round(1/aa-1, 2): noise = noises[3][i]
    print(noise)

    datap = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap')
    dataprsd = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap')
    # Upsampled data may be absent; default to None so the corresponding
    # curves are simply skipped by the loop below.
    datapup = dataprsdup = None
    try:
        datapup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap_up')
        dataprsdup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap_up')
    except Exception as e: print(e)

    fig, ax = plt.subplots(1, 2, figsize=(9, 4))

    def makeplot(bfit, datapp, lss, lww, cc, lbl=None):
        # Left: cross-correlation coefficient; right: transfer function.
        rpfit = rp.evaluate1(bfit, datapp, field='mapp')[:-2]
        ax[0].plot(rpfit[0]['k'], rpfit[0]['power']/(rpfit[1]['power']*rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc, label=lbl)
        ax[1].plot(rpfit[0]['k'], (rpfit[1]['power']/rpfit[2]['power'])**0.5, ls=lss, lw=lww, color=cc)

    # (fit sub-directory, data observable, line style, color, label)
    runs = [
        ('ZA/opt_s999_h1massA_fourier/%d-0.00/'%(nc), datap, '-', 'C0', 'Fid'),
        ('ZA/opt_s999_h1massA_fourier/upsample1/%d-0.00/'%(2*nc), datapup, '-', 'C1', 'Up1'),
        ('ZA/opt_s999_h1massA_fourier/upsample2/%d-0.00/'%(2*nc), datapup, '-', 'C2', 'Up2'),
        ('ZA/opt_s999_h1massA_fourier_rsdpos/%d-0.00/'%(nc), dataprsd, '--', 'C0', 'rsd'),
        ('ZA/opt_s999_h1massA_fourier_rsdpos/upsample1/%d-0.00/'%(2*nc), dataprsdup, '--', 'C1', 'rsd up'),
        ('ZA/opt_s999_h1massA_fourier_rsdpos/upsample2/%d-0.00/'%(2*nc), dataprsdup, '--', 'C2', 'rsd up2'),
    ]
    for subdir, datapp, lss, cc, lbl in runs:
        try:
            basepath = dpath + subdir
            # Prefer the converged best-fit; otherwise fall back through the
            # most recent saved iterations (100, 80, ..., 0).
            bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
            for path in bpaths:
                if os.path.isdir(path): break
            print(path)
            bfit = mapp.Observable.load(path)
            makeplot(bfit, datapp, lss, 2, cc, lbl)
            print('%s done'%lbl)
        except Exception as e: print(e)

    ax[0].set_ylabel('$r_{cc}$', fontdict=font)
    ax[1].set_ylabel(r'$\sqrt{P_{\rm mod}/P_{hh}}$', fontdict=font)
    for axis in ax:
        axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
        axis.set_xscale('log')
        axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
        axis.legend(prop=fontmanage)
    # Apply the shared serif font to the tick labels on both panels.
    for axis in ax:
        axis.set_xscale('log')
        for tick in axis.xaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)
        for tick in axis.yaxis.get_major_ticks():
            tick.label.set_fontproperties(fontmanage)

    plt.tight_layout(rect=[0, 0, 1, 0.95])
    if rank == 0: plt.savefig(figpath + '/rep_L%04d_%04d.pdf'%(bs, aa*10000))
################
# Script entry point: build and save the r_cc / transfer-function figure.
if __name__=="__main__":
    make_rep_plot()
#
| 36.21134
| 132
| 0.611103
|
import numpy as np
import os, sys
import matplotlib.pyplot as plt
from pmesh.pm import ParticleMesh
from scipy.interpolate import InterpolatedUnivariateSpline as ius
from nbodykit.lab import BigFileMesh, BigFileCatalog, FFTPower
from nbodykit.cosmology import Planck15, EHPower, Cosmology
sys.path.append('../utils/')
sys.path.append('../recon/')
sys.path.append('../recon/cosmo4d/')
from lab import mapbias as mapp
from lab import report as rp
from lab import dg
from getbiasparams import getbias
import tools
from matplotlib import rc, rcParams, font_manager
rcParams['font.family'] = 'serif'
fsize = 12
fontmanage = font_manager.FontProperties(family='serif', style='normal',
size=fsize, weight='normal', stretch='normal')
font = {'family': fontmanage.get_family()[0],
'style': fontmanage.get_style(),
'weight': fontmanage.get_weight(),
'size': fontmanage.get_size(),
}
print(font)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--aa', help='scale factor', default=0.3333, type=float)
parser.add_argument('-l', '--bs', help='boxsize', default=256, type=float)
parser.add_argument('-n', '--nmesh', help='nmesh', default=128, type=int)
parser.add_argument('-t', '--angle', help='angle of the wedge', default=50, type=float)
parser.add_argument('-k', '--kmin', help='kmin of the wedge', default=0.01, type=float)
args = parser.parse_args()
figpath = './figs/'
bs, nc, aa = args.bs, args.nmesh, args.aa
zz = 1/aa- 1
kmin = args.kmin
ang = args.angle
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
dpath = '/global/cscratch1/sd/chmodi/m3127/21cm_cleaning/recon/fastpm_%0.4f/wedge_kmin%0.2f_ang%0.1f/'%(aa, kmin, ang)
dpath += 'L%04d-N%04d/'%(bs, nc)
).T
for i in range(noises[0].size):
if noises[0][i] == np.round(1/aa-1, 2): noise = noises[3][i]
print(noise)
datap = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap')
dataprsd = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap')
try:
datapup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier/datap_up')
dataprsdup = mapp.Observable.load(dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/datap_up')
except Exception as e: print(e)
fig, ax = plt.subplots(1, 2, figsize=(9, 4))
def makeplot(bfit, datapp, lss, lww, cc, lbl=None):
    """Plot cross-correlation and transfer function for one reconstruction.

    Evaluates power spectra of the fitted map against the data map via
    ``rp.evaluate1`` and draws r_cc on ``ax[0]`` and sqrt(P_mod/P_hh) on
    ``ax[1]`` of the module-level figure, using the given line style,
    width, color and optional legend label.
    """
    spectra = rp.evaluate1(bfit, datapp, field='mapp')[:-2]
    kk = spectra[0]['k']
    pxy = spectra[0]['power']
    pxx = spectra[1]['power']
    pyy = spectra[2]['power']
    # Cross-correlation coefficient r_cc = P_xy / sqrt(P_xx * P_yy).
    ax[0].plot(kk, pxy / (pxx * pyy)**0.5, ls=lss, lw=lww, color=cc, label=lbl)
    # Transfer function sqrt(P_xx / P_yy).
    ax[1].plot(kk, (pxx / pyy)**0.5, ls=lss, lw=lww, color=cc)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/%d-0.00/'%(nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
print(bpaths)
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datap
lss, lww, cc, lbl = '-', 2, 'C0', 'Fid'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample1/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datapup
lss, lww, cc, lbl = '-', 2, 'C1', 'Up1'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier/upsample2/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = datapup
lss, lww, cc, lbl = '-', 2, 'C2', 'Up2'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/%d-0.00/'%(nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsd
lss, lww, cc, lbl = '--', 2, 'C0', 'rsd'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample1/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsdup
lss, lww, cc, lbl = '--', 2, 'C1', 'rsd up'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
try:
basepath = dpath+'ZA/opt_s999_h1massA_fourier_rsdpos/upsample2/%d-0.00/'%(2*nc)
bpaths = [basepath+'/best-fit'] + [basepath + '/%04d/fit_p/'%i for i in range(100, -1, -20)]
for path in bpaths:
if os.path.isdir(path): break
print(path)
bfit = mapp.Observable.load(path)
datapp = dataprsdup
lss, lww, cc, lbl = '--', 2, 'C2', 'rsd up2'
makeplot(bfit, datapp, lss, lww, cc, lbl)
print('%s done'%lbl)
except Exception as e: print(e)
ax[0].set_ylabel('$r_{cc}$', fontdict=font)
ax[1].set_ylabel(r'$\sqrt{P_{\rm mod}/P_{hh}}$', fontdict=font)
for axis in ax:
axis.set_xlabel(r'$k\quad [h\,{\rm Mpc}^{-1}]$', fontdict=font)
axis.set_xscale('log')
axis.grid(which='both', lw=0.2, alpha=0.2, color='gray')
axis.legend(prop=fontmanage)
for axis in ax:
axis.set_xscale('log')
for tick in axis.xaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
for tick in axis.yaxis.get_major_ticks():
tick.label.set_fontproperties(fontmanage)
ght_layout(rect=[0, 0, 1, 0.95])
if rank == 0: plt.savefig(figpath + '/rep_L%04d_%04d.pdf'%(bs, aa*10000))
| true
| true
|
f708ab43517a33dfe1a59f1d6d385c2e637e41da
| 4,892
|
py
|
Python
|
designate/tests/test_backend/test_nsd4.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
designate/tests/test_backend/test_nsd4.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
designate/tests/test_backend/test_nsd4.py
|
kiall/designate-py3
|
2b135d64bb0ced77327a563e037b270d1e5ca308
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Artom Lifshitz <artom.lifshitz@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import ssl
import eventlet
import fixtures
from mock import MagicMock
from designate import exceptions
from designate import objects
from designate.tests.test_backend import BackendTestCase
from designate.tests import resources
from designate.backend import impl_nsd4
class NSD4ServerStub:
    """Minimal fake nsd-control server used to capture backend commands.

    Serves SSL on the first free port at or above 1025, records the single
    command line a client sends, and replies with ``response``.
    """
    # Last command line received from a client (None until the first request).
    recved_command = None
    # Reply sent back to the client; tests override this to simulate failures.
    response = 'ok'
    keyfile = os.path.join(resources.path, 'ssl', 'nsd_server.key')
    certfile = os.path.join(resources.path, 'ssl', 'nsd_server.pem')
    def handle(self, client_sock, client_addr):
        """Read one command line from the client and send back ``response``."""
        stream = client_sock.makefile()
        self.recved_command = stream.readline()
        stream.write(self.response)
        stream.flush()
    def start(self):
        """Start serving over SSL, probing ports upward from 1025 until bind succeeds."""
        self.port = 1025
        while True:
            try:
                eventlet.spawn_n(eventlet.serve,
                                 eventlet.wrap_ssl(
                                     eventlet.listen(('127.0.0.1', self.port)),
                                     keyfile=self.keyfile,
                                     certfile=self.certfile,
                                     server_side=True),
                                 self.handle)
                break
            except socket.error:
                # Port already in use -- try the next one.
                self.port = self.port + 1
    def stop(self):
        # NOTE(review): this instantiates eventlet.StopServe without raising
        # it, so it probably does not actually stop the server greenthread;
        # StopServe is meant to be raised inside the handler -- confirm.
        eventlet.StopServe()
class NSD4Fixture(fixtures.Fixture):
    """Fixture that runs an NSD4ServerStub for the duration of a test."""
    def setUp(self):
        super(NSD4Fixture, self).setUp()
        self.server = NSD4ServerStub()
        self.server.start()
        # Ensure the stub server is stopped when the test finishes.
        self.addCleanup(self.tearDown)
    def tearDown(self):
        """Stop the stub server (registered as a cleanup in setUp)."""
        self.server.stop()
# NOTE: We'll only test the specifics to the nsd4 backend here.
# Rest is handled via scenarios
class NSD4BackendTestCase(BackendTestCase):
    """Tests for the NSD4 backend, run against a stubbed nsd-control server."""
    def setUp(self):
        super(NSD4BackendTestCase, self).setUp()
        self.server_fixture = NSD4Fixture()
        self.useFixture(self.server_fixture)
        keyfile = os.path.join(resources.path, 'ssl', 'nsd_control.key')
        certfile = os.path.join(resources.path, 'ssl', 'nsd_control.pem')
        # Pool target that points the backend at the stub server's port.
        self.target = objects.PoolTarget.from_dict({
            'id': '4588652b-50e7-46b9-b688-a9bad40a873e',
            'type': 'nsd4',
            'masters': [{'host': '192.0.2.1', 'port': 53},
                        {'host': '192.0.2.2', 'port': 35}],
            'options': [
                {'key': 'keyfile', 'value': keyfile},
                {'key': 'certfile', 'value': certfile},
                {'key': 'pattern', 'value': 'test-pattern'},
                {'key': 'port', 'value': self.server_fixture.server.port}
            ],
        })
        self.backend = impl_nsd4.NSD4Backend(self.target)
    def test_create_domain(self):
        """create_domain sends an 'NSDCT1 addzone' command with the pattern."""
        context = self.get_context()
        domain = self.get_domain_fixture()
        self.backend.create_domain(context, domain)
        command = 'NSDCT1 addzone %s test-pattern\n' % domain['name']
        self.assertEqual(command, self.server_fixture.server.recved_command)
    def test_delete_domain(self):
        """delete_domain sends an 'NSDCT1 delzone' command."""
        context = self.get_context()
        domain = self.get_domain_fixture()
        self.backend.delete_domain(context, domain)
        command = 'NSDCT1 delzone %s\n' % domain['name']
        self.assertEqual(command, self.server_fixture.server.recved_command)
    def test_server_not_ok(self):
        """Any server reply other than 'ok' raises exceptions.Backend."""
        self.server_fixture.server.response = 'goat'
        context = self.get_context()
        domain = self.get_domain_fixture()
        self.assertRaises(exceptions.Backend,
                          self.backend.create_domain,
                          context, domain)
    def test_ssl_error(self):
        """SSL failures while sending a command surface as exceptions.Backend."""
        self.backend._command = MagicMock(side_effect=ssl.SSLError)
        context = self.get_context()
        domain = self.get_domain_fixture()
        self.assertRaises(exceptions.Backend,
                          self.backend.create_domain,
                          context, domain)
    def test_socket_error(self):
        """Socket failures while sending a command surface as exceptions.Backend."""
        self.backend._command = MagicMock(side_effect=socket.error)
        context = self.get_context()
        domain = self.get_domain_fixture()
        self.assertRaises(exceptions.Backend,
                          self.backend.create_domain,
                          context, domain)
| 35.194245
| 79
| 0.613042
|
import os
import socket
import ssl
import eventlet
import fixtures
from mock import MagicMock
from designate import exceptions
from designate import objects
from designate.tests.test_backend import BackendTestCase
from designate.tests import resources
from designate.backend import impl_nsd4
class NSD4ServerStub:
recved_command = None
response = 'ok'
keyfile = os.path.join(resources.path, 'ssl', 'nsd_server.key')
certfile = os.path.join(resources.path, 'ssl', 'nsd_server.pem')
def handle(self, client_sock, client_addr):
stream = client_sock.makefile()
self.recved_command = stream.readline()
stream.write(self.response)
stream.flush()
def start(self):
self.port = 1025
while True:
try:
eventlet.spawn_n(eventlet.serve,
eventlet.wrap_ssl(
eventlet.listen(('127.0.0.1', self.port)),
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True),
self.handle)
break
except socket.error:
self.port = self.port + 1
def stop(self):
eventlet.StopServe()
class NSD4Fixture(fixtures.Fixture):
def setUp(self):
super(NSD4Fixture, self).setUp()
self.server = NSD4ServerStub()
self.server.start()
self.addCleanup(self.tearDown)
def tearDown(self):
self.server.stop()
# Rest is handled via scenarios
class NSD4BackendTestCase(BackendTestCase):
def setUp(self):
super(NSD4BackendTestCase, self).setUp()
self.server_fixture = NSD4Fixture()
self.useFixture(self.server_fixture)
keyfile = os.path.join(resources.path, 'ssl', 'nsd_control.key')
certfile = os.path.join(resources.path, 'ssl', 'nsd_control.pem')
self.target = objects.PoolTarget.from_dict({
'id': '4588652b-50e7-46b9-b688-a9bad40a873e',
'type': 'nsd4',
'masters': [{'host': '192.0.2.1', 'port': 53},
{'host': '192.0.2.2', 'port': 35}],
'options': [
{'key': 'keyfile', 'value': keyfile},
{'key': 'certfile', 'value': certfile},
{'key': 'pattern', 'value': 'test-pattern'},
{'key': 'port', 'value': self.server_fixture.server.port}
],
})
self.backend = impl_nsd4.NSD4Backend(self.target)
def test_create_domain(self):
context = self.get_context()
domain = self.get_domain_fixture()
self.backend.create_domain(context, domain)
command = 'NSDCT1 addzone %s test-pattern\n' % domain['name']
self.assertEqual(command, self.server_fixture.server.recved_command)
def test_delete_domain(self):
context = self.get_context()
domain = self.get_domain_fixture()
self.backend.delete_domain(context, domain)
command = 'NSDCT1 delzone %s\n' % domain['name']
self.assertEqual(command, self.server_fixture.server.recved_command)
def test_server_not_ok(self):
self.server_fixture.server.response = 'goat'
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
def test_ssl_error(self):
self.backend._command = MagicMock(side_effect=ssl.SSLError)
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
def test_socket_error(self):
self.backend._command = MagicMock(side_effect=socket.error)
context = self.get_context()
domain = self.get_domain_fixture()
self.assertRaises(exceptions.Backend,
self.backend.create_domain,
context, domain)
| true
| true
|
f708ad4593250f5cec6499371c6a716dfeb0eb8b
| 1,603
|
py
|
Python
|
generic_link_tracking/migrations/0001_initial.py
|
jonatron/django_generic_links
|
71c720b47380a665973543ef69109d34015e5069
|
[
"MIT"
] | null | null | null |
generic_link_tracking/migrations/0001_initial.py
|
jonatron/django_generic_links
|
71c720b47380a665973543ef69109d34015e5069
|
[
"MIT"
] | null | null | null |
generic_link_tracking/migrations/0001_initial.py
|
jonatron/django_generic_links
|
71c720b47380a665973543ef69109d34015e5069
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial schema: tracked generic links and their click events."""
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        # A trackable link, optionally attached to any model instance via a
        # generic (content_type, object_id) foreign key.
        migrations.CreateModel(
            name='GenericLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('where', models.CharField(default=b'', max_length=200, blank=True)),
                ('url', models.URLField(max_length=255)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('show_in_admin', models.BooleanField(default=True)),
                ('rotate', models.CharField(max_length=100, blank=True)),
                ('object_id', models.PositiveIntegerField(null=True, blank=True)),
                ('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # One row per recorded click on a GenericLink.
        migrations.CreateModel(
            name='GenericLinkClick',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ip', models.GenericIPAddressField()),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('link', models.ForeignKey(to='generic_link_tracking.GenericLink')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 37.27907
| 114
| 0.561447
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GenericLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('where', models.CharField(default=b'', max_length=200, blank=True)),
('url', models.URLField(max_length=255)),
('created', models.DateTimeField(auto_now_add=True)),
('show_in_admin', models.BooleanField(default=True)),
('rotate', models.CharField(max_length=100, blank=True)),
('object_id', models.PositiveIntegerField(null=True, blank=True)),
('content_type', models.ForeignKey(blank=True, to='contenttypes.ContentType', null=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GenericLinkClick',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ip', models.GenericIPAddressField()),
('created', models.DateTimeField(auto_now_add=True)),
('link', models.ForeignKey(to='generic_link_tracking.GenericLink')),
],
options={
},
bases=(models.Model,),
),
]
| true
| true
|
f708ad495ce75529097465bc672b71eaac2e14bc
| 12,548
|
py
|
Python
|
custom_components/heartbeat/sensor.py
|
HausNet/hausmon-hass
|
3342fa4d01d3962ea6b07a143d64ffc61d07db05
|
[
"MIT"
] | null | null | null |
custom_components/heartbeat/sensor.py
|
HausNet/hausmon-hass
|
3342fa4d01d3962ea6b07a143d64ffc61d07db05
|
[
"MIT"
] | null | null | null |
custom_components/heartbeat/sensor.py
|
HausNet/hausmon-hass
|
3342fa4d01d3962ea6b07a143d64ffc61d07db05
|
[
"MIT"
] | null | null | null |
"""Support for monitoring the local system for anomalous events."""
from __future__ import annotations
import asyncio
import time
from dataclasses import dataclass
import datetime
import logging
from typing import Any, Dict, Optional, List
import pprint
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorEntity
)
from homeassistant.const import (
CONF_ICON,
CONF_SENSORS,
CONF_ID, CONF_NAME, EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import HomeAssistant, Event
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
# Per-sensor configuration keys.
CONF_RELATED_ENTITY_ID = "related_entity_id"
CONF_PULSE_MINUTES = "pulse_minutes"
# Icon used when a config entry does not specify one.  Home Assistant's
# cv.icon validator requires the "prefix:name" form, so "mdi.alarm"
# (dot-separated) would fail schema validation whenever the default is used.
DEFAULT_ICON = "mdi:alarm"
SCAN_INTERVAL_MINUTES = 1
# Dispatcher signal fired whenever any sensor's pulse state changes.
SIGNAL_HEARTBEAT_UPDATE = "heartbeat_update"
# TODO: Make id & name unique
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_SENSORS): vol.All(
            cv.ensure_list,
            [
                vol.Schema(
                    {
                        vol.Required(CONF_ID): cv.string,
                        vol.Required(CONF_NAME): cv.string,
                        vol.Required(CONF_RELATED_ENTITY_ID): cv.entity_id,
                        vol.Required(CONF_PULSE_MINUTES): cv.positive_int,
                        vol.Required(CONF_ICON, default=DEFAULT_ICON):
                            cv.icon
                    }
                )
            ]
        )
    }
)
@dataclass
class PulseState:
    """State tracked for a single missing-pulse sensor."""
    # True while the monitored entity's pulse is considered missing.
    pulse_missing: bool
    # Moment by which the next pulse must arrive to count as on time.
    receipt_deadline: Optional[datetime.datetime]
    # Expected interval between pulses, in minutes.
    pulse_minutes: int
    # Entity whose state changes count as pulses.
    related_entity_id: str
    # When this record last changed.
    update_time: Optional[datetime.datetime]
    # Most recent exception observed, if any.
    last_exception: Optional[BaseException]
    def set_next_deadline(self):
        """Advance the receipt deadline to now plus the pulse interval."""
        interval = datetime.timedelta(minutes=self.pulse_minutes)
        self.receipt_deadline = datetime.datetime.now() + interval
# noinspection PyUnusedLocal
# (discovery_info parameter)
async def async_setup_platform(
        hass: HomeAssistant,
        config: ConfigType,
        async_add_entities: AddEntitiesCallback,
        discovery_info: Optional[Any] = None
) -> None:
    """Create the configured missing-pulse sensors and start monitoring.

    Builds one PulseState record and one PulseMissingSensor entity per
    configured sensor, registers the entities with Home Assistant, then
    hands the registry to the update manager.
    """
    sensor_registry: Dict[str, PulseState] = {}
    entities: List[BinarySensorEntity] = []
    for conf in config[CONF_SENSORS]:
        sensor_id = conf[CONF_ID]
        state = PulseState(
            pulse_missing=False,
            receipt_deadline=None,
            pulse_minutes=conf[CONF_PULSE_MINUTES],
            related_entity_id=conf[CONF_RELATED_ENTITY_ID],
            update_time=None,
            last_exception=None
        )
        sensor_registry[sensor_id] = state
        _LOGGER.debug("Added sensor to registry: %s", sensor_id)
        entities.append(
            PulseMissingSensor(sensor_id, conf[CONF_NAME], conf[CONF_ICON], state)
        )
        _LOGGER.debug("Created entity for sensor: %s", sensor_id)
    async_add_entities(entities)
    await async_manage_sensor_registry_updates(
        hass,
        sensor_registry
    )
async def async_manage_sensor_registry_updates(
        hass: HomeAssistant,
        sensor_registry: Dict[str, PulseState]
) -> None:
    """Wire up pulse monitoring for every sensor in the registry.

    Installs a state-change listener that treats matching events as pulses,
    and schedules deadline timeouts that flag pulses as missing.  All shared
    state lives in this closure and is guarded by a single asyncio lock.
    """
    # Serializes access to sensor_registry and _timeout_scheduled.
    _pulse_data_lock = asyncio.Lock()
    # True while a _pulse_timeout callback is already scheduled.
    _timeout_scheduled = False
    def _handle_missing_pulse(sensor_id: str, pulse_state: PulseState) -> bool:
        """ Called when pulse goes missing. Returns true if the pulse went
        missing since the last time it was received -- i.e. it happened since
        the last time it was updated.
        """
        _LOGGER.debug(
            "Handling missing pulse: "
            "sensor=%s, related_entity_id=%s, current_state=%s",
            sensor_id,
            pulse_state.related_entity_id,
            pulse_state.pulse_missing
        )
        if pulse_state.pulse_missing:
            # Already flagged as missing -- nothing changed.
            return False
        pulse_state.pulse_missing = True
        entity_id = pulse_state.related_entity_id
        minutes = pulse_state.pulse_minutes
        persistent_notification.async_create(
            hass,
            f"No updates received from '{entity_id}' in {minutes} minutes. ",
            title=f"Pulse missing: {sensor_id}",
            notification_id=sensor_id + '.' + str(int(time.time()))
        )
        return True
    def _handle_pulse_event(sensor_id: str, pulse_state: PulseState) -> bool:
        """ Update a pulse's state when a pulse event is received. Returns
        True if the state goes from missing to present.
        """
        _LOGGER.debug(
            "Handling pulse event received: entity=%s; current_state=%s",
            pulse_state.related_entity_id,
            pulse_state.pulse_missing
        )
        state_changed = pulse_state.pulse_missing
        pulse_state.pulse_missing = False
        now = datetime.datetime.now()
        pulse_state.update_time = now
        pulse_state.last_exception = None
        pulse_state.set_next_deadline()
        entity_id = pulse_state.related_entity_id
        if state_changed:
            persistent_notification.async_create(
                hass,
                f"Missing pulse from '{entity_id}' resumed. ",
                title=f"Pulse resumed: {sensor_id}",
                # NOTE(review): unlike the "missing" notification this id has
                # no '.' separator before the timestamp -- presumably
                # unintentional; confirm before relying on the id format.
                notification_id=sensor_id + str(int(time.time()))
            )
        return state_changed
    async def _set_next_deadline():
        """If a timeout has not been scheduled, schedule one for the closest
        receipt_deadline in the future. Does not schedule a timeout if all the
        pulses have gone missing.
        Note that the callback timer's resolution is seconds, so 1 is added to
        the timeout to avoid timeout times of zero.
        """
        async with _pulse_data_lock:
            nonlocal _timeout_scheduled
            if _timeout_scheduled:
                return
            next_timeout: Optional[datetime.datetime] = None
            now = datetime.datetime.now()
            for sensor_id, pulse_state in sensor_registry.items():
                # Deadlines already in the past belong to missing pulses;
                # they get no new timer until their pulse resumes.
                if pulse_state.receipt_deadline < now:
                    continue
                if next_timeout is None:
                    next_timeout = pulse_state.receipt_deadline
                    continue
                if pulse_state.receipt_deadline < next_timeout:
                    next_timeout = pulse_state.receipt_deadline
            if next_timeout is None:
                _LOGGER.debug("No next timeout found")
                return
            _LOGGER.debug(
                "Setting next pulse timeout: scheduled=%s",
                next_timeout
            )
            _timeout_scheduled = True
            next_timeout_seconds = int((next_timeout - now).total_seconds()) + 1
            async_call_later(hass, next_timeout_seconds, _pulse_timeout)
    # noinspection PyUnusedLocal
    # timestamp ignored
    async def _pulse_timeout(timestamp: datetime.datetime) -> None:
        """Given the current time, examines each of the sensors, and, if its
        receipt_deadline is in the past, handles it as a missing pulse. Then,
        sets the next timeout.
        """
        _LOGGER.debug("Pulse timeout!")
        state_changed = False
        async with _pulse_data_lock:
            nonlocal _timeout_scheduled
            _timeout_scheduled = False
            now = datetime.datetime.now()
            for sensor_id, pulse_state in sensor_registry.items():
                _LOGGER.debug(
                    "State: sensor=%s; entity=%s, now=%s; deadline=%s",
                    sensor_id,
                    pulse_state.related_entity_id,
                    now,
                    pulse_state.receipt_deadline
                )
                if pulse_state.receipt_deadline < now:
                    state_changed |= _handle_missing_pulse(
                        sensor_id,
                        pulse_state
                    )
        # Dispatch and reschedule outside the lock -- _set_next_deadline
        # acquires the same lock itself.
        if state_changed:
            async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
        await _set_next_deadline()
    async def _event_to_pulse(event: Event):
        """Event listener, that, when the event's entity corresponds to one
        of the sensors' related entities, resets that sensor's timeout. Also
        calls _set_next_deadline() to handle the case where all the pulses
        have gone missing, and the pulse timeout has to be restarted.
        """
        _LOGGER.debug("Event listener triggered!")
        pp = pprint.PrettyPrinter()
        pp.pprint(event)
        state_changed: bool = False
        async with _pulse_data_lock:
            for sensor_id, sensor_data in sensor_registry.items():
                _LOGGER.debug(
                    "Matching event: related_entity_id=%s; event_entity_id=%s",
                    sensor_data.related_entity_id,
                    event.data['entity_id']
                )
                if sensor_data.related_entity_id == event.data['entity_id']:
                    state_changed |= _handle_pulse_event(sensor_id, sensor_data)
        _LOGGER.debug(
            "Pulse received: entity_id=%s; state_changed=%s",
            event.data['entity_id'],
            state_changed
        )
        if state_changed:
            async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
        await _set_next_deadline()
    # For event_time, passed in by HASS, but not used.
    # noinspection PyUnusedLocal
    async def _start_pulse_monitor(event_time: datetime.datetime):
        """Start monitoring pulses, and set up the first pulse deadline."""
        for sensor_id, pulse_state in sensor_registry.items():
            pulse_state.set_next_deadline()
        remove_listener = hass.bus.async_listen(
            EVENT_STATE_CHANGED,
            _event_to_pulse
        )
        # TODO: Remove
        _LOGGER.debug("Event listener installed!")
        pp = pprint.PrettyPrinter()
        pp.pprint(remove_listener)
        await _set_next_deadline()
    # Start working once HASS is up.
    hass.bus.async_listen(EVENT_HOMEASSISTANT_STARTED, _start_pulse_monitor)
class PulseMissingSensor(BinarySensorEntity):
    """Binary sensor that turns on when its monitored entity has produced
    no state change within the configured pulse interval.
    """
    def __init__(
            self,
            id_: str,
            name: str,
            icon: Optional[str],
            pulse_state: PulseState
    ) -> None:
        """Store identity and display details, plus a reference to the
        PulseState record that is maintained out of band by the registry
        manager.
        """
        self._unique_id: str = id_
        self._name: str = name
        self._icon: str = icon
        self._pulse_state: PulseState = pulse_state
    @property
    def unique_id(self) -> str:
        """Stable unique identifier for this entity."""
        return self._unique_id
    @property
    def name(self) -> str:
        """Display name of the sensor."""
        return self._name
    @property
    def icon(self) -> Optional[str]:
        """Icon shown in the frontend."""
        return self._icon
    @property
    def device_class(self) -> Optional[str]:
        """No specific binary-sensor device class applies."""
        return None
    @property
    def available(self) -> bool:
        """The sensor itself is always available."""
        return True
    @property
    def should_poll(self) -> bool:
        """Updates arrive via dispatcher signal, not polling."""
        return False
    @property
    def data(self) -> PulseState:
        """The underlying pulse-state record for this sensor."""
        return self._pulse_state
| 35.749288
| 80
| 0.626474
|
from __future__ import annotations
import asyncio
import time
from dataclasses import dataclass
import datetime
import logging
from typing import Any, Dict, Optional, List
import pprint
import voluptuous as vol
from homeassistant.components import persistent_notification
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorEntity
)
from homeassistant.const import (
CONF_ICON,
CONF_SENSORS,
CONF_ID, CONF_NAME, EVENT_STATE_CHANGED, EVENT_HOMEASSISTANT_STARTED,
)
from homeassistant.core import HomeAssistant, Event
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
CONF_RELATED_ENTITY_ID = "related_entity_id"
CONF_PULSE_MINUTES = "pulse_minutes"
DEFAULT_ICON = "mdi.alarm"
SCAN_INTERVAL_MINUTES = 1
SIGNAL_HEARTBEAT_UPDATE = "heartbeat_update"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_ID): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_RELATED_ENTITY_ID): cv.entity_id,
vol.Required(CONF_PULSE_MINUTES): cv.positive_int,
vol.Required(CONF_ICON, default=DEFAULT_ICON):
cv.icon
}
)
]
)
}
)
@dataclass
class PulseState:
pulse_missing: bool
receipt_deadline: Optional[datetime.datetime]
pulse_minutes: int
related_entity_id: str
update_time: Optional[datetime.datetime]
last_exception: Optional[BaseException]
def set_next_deadline(self):
self.receipt_deadline = datetime.datetime.now() + \
datetime.timedelta(minutes=self.pulse_minutes)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: Optional[Any] = None
) -> None:
entities: List[BinarySensorEntity] = []
sensor_registry: Dict[str, PulseState] = {}
for sensor_config in config[CONF_SENSORS]:
pulse_minutes = sensor_config[CONF_PULSE_MINUTES]
sensor_id = sensor_config[CONF_ID]
related_entity_id = sensor_config[CONF_RELATED_ENTITY_ID]
sensor_registry[sensor_id] = PulseState(
pulse_missing=False,
receipt_deadline=None,
pulse_minutes=pulse_minutes,
related_entity_id=related_entity_id,
update_time=None,
last_exception=None
)
_LOGGER.debug("Added sensor to registry: %s", sensor_id)
entities.append(PulseMissingSensor(
sensor_config[CONF_ID],
sensor_config[CONF_NAME],
sensor_config[CONF_ICON],
sensor_registry[sensor_id]
))
_LOGGER.debug("Created entity for sensor: %s", sensor_id)
async_add_entities(entities)
await async_manage_sensor_registry_updates(
hass,
sensor_registry
)
async def async_manage_sensor_registry_updates(
hass: HomeAssistant,
sensor_registry: Dict[str, PulseState]
) -> None:
_pulse_data_lock = asyncio.Lock()
_timeout_scheduled = False
def _handle_missing_pulse(sensor_id: str, pulse_state: PulseState) -> bool:
_LOGGER.debug(
"Handling missing pulse: "
"sensor=%s, related_entity_id=%s, current_state=%s",
sensor_id,
pulse_state.related_entity_id,
pulse_state.pulse_missing
)
if pulse_state.pulse_missing:
return False
pulse_state.pulse_missing = True
entity_id = pulse_state.related_entity_id
minutes = pulse_state.pulse_minutes
persistent_notification.async_create(
hass,
f"No updates received from '{entity_id}' in {minutes} minutes. ",
title=f"Pulse missing: {sensor_id}",
notification_id=sensor_id + '.' + str(int(time.time()))
)
return True
def _handle_pulse_event(sensor_id: str, pulse_state: PulseState) -> bool:
_LOGGER.debug(
"Handling pulse event received: entity=%s; current_state=%s",
pulse_state.related_entity_id,
pulse_state.pulse_missing
)
state_changed = pulse_state.pulse_missing
pulse_state.pulse_missing = False
now = datetime.datetime.now()
pulse_state.update_time = now
pulse_state.last_exception = None
pulse_state.set_next_deadline()
entity_id = pulse_state.related_entity_id
if state_changed:
persistent_notification.async_create(
hass,
f"Missing pulse from '{entity_id}' resumed. ",
title=f"Pulse resumed: {sensor_id}",
notification_id=sensor_id + str(int(time.time()))
)
return state_changed
async def _set_next_deadline():
async with _pulse_data_lock:
nonlocal _timeout_scheduled
if _timeout_scheduled:
return
next_timeout: Optional[datetime.datetime] = None
now = datetime.datetime.now()
for sensor_id, pulse_state in sensor_registry.items():
if pulse_state.receipt_deadline < now:
continue
if next_timeout is None:
next_timeout = pulse_state.receipt_deadline
continue
if pulse_state.receipt_deadline < next_timeout:
next_timeout = pulse_state.receipt_deadline
if next_timeout is None:
_LOGGER.debug("No next timeout found")
return
_LOGGER.debug(
"Setting next pulse timeout: scheduled=%s",
next_timeout
)
_timeout_scheduled = True
next_timeout_seconds = int((next_timeout - now).total_seconds()) + 1
async_call_later(hass, next_timeout_seconds, _pulse_timeout)
async def _pulse_timeout(timestamp: datetime.datetime) -> None:
_LOGGER.debug("Pulse timeout!")
state_changed = False
async with _pulse_data_lock:
nonlocal _timeout_scheduled
_timeout_scheduled = False
now = datetime.datetime.now()
for sensor_id, pulse_state in sensor_registry.items():
_LOGGER.debug(
"State: sensor=%s; entity=%s, now=%s; deadline=%s",
sensor_id,
pulse_state.related_entity_id,
now,
pulse_state.receipt_deadline
)
if pulse_state.receipt_deadline < now:
state_changed |= _handle_missing_pulse(
sensor_id,
pulse_state
)
if state_changed:
async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
await _set_next_deadline()
async def _event_to_pulse(event: Event):
_LOGGER.debug("Event listener triggered!")
pp = pprint.PrettyPrinter()
pp.pprint(event)
state_changed: bool = False
async with _pulse_data_lock:
for sensor_id, sensor_data in sensor_registry.items():
_LOGGER.debug(
"Matching event: related_entity_id=%s; event_entity_id=%s",
sensor_data.related_entity_id,
event.data['entity_id']
)
if sensor_data.related_entity_id == event.data['entity_id']:
state_changed |= _handle_pulse_event(sensor_id, sensor_data)
_LOGGER.debug(
"Pulse received: entity_id=%s; state_changed=%s",
event.data['entity_id'],
state_changed
)
if state_changed:
async_dispatcher_send(hass, SIGNAL_HEARTBEAT_UPDATE)
await _set_next_deadline()
async def _start_pulse_monitor(event_time: datetime.datetime):
for sensor_id, pulse_state in sensor_registry.items():
pulse_state.set_next_deadline()
remove_listener = hass.bus.async_listen(
EVENT_STATE_CHANGED,
_event_to_pulse
)
_LOGGER.debug("Event listener installed!")
pp = pprint.PrettyPrinter()
pp.pprint(remove_listener)
await _set_next_deadline()
hass.bus.async_listen(EVENT_HOMEASSISTANT_STARTED, _start_pulse_monitor)
class PulseMissingSensor(BinarySensorEntity):
    """Binary sensor reporting on a shared :class:`PulseState`.

    Thin read-only wrapper: all pulse bookkeeping happens elsewhere in
    the component; this entity only exposes identity, presentation
    attributes, and the backing state object.
    """

    def __init__(
            self,
            id_: str,
            name: str,
            icon: Optional[str],
            pulse_state: PulseState
    ) -> None:
        """Store identity and the shared pulse state.

        :param id_: unique id for the entity registry.
        :param name: display name.
        :param icon: optional icon override (None keeps the default).
        :param pulse_state: shared state object updated by the monitor.
        """
        self._name: str = name
        self._unique_id: str = id_
        self._pulse_state: PulseState = pulse_state
        # Fix: icon may legitimately be None — the old ``str`` annotation
        # contradicted both the parameter and the ``icon`` property.
        self._icon: Optional[str] = icon

    @property
    def name(self) -> str:
        """Entity display name."""
        return self._name

    @property
    def unique_id(self) -> str:
        """Stable identifier used by the entity registry."""
        return self._unique_id

    @property
    def device_class(self) -> Optional[str]:
        """No specific device class applies."""
        return None

    @property
    def icon(self) -> Optional[str]:
        """Optional icon override; None selects the default icon."""
        return self._icon

    @property
    def should_poll(self) -> bool:
        """Updates are pushed via the dispatcher; never poll."""
        return False

    @property
    def available(self) -> bool:
        """Always available: state is maintained in-process."""
        return True

    @property
    def data(self) -> PulseState:
        """The shared PulseState backing this sensor."""
        return self._pulse_state
| true
| true
|
f708ad710b2525cc93f4cc91eba0c88665a4cb0b
| 710
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
Prajwol-Chhetri/recipe-app-api
|
db09cd7dfe27c68253428ae8e36fe125399aba5b
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
Prajwol-Chhetri/recipe-app-api
|
db09cd7dfe27c68253428ae8e36fe125399aba5b
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
Prajwol-Chhetri/recipe-app-api
|
db09cd7dfe27c68253428ae8e36fe125399aba5b
|
[
"MIT"
] | null | null | null |
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django command to pause execution until the database is available."""

    def handle(self, *args, **options):
        """Block, retrying once per second, until the default DB connects."""
        self.stdout.write('Waiting for database...')
        db_conn = None
        while not db_conn:
            try:
                # Fix: ``connections['default']`` only returns a lazy
                # wrapper and never raises by itself.  Opening a cursor
                # forces a real connection attempt so OperationalError
                # can actually fire while the DB is still down.
                connections['default'].cursor()
                db_conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database Available!'))
| 30.869565
| 78
| 0.64507
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database Available!'))
| true
| true
|
f708adf43b497e49417c34ed43afcd34566bbb08
| 3,535
|
py
|
Python
|
matrix_factorization/mf_keras.py
|
ashwanikumar04/ml-recommendation-engine
|
57a7c0d5ac073b976e40c17d8892a4b7291d08ed
|
[
"MIT"
] | null | null | null |
matrix_factorization/mf_keras.py
|
ashwanikumar04/ml-recommendation-engine
|
57a7c0d5ac073b976e40c17d8892a4b7291d08ed
|
[
"MIT"
] | null | null | null |
matrix_factorization/mf_keras.py
|
ashwanikumar04/ml-recommendation-engine
|
57a7c0d5ac073b976e40c17d8892a4b7291d08ed
|
[
"MIT"
] | null | null | null |
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
# df = pd.read_csv("./data/processed_rating.csv")
# N = df["user_idx"].max() + 1
# M = df["isbn_idx"].max() + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["Book-Rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(
# x=[df_train["user_idx"].values, df_train["isbn_idx"].values],
# y=df_train["Book-Rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([df_test["user_idx"].values,
# df_test["isbn_idx"].values], df_test["Book-Rating"].values - mu))
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
# Ratings table loaded once at module level; predict() below reads the
# ``book_id`` column.  (Presumably also carries user_id/rating columns —
# the commented-out training code used them; confirm against the CSV.)
df = pd.read_csv("./data/archive/ratings.csv")
# N = len(set(df["user_id"].values)) + 1
# M = len(set(df["book_id"].values)) + 1
# df = shuffle(df)
# cut_off = int(0.8 * len(df))
# df_train = df.iloc[:cut_off]
# df_test = df.iloc[cut_off:]
# K = 15
# mu = df_train["rating"].mean()
# epochs = 15
# reg_penalty = 0.0
# u = Input(shape=(1, ))
# b = Input(shape=(1, ))
# u_embedding = Embedding(N, K, embeddings_regularizer=l2(reg_penalty))(u)
# b_embedding = Embedding(M, K, embeddings_regularizer=l2(reg_penalty))(b)
# u_bias = Embedding(N, 1, embeddings_regularizer=l2(reg_penalty))(u)
# b_bias = Embedding(M, 1, embeddings_regularizer=l2(reg_penalty))(b)
# x = Dot(axes=2)([u_embedding, b_embedding])
# x = Add()([x, u_bias, b_bias])
# x = Flatten()(x)
# model = Model(inputs=[u, b], outputs=x)
# model.compile(loss='mse', optimizer=Adam(lr=0.01), metrics=["mse"])
# r = model.fit(x=[df_train["user_id"].values, df_train["book_id"].values],
# y=df_train["rating"].values - mu,
# epochs=epochs,
# batch_size=128,
# validation_data=([
# df_test["user_id"].values, df_test["book_id"].values
# ], df_test["rating"].values - mu))
# model.save('regression_model.h5')
# plt.plot(r.history['loss'], label="train loss")
# plt.plot(r.history['val_loss'], label="test loss")
# plt.legend()
# plt.show()
def predict(user_id, top_k=5):
    """Print the ``top_k`` highest-scoring books for ``user_id``.

    Loads the trained regression model from ``regression_model.h5``,
    scores every distinct book id found in the global ratings frame
    ``df`` for the given user, and prints the top recommendations with
    their predicted scores.

    :param user_id: id of the user to recommend for.
    :param top_k: number of recommendations to print (was hard-coded 5;
        now a backward-compatible parameter).

    NOTE(review): the printed "book ids" are argsort positions into the
    de-duplicated ``book_data`` array, not the ids themselves — confirm
    whether ``book_data[recommended_book_ids]`` was intended.
    """
    model = keras.models.load_model('regression_model.h5')
    book_data = np.array(list(set(df.book_id)))
    # np.full replaces the per-element list comprehension.
    user = np.full(len(book_data), user_id)
    predictions = model.predict([user, book_data])
    # Model output has shape (N, 1); flatten in one vectorized step
    # instead of a Python loop over rows.
    predictions = np.asarray(predictions).reshape(-1)
    recommended_book_ids = (-predictions).argsort()[:top_k]
    print(recommended_book_ids)
    print(predictions[recommended_book_ids])


if __name__ == '__main__':
    # Guarded so importing this module no longer triggers a model load
    # and a prediction as an import-time side effect.
    predict(1)
| 28.508065
| 89
| 0.655728
|
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Embedding, Dot, Add, Flatten
from tensorflow.keras.regularizers import l2
from tensorflow.keras.optimizers import Adam
df = pd.read_csv("./data/archive/ratings.csv")
def predict(user_id):
model = keras.models.load_model('regression_model.h5')
book_data = np.array(list(set(df.book_id)))
user = np.array([user_id for i in range(len(book_data))])
predictions = model.predict([user, book_data])
predictions = np.array([a[0] for a in predictions])
recommended_book_ids = (-predictions).argsort()[:5]
print(recommended_book_ids)
print(predictions[recommended_book_ids])
predict(1)
| true
| true
|
f708ae2427e8e0e2382e2ff741c4fafc8c35c6ef
| 853
|
py
|
Python
|
Config.py
|
dreasine/Lyrics_generator
|
c742f632b42d8cc0dfa87da2b32f3e4993b0b971
|
[
"MIT"
] | null | null | null |
Config.py
|
dreasine/Lyrics_generator
|
c742f632b42d8cc0dfa87da2b32f3e4993b0b971
|
[
"MIT"
] | null | null | null |
Config.py
|
dreasine/Lyrics_generator
|
c742f632b42d8cc0dfa87da2b32f3e4993b0b971
|
[
"MIT"
] | null | null | null |
#coding:utf-8
class Config(object):
    """Hyper-parameters for training and sampling the lyrics RNN."""
    init_scale = 0.04  # uniform range for initial weight values
    learning_rate = 0.001
    max_grad_norm = 15  # clip gradients to this global norm
    num_layers = 3
    num_steps = 25  # number of steps to unroll the RNN for
    hidden_size = 1000  # size of hidden layer of neurons
    iteration = 40  # total training iterations
    save_freq = 5  # save the model to disk every N iterations
    keep_prob = 0.5  # dropout keep probability during training
    batch_size = 32
    model_path = './model/Model'  # path of the model to save or load
    # parameters for generation
    save_time = 40  # load the model saved at iteration `save_time`
    is_sample = True  # True: sample from the distribution; False: argmax
    is_beams = True  # whether or not to use beam search
    beam_size = 4  # size of beam search
    len_of_generation = 10  # number of characters to generate
    start_sentence = u'如果'  # seed text ("if") used to prime generation
| 38.772727
| 107
| 0.701055
|
class Config(object):
init_scale = 0.04
learning_rate = 0.001
max_grad_norm = 15
num_layers = 3
num_steps = 25
hidden_size = 1000
iteration = 40
save_freq = 5
keep_prob = 0.5
batch_size = 32
model_path = './model/Model'
save_time = 40
is_sample = True
is_beams = True
beam_size = 4
len_of_generation = 10
start_sentence = u'如果'
| true
| true
|
f708ae898023a88a8c10dbea4d3b4a59b626e0f7
| 5,366
|
py
|
Python
|
TF/TARNN/test_tarnn.py
|
RandolphVI/Question-Difficulty-Prediction
|
77b4b83b5bc747c5074926d7a37545a5d46ed343
|
[
"Apache-2.0"
] | 29
|
2019-03-13T07:31:07.000Z
|
2022-03-21T02:09:32.000Z
|
TF/TARNN/test_tarnn.py
|
RandolphVI/Question-Difficulty-Prediction
|
77b4b83b5bc747c5074926d7a37545a5d46ed343
|
[
"Apache-2.0"
] | 2
|
2020-12-30T02:17:00.000Z
|
2021-04-20T08:59:03.000Z
|
TF/TARNN/test_tarnn.py
|
RandolphVI/Question-Difficulty-Prediction
|
77b4b83b5bc747c5074926d7a37545a5d46ed343
|
[
"Apache-2.0"
] | 11
|
2019-07-21T07:45:11.000Z
|
2022-01-28T09:28:42.000Z
|
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import mean_squared_error, r2_score
# Command-line hyper-parameters shared by the whole test run.
args = parser.parameter_parser()
# Run identifier (obtained from data_helpers); selects the run directory.
MODEL = dh.get_model_name()
# Timestamped log file so repeated test runs do not overwrite each other.
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'  # latest checkpoints
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'  # best-metric checkpoints
SAVE_DIR = 'output/' + MODEL  # where prediction results are written
def test_tarnn():
    """Test TARNN model.

    Restores a trained checkpoint (best or latest, chosen interactively),
    exports a frozen .pb graph, evaluates the test set in batches, and
    logs Loss / PCC / DOA / RMSE / R2 before writing predictions.json.
    """
    # Print parameters used for the model
    dh.tab_printer(args, logger)
    # Load data
    logger.info("Loading data...")
    logger.info("Data processing...")
    test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False)
    logger.info("Data padding...")
    x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len)
    # Load tarnn model: option 'B' restores the best checkpoint by metric,
    # anything else restores the most recent one.
    OPTION = dh.option(pattern=1)
    if OPTION == 'B':
        logger.info("Loading best model...")
        checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
    else:
        logger.info("Loading latest model...")
        checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
    logger.info(checkpoint_file)
    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=args.allow_soft_placement,
            log_device_placement=args.log_device_placement)
        session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)
            # Get the placeholders from the graph by name (names must match
            # those used when the training graph was built).
            input_x_content = graph.get_operation_by_name("input_x_content").outputs[0]
            input_x_question = graph.get_operation_by_name("input_x_question").outputs[0]
            input_x_option = graph.get_operation_by_name("input_x_option").outputs[0]
            input_y = graph.get_operation_by_name("input_y").outputs[0]
            dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
            is_training = graph.get_operation_by_name("is_training").outputs[0]
            # Tensors we want to evaluate
            scores = graph.get_operation_by_name("output/scores").outputs[0]
            loss = graph.get_operation_by_name("loss/loss").outputs[0]
            # Split the output nodes name by '|' if you have several output nodes
            output_node_names = "output/scores"
            # Save the .pb model file (frozen graph with variables folded in)
            output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
                                                                            output_node_names.split("|"))
            tf.train.write_graph(output_graph_def, "graph", "graph-tarnn-{0}.pb".format(MODEL), as_text=False)
            # Generate batches for one epoch; shuffle=False keeps prediction
            # order aligned with test_data.id for the output file below.
            batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)),
                                    args.batch_size, 1, shuffle=False)
            test_counter, test_loss = 0, 0.0
            # Collect the predictions here
            true_labels = []
            predicted_scores = []
            for batch_test in batches:
                x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test)
                # Inference mode: no dropout, is_training False.
                feed_dict = {
                    input_x_content: x_batch_content,
                    input_x_question: x_batch_question,
                    input_x_option: x_batch_option,
                    input_y: y_batch,
                    dropout_keep_prob: 1.0,
                    is_training: False
                }
                batch_scores, cur_loss = sess.run([scores, loss], feed_dict)
                # Prepare for calculating metrics
                for i in y_batch:
                    true_labels.append(i)
                for j in batch_scores:
                    predicted_scores.append(j)
                test_loss = test_loss + cur_loss
                test_counter = test_counter + 1
            # Calculate PCC & DOA
            pcc, doa = dh.evaluation(true_labels, predicted_scores)
            # Calculate RMSE
            rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5
            r2 = r2_score(true_labels, predicted_scores)
            # Average the per-batch losses over the number of batches.
            test_loss = float(test_loss / test_counter)
            logger.info("All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}"
                        .format(test_loss, pcc, doa, rmse, r2))
            # Save the prediction result
            if not os.path.exists(SAVE_DIR):
                os.makedirs(SAVE_DIR)
            dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", all_id=test_data.id,
                                      all_labels=true_labels, all_predict_scores=predicted_scores)
    logger.info("All Done.")


if __name__ == '__main__':
    test_tarnn()
| 39.748148
| 110
| 0.623742
|
__author__ = 'Randolph'
import os
import sys
import time
import logging
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import mean_squared_error, r2_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def test_tarnn():
dh.tab_printer(args, logger)
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args.test_file, args.word2vec_file, data_aug_flag=False)
logger.info("Data padding...")
x_test_content, x_test_question, x_test_option, y_test = dh.pad_data(test_data, args.pad_seq_len)
OPTION = dh.option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
input_x_content = graph.get_operation_by_name("input_x_content").outputs[0]
input_x_question = graph.get_operation_by_name("input_x_question").outputs[0]
input_x_option = graph.get_operation_by_name("input_x_option").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
scores = graph.get_operation_by_name("output/scores").outputs[0]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
output_node_names = "output/scores"
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-tarnn-{0}.pb".format(MODEL), as_text=False)
batches = dh.batch_iter(list(zip(x_test_content, x_test_question, x_test_option, y_test)),
args.batch_size, 1, shuffle=False)
test_counter, test_loss = 0, 0.0
true_labels = []
predicted_scores = []
for batch_test in batches:
x_batch_content, x_batch_question, x_batch_option, y_batch = zip(*batch_test)
feed_dict = {
input_x_content: x_batch_content,
input_x_question: x_batch_question,
input_x_option: x_batch_option,
input_y: y_batch,
dropout_keep_prob: 1.0,
is_training: False
}
batch_scores, cur_loss = sess.run([scores, loss], feed_dict)
for i in y_batch:
true_labels.append(i)
for j in batch_scores:
predicted_scores.append(j)
test_loss = test_loss + cur_loss
test_counter = test_counter + 1
pcc, doa = dh.evaluation(true_labels, predicted_scores)
rmse = mean_squared_error(true_labels, predicted_scores) ** 0.5
r2 = r2_score(true_labels, predicted_scores)
test_loss = float(test_loss / test_counter)
logger.info("All Test Dataset: Loss {0:g} | PCC {1:g} | DOA {2:g} | RMSE {3:g} | R2 {4:g}"
.format(test_loss, pcc, doa, rmse, r2))
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", all_id=test_data.id,
all_labels=true_labels, all_predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_tarnn()
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.