hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7143b7edbae191e6924ad8021d9f11a2e53d982 | 2,262 | py | Python | src/oscar/apps/dashboard/app.py | abirafdirp/revania | 70272b842316e8df57b0bc8a0dc669c3af4ec8f9 | [
"BSD-3-Clause"
] | 2 | 2015-12-11T00:19:15.000Z | 2021-11-14T19:44:42.000Z | src/oscar/apps/dashboard/app.py | abirafdirp/revania | 70272b842316e8df57b0bc8a0dc669c3af4ec8f9 | [
"BSD-3-Clause"
] | null | null | null | src/oscar/apps/dashboard/app.py | abirafdirp/revania | 70272b842316e8df57b0bc8a0dc669c3af4ec8f9 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url, include
from oscar.core.application import Application
from oscar.core.loading import get_class
class DashboardApplication(Application):
    """Root dashboard application.

    Wires the dashboard index view plus every dashboard sub-application
    into a single URL namespace named ``dashboard``.
    """

    name = 'dashboard'
    # Access to the index requires staff status or the partner dashboard
    # permission; sub-apps enforce their own permissions.
    permissions_map = {
        'index': (['is_staff'], ['partner.dashboard_access']),
    }

    index_view = get_class('dashboard.views', 'IndexView')
    reports_app = get_class('dashboard.reports.app', 'application')
    orders_app = get_class('dashboard.orders.app', 'application')
    users_app = get_class('dashboard.users.app', 'application')
    catalogue_app = get_class('dashboard.catalogue.app', 'application')
    promotions_app = get_class('dashboard.promotions.app', 'application')
    pages_app = get_class('dashboard.pages.app', 'application')
    partners_app = get_class('dashboard.partners.app', 'application')
    offers_app = get_class('dashboard.offers.app', 'application')
    ranges_app = get_class('dashboard.ranges.app', 'application')
    reviews_app = get_class('dashboard.reviews.app', 'application')
    vouchers_app = get_class('dashboard.vouchers.app', 'application')
    comms_app = get_class('dashboard.communications.app', 'application')
    shipping_app = get_class('dashboard.shipping.app', 'application')

    def get_urls(self):
        """Return the post-processed URL patterns for the whole dashboard."""
        # (url prefix, sub-application) pairs, in the original mount order.
        mounts = [
            (r'^catalogue/', self.catalogue_app),
            (r'^reports/', self.reports_app),
            (r'^orders/', self.orders_app),
            (r'^users/', self.users_app),
            (r'^content-blocks/', self.promotions_app),
            (r'^pages/', self.pages_app),
            (r'^partners/', self.partners_app),
            (r'^offers/', self.offers_app),
            (r'^ranges/', self.ranges_app),
            (r'^reviews/', self.reviews_app),
            (r'^vouchers/', self.vouchers_app),
            (r'^comms/', self.comms_app),
            (r'^shipping/', self.shipping_app),
        ]
        urls = [url(r'^$', self.index_view.as_view(), name='index')]
        urls.extend(url(prefix, include(app.urls)) for prefix, app in mounts)
        return self.post_process_urls(urls)


application = DashboardApplication()
| 46.163265 | 73 | 0.665782 | from django.conf.urls import url, include
from oscar.core.application import Application
from oscar.core.loading import get_class
class DashboardApplication(Application):
name = 'dashboard'
permissions_map = {
'index': (['is_staff'], ['partner.dashboard_access']),
}
index_view = get_class('dashboard.views', 'IndexView')
reports_app = get_class('dashboard.reports.app', 'application')
orders_app = get_class('dashboard.orders.app', 'application')
users_app = get_class('dashboard.users.app', 'application')
catalogue_app = get_class('dashboard.catalogue.app', 'application')
promotions_app = get_class('dashboard.promotions.app', 'application')
pages_app = get_class('dashboard.pages.app', 'application')
partners_app = get_class('dashboard.partners.app', 'application')
offers_app = get_class('dashboard.offers.app', 'application')
ranges_app = get_class('dashboard.ranges.app', 'application')
reviews_app = get_class('dashboard.reviews.app', 'application')
vouchers_app = get_class('dashboard.vouchers.app', 'application')
comms_app = get_class('dashboard.communications.app', 'application')
shipping_app = get_class('dashboard.shipping.app', 'application')
def get_urls(self):
urls = [
url(r'^$', self.index_view.as_view(), name='index'),
url(r'^catalogue/', include(self.catalogue_app.urls)),
url(r'^reports/', include(self.reports_app.urls)),
url(r'^orders/', include(self.orders_app.urls)),
url(r'^users/', include(self.users_app.urls)),
url(r'^content-blocks/', include(self.promotions_app.urls)),
url(r'^pages/', include(self.pages_app.urls)),
url(r'^partners/', include(self.partners_app.urls)),
url(r'^offers/', include(self.offers_app.urls)),
url(r'^ranges/', include(self.ranges_app.urls)),
url(r'^reviews/', include(self.reviews_app.urls)),
url(r'^vouchers/', include(self.vouchers_app.urls)),
url(r'^comms/', include(self.comms_app.urls)),
url(r'^shipping/', include(self.shipping_app.urls)),
]
return self.post_process_urls(urls)
application = DashboardApplication()
| true | true |
f7143dd95c98480c7753abe771970d9fae229904 | 20,780 | py | Python | pilosa/orm.py | philoprove/python-pilosa | c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78 | [
"BSD-3-Clause"
] | null | null | null | pilosa/orm.py | philoprove/python-pilosa | c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78 | [
"BSD-3-Clause"
] | null | null | null | pilosa/orm.py | philoprove/python-pilosa | c0edc8d0fe1687b9afd61c8bc4dd236b3c73fb78 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 Pilosa Corp.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#
import json
from .exceptions import PilosaError
from .validator import validate_index_name, validate_frame_name, validate_label
# Names exported when the module is star-imported.
__all__ = ("TimeQuantum", "CacheType", "Schema", "Index", "PQLQuery", "PQLBatchQuery")

# Timestamp layout used by Range queries and SetBit timestamps.
_TIME_FORMAT = "%Y-%m-%dT%H:%M"
class TimeQuantum:
    """Valid time quantum values for frames having support for that.

    Instances are small value objects wrapping the wire-format string
    (e.g. ``"YMD"``); one shared instance per valid quantum is assigned
    right below the class definition.

    * See: `Data Model <https://www.pilosa.com/docs/data-model/>`_
    """

    # Shared instances, populated immediately after the class definition.
    NONE = None
    YEAR = None
    MONTH = None
    DAY = None
    HOUR = None
    YEAR_MONTH = None
    MONTH_DAY = None
    DAY_HOUR = None
    YEAR_MONTH_DAY = None
    MONTH_DAY_HOUR = None
    YEAR_MONTH_DAY_HOUR = None

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value

    def __repr__(self):
        return "TimeQuantum(%r)" % self.value

    def __eq__(self, other):
        if isinstance(other, TimeQuantum):
            return self.value == other.value
        return False

    def __ne__(self, other):
        # Explicit for Python 2, which does not derive __ne__ from __eq__.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ alone leaves instances unhashable on Python 3;
        # hash by value so equal quanta hash alike.
        return hash(self.value)
# Pre-built, shared TimeQuantum instances -- one per quantum string the
# Pilosa server accepts.  Assigned here (not in the class body) because
# they need the class itself to exist.
TimeQuantum.NONE = TimeQuantum("")
TimeQuantum.YEAR = TimeQuantum("Y")
TimeQuantum.MONTH = TimeQuantum("M")
TimeQuantum.DAY = TimeQuantum("D")
TimeQuantum.HOUR = TimeQuantum("H")
TimeQuantum.YEAR_MONTH = TimeQuantum("YM")
TimeQuantum.MONTH_DAY = TimeQuantum("MD")
TimeQuantum.DAY_HOUR = TimeQuantum("DH")
TimeQuantum.YEAR_MONTH_DAY = TimeQuantum("YMD")
TimeQuantum.MONTH_DAY_HOUR = TimeQuantum("MDH")
TimeQuantum.YEAR_MONTH_DAY_HOUR = TimeQuantum("YMDH")
class CacheType:
    """Valid cache type values for frames.

    Instances wrap the wire-format string; shared instances are assigned
    right below the class definition.
    """

    # Shared instances, populated immediately after the class definition.
    DEFAULT = None
    LRU = None
    RANKED = None

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return self.value

    def __repr__(self):
        return "CacheType(%r)" % self.value

    def __eq__(self, other):
        if isinstance(other, CacheType):
            return self.value == other.value
        return False

    def __ne__(self, other):
        # Explicit for Python 2, which does not derive __ne__ from __eq__.
        return not self.__eq__(other)

    def __hash__(self):
        # Keep instances hashable on Python 3, consistent with __eq__.
        return hash(self.value)
# Pre-built, shared CacheType instances matching the server's cache types.
CacheType.DEFAULT = CacheType("")
CacheType.LRU = CacheType("lru")
CacheType.RANKED = CacheType("ranked")
class Schema:
    """Schema is a container for index objects, keyed by index name."""

    def __init__(self):
        self._indexes = {}

    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        return self._indexes == other._indexes

    def __ne__(self, other):
        return not self.__eq__(other)

    def index(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
        """Returns an index object with the given name and options.

        If the index didn't exist in the schema, it is added to the schema.
        Note that when the index already exists, the cached object is
        returned and the remaining arguments are ignored.

        :param str name: index name
        :param str column_label: a valid column label
        :param pilosa.TimeQuantum time_quantum: Sets the time quantum
        :return: Index object

        * See `Data Model <https://www.pilosa.com/docs/data-model/>`_
        * See `Query Language <https://www.pilosa.com/docs/query-language/>`_
        """
        index = self._indexes.get(name)
        if index is None:
            index = Index(name, column_label, time_quantum)
            self._indexes[name] = index
        return index

    def _diff(self, other):
        """Return a Schema holding the indexes and frames present in this
        schema but missing from *other* (used to create missing objects
        on the server)."""
        result = Schema()
        for index_name, index in self._indexes.items():
            if index_name not in other._indexes:
                # if the index doesn't exist in the other schema, simply copy it
                result._indexes[index_name] = index.copy()
            else:
                # the index exists in the other schema; check the frames
                # BUGFIX: compare against the *other* schema's frames --
                # the previous code checked the freshly-emptied copy, so
                # every frame was always reported as missing.
                other_frames = other._indexes[index_name]._frames
                result_index = index.copy(frames=False)
                for frame_name, frame in index._frames.items():
                    # the frame doesn't exist in the other schema, copy it
                    if frame_name not in other_frames:
                        result_index._frames[frame_name] = frame.copy()
                # check whether we modified result index
                if len(result_index._frames) > 0:
                    result._indexes[index_name] = result_index
        return result
class Index:
    """The purpose of the Index is to represent a data namespace.

    You cannot perform cross-index queries. Column-level attributes are global to the Index.

    :param str name: index name
    :param str column_label: a valid column label
    :param pilosa.TimeQuantum time_quantum: Sets the time quantum

    * See `Data Model <https://www.pilosa.com/docs/data-model/>`_
    * See `Query Language <https://www.pilosa.com/docs/query-language/>`_
    """

    def __init__(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
        # Fail fast on invalid names/labels before any server round trip.
        validate_index_name(name)
        validate_label(column_label)
        self.name = name
        self.column_label = column_label
        self.time_quantum = time_quantum
        # Frames created via self.frame(), keyed by frame name.
        self._frames = {}

    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        # Equal when both the index metadata and the frame registry match.
        return self._meta_eq(other) and \
            self._frames == other._frames

    def __ne__(self, other):
        return not self.__eq__(other)

    def _meta_eq(self, other):
        # Metadata-only comparison; deliberately ignores self._frames so
        # Frame.__eq__ can call it without recursing back into the frames.
        return self.name == other.name and \
            self.column_label == other.column_label and \
            self.time_quantum == other.time_quantum

    def copy(self, frames=True):
        """Return a copy of this index; copies of its frames are included
        when *frames* is True."""
        index = Index(self.name, column_label=self.column_label, time_quantum=self.time_quantum)
        if frames:
            index._frames = dict((name, frame.copy()) for name, frame in self._frames.items())
        return index

    def frame(self, name, row_label="rowID", time_quantum=TimeQuantum.NONE,
              inverse_enabled=False, cache_type=CacheType.DEFAULT, cache_size=0):
        """Creates a frame object with the specified name and defaults.

        If a frame with this name already exists, the cached frame is
        returned and the remaining arguments are ignored.

        :param str name: frame name
        :param str row_label: a valid row label
        :param pilosa.TimeQuantum time_quantum: Sets the time quantum for the frame. If a Frame has a time quantum, then Views are generated for each of the defined time segments.
        :param bool inverse_enabled: whether the frame keeps an inverse view
        :param pilosa.CacheType cache_type: ``CacheType.DEFAULT``, ``CacheType.LRU`` or ``CacheType.RANKED``
        :param int cache_size: Values greater than 0 sets the cache size. Otherwise uses the default cache size
        :return: Pilosa frame
        :rtype: pilosa.Frame
        """
        frame = self._frames.get(name)
        if frame is None:
            frame = Frame(self, name, row_label, time_quantum,
                          inverse_enabled, cache_type, cache_size)
            self._frames[name] = frame
        return frame

    def raw_query(self, query):
        """Creates a raw query.

        Note that the query is not validated before sending to the server.

        :param str query:
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(query, self)

    def batch_query(self, *queries):
        """Creates a batch query.

        :param pilosa.PQLQuery queries: the queries in the batch
        :return: Pilosa batch query
        :rtype: pilosa.PQLBatchQuery
        """
        q = PQLBatchQuery(self)
        q.add(*queries)
        return q

    def union(self, *bitmaps):
        """Creates a ``Union`` query.

        ``Union`` performs a logical OR on the results of each BITMAP_CALL query passed to it.

        :param pilosa.PQLBitmapQuery bitmaps: 0 or more bitmap queries to union
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return self._bitmap_op("Union", bitmaps)

    def intersect(self, *bitmaps):
        """Creates an ``Intersect`` query.

        ``Intersect`` performs a logical AND on the results of each BITMAP_CALL query passed to it.

        :param pilosa.PQLBitmapQuery bitmaps: 1 or more bitmap queries to intersect
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        :raise PilosaError: if the number of bitmaps is less than 1
        """
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Intersect", bitmaps)

    def difference(self, *bitmaps):
        """Creates a ``Difference`` query.

        ``Difference`` returns all of the bits from the first BITMAP_CALL argument passed to it,
        without the bits from each subsequent BITMAP_CALL.

        :param pilosa.PQLBitmapQuery bitmaps: 1 or more bitmap queries to differentiate
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        :raise PilosaError: if the number of bitmaps is less than 1
        """
        if len(bitmaps) < 1:
            raise PilosaError("Number of bitmap queries should be greater or equal to 1")
        return self._bitmap_op("Difference", bitmaps)

    def count(self, bitmap):
        """Creates a Count query.

        ``Count`` returns the number of set bits in the BITMAP_CALL passed in.

        :param pilosa.PQLQuery bitmap: the bitmap query
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(u"Count(%s)" % bitmap.serialize(), self)

    def set_column_attrs(self, column_id, attrs):
        """Creates a SetColumnAttrs query.

        ``SetColumnAttrs`` associates arbitrary key/value pairs with a column in an index.

        Following object types are accepted:

        * int
        * str
        * bool
        * float

        :param int column_id:
        :param dict attrs: column attributes
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetColumnAttrs(%s=%d, %s)" %
                        (self.column_label, column_id, attrs_str), self)

    def _bitmap_op(self, name, bitmaps):
        # Serialize each operand and wrap the list in NAME(a, b, ...).
        return PQLQuery(u"%s(%s)" % (name, u", ".join(b.serialize() for b in bitmaps)), self)
class Frame:
    """Frames are used to segment and define different functional characteristics within your entire index.

    You can think of a Frame as a table-like data partition within your Index.
    Row-level attributes are namespaced at the Frame level.

    Do not create a Frame object directly. Instead, use ``pilosa.Index.frame`` method.

    * See `Data Model <https://www.pilosa.com/docs/data-model/>`_
    * See `Query Language <https://www.pilosa.com/docs/query-language/>`_
    """

    def __init__(self, index, name, row_label, time_quantum, inverse_enabled,
                 cache_type, cache_size):
        # Fail fast on invalid names/labels before any server round trip.
        validate_frame_name(name)
        validate_label(row_label)
        self.index = index
        self.name = name
        self.time_quantum = time_quantum
        self.inverse_enabled = inverse_enabled
        self.cache_type = cache_type
        self.cache_size = cache_size
        self.row_label = row_label
        # The column label is inherited from the owning index.
        self.column_label = index.column_label

    def __eq__(self, other):
        if id(self) == id(other):
            return True
        if not isinstance(other, self.__class__):
            return False
        # Note that we skip comparing the frames of the indexes by using index._meta_eq
        # in order to avoid a call cycle
        return self.name == other.name and \
            self.index._meta_eq(other.index) and \
            self.row_label == other.row_label and \
            self.time_quantum == other.time_quantum and \
            self.inverse_enabled == other.inverse_enabled and \
            self.cache_type == other.cache_type and \
            self.cache_size == other.cache_size

    def __ne__(self, other):
        return not self.__eq__(other)

    def copy(self):
        """Return a copy of this frame bound to the same index."""
        return Frame(self.index, self.name, self.row_label, self.time_quantum,
                     self.inverse_enabled, self.cache_type, self.cache_size)

    def bitmap(self, row_id):
        """Creates a Bitmap query.

        Bitmap retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query. It also retrieves any attributes set on that row or column.

        This variant of Bitmap query uses the row label.

        :param int row_id:
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.row_label, row_id, self.name),
                        self.index)

    def inverse_bitmap(self, column_id):
        """Creates a Bitmap query.

        ``Bitmap`` retrieves the indices of all the set bits in a row or column based on whether the row label or column label is given in the query. It also retrieves any attributes set on that row or column.

        This variant of Bitmap query uses the column label.

        :param int column_id:
        :return: Pilosa bitmap query
        :rtype: pilosa.PQLBitmapQuery
        """
        return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.column_label, column_id, self.name),
                        self.index)

    def setbit(self, row_id, column_id, timestamp=None):
        """Creates a SetBit query.

        ``SetBit`` assigns a value of 1 to a bit in the binary matrix, thus associating the given row in the given frame with the given column.

        :param int row_id:
        :param int column_id:
        :param datetime.datetime timestamp: optional; when given it is rendered into the query
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        # Only append the timestamp argument when one was supplied.
        ts = ", timestamp='%s'" % timestamp.strftime(_TIME_FORMAT) if timestamp else ''
        return PQLQuery(u"SetBit(%s=%d, frame='%s', %s=%d%s)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id, ts),
                        self.index)

    def clearbit(self, row_id, column_id):
        """Creates a ClearBit query.

        ``ClearBit`` assigns a value of 0 to a bit in the binary matrix, thus disassociating the given row in the given frame from the given column.

        :param int row_id:
        :param int column_id:
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        return PQLQuery(u"ClearBit(%s=%d, frame='%s', %s=%d)" % \
                        (self.row_label, row_id, self.name, self.column_label, column_id),
                        self.index)

    def topn(self, n, bitmap=None, field="", *values):
        """Creates a TopN query.

        ``TopN`` returns the id and count of the top n bitmaps (by count of bits) in the frame.

        * see: `TopN Query <https://www.pilosa.com/docs/query-language/#topn>`_

        :param int n: number of items to return
        :param pilosa.PQLBitmapQuery bitmap: a PQL Bitmap query
        :param str field: field name
        :param object values: filter values to be matched against the field
        """
        return self._topn(n, bitmap, field, False, *values)

    def inverse_topn(self, n, bitmap=None, field="", *values):
        """Creates a TopN query.

        ``TopN`` returns the id and count of the top n bitmaps (by count of bits) in the frame.

        This version sets `inverse=true`.

        * see: `TopN Query <https://www.pilosa.com/docs/query-language/#topn>`_

        :param int n: number of items to return
        :param pilosa.PQLBitmapQuery bitmap: a PQL Bitmap query
        :param str field: field name
        :param object values: filter values to be matched against the field
        """
        return self._topn(n, bitmap, field, True, *values)

    def _topn(self, n, bitmap=None, field="", inverse=False, *values):
        # Shared implementation of topn/inverse_topn: assembles the
        # TopN(...) argument list (optional bitmap first, then frame,
        # n, inverse and optional field/filters).
        parts = ["frame='%s'" % self.name, "n=%d" % n, "inverse=%s" % ('true' if inverse else 'false')]
        if bitmap:
            parts.insert(0, bitmap.serialize())
        if field:
            validate_label(field)
            # json.dumps renders the filter values as valid PQL literals.
            values_str = json.dumps(values, separators=(',', ': '))
            parts.extend(["field='%s'" % field, "filters=%s" % values_str])
        qry = u"TopN(%s)" % ", ".join(parts)
        return PQLQuery(qry, self.index)

    def range(self, row_id, start, end):
        """Creates a Range query.

        Similar to ``Bitmap``, but only returns bits which were set with timestamps between the given start and end timestamps.

        * see: `Range Query <https://www.pilosa.com/docs/query-language/#range>`_

        :param int row_id:
        :param datetime.datetime start: start timestamp
        :param datetime.datetime end: end timestamp
        """
        return self._range(self.row_label, row_id, start, end)

    def inverse_range(self, column_id, start, end):
        """Creates a Range query.

        Similar to ``Bitmap``, but only returns bits which were set with timestamps between the given start and end timestamps.

        This variant uses the column label.

        :param int column_id:
        :param datetime.datetime start: start timestamp
        :param datetime.datetime end: end timestamp
        """
        return self._range(self.column_label, column_id, start, end)

    def _range(self, label, rowcol_id, start, end):
        # Shared implementation of range/inverse_range; timestamps are
        # formatted with the server's expected YYYY-MM-DDTHH:MM layout.
        start_str = start.strftime(_TIME_FORMAT)
        end_str = end.strftime(_TIME_FORMAT)
        return PQLQuery(u"Range(%s=%d, frame='%s', start='%s', end='%s')" %
                        (label, rowcol_id, self.name, start_str, end_str),
                        self.index)

    def set_row_attrs(self, row_id, attrs):
        """Creates a SetRowAttrs query.

        ``SetRowAttrs`` associates arbitrary key/value pairs with a row in a frame.

        Following object types are accepted:

        * int
        * str
        * bool
        * float

        :param int row_id:
        :param dict attrs: row attributes
        :return: Pilosa query
        :rtype: pilosa.PQLQuery
        """
        attrs_str = _create_attributes_str(attrs)
        return PQLQuery(u"SetRowAttrs(%s=%d, frame='%s', %s)" %
                        (self.row_label, row_id, self.name, attrs_str),
                        self.index)

    def _get_options_string(self):
        # Serialize the non-default frame options as the JSON body used
        # when creating the frame on the server.
        data = {"rowLabel": self.row_label}
        if self.inverse_enabled:
            data["inverseEnabled"] = True
        if self.time_quantum != TimeQuantum.NONE:
            data["timeQuantum"] = str(self.time_quantum)
        if self.cache_type != CacheType.DEFAULT:
            data["cacheType"] = str(self.cache_type)
        if self.cache_size > 0:
            data["cacheSize"] = self.cache_size
        # sort_keys keeps the payload deterministic (useful for tests).
        return json.dumps({"options": data}, sort_keys=True)
class PQLQuery:
    """A single PQL query string bound to the index it targets."""

    def __init__(self, pql, index):
        # pql: the already-serialized PQL string; index: the owning Index.
        self.pql = pql
        self.index = index

    def serialize(self):
        """Return the raw PQL string for this query."""
        return self.pql
def _create_attributes_str(attrs):
    """Render *attrs* (a dict) as a sorted, comma-separated PQL
    ``key=value`` attribute list, JSON-encoding each value.

    :raise PilosaError: if a value cannot be JSON-encoded
    """
    pairs = []
    try:
        for key, value in attrs.items():
            # TODO: make key use its own validator
            validate_label(key)
            pairs.append("%s=%s" % (key, json.dumps(value)))
    except TypeError:
        raise PilosaError("Error while converting values")
    return ", ".join(sorted(pairs))
class PQLBatchQuery:
    """Accumulates PQL queries and serializes them back-to-back."""

    def __init__(self, index):
        self.index = index
        self.queries = []

    def add(self, *queries):
        """Append the given queries to the batch, preserving order."""
        for query in queries:
            self.queries.append(query)

    def serialize(self):
        """Concatenate the serialized form of every queued query."""
        parts = [query.serialize() for query in self.queries]
        return u"".join(parts)
| 36.392294 | 209 | 0.626853 |
import json
from .exceptions import PilosaError
from .validator import validate_index_name, validate_frame_name, validate_label
__all__ = ("TimeQuantum", "CacheType", "Schema", "Index", "PQLQuery", "PQLBatchQuery")
_TIME_FORMAT = "%Y-%m-%dT%H:%M"
class TimeQuantum:
NONE = None
YEAR = None
MONTH = None
DAY = None
HOUR = None
YEAR_MONTH = None
MONTH_DAY = None
DAY_HOUR = None
YEAR_MONTH_DAY = None
MONTH_DAY_HOUR = None
YEAR_MONTH_DAY_HOUR = None
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, TimeQuantum):
return self.value == other.value
return False
TimeQuantum.NONE = TimeQuantum("")
TimeQuantum.YEAR = TimeQuantum("Y")
TimeQuantum.MONTH = TimeQuantum("M")
TimeQuantum.DAY = TimeQuantum("D")
TimeQuantum.HOUR = TimeQuantum("H")
TimeQuantum.YEAR_MONTH = TimeQuantum("YM")
TimeQuantum.MONTH_DAY = TimeQuantum("MD")
TimeQuantum.DAY_HOUR = TimeQuantum("DH")
TimeQuantum.YEAR_MONTH_DAY = TimeQuantum("YMD")
TimeQuantum.MONTH_DAY_HOUR = TimeQuantum("MDH")
TimeQuantum.YEAR_MONTH_DAY_HOUR = TimeQuantum("YMDH")
class CacheType:
DEFAULT = None
LRU = None
RANKED = None
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __eq__(self, other):
if isinstance(other, CacheType):
return self.value == other.value
return False
CacheType.DEFAULT = CacheType("")
CacheType.LRU = CacheType("lru")
CacheType.RANKED = CacheType("ranked")
class Schema:
def __init__(self):
self._indexes = {}
def __eq__(self, other):
if id(self) == id(other):
return True
if not isinstance(other, self.__class__):
return False
return self._indexes == other._indexes
def __ne__(self, other):
return not self.__eq__(other)
def index(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
index = self._indexes.get(name)
if index is None:
index = Index(name, column_label, time_quantum)
self._indexes[name] = index
return index
def _diff(self, other):
result = Schema()
for index_name, index in self._indexes.items():
if index_name not in other._indexes:
result._indexes[index_name] = index.copy()
else:
# the index exists in the other schema; check the frames
result_index = index.copy(frames=False)
for frame_name, frame in index._frames.items():
# the frame doesn't exist in the other scheme, copy it
if frame_name not in result_index._frames:
result_index._frames[frame_name] = frame.copy()
if len(result_index._frames) > 0:
result._indexes[index_name] = result_index
return result
class Index:
def __init__(self, name, column_label="columnID", time_quantum=TimeQuantum.NONE):
validate_index_name(name)
validate_label(column_label)
self.name = name
self.column_label = column_label
self.time_quantum = time_quantum
self._frames = {}
def __eq__(self, other):
if id(self) == id(other):
return True
if not isinstance(other, self.__class__):
return False
return self._meta_eq(other) and \
self._frames == other._frames
def __ne__(self, other):
return not self.__eq__(other)
def _meta_eq(self, other):
return self.name == other.name and \
self.column_label == other.column_label and \
self.time_quantum == other.time_quantum
def copy(self, frames=True):
index = Index(self.name, column_label=self.column_label, time_quantum=self.time_quantum)
if frames:
index._frames = dict((name, frame.copy()) for name, frame in self._frames.items())
return index
def frame(self, name, row_label="rowID", time_quantum=TimeQuantum.NONE,
inverse_enabled=False, cache_type=CacheType.DEFAULT, cache_size=0):
frame = self._frames.get(name)
if frame is None:
frame = Frame(self, name, row_label, time_quantum,
inverse_enabled, cache_type, cache_size)
self._frames[name] = frame
return frame
def raw_query(self, query):
return PQLQuery(query, self)
def batch_query(self, *queries):
q = PQLBatchQuery(self)
q.add(*queries)
return q
def union(self, *bitmaps):
return self._bitmap_op("Union", bitmaps)
def intersect(self, *bitmaps):
if len(bitmaps) < 1:
raise PilosaError("Number of bitmap queries should be greater or equal to 1")
return self._bitmap_op("Intersect", bitmaps)
def difference(self, *bitmaps):
if len(bitmaps) < 1:
raise PilosaError("Number of bitmap queries should be greater or equal to 1")
return self._bitmap_op("Difference", bitmaps)
def count(self, bitmap):
return PQLQuery(u"Count(%s)" % bitmap.serialize(), self)
def set_column_attrs(self, column_id, attrs):
attrs_str = _create_attributes_str(attrs)
return PQLQuery(u"SetColumnAttrs(%s=%d, %s)" %
(self.column_label, column_id, attrs_str), self)
def _bitmap_op(self, name, bitmaps):
return PQLQuery(u"%s(%s)" % (name, u", ".join(b.serialize() for b in bitmaps)), self)
class Frame:
def __init__(self, index, name, row_label, time_quantum, inverse_enabled,
cache_type, cache_size):
validate_frame_name(name)
validate_label(row_label)
self.index = index
self.name = name
self.time_quantum = time_quantum
self.inverse_enabled = inverse_enabled
self.cache_type = cache_type
self.cache_size = cache_size
self.row_label = row_label
self.column_label = index.column_label
def __eq__(self, other):
if id(self) == id(other):
return True
if not isinstance(other, self.__class__):
return False
return self.name == other.name and \
self.index._meta_eq(other.index) and \
self.row_label == other.row_label and \
self.time_quantum == other.time_quantum and \
self.inverse_enabled == other.inverse_enabled and \
self.cache_type == other.cache_type and \
self.cache_size == other.cache_size
def __ne__(self, other):
return not self.__eq__(other)
def copy(self):
return Frame(self.index, self.name, self.row_label, self.time_quantum,
self.inverse_enabled, self.cache_type, self.cache_size)
def bitmap(self, row_id):
return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.row_label, row_id, self.name),
self.index)
def inverse_bitmap(self, column_id):
return PQLQuery(u"Bitmap(%s=%d, frame='%s')" % (self.column_label, column_id, self.name),
self.index)
def setbit(self, row_id, column_id, timestamp=None):
ts = ", timestamp='%s'" % timestamp.strftime(_TIME_FORMAT) if timestamp else ''
return PQLQuery(u"SetBit(%s=%d, frame='%s', %s=%d%s)" % \
(self.row_label, row_id, self.name, self.column_label, column_id, ts),
self.index)
def clearbit(self, row_id, column_id):
return PQLQuery(u"ClearBit(%s=%d, frame='%s', %s=%d)" % \
(self.row_label, row_id, self.name, self.column_label, column_id),
self.index)
def topn(self, n, bitmap=None, field="", *values):
return self._topn(n, bitmap, field, False, *values)
def inverse_topn(self, n, bitmap=None, field="", *values):
return self._topn(n, bitmap, field, True, *values)
def _topn(self, n, bitmap=None, field="", inverse=False, *values):
parts = ["frame='%s'" % self.name, "n=%d" % n, "inverse=%s" % ('true' if inverse else 'false')]
if bitmap:
parts.insert(0, bitmap.serialize())
if field:
validate_label(field)
values_str = json.dumps(values, separators=(',', ': '))
parts.extend(["field='%s'" % field, "filters=%s" % values_str])
qry = u"TopN(%s)" % ", ".join(parts)
return PQLQuery(qry, self.index)
def range(self, row_id, start, end):
return self._range(self.row_label, row_id, start, end)
def inverse_range(self, column_id, start, end):
return self._range(self.column_label, column_id, start, end)
def _range(self, label, rowcol_id, start, end):
start_str = start.strftime(_TIME_FORMAT)
end_str = end.strftime(_TIME_FORMAT)
return PQLQuery(u"Range(%s=%d, frame='%s', start='%s', end='%s')" %
(label, rowcol_id, self.name, start_str, end_str),
self.index)
def set_row_attrs(self, row_id, attrs):
attrs_str = _create_attributes_str(attrs)
return PQLQuery(u"SetRowAttrs(%s=%d, frame='%s', %s)" %
(self.row_label, row_id, self.name, attrs_str),
self.index)
def _get_options_string(self):
data = {"rowLabel": self.row_label}
if self.inverse_enabled:
data["inverseEnabled"] = True
if self.time_quantum != TimeQuantum.NONE:
data["timeQuantum"] = str(self.time_quantum)
if self.cache_type != CacheType.DEFAULT:
data["cacheType"] = str(self.cache_type)
if self.cache_size > 0:
data["cacheSize"] = self.cache_size
return json.dumps({"options": data}, sort_keys=True)
class PQLQuery:
def __init__(self, pql, index):
self.pql = pql
self.index = index
def serialize(self):
return self.pql
def _create_attributes_str(attrs):
kvs = []
try:
for k, v in attrs.items():
validate_label(k)
kvs.append("%s=%s" % (k, json.dumps(v)))
return ", ".join(sorted(kvs))
except TypeError:
raise PilosaError("Error while converting values")
class PQLBatchQuery:
    """Accumulates several PQL queries and serializes them as one payload."""

    def __init__(self, index):
        self.index = index
        self.queries = []

    def add(self, *queries):
        """Append one or more query objects to the batch."""
        for query in queries:
            self.queries.append(query)

    def serialize(self):
        """Concatenate the serialized form of every query, in insertion order."""
        parts = [query.serialize() for query in self.queries]
        return u''.join(parts)
| true | true |
f7143e71d4927605031e54ebefb2763f34929e39 | 9,923 | py | Python | old_code/YoutubeVideo.py | lukewest/Movie-Extra-Downloader | f5ba12a2f1a34fd4aa892eb0379342b131076a70 | [
"MIT"
] | 23 | 2018-08-08T14:28:59.000Z | 2022-03-22T15:45:10.000Z | old_code/YoutubeVideo.py | lukewest/Movie-Extra-Downloader | f5ba12a2f1a34fd4aa892eb0379342b131076a70 | [
"MIT"
] | 13 | 2018-08-08T14:50:29.000Z | 2022-01-27T09:05:18.000Z | old_code/YoutubeVideo.py | lukewest/Movie-Extra-Downloader | f5ba12a2f1a34fd4aa892eb0379342b131076a70 | [
"MIT"
] | 9 | 2018-08-12T14:08:15.000Z | 2021-09-18T01:08:04.000Z | from _socket import timeout
from urllib.error import URLError
from pytube import YouTube
from pytube.exceptions import RegexMatchError
from old_code.Stream import Stream
import time
import tools as tools
class YoutubeVideo(object):
    """Wrapper around a pytube ``YouTube`` object.

    Loads video metadata (title, rating, view count, tags, ...) for a URL,
    builds a list of ``Stream`` wrappers, scores the available video quality
    and picks the best audio / video / combined streams according to the
    caller's container and resolution preferences.
    """
    # todo (2): subtitles

    # Class-wide count of consecutive URLError failures, shared across
    # instances so persistent network trouble eventually re-raises.
    conn_errors = 0

    def __init__(self, url, score=0, preferred_container='mp4', min_resolution=360,
                 max_resolution=1080, force_preferred_container=False):
        """Fetch metadata for ``url`` via pytube and prepare stream selections.

        :param url: YouTube video URL.
        :param score: external relevance score, stored as-is on ``self.score``.
        :param preferred_container: preferred file container (e.g. ``'mp4'``).
        :param min_resolution: discard video streams below this resolution.
        :param max_resolution: discard video streams above this resolution.
        :param force_preferred_container: if True, only streams in the
            preferred container are eligible when picking the best streams.
        """
        ########################################
        # Declare all instance attributes up front; filled in below.
        self.url = None
        self.source = None             # pytube YouTube object
        self.delete = None             # True when the video cannot be used
        self.complete = None           # False when metadata/stream loading failed
        self.is_play_trailer = None
        self.title = None
        self.thumbnail_url = None
        self.channel = None
        self.tags = list()
        self.view_count = None
        self.rating = None
        self.adjusted_rating = None    # rating down-weighted for low view counts
        self.resolution = None
        self.quality_score = None
        self.length = None             # duration in seconds (value from pytube)
        self.resolution_ratio = None   # width / height of the best video stream
        self.streams = list()
        self.best_video_stream = None
        self.best_audio_stream = None
        self.best_combined_stream = None
        ########################################

        self.url = url
        self.delete = False
        self.is_play_trailer = False
        self.complete = True

        # Retry loop around pytube metadata extraction; transient failures
        # (timeouts, URL errors, missing stream map) are retried a few times.
        tries = 0
        while True:
            try:
                self.source = YouTube(url)
            except KeyError as e:
                if e.args[0] == 'url':
                    # presumably a trailer-only page with no direct video
                    # URL — mark as unusable and bail out. TODO confirm.
                    self.delete = True
                    self.is_play_trailer = True
                    # todo (1): add youtube-dl info grabber/downloader
                    # stuff I need: title, length, keywords?
                    return
                elif e.args[0] == 'url_encoded_fmt_stream_map':
                    if tries > 4:
                        # NOTE(review): message still says "retrying" even
                        # though this branch gives up.
                        print('Failed to load youtube data, retrying. Reason: ' + str(e))
                        self.delete = True
                        return
                    print('Failed to load youtube data, retrying. Reason: ' + str(e))
                    time.sleep(2)
                    tries += 1
                else:
                    # Unexpected KeyError: propagate.
                    raise
            except RegexMatchError as e:
                # pytube could not parse the watch page at all; give up.
                print('Pytube failed to load video info. Reason: ' + url + ': ' + str(e))
                self.delete = True
                return
            except timeout as e:
                if tries > 4:
                    print('Pytube failed to load video info. Reason: ' + str(e))
                    self.complete = False
                    # After repeated process-wide socket timeouts, re-raise.
                    if Stream.conn_errors > 2:
                        raise
                    else:
                        Stream.conn_errors += 1
                    return
                print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
                tries += 1
                time.sleep(1)
            except URLError as e:
                if tries > 2:
                    print('Pytube failed to load video info. Reason: ' + str(e))
                    self.complete = False
                    # After repeated URL errors across instances, re-raise.
                    if YoutubeVideo.conn_errors > 2:
                        raise
                    else:
                        YoutubeVideo.conn_errors += 1
                    return
                print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
                time.sleep(1)
                tries += 1
            else:
                # Success: reset the shared failure counter and stop retrying.
                YoutubeVideo.conn_errors = 0
                break

        self.score = score
        self.title = self.source.title
        self.title = tools.get_clean_string(self.title)
        self.rating = float(self.source.player_config_args['avg_rating'])
        self.view_count = int(self.source.player_config_args['view_count'])
        self.channel = self.source.player_config_args['author']
        self.length = self.source.player_config_args['length_seconds']
        # NOTE(review): this unguarded assignment makes the try/except just
        # below dead code — any error here escapes before being caught.
        self.thumbnail_url = self.source.thumbnail_url
        try:
            self.thumbnail_url = self.source.thumbnail_url
        except KeyError:
            self.thumbnail_url = None
        try:
            self.tags = self.source.player_config_args['keywords'].split(',')
        except KeyError:
            self.tags = ''

        # Clamp tiny view counts so the adjusted-rating formula is stable.
        if self.view_count < 100:
            self.view_count = 100
        # Down-weight the rating of barely-viewed videos.
        self.adjusted_rating = self.rating * (1 - 1 / ((self.view_count / 60) ** 0.5))

        self.load_streams(min_resolution, max_resolution)
        self.update_quality_score(preferred_container)
        self.update_best_audio_stream(preferred_container, force_preferred_container)
        self.update_best_video_stream(preferred_container, force_preferred_container)
        self.update_best_combined_stream(preferred_container, force_preferred_container)

        # NOTE(review): the trailer branch above returns early, so this
        # condition appears unreachable here — confirm before relying on it.
        if self.is_play_trailer:
            self.update_youtube_dl_info()

    def update_youtube_dl_info(self):
        """Placeholder for a youtube-dl based metadata fallback (see todo (1))."""
        pass

    def update_quality_score(self, preferred_container='mp4'):
        """Compute ``self.quality_score`` from the highest-resolution video stream.

        Each video stream's per-pixel bitrate is divided by a per-resolution
        normalisation factor (values look empirical — TODO confirm origin),
        multiplied by a resolution-dependent base score, with a 20% bonus for
        the preferred container. Also records ``self.resolution_ratio`` for
        the highest-resolution stream seen.
        """
        self.quality_score = 0
        max_res = 0
        for stream in self.streams:
            if stream.type != 'video':
                continue
            quality_score = 0
            pixel_bitrate = stream.bitrate_per_pixel
            if stream.resolution == 1080:
                pixel_bitrate /= 1
                quality_score = 120
            elif stream.resolution == 720:
                pixel_bitrate /= 1.22
                quality_score = 108
            elif stream.resolution == 480:
                pixel_bitrate /= 1.52
                quality_score = 65
            elif stream.resolution == 360:
                pixel_bitrate /= 1.39
                quality_score = 40
            elif stream.resolution == 240:
                pixel_bitrate /= 2.15
                quality_score = 20
            elif stream.resolution == 144:
                pixel_bitrate /= 2.65
                quality_score = 10
            if preferred_container.lower() == stream.container:
                quality_score *= 1.2
            quality_score *= pixel_bitrate
            # Keep the score of the highest-resolution stream; on resolution
            # ties keep the larger score.
            if stream.resolution > max_res:
                self.quality_score = quality_score
                max_res = stream.resolution
                self.resolution_ratio = stream.size[0] / stream.size[1]
            elif stream.resolution == max_res:
                if quality_score > self.quality_score:
                    self.quality_score = quality_score

    def load_streams(self, min_resolution=360, max_resolution=1080):
        """Populate ``self.streams`` from pytube's stream list.

        Video streams outside [min_resolution, max_resolution] are skipped.
        ``self.complete`` is cleared when any stream failed retryably or when
        ``Stream.conn_errors`` reports outstanding connection errors.
        """
        self.streams = list()
        self.complete = True
        for source_stream in self.source.streams.fmt_streams:
            stream = Stream(source_stream, int(self.length))
            if stream.complete:
                if stream.resolution is not None:
                    if stream.resolution > max_resolution or stream.resolution < min_resolution:
                        continue
                self.streams.append(stream)
            elif stream.retry:
                # Stream metadata failed for a retryable reason.
                self.complete = False
        if Stream.conn_errors != 0:
            self.complete = False

    def update_best_video_stream(self, preferred_container='mp4', force_preferred_container=False):
        """Pick the best video-only stream into ``self.best_video_stream``.

        The preferred-container stream wins when it matches the overall best
        resolution, or when ``force_preferred_container`` is set.
        """
        highest_resolution = 0
        best_stream = None
        highest_pref_resolution = 0
        best_pref_stream = None
        for stream in self.streams:
            if 'video' != stream.type:
                continue
            if stream.resolution > highest_resolution:
                highest_resolution = stream.resolution
                best_stream = stream
            if stream.container.lower() == preferred_container.lower():
                if stream.resolution > highest_pref_resolution:
                    highest_pref_resolution = stream.resolution
                    best_pref_stream = stream
        if highest_resolution == highest_pref_resolution or force_preferred_container:
            ret = best_pref_stream
        else:
            ret = best_stream
        self.best_video_stream = ret

    def update_best_audio_stream(self, preferred_container='mp4', force_preferred_container=False):
        """Pick the best audio-only stream into ``self.best_audio_stream``.

        The preferred-container stream wins unless another container offers
        more than 1.35x its bitrate (or ``force_preferred_container`` is set).
        """
        highest_bitrate = 0
        best_stream = None
        highest_pref_bitrate = 0
        best_pref_stream = None
        for stream in self.streams:
            if 'audio' != stream.type:
                continue
            if stream.bitrate > highest_bitrate:
                highest_bitrate = stream.bitrate
                best_stream = stream
            if stream.container.lower() == preferred_container.lower():
                if stream.bitrate > highest_pref_bitrate:
                    highest_pref_bitrate = stream.bitrate
                    best_pref_stream = stream
        # Tolerate up to 35% bitrate loss to stay in the preferred container.
        if highest_bitrate <= highest_pref_bitrate * 1.35 or force_preferred_container:
            ret = best_pref_stream
        else:
            ret = best_stream
        self.best_audio_stream = ret

    def update_best_combined_stream(self, preferred_container='mp4', force_preferred_container=False):
        """Pick the best muxed (audio+video) stream into ``self.best_combined_stream``.

        Scoring: +10 for matching the highest combined resolution, +1 for the
        preferred container; the highest-scoring stream wins.
        """
        # First pass: find the highest resolution among combined streams.
        highest_resolution = 0
        for stream in self.streams:
            if 'combined' != stream.type:
                continue
            if stream.resolution > highest_resolution:
                highest_resolution = stream.resolution
        # Second pass: score each candidate and keep the best.
        max_score = 0
        selected_stream = None
        for stream in self.streams:
            if 'combined' != stream.type:
                continue
            score = 0
            resolution = stream.resolution
            if force_preferred_container:
                if stream.container != preferred_container:
                    continue
            if resolution == highest_resolution:
                score += 10 ** 1
            if stream.container == preferred_container:
                score += 10 ** 0
            if score > max_score:
                max_score = score
                selected_stream = stream
        self.best_combined_stream = selected_stream
| 33.866894 | 102 | 0.550338 | from _socket import timeout
from urllib.error import URLError
from pytube import YouTube
from pytube.exceptions import RegexMatchError
from old_code.Stream import Stream
import time
import tools as tools
class YoutubeVideo(object):
conn_errors = 0
def __init__(self, url, score=0, preferred_container='mp4', min_resolution=360,
max_resolution=1080, force_preferred_container=False):
except RegexMatchError as e:
print('Pytube failed to load video info. Reason: ' + url + ': ' + str(e))
self.delete = True
return
except timeout as e:
if tries > 4:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if Stream.conn_errors > 2:
raise
else:
Stream.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
tries += 1
time.sleep(1)
except URLError as e:
if tries > 2:
print('Pytube failed to load video info. Reason: ' + str(e))
self.complete = False
if YoutubeVideo.conn_errors > 2:
raise
else:
YoutubeVideo.conn_errors += 1
return
print('Pytube failed to load video info. Reason: ' + str(e) + ', retrying...')
time.sleep(1)
tries += 1
else:
YoutubeVideo.conn_errors = 0
break
self.score = score
self.title = self.source.title
self.title = tools.get_clean_string(self.title)
self.rating = float(self.source.player_config_args['avg_rating'])
self.view_count = int(self.source.player_config_args['view_count'])
self.channel = self.source.player_config_args['author']
self.length = self.source.player_config_args['length_seconds']
self.thumbnail_url = self.source.thumbnail_url
try:
self.thumbnail_url = self.source.thumbnail_url
except KeyError:
self.thumbnail_url = None
try:
self.tags = self.source.player_config_args['keywords'].split(',')
except KeyError:
self.tags = ''
if self.view_count < 100:
self.view_count = 100
self.adjusted_rating = self.rating * (1 - 1 / ((self.view_count / 60) ** 0.5))
self.load_streams(min_resolution, max_resolution)
self.update_quality_score(preferred_container)
self.update_best_audio_stream(preferred_container, force_preferred_container)
self.update_best_video_stream(preferred_container, force_preferred_container)
self.update_best_combined_stream(preferred_container, force_preferred_container)
if self.is_play_trailer:
self.update_youtube_dl_info()
def update_youtube_dl_info(self):
pass
def update_quality_score(self, preferred_container='mp4'):
self.quality_score = 0
max_res = 0
for stream in self.streams:
if stream.type != 'video':
continue
quality_score = 0
pixel_bitrate = stream.bitrate_per_pixel
if stream.resolution == 1080:
pixel_bitrate /= 1
quality_score = 120
elif stream.resolution == 720:
pixel_bitrate /= 1.22
quality_score = 108
elif stream.resolution == 480:
pixel_bitrate /= 1.52
quality_score = 65
elif stream.resolution == 360:
pixel_bitrate /= 1.39
quality_score = 40
elif stream.resolution == 240:
pixel_bitrate /= 2.15
quality_score = 20
elif stream.resolution == 144:
pixel_bitrate /= 2.65
quality_score = 10
if preferred_container.lower() == stream.container:
quality_score *= 1.2
quality_score *= pixel_bitrate
if stream.resolution > max_res:
self.quality_score = quality_score
max_res = stream.resolution
self.resolution_ratio = stream.size[0] / stream.size[1]
elif stream.resolution == max_res:
if quality_score > self.quality_score:
self.quality_score = quality_score
def load_streams(self, min_resolution=360, max_resolution=1080):
self.streams = list()
self.complete = True
for source_stream in self.source.streams.fmt_streams:
stream = Stream(source_stream, int(self.length))
if stream.complete:
if stream.resolution is not None:
if stream.resolution > max_resolution or stream.resolution < min_resolution:
continue
self.streams.append(stream)
elif stream.retry:
self.complete = False
if Stream.conn_errors != 0:
self.complete = False
def update_best_video_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_resolution = 0
best_stream = None
highest_pref_resolution = 0
best_pref_stream = None
for stream in self.streams:
if 'video' != stream.type:
continue
if stream.resolution > highest_resolution:
highest_resolution = stream.resolution
best_stream = stream
if stream.container.lower() == preferred_container.lower():
if stream.resolution > highest_pref_resolution:
highest_pref_resolution = stream.resolution
best_pref_stream = stream
if highest_resolution == highest_pref_resolution or force_preferred_container:
ret = best_pref_stream
else:
ret = best_stream
self.best_video_stream = ret
def update_best_audio_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_bitrate = 0
best_stream = None
highest_pref_bitrate = 0
best_pref_stream = None
for stream in self.streams:
if 'audio' != stream.type:
continue
if stream.bitrate > highest_bitrate:
highest_bitrate = stream.bitrate
best_stream = stream
if stream.container.lower() == preferred_container.lower():
if stream.bitrate > highest_pref_bitrate:
highest_pref_bitrate = stream.bitrate
best_pref_stream = stream
if highest_bitrate <= highest_pref_bitrate * 1.35 or force_preferred_container:
ret = best_pref_stream
else:
ret = best_stream
self.best_audio_stream = ret
def update_best_combined_stream(self, preferred_container='mp4', force_preferred_container=False):
highest_resolution = 0
for stream in self.streams:
if 'combined' != stream.type:
continue
if stream.resolution > highest_resolution:
highest_resolution = stream.resolution
max_score = 0
selected_stream = None
for stream in self.streams:
if 'combined' != stream.type:
continue
score = 0
resolution = stream.resolution
if force_preferred_container:
if stream.container != preferred_container:
continue
if resolution == highest_resolution:
score += 10 ** 1
if stream.container == preferred_container:
score += 10 ** 0
if score > max_score:
max_score = score
selected_stream = stream
self.best_combined_stream = selected_stream
| true | true |
f7143ea3ef7f254f2d3187ba1ded0afb09ea30ff | 23,487 | py | Python | tools/trainpar_deepqmri.py | fragrussu/qMRINet | 418cbe22cefa2974d8a97b359324ff4c35865d22 | [
"BSD-2-Clause"
] | 3 | 2020-10-22T23:37:36.000Z | 2022-02-18T09:39:42.000Z | tools/trainpar_deepqmri.py | fragrussu/qMRINet | 418cbe22cefa2974d8a97b359324ff4c35865d22 | [
"BSD-2-Clause"
] | null | null | null | tools/trainpar_deepqmri.py | fragrussu/qMRINet | 418cbe22cefa2974d8a97b359324ff4c35865d22 | [
"BSD-2-Clause"
] | null | null | null | # Author: Francesco Grussu, University College London
# <f.grussu@ucl.ac.uk> <francegrussu@gmail.com>
#
# Code released under BSD Two-Clause license
#
# Copyright (c) 2020 University College London.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
### Load libraries
import argparse, os, sys
from numpy import matlib
import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import deepqmri
if __name__ == "__main__":
### Print help and parse arguments
parser = argparse.ArgumentParser(description='This program trains a qMRI-net for quantitative MRI parameter estimation. A qMRI-Nnet enables voxel-by-voxel estimation of microstructural properties from sets of MRI images aacquired by varying the MRI sequence parameters. Author: Francesco Grussu, University College London (<f.grussu@ucl.ac.uk><francegrussu@gmail.com>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('sig_train', help='path to a pickle binary file storing the input training MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_train', help='path to a pickle binary file storing the training tissue parameter data as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('sig_val', help='path to a pickle binary file storing the input validation MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_val', help='path to a pickle binary file storing the validation tissue parameters as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('mri_model', help='string indicating the MRI model to fit (choose among: "pr_hybriddwi" for prostate hybrid diffusion-relaxometry imaging; "br_sirsmdt" for brain saturation recovery diffusion tensor on spherical mean signals; "twocompdwite" for a two-compartment diffusion-t2 relaxation model without anisotropy). Tissue parameters will be: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0')
parser.add_argument('mri_prot', help='path to text file storing the MRI protocol. For model "pr_hybriddwi" and "twocompdwite" it must contain a matrix where the 1st row stores b-values in s/mm^2, while 2nd row echo times in ms; for model "br_sirsmdt" it must contain a matrix where the 1st row stores preparation times (saturation-inversion delay) in ms, the 2nd row inversion times (inversion-excitation delay) in ms, the 3rd row b-values in s/mm^2. For a pure inversion recovery (i.e. no saturation pulse), use a very large number for the saturation-inversion delay (at least 5 times the maximum expected T1). Different entries should be separated by spaces')
parser.add_argument('out_base', help='base name of output directory (a string built with the network parameters will be added to the base). The output directory will contain the following output files: ** losstrain.bin, pickle binary storing the training loss as a numpy matrix (shape: epoch x batch); ** lossval.bin, pickle binary storing the validation loss as a numpy matrix (shape: epoch x 1); ** nnet_epoch0.bin, pickle binary storing the qMRI-net at initialisation; ** nnet_epoch0.pth, Pytorch binary storing the qMRI-net at initialisation; ** nnet_epoch<FINAL_EPOCH>.bin, pickle binary storing the qMRI-net at the final epoch; ** nnet_lossvalmin.bin, pickle binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); * nnet_lossvalmin.pth, Pytorch binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_sigval.bin, prediction of the validation signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_tissueval.bin, prediction of tissue parameters from validation signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin.info, text file reporting information regarding the epoch with the lowest validation loss; ** lossval_min.txt, miniimum validation loss; ** nnet_lossvalmin_sigtest.bin, prediction of the test signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information), if those signals are provided; ** nnet_lossvalmin_tissuetest.bin, prediction of tissue parameters from test signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest 
validation loss, check nnet_lossvalmin.info file for more information) if test signals are provided')
parser.add_argument('--nn', metavar='<list>', help='array storing the number of hidden neurons, separated by hyphens (example: 30-15-8). The first number (input neurons) must equal the number of measurements in the protocol (Nmeas); the last number (output neurons) must equal the number of parameters in the model (Npar, 9 for model "pr_hybriddwi", 4 for model "br_sirsmdt", 7 for model "twocompdwite"). Default: Nmeas-(Npar + (Nmeas minus Npar))/2-Npar, where Nmeas is the number of MRI measurements and Npar is the number of tissue parameters for the signal model to fit')
parser.add_argument('--pdrop', metavar='<value>', default='0.0', help='dropout probability in each layer of the neural network. Default: 0.0')
parser.add_argument('--noepoch', metavar='<value>', default='500', help='number of epochs used for training. Default: 500')
parser.add_argument('--lrate', metavar='<value>', default='0.001', help='learning rate. Default: 0.001')
parser.add_argument('--mbatch', metavar='<value>', help='number of voxels in each training mini-batch. Default: 1/80 of the total number of training voxels (minimum: 2 voxels)')
parser.add_argument('--seed', metavar='<value>', default='19102018', help='integer used as a seed for Numpy and PyTorch random number generators. Default: 19102018')
parser.add_argument('--nwork', metavar='<value>', default='0', help='number of workers for data loader. Default: 0')
parser.add_argument('--dtest', metavar='<file>', help='path to an option input pickle binary file storing test MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('--parmin', metavar='<value>', help='list of lower bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 0.5,0.2,250,0.5 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
parser.add_argument('--parmax', metavar='<value>', help='list of upper bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 2.4,0.9,3000,5.0 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
args = parser.parse_args()
### Get some of the inputs
pdrop = float(args.pdrop)
noepoch = int(args.noepoch)
lrate = float(args.lrate)
seed = int(args.seed)
nwork = int(args.nwork)
mrimodel = args.mri_model
### Print some information
print('')
print('')
print('********************************************************************')
print(' TRAIN A qMRI-NET (qmripar CLASS) ')
print('********************************************************************')
print('')
print('** Input training MRI signals: {}'.format(args.sig_train))
print('** Input training tissue parameters: {}'.format(args.param_train))
print('** Input validation MRI signals: {}'.format(args.sig_val))
print('** Input validation tissue parameters: {}'.format(args.param_val))
if args.dtest is not None:
print('** Input test MRI signals: {}'.format(args.dtest))
### Load training MRI signals
fh = open(args.sig_train,'rb')
datatrain = pk.load(fh)
fh.close()
nvox_train = datatrain.shape[0]
nmeas_train = datatrain.shape[1]
### Load validation MRI signals
fh = open(args.sig_val,'rb')
dataval = pk.load(fh)
fh.close()
nvox_val = dataval.shape[0]
if dataval.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the validation set differs from the training set!')
### Load test MRI signals
if args.dtest is not None:
fh = open(args.dtest,'rb')
datatest = np.float32(pk.load(fh))
fh.close()
if datatest.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the test set differs from the training set!')
### Load training tissue parameters
fh = open(args.param_train,'rb')
prmtrain = pk.load(fh)
npar_train = prmtrain.shape[1]
fh.close()
if prmtrain.shape[0]!=datatrain.shape[0]:
raise RuntimeError('the number of voxels in the training parameters differs from the training MRI signals!')
### Load validation tissue parameters
fh = open(args.param_val,'rb')
prmval = pk.load(fh)
fh.close()
if prmval.shape[0]!=dataval.shape[0]:
raise RuntimeError('the number of voxels in the validation parameters differs from the validation MRI signals!')
if prmval.shape[1]!=prmtrain.shape[1]:
raise RuntimeError('the number of validation parameters differs from the number of training parameters!')
### Get number of mini-batches
if args.mbatch is None:
mbatch = int(float(datatrain.shape[0]) / 80.0) # Default: 1/80 of the total number of training voxels
else:
mbatch = int(args.mbatch)
if (mbatch>datatrain.shape[0]):
mbatch = datatrain.shape[0]
if(mbatch<2):
mbatch = int(2)
### Load MRI protocol
try:
mriprot = np.loadtxt(args.mri_prot)
except:
raise RuntimeError('the format of the MRI protocol is not understood!')
### Check that MRI model exists
if ( (mrimodel!='pr_hybriddwi') and (mrimodel!='br_sirsmdt') and (mrimodel!='twocompdwite') ):
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
if (mrimodel=='pr_hybriddwi'):
s0idx = 8
elif (mrimodel=='br_sirsmdt'):
s0idx = 3
elif (mrimodel=='twocompdwite'):
s0idx = 6
### Get specifics for hidden layers
if args.nn is None:
if (mrimodel=='pr_hybriddwi'):
npars = 9
elif (mrimodel=='br_sirsmdt'):
npars = 4
elif (mrimodel=='twocompdwite'):
npars = 7
else:
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
nhidden = np.array([int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars)])
nhidden_str = '{}-{}-{}'.format( int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars) )
else:
nhidden = (args.nn).split('-')
nhidden = np.array( list(map( int,nhidden )) )
nhidden_str = args.nn
### Get optional user-defined bounds for tissue parameters
if (args.parmin is not None) or (args.parmax is not None):
if (args.parmin is not None) and (args.parmax is None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
if (args.parmax is not None) and (args.parmin is None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
# Lower bound
pminbound = (args.parmin).split(',')
pminbound = np.array( list(map( float, pminbound )) )
# Upper bound
pmaxbound = (args.parmax).split(',')
pmaxbound = np.array( list(map( float, pmaxbound )) )
### Create output base name
out_base_dir = '{}_nhidden{}_pdrop{}_noepoch{}_lr{}_mbatch{}_seed{}'.format(args.out_base,nhidden_str,pdrop,noepoch,lrate,mbatch,seed)
if(os.path.isdir(out_base_dir)==False):
os.mkdir(out_base_dir)
### Print some more information
print('** Output directory: {}'.format(out_base_dir))
print('')
print('')
print('PARAMETERS')
print('')
print('** Hidden neurons: {}'.format(nhidden))
print('** Dropout probability: {}'.format(pdrop))
print('** Number of epochs: {}'.format(noepoch))
print('** Learning rate: {}'.format(lrate))
print('** Number of voxels in a mini-batch: {}'.format(mbatch))
print('** Seed: {}'.format(seed))
print('** Number of workers for data loader: {}'.format(nwork))
### Set random seeds
np.random.seed(seed) # Random seed for reproducibility: NumPy
torch.manual_seed(seed) # Random seed for reproducibility: PyTorch
### Normalise MRI signals and convert to single precision
max_val_train = np.transpose( matlib.repmat(np.max(datatrain,axis=1),nmeas_train,1) )
datatrain = np.float32( datatrain / max_val_train )
max_val_val = np.transpose( matlib.repmat(np.max(dataval,axis=1),nmeas_train,1) )
dataval = np.float32( dataval / max_val_val )
if args.dtest is not None:
max_val_test = np.transpose( matlib.repmat(np.max(datatest,axis=1),nmeas_train,1) )
datatest = np.float32( datatest / max_val_test )
prmtrain = np.float32(prmtrain)
prmval = np.float32(prmval)
### Create mini-batches on training data with data loader
loadertrain = DataLoader(np.concatenate((datatrain,prmtrain),axis=1), batch_size=mbatch, shuffle=True, num_workers=nwork)
### Allocate memory for losses
nobatch=0 # Count how many mini-batches of size mbatch we created
for signals in loadertrain:
nobatch = nobatch+1
losstrain = np.zeros((noepoch,nobatch)) + np.nan
lossval = np.zeros((noepoch,1)) + np.nan
### Instantiate the network and training objects, and save the intantiated network
nnet = deepqmri.qmripar(nhidden,pdrop,mrimodel,mriprot).cpu() # Instantiate neural network
if (args.parmin is not None) or (args.parmax is not None):
nnet.changelim(pminbound,pmaxbound) # Change tissue parameter ranges
print('** Tissue parameter names: {}'.format(nnet.param_name))
print('** Tissue parameter lower bounds: {}'.format(nnet.param_min))
print('** Tissue parameter upper bounds: {}'.format(nnet.param_max))
print('')
print('')
nnetloss = nn.MSELoss() # Loss: L2 norm (mean squared error, Gaussian noise)
nnetopt = torch.optim.Adam(nnet.parameters(), lr=lrate) # Network trained with ADAM optimiser
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch0_net.pth') ) # Save network at epoch 0 (i.e. at initialisation)
nnet_file = open(os.path.join(out_base_dir,'epoch0_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
### Create normalisation tensors for model parameters
slope_norm_tr = np.ones((mbatch , npar_train))
offset_norm_tr = np.ones((mbatch , npar_train))
for pp in range(0,npar_train):
slope_norm_tr[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_tr[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_tr = Tensor(np.float32(slope_norm_tr))
offset_norm_tr = Tensor(np.float32(offset_norm_tr))
slope_norm_val = np.ones((nvox_val , npar_train))
offset_norm_val = np.ones((nvox_val , npar_train))
for pp in range(0,npar_train):
slope_norm_val[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_val[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_val = Tensor(np.float32(slope_norm_val))
offset_norm_val = Tensor(np.float32(offset_norm_val))
### Run training
# Loop over epochs
loss_val_prev = np.inf
for epoch in range(noepoch):
print(' EPOCH {}/{}'.format(epoch+1,noepoch))
print('')
# Loop over mini-batches for at a fixed epoch
minibatch_id = 0
for signals in loadertrain:
# Pass the mini-batch through the network and store the training loss
output = nnet( Tensor(signals[:,0:nmeas_train]) ) # Pass MRI measurements and estimate tissue parmaters
try:
lossmeas_train = nnetloss(Tensor(output)*slope_norm_tr + offset_norm_tr, Tensor(signals[:,nmeas_train:nmeas_train+npar_train])*slope_norm_tr + offset_norm_tr) # Training loss
except:
raise RuntimeError('The number of training voxels must be a multiple of the size of the mini-batch!')
# Back propagation
nnetopt.zero_grad() # Evaluate loss gradient with respect to network parameters at the output layer
lossmeas_train.backward() # Backpropage the loss gradient through previous layers
nnetopt.step() # Update network parameters
# Store loss for the current mini-batch of training
losstrain[epoch,minibatch_id] = Tensor.numpy(lossmeas_train.data)
# Update mini-batch counter
minibatch_id = minibatch_id + 1
# Run validation
nnet.eval() # Set network to evaluation mode (deactivates dropout)
tissueval_nnet = nnet( Tensor(dataval) ) # Output of full network (predicted tissue parameters)
dataval_nnet = nnet.getsignals( Tensor(tissueval_nnet) ) # Estimate MRI signals
dataval_nnet = dataval_nnet.detach().numpy()
max_val_val_out = np.transpose( matlib.repmat(np.max(dataval_nnet,axis=1),nmeas_train,1) )
lossmeas_val = nnetloss( Tensor(tissueval_nnet)*slope_norm_val + offset_norm_val , Tensor(prmval)*slope_norm_val + offset_norm_val ) # Validation loss
# Store validation loss
lossval[epoch,0] = Tensor.numpy(lossmeas_val.data)
# Save trained network at current epoch if validation loss has decreased
if(Tensor.numpy(lossmeas_val.data)<=loss_val_prev):
print(' ... validation loss has decreased. Saving net...')
# Save network
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'lossvalmin_net.pth') )
nnet_file = open(os.path.join(out_base_dir,'lossvalmin_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
# Save information on the epoch
nnet_text = open(os.path.join(out_base_dir,'lossvalmin.info'),'w')
nnet_text.write('Epoch {} (indices starting from 0)'.format(epoch));
nnet_text.close();
# Update value of best validation loss so far
loss_val_prev = Tensor.numpy(lossmeas_val.data)
# Save predicted validation tissue parameters
tissueval_nnet = tissueval_nnet.detach().numpy()
tissueval_nnet[:,s0idx] = (max_val_val[:,0]/max_val_val_out[:,0])*tissueval_nnet[:,s0idx] # Rescale s0 (any column of would work)
tissueval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissueval.bin'),'wb')
pk.dump(tissueval_nnet,tissueval_nnet_file,pk.HIGHEST_PROTOCOL)
tissueval_nnet_file.close()
# Save predicted validation signals
dataval_nnet = (max_val_val/max_val_val_out)*dataval_nnet
dataval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigval.bin'),'wb')
pk.dump(dataval_nnet,dataval_nnet_file,pk.HIGHEST_PROTOCOL)
dataval_nnet_file.close()
# Analyse test data if provided
if args.dtest is not None:
# Get neuronal activations as well as predicted test tissue parameters and test MRI signals
tissuetest_nnet = nnet( Tensor(datatest) ) # Output of network (estimated tissue parameters)
datatest_nnet = nnet.getsignals( Tensor(tissuetest_nnet) ) # Predicted MRI signals
datatest_nnet = datatest_nnet.detach().numpy()
max_val_test_out = np.transpose( matlib.repmat(np.max(datatest_nnet,axis=1),nmeas_train,1) )
# Save predicted test tissue parameters
tissuetest_nnet = tissuetest_nnet.detach().numpy()
tissuetest_nnet[:,s0idx] = (max_val_test[:,0]/max_val_test_out[:,0])*tissuetest_nnet[:,s0idx] # Rescale s0 (any column of max_val_test works)
tissuetest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissuetest.bin'),'wb')
pk.dump(tissuetest_nnet,tissuetest_nnet_file,pk.HIGHEST_PROTOCOL)
tissuetest_nnet_file.close()
# Save predicted test signals
datatest_nnet = (max_val_test/max_val_test_out)*datatest_nnet # Rescale signal
datatest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigtest.bin'),'wb')
pk.dump(datatest_nnet,datatest_nnet_file,pk.HIGHEST_PROTOCOL)
datatest_nnet_file.close()
# Set network back to training mode
nnet.train()
# Print some information
print('')
print(' TRAINING INFO:')
print(' Trainig loss: {:.12f}; validation loss: {:.12f}'.format(Tensor.numpy(lossmeas_train.data), Tensor.numpy(lossmeas_val.data)) )
print('')
# Save the final network
nnet.eval()
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch{}_net.pth'.format(noepoch)) )
nnet_file = open(os.path.join(out_base_dir,'epoch{}_net.bin'.format(noepoch)),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
# Save the training and validation loss
losstrain_file = open(os.path.join(out_base_dir,'losstrain.bin'),'wb')
pk.dump(losstrain,losstrain_file,pk.HIGHEST_PROTOCOL)
losstrain_file.close()
lossval_file = open(os.path.join(out_base_dir,'lossval.bin'),'wb')
pk.dump(lossval,lossval_file,pk.HIGHEST_PROTOCOL)
lossval_file.close()
np.savetxt(os.path.join(out_base_dir,'lossval_min.txt'), [np.nanmin(lossval)], fmt='%.12f', delimiter=' ')
| 60.689922 | 2,100 | 0.732788 |
mpy import matlib
import numpy as np
import torch
from torch import nn
from torch import Tensor
from torch.utils.data import DataLoader
from torch import autograd
import pickle as pk
from pathlib import Path as pt
sys.path.insert(0, os.path.dirname(pt(__file__).absolute()) )
import deepqmri
if __name__ == "__main__":
ns a qMRI-net for quantitative MRI parameter estimation. A qMRI-Nnet enables voxel-by-voxel estimation of microstructural properties from sets of MRI images aacquired by varying the MRI sequence parameters. Author: Francesco Grussu, University College London (<f.grussu@ucl.ac.uk><francegrussu@gmail.com>). Code released under BSD Two-Clause license. Copyright (c) 2020 University College London. All rights reserved.')
parser.add_argument('sig_train', help='path to a pickle binary file storing the input training MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_train', help='path to a pickle binary file storing the training tissue parameter data as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('sig_val', help='path to a pickle binary file storing the input validation MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('param_val', help='path to a pickle binary file storing the validation tissue parameters as a numpy matrix (rows: voxels; columns: parameters)')
parser.add_argument('mri_model', help='string indicating the MRI model to fit (choose among: "pr_hybriddwi" for prostate hybrid diffusion-relaxometry imaging; "br_sirsmdt" for brain saturation recovery diffusion tensor on spherical mean signals; "twocompdwite" for a two-compartment diffusion-t2 relaxation model without anisotropy). Tissue parameters will be: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0')
parser.add_argument('mri_prot', help='path to text file storing the MRI protocol. For model "pr_hybriddwi" and "twocompdwite" it must contain a matrix where the 1st row stores b-values in s/mm^2, while 2nd row echo times in ms; for model "br_sirsmdt" it must contain a matrix where the 1st row stores preparation times (saturation-inversion delay) in ms, the 2nd row inversion times (inversion-excitation delay) in ms, the 3rd row b-values in s/mm^2. For a pure inversion recovery (i.e. no saturation pulse), use a very large number for the saturation-inversion delay (at least 5 times the maximum expected T1). Different entries should be separated by spaces')
parser.add_argument('out_base', help='base name of output directory (a string built with the network parameters will be added to the base). The output directory will contain the following output files: ** losstrain.bin, pickle binary storing the training loss as a numpy matrix (shape: epoch x batch); ** lossval.bin, pickle binary storing the validation loss as a numpy matrix (shape: epoch x 1); ** nnet_epoch0.bin, pickle binary storing the qMRI-net at initialisation; ** nnet_epoch0.pth, Pytorch binary storing the qMRI-net at initialisation; ** nnet_epoch<FINAL_EPOCH>.bin, pickle binary storing the qMRI-net at the final epoch; ** nnet_lossvalmin.bin, pickle binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); * nnet_lossvalmin.pth, Pytorch binary storing the trained qMRI-net at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_sigval.bin, prediction of the validation signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin_tissueval.bin, prediction of tissue parameters from validation signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information); ** nnet_lossvalmin.info, text file reporting information regarding the epoch with the lowest validation loss; ** lossval_min.txt, miniimum validation loss; ** nnet_lossvalmin_sigtest.bin, prediction of the test signals (shape: voxels x measurements) at the best epoch (epoch with lowest validation loss, check nnet_lossvalmin.info file for more information), if those signals are provided; ** nnet_lossvalmin_tissuetest.bin, prediction of tissue parameters from test signals (shape: voxels x number_of_tissue_parameters) at the best epoch (epoch with lowest 
validation loss, check nnet_lossvalmin.info file for more information) if test signals are provided')
parser.add_argument('--nn', metavar='<list>', help='array storing the number of hidden neurons, separated by hyphens (example: 30-15-8). The first number (input neurons) must equal the number of measurements in the protocol (Nmeas); the last number (output neurons) must equal the number of parameters in the model (Npar, 9 for model "pr_hybriddwi", 4 for model "br_sirsmdt", 7 for model "twocompdwite"). Default: Nmeas-(Npar + (Nmeas minus Npar))/2-Npar, where Nmeas is the number of MRI measurements and Npar is the number of tissue parameters for the signal model to fit')
parser.add_argument('--pdrop', metavar='<value>', default='0.0', help='dropout probability in each layer of the neural network. Default: 0.0')
parser.add_argument('--noepoch', metavar='<value>', default='500', help='number of epochs used for training. Default: 500')
parser.add_argument('--lrate', metavar='<value>', default='0.001', help='learning rate. Default: 0.001')
parser.add_argument('--mbatch', metavar='<value>', help='number of voxels in each training mini-batch. Default: 1/80 of the total number of training voxels (minimum: 2 voxels)')
parser.add_argument('--seed', metavar='<value>', default='19102018', help='integer used as a seed for Numpy and PyTorch random number generators. Default: 19102018')
parser.add_argument('--nwork', metavar='<value>', default='0', help='number of workers for data loader. Default: 0')
parser.add_argument('--dtest', metavar='<file>', help='path to an option input pickle binary file storing test MRI signals as a numpy matrix (rows: voxels; columns: measurements)')
parser.add_argument('--parmin', metavar='<value>', help='list of lower bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 0.5,0.2,250,0.5 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
parser.add_argument('--parmax', metavar='<value>', help='list of upper bounds of tissue parameters. Entries corresponding to different parameters should be separated by a comma (for example: 2.4,0.9,3000,5.0 for model br_sirsmdt). Tissue parameters are: model "pr_hybriddwi", parameters vl, v s.t. ve=(1-vl)*v, Dl, De, Ds, t2l, t2e, t2s, s0, where l/e/stroma stands for lumen/epithelium/stroma; model "br_sirsmdt", parameters dpar, kperp s.t. dperp=kperp*dpar, t1, s0; model "twocompdwite", parameters v, Da, t2a, Db, Kb, t2b, s0, where a and b indicate compartments a and b. If not specified, default tissue parameter ranges are used.')
args = parser.parse_args()
oepoch)
lrate = float(args.lrate)
seed = int(args.seed)
nwork = int(args.nwork)
mrimodel = args.mri_model
**************************************************')
print(' TRAIN A qMRI-NET (qmripar CLASS) ')
print('********************************************************************')
print('')
print('** Input training MRI signals: {}'.format(args.sig_train))
print('** Input training tissue parameters: {}'.format(args.param_train))
print('** Input validation MRI signals: {}'.format(args.sig_val))
print('** Input validation tissue parameters: {}'.format(args.param_val))
if args.dtest is not None:
print('** Input test MRI signals: {}'.format(args.dtest))
h)
fh.close()
nvox_train = datatrain.shape[0]
nmeas_train = datatrain.shape[1]
lose()
nvox_val = dataval.shape[0]
if dataval.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the validation set differs from the training set!')
test,'rb')
datatest = np.float32(pk.load(fh))
fh.close()
if datatest.shape[1]!=datatrain.shape[1]:
raise RuntimeError('the number of MRI measurements in the test set differs from the training set!')
ain = prmtrain.shape[1]
fh.close()
if prmtrain.shape[0]!=datatrain.shape[0]:
raise RuntimeError('the number of voxels in the training parameters differs from the training MRI signals!')
prmval.shape[0]!=dataval.shape[0]:
raise RuntimeError('the number of voxels in the validation parameters differs from the validation MRI signals!')
if prmval.shape[1]!=prmtrain.shape[1]:
raise RuntimeError('the number of validation parameters differs from the number of training parameters!')
shape[0]) / 80.0)
else:
mbatch = int(args.mbatch)
if (mbatch>datatrain.shape[0]):
mbatch = datatrain.shape[0]
if(mbatch<2):
mbatch = int(2)
prot)
except:
raise RuntimeError('the format of the MRI protocol is not understood!')
t') and (mrimodel!='twocompdwite') ):
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
if (mrimodel=='pr_hybriddwi'):
s0idx = 8
elif (mrimodel=='br_sirsmdt'):
s0idx = 3
elif (mrimodel=='twocompdwite'):
s0idx = 6
9
elif (mrimodel=='br_sirsmdt'):
npars = 4
elif (mrimodel=='twocompdwite'):
npars = 7
else:
raise RuntimeError('the chosen MRI model is not implemented. Sorry!')
nhidden = np.array([int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars)])
nhidden_str = '{}-{}-{}'.format( int(nmeas_train) , int(float(npars)+0.5*( float(nmeas_train) - float(npars))) , int(npars) )
else:
nhidden = (args.nn).split('-')
nhidden = np.array( list(map( int,nhidden )) )
nhidden_str = args.nn
s None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
if (args.parmax is not None) and (args.parmin is None):
raise RuntimeError('you need to set both parmin and parmax options simultaneously')
pminbound = (args.parmin).split(',')
pminbound = np.array( list(map( float, pminbound )) )
pmaxbound = (args.parmax).split(',')
pmaxbound = np.array( list(map( float, pmaxbound )) )
{}_mbatch{}_seed{}'.format(args.out_base,nhidden_str,pdrop,noepoch,lrate,mbatch,seed)
if(os.path.isdir(out_base_dir)==False):
os.mkdir(out_base_dir)
int('')
print('')
print('PARAMETERS')
print('')
print('** Hidden neurons: {}'.format(nhidden))
print('** Dropout probability: {}'.format(pdrop))
print('** Number of epochs: {}'.format(noepoch))
print('** Learning rate: {}'.format(lrate))
print('** Number of voxels in a mini-batch: {}'.format(mbatch))
print('** Seed: {}'.format(seed))
print('** Number of workers for data loader: {}'.format(nwork))
manual_seed(seed)
( datatrain / max_val_train )
max_val_val = np.transpose( matlib.repmat(np.max(dataval,axis=1),nmeas_train,1) )
dataval = np.float32( dataval / max_val_val )
if args.dtest is not None:
max_val_test = np.transpose( matlib.repmat(np.max(datatest,axis=1),nmeas_train,1) )
datatest = np.float32( datatest / max_val_test )
prmtrain = np.float32(prmtrain)
prmval = np.float32(prmval)
rkers=nwork)
obatch+1
losstrain = np.zeros((noepoch,nobatch)) + np.nan
lossval = np.zeros((noepoch,1)) + np.nan
pmaxbound)
print('** Tissue parameter names: {}'.format(nnet.param_name))
print('** Tissue parameter lower bounds: {}'.format(nnet.param_min))
print('** Tissue parameter upper bounds: {}'.format(nnet.param_max))
print('')
print('')
nnetloss = nn.MSELoss()
nnetopt = torch.optim.Adam(nnet.parameters(), lr=lrate)
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch0_net.pth') )
nnet_file = open(os.path.join(out_base_dir,'epoch0_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
pp in range(0,npar_train):
slope_norm_tr[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_tr[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_tr = Tensor(np.float32(slope_norm_tr))
offset_norm_tr = Tensor(np.float32(offset_norm_tr))
slope_norm_val = np.ones((nvox_val , npar_train))
offset_norm_val = np.ones((nvox_val , npar_train))
for pp in range(0,npar_train):
slope_norm_val[:,pp] = 1.0 / (nnet.param_max[pp] - nnet.param_min[pp])
offset_norm_val[:,pp] = (-1.0*nnet.param_min[pp]) / (nnet.param_max[pp] - nnet.param_min[pp])
slope_norm_val = Tensor(np.float32(slope_norm_val))
offset_norm_val = Tensor(np.float32(offset_norm_val))
or epoch in range(noepoch):
print(' EPOCH {}/{}'.format(epoch+1,noepoch))
print('')
minibatch_id = 0
for signals in loadertrain:
output = nnet( Tensor(signals[:,0:nmeas_train]) )
try:
lossmeas_train = nnetloss(Tensor(output)*slope_norm_tr + offset_norm_tr, Tensor(signals[:,nmeas_train:nmeas_train+npar_train])*slope_norm_tr + offset_norm_tr)
except:
raise RuntimeError('The number of training voxels must be a multiple of the size of the mini-batch!')
nnetopt.zero_grad()
lossmeas_train.backward()
nnetopt.step()
losstrain[epoch,minibatch_id] = Tensor.numpy(lossmeas_train.data)
minibatch_id = minibatch_id + 1
nnet.eval()
tissueval_nnet = nnet( Tensor(dataval) )
dataval_nnet = nnet.getsignals( Tensor(tissueval_nnet) )
dataval_nnet = dataval_nnet.detach().numpy()
max_val_val_out = np.transpose( matlib.repmat(np.max(dataval_nnet,axis=1),nmeas_train,1) )
lossmeas_val = nnetloss( Tensor(tissueval_nnet)*slope_norm_val + offset_norm_val , Tensor(prmval)*slope_norm_val + offset_norm_val )
lossval[epoch,0] = Tensor.numpy(lossmeas_val.data)
if(Tensor.numpy(lossmeas_val.data)<=loss_val_prev):
print(' ... validation loss has decreased. Saving net...')
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'lossvalmin_net.pth') )
nnet_file = open(os.path.join(out_base_dir,'lossvalmin_net.bin'),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
nnet_text = open(os.path.join(out_base_dir,'lossvalmin.info'),'w')
nnet_text.write('Epoch {} (indices starting from 0)'.format(epoch));
nnet_text.close();
loss_val_prev = Tensor.numpy(lossmeas_val.data)
tissueval_nnet = tissueval_nnet.detach().numpy()
tissueval_nnet[:,s0idx] = (max_val_val[:,0]/max_val_val_out[:,0])*tissueval_nnet[:,s0idx]
tissueval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissueval.bin'),'wb')
pk.dump(tissueval_nnet,tissueval_nnet_file,pk.HIGHEST_PROTOCOL)
tissueval_nnet_file.close()
dataval_nnet = (max_val_val/max_val_val_out)*dataval_nnet
dataval_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigval.bin'),'wb')
pk.dump(dataval_nnet,dataval_nnet_file,pk.HIGHEST_PROTOCOL)
dataval_nnet_file.close()
if args.dtest is not None:
tissuetest_nnet = nnet( Tensor(datatest) )
datatest_nnet = nnet.getsignals( Tensor(tissuetest_nnet) )
datatest_nnet = datatest_nnet.detach().numpy()
max_val_test_out = np.transpose( matlib.repmat(np.max(datatest_nnet,axis=1),nmeas_train,1) )
tissuetest_nnet = tissuetest_nnet.detach().numpy()
tissuetest_nnet[:,s0idx] = (max_val_test[:,0]/max_val_test_out[:,0])*tissuetest_nnet[:,s0idx]
tissuetest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_tissuetest.bin'),'wb')
pk.dump(tissuetest_nnet,tissuetest_nnet_file,pk.HIGHEST_PROTOCOL)
tissuetest_nnet_file.close()
datatest_nnet = (max_val_test/max_val_test_out)*datatest_nnet
datatest_nnet_file = open(os.path.join(out_base_dir,'lossvalmin_sigtest.bin'),'wb')
pk.dump(datatest_nnet,datatest_nnet_file,pk.HIGHEST_PROTOCOL)
datatest_nnet_file.close()
nnet.train()
print('')
print(' TRAINING INFO:')
print(' Trainig loss: {:.12f}; validation loss: {:.12f}'.format(Tensor.numpy(lossmeas_train.data), Tensor.numpy(lossmeas_val.data)) )
print('')
nnet.eval()
torch.save( nnet.state_dict(), os.path.join(out_base_dir,'epoch{}_net.pth'.format(noepoch)) )
nnet_file = open(os.path.join(out_base_dir,'epoch{}_net.bin'.format(noepoch)),'wb')
pk.dump(nnet,nnet_file,pk.HIGHEST_PROTOCOL)
nnet_file.close()
losstrain_file = open(os.path.join(out_base_dir,'losstrain.bin'),'wb')
pk.dump(losstrain,losstrain_file,pk.HIGHEST_PROTOCOL)
losstrain_file.close()
lossval_file = open(os.path.join(out_base_dir,'lossval.bin'),'wb')
pk.dump(lossval,lossval_file,pk.HIGHEST_PROTOCOL)
lossval_file.close()
np.savetxt(os.path.join(out_base_dir,'lossval_min.txt'), [np.nanmin(lossval)], fmt='%.12f', delimiter=' ')
| true | true |
f71442b16a12f46a840756d2038ff554248234be | 9,869 | py | Python | ansible/lib/ansible/modules/core/cloud/google/gce_pd.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/core/cloud/google/gce_pd.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/core/cloud/google/gce_pd.py | kiv-box/redis | 966a0c3f0a51282cd173b42a6e249d23f4e89dec | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
credentials_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError as e:
module.fail_json(msg=str(e.value), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| 32.251634 | 95 | 0.616375 |
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
# libcloud is an optional dependency: record its availability instead of
# failing at import time, so main() can emit a clean fail_json message.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceNotFoundError, ResourceInUseError
    _ = Provider.GCE  # probe that this libcloud build knows the GCE provider
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False
def main():
    """Ansible entry point: converge a GCE persistent disk to ``state``.

    Creates the disk if needed, attaches/detaches it to/from an instance,
    or deletes it, then reports the result through exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            credentials_file = dict(),
            project_id = dict(),
        )
    )
    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
    gce = gce_connect(module)
    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)
    disk = inst = None
    changed = is_attached = False
    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name
    if instance_name:
        # Best-effort lookup of the instance and whether the disk is already
        # attached; a missing instance is only an error for states that need
        # it, which is checked further below.
        try:
            inst = gce.ex_get_node(instance_name, zone)
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except Exception:
            # was a bare "except:", which also swallowed SystemExit and
            # KeyboardInterrupt; keep the best-effort behaviour otherwise
            pass
    # Look up the disk itself; "not found" is a normal state here.
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        pass
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    if state in ['active', 'present']:
        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        # Normalize size_gb to a whole positive number of GB.
        try:
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise ValueError(size_gb)  # rejected just below
        except (ValueError, TypeError, OverflowError):
            # previously a bare "raise Exception" + bare "except:"
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                             changed=False)
        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)
        if not disk:
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                pass  # another actor created it concurrently; treat as ok
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                                 changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError as e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception as e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
    json_output['changed'] = changed
    module.exit_json(**json_output)
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| true | true |
f71442ba66bcddc2b3b52f67bbd9823def89ad03 | 476 | py | Python | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py | SDGraph/Hacktoberfest2k21 | 8f8aead15afa10ea12e1b23ece515a10a882de28 | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py | SDGraph/Hacktoberfest2k21 | 8f8aead15afa10ea12e1b23ece515a10a882de28 | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/Basics/Exercise/13_read_write_files/exercise_2_stocks.py | SDGraph/Hacktoberfest2k21 | 8f8aead15afa10ea12e1b23ece515a10a882de28 | [
"MIT"
] | null | null | null | with open("stocks.csv", "r") as f, open("output.csv", "w") as out:
out.write("Company Name,PE Ratio, PB Ratio\n")
next(f) # This will skip first line in the file which is a header
for line in f:
tokens = line.split(",")
stock = tokens[0]
price = float(tokens[1])
eps = float(tokens[2])
book = float(tokens[3])
pe = round(price / eps, 2)
pb = round(price / book, 2)
out.write(f"{stock},{pe},{pb}\n")
| 36.615385 | 70 | 0.546218 | with open("stocks.csv", "r") as f, open("output.csv", "w") as out:
out.write("Company Name,PE Ratio, PB Ratio\n")
next(f)
for line in f:
tokens = line.split(",")
stock = tokens[0]
price = float(tokens[1])
eps = float(tokens[2])
book = float(tokens[3])
pe = round(price / eps, 2)
pb = round(price / book, 2)
out.write(f"{stock},{pe},{pb}\n")
| true | true |
f71442ee9da45672024ed542f4f081204ce1ee75 | 4,356 | py | Python | redash/utils/parameterized_query.py | quanpower/redash | 2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857 | [
"BSD-2-Clause"
] | 1 | 2021-01-20T18:57:12.000Z | 2021-01-20T18:57:12.000Z | redash/utils/parameterized_query.py | quanpower/redash | 2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857 | [
"BSD-2-Clause"
] | null | null | null | redash/utils/parameterized_query.py | quanpower/redash | 2a37cb31d95703c239e1edf3d3d9e0f9c2eaf857 | [
"BSD-2-Clause"
] | null | null | null | import pystache
from functools import partial
from flask_login import current_user
from redash.authentication.org_resolving import current_org
from numbers import Number
from redash import models
from redash.utils import mustache_render, json_loads
from redash.permissions import require_access, view_only
from funcy import distinct
from dateutil.parser import parse
def _pluck_name_and_value(default_column, row):
row = {k.lower(): v for k, v in row.items()}
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
return {"name": row[name_column], "value": row[value_column]}
def _load_result(query_id):
    """Return the latest stored result of query *query_id* as a dict.

    Resolves the query within the current organization, enforces that the
    current user has at least view-only access to its data source, and
    deserializes the most recent QueryResult payload.
    """
    query = models.Query.get_by_id_and_org(query_id, current_org)
    require_access(query.data_source.groups, current_user, view_only)
    query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
    return json_loads(query_result.data)
def dropdown_values(query_id):
    """Dropdown options built from a query's rows; the first result column
    is the fallback for missing "name"/"value" columns."""
    data = _load_result(query_id)
    default_column = data["columns"][0]["name"]
    return map(lambda row: _pluck_name_and_value(default_column, row),
               data["rows"])
def _collect_key_names(nodes):
    """Recursively gather template variable names from a parsed mustache tree.

    Walks pystache's internal parse tree (private API), collecting the keys
    of escape nodes ({{var}}) and section nodes ({{#sec}}...{{/sec}}),
    including keys nested inside section bodies.  Duplicates are removed
    while preserving first-seen order via funcy.distinct.
    """
    keys = []
    for node in nodes._parse_tree:
        if isinstance(node, pystache.parser._EscapeNode):
            keys.append(node.key)
        elif isinstance(node, pystache.parser._SectionNode):
            keys.append(node.key)
            # a section's body may reference further keys
            keys.extend(_collect_key_names(node.parsed))
    return distinct(keys)
def _collect_query_parameters(query):
    """Return the distinct mustache parameter names referenced by *query*."""
    parsed = pystache.parse(query)
    return _collect_key_names(parsed)
def _parameter_names(parameter_values):
names = []
for key, value in parameter_values.iteritems():
if isinstance(value, dict):
for inner_key in value.keys():
names.append(u'{}.{}'.format(key, inner_key))
else:
names.append(key)
return names
def _is_date(string):
    """True when *string* can be parsed as a date/time by dateutil."""
    try:
        parse(string)
    except ValueError:
        return False
    return True
def _is_date_range(obj):
    """True when *obj* maps both "start" and "end" to parseable dates."""
    try:
        return all(_is_date(obj[bound]) for bound in ("start", "end"))
    except (KeyError, TypeError):
        # missing key, or obj is not a mapping at all
        return False
class ParameterizedQuery(object):
    """A mustache query template plus the parameter values applied to it.

    ``template`` is the raw query text containing {{placeholders}};
    ``schema`` is an optional list of parameter definitions (dicts with at
    least "name" and "type") used to validate values passed to apply().
    NOTE: written for Python 2 (relies on ``iteritems``/``basestring``).
    """
    def __init__(self, template, schema=None):
        self.schema = schema or []
        self.template = template
        # the rendered query text; starts out identical to the template
        self.query = template
        self.parameters = {}
    def apply(self, parameters):
        """Validate *parameters*, merge them in, re-render, and return self.

        Raises InvalidParameterError naming every value that fails its
        schema definition; on success the rendered text is in ``query``.
        """
        invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]
        if invalid_parameter_names:
            raise InvalidParameterError(invalid_parameter_names)
        else:
            self.parameters.update(parameters)
            self.query = mustache_render(self.template, self.parameters)
        return self
    def _valid(self, name, value):
        """Check one value against the schema definition for *name*."""
        # with no schema, every value is accepted
        if not self.schema:
            return True
        definition = next((definition for definition in self.schema if definition["name"] == name), None)
        if not definition:
            return False
        # per-type predicates; "query" checks against a dropdown query's rows
        validators = {
            "text": lambda value: isinstance(value, basestring),
            "number": lambda value: isinstance(value, Number),
            "enum": lambda value: value in definition["enumOptions"],
            "query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
            "date": _is_date,
            "datetime-local": _is_date,
            "datetime-with-seconds": _is_date,
            "date-range": _is_date_range,
            "datetime-range": _is_date_range,
            "datetime-range-with-seconds": _is_date_range,
        }
        # unknown parameter types never validate
        validate = validators.get(definition["type"], lambda x: False)
        return validate(value)
    @property
    def missing_params(self):
        """Names referenced by the template but not yet supplied to apply()."""
        query_parameters = set(_collect_query_parameters(self.template))
        return set(query_parameters) - set(_parameter_names(self.parameters))
    @property
    def text(self):
        """The rendered query text."""
        return self.query
class InvalidParameterError(Exception):
    """Raised when supplied parameter values fail their schema definitions."""

    def __init__(self, parameters):
        joined = u", ".join(parameters)
        msg = u"The following parameter values are incompatible with their definitions: {}".format(joined)
        super(InvalidParameterError, self).__init__(msg)
| 31.565217 | 125 | 0.672635 | import pystache
from functools import partial
from flask_login import current_user
from redash.authentication.org_resolving import current_org
from numbers import Number
from redash import models
from redash.utils import mustache_render, json_loads
from redash.permissions import require_access, view_only
from funcy import distinct
from dateutil.parser import parse
def _pluck_name_and_value(default_column, row):
row = {k.lower(): v for k, v in row.items()}
name_column = "name" if "name" in row.keys() else default_column.lower()
value_column = "value" if "value" in row.keys() else default_column.lower()
return {"name": row[name_column], "value": row[value_column]}
def _load_result(query_id):
query = models.Query.get_by_id_and_org(query_id, current_org)
require_access(query.data_source.groups, current_user, view_only)
query_result = models.QueryResult.get_by_id_and_org(query.latest_query_data_id, current_org)
return json_loads(query_result.data)
def dropdown_values(query_id):
data = _load_result(query_id)
first_column = data["columns"][0]["name"]
pluck = partial(_pluck_name_and_value, first_column)
return map(pluck, data["rows"])
def _collect_key_names(nodes):
keys = []
for node in nodes._parse_tree:
if isinstance(node, pystache.parser._EscapeNode):
keys.append(node.key)
elif isinstance(node, pystache.parser._SectionNode):
keys.append(node.key)
keys.extend(_collect_key_names(node.parsed))
return distinct(keys)
def _collect_query_parameters(query):
nodes = pystache.parse(query)
keys = _collect_key_names(nodes)
return keys
def _parameter_names(parameter_values):
names = []
for key, value in parameter_values.iteritems():
if isinstance(value, dict):
for inner_key in value.keys():
names.append(u'{}.{}'.format(key, inner_key))
else:
names.append(key)
return names
def _is_date(string):
try:
parse(string)
return True
except ValueError:
return False
def _is_date_range(obj):
try:
return _is_date(obj["start"]) and _is_date(obj["end"])
except (KeyError, TypeError):
return False
class ParameterizedQuery(object):
def __init__(self, template, schema=None):
self.schema = schema or []
self.template = template
self.query = template
self.parameters = {}
def apply(self, parameters):
invalid_parameter_names = [key for (key, value) in parameters.iteritems() if not self._valid(key, value)]
if invalid_parameter_names:
raise InvalidParameterError(invalid_parameter_names)
else:
self.parameters.update(parameters)
self.query = mustache_render(self.template, self.parameters)
return self
def _valid(self, name, value):
if not self.schema:
return True
definition = next((definition for definition in self.schema if definition["name"] == name), None)
if not definition:
return False
validators = {
"text": lambda value: isinstance(value, basestring),
"number": lambda value: isinstance(value, Number),
"enum": lambda value: value in definition["enumOptions"],
"query": lambda value: value in [v["value"] for v in dropdown_values(definition["queryId"])],
"date": _is_date,
"datetime-local": _is_date,
"datetime-with-seconds": _is_date,
"date-range": _is_date_range,
"datetime-range": _is_date_range,
"datetime-range-with-seconds": _is_date_range,
}
validate = validators.get(definition["type"], lambda x: False)
return validate(value)
@property
def missing_params(self):
query_parameters = set(_collect_query_parameters(self.template))
return set(query_parameters) - set(_parameter_names(self.parameters))
@property
def text(self):
return self.query
class InvalidParameterError(Exception):
def __init__(self, parameters):
message = u"The following parameter values are incompatible with their definitions: {}".format(", ".join(parameters))
super(InvalidParameterError, self).__init__(message)
| true | true |
f7144329ccafee0fa6d6b0aae0ee85c8503eceb0 | 13,247 | py | Python | codigo/process_datos_abiertos.py | Morisset/Mexico-datos | 29d5ed1079732d5d809bc14eb5d3438662508728 | [
"MIT"
] | null | null | null | codigo/process_datos_abiertos.py | Morisset/Mexico-datos | 29d5ed1079732d5d809bc14eb5d3438662508728 | [
"MIT"
] | null | null | null | codigo/process_datos_abiertos.py | Morisset/Mexico-datos | 29d5ed1079732d5d809bc14eb5d3438662508728 | [
"MIT"
] | null | null | null | import os
import csv
import pandas as pd
import geopandas as gpd
from datetime import datetime, timedelta
## PROCESSING FUNCTIONS ##
def confirmados_diarios_por_estado(datos, entidades):
    """Time series of new confirmed COVID-19 cases per day and state.

    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new confirmed cases, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    confirmados = datos.loc[datos['RESULTADO'] == 1]
    series = confirmados.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def negativos_diarios_por_estado(datos, entidades):
    """Time series of new negative COVID-19 test results per day and state.

    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new negative tests, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    negativos = datos.loc[datos['RESULTADO'] == 2]
    series = negativos.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def pruebas_pendientes_diarias_por_estado(datos, entidades):
    """Time series of new pending test results per day and state.

    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new pending tests, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    pendientes = datos.loc[datos['RESULTADO'] == 3]
    series = pendientes.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def pruebas_totales_diarias_por_estado(datos, entidades):
    """Time series of total tests performed per day and state.

    No filter on RESULTADO: every record counts, whatever its outcome.
    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily total tests, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    series = datos.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def defunciones_diarias_por_estado(datos, entidades):
    """Time series of new COVID-19 deaths per day (date of death) and state.

    Confirmed cases only; the sentinel date '9999-99-99' in FECHA_DEF is
    excluded before grouping.
    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily deaths, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    es_defuncion = (datos['RESULTADO'] == 1) & (datos['FECHA_DEF'] != '9999-99-99')
    series = datos.loc[es_defuncion].groupby(['ENTIDAD_UM', 'FECHA_DEF'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def hospitalizados_diarios_por_estado(datos, entidades):
    """Time series of newly hospitalized confirmed patients per day and state.

    TIPO_PACIENTE == 2 covers every inpatient, so this series includes
    both ICU and non-ICU hospitalizations.
    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new hospitalizations, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    es_hospitalizado = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 2)
    series = datos.loc[es_hospitalizado].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def ambulatorios_diarios_por_estado(datos, entidades):
    """Time series of new ambulatory (outpatient) confirmed cases per day and state.

    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new outpatient cases, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    es_ambulatorio = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 1)
    series = datos.loc[es_ambulatorio].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
def uci_diarios_por_estado(datos, entidades):
    """Time series of confirmed patients newly admitted to an ICU per day and state.

    Input:
        - datos: Mexican open COVID-19 data published at [1].
        - entidades: mapping of state key (clave) to state name.
    Output:
        - tidy DataFrame of daily new ICU admissions, one column per state.
    [1]: https://www.gob.mx/salud/documentos/datos-abiertos-152127
    """
    es_uci = (datos['RESULTADO'] == 1) & (datos['UCI'] == 1)
    series = datos.loc[es_uci].groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])['ORIGEN'].count()
    return get_formato_series(series, entidades)
## HELPER FUNCTIONS ##
def get_formato_series(series, entidades):
    """Pivot a (state, date) groupby Series into a tidy time-series table.

    Input:
        - series: Series with a two-level index (state key, date string),
          e.g. the output of groupby(['ENTIDAD_UM', <fecha>]).count().
        - entidades: dict mapping state key -> state name.
    Output:
        - DataFrame indexed by consecutive dates (index named 'Fecha');
          first column 'Nacional' holds the national total, followed by
          the states in alphabetical order.  Days without records are
          filled with zeros.
    """
    # repo-specific state names that differ from the official catalogue
    renombres = {'Ciudad De México': 'Ciudad de México',
                 'Coahuila De Zaragoza': 'Coahuila',
                 'Michoacán De Ocampo': 'Michoacán',
                 'Veracruz De Ignacio De La Llave': 'Veracruz'}
    # pivot: rows become dates, columns become state keys
    tabla = series.unstack(level=0).fillna(0).astype('int')
    tabla.index = pd.to_datetime(tabla.index)
    tabla = tabla.rename(columns=entidades).rename(columns=renombres)
    tabla = tabla.reindex(columns=sorted(tabla.columns))
    # national aggregate goes first
    tabla.insert(0, 'Nacional', tabla.sum(axis=1))
    # zero-fill any calendar day without records
    todas_las_fechas = pd.date_range(tabla.index.min(), tabla.index.max())
    tabla = tabla.reindex(todas_las_fechas, fill_value=0)
    tabla.index.name = 'Fecha'
    return tabla
if __name__ == '__main__':
    # NOTE(review): timestamp shifted by -6h (presumably UTC -> Mexico City
    # time) and the input file is stamped with yesterday's date -- confirm
    # against the job that produces the raw zip.
    update_time = datetime.now() - timedelta(hours=6)
    date = datetime.now() - timedelta(days=1)
    date_filename = date.strftime('%Y%m%d')
    date_iso = date.strftime('%Y-%m-%d')
    # Repository layout (the script runs from inside codigo/).
    repo = '..'
    dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')
    dir_datos = os.path.join(repo, 'datos', '')
    dir_geo = os.path.join(dir_datos, 'geograficos', '')
    dir_demograficos = os.path.join(dir_datos, 'demograficos_variables', '')
    dir_series_dge = os.path.join(dir_datos_abiertos, 'series_de_tiempo', '')
    dir_series = os.path.join(dir_datos, 'series_de_tiempo', '')
    dir_input = os.path.join(dir_datos_abiertos, 'raw', '')
    input_filename = dir_input + f'datos_abiertos_{date_filename}.zip'
    ## READING ##
    # Read the day's open-data dump (zipped csv).
    datos_abiertos_df = pd.read_csv(input_filename, compression='zip')
    # Read the state catalogue (sheet 'Catálogo de ENTIDADES' of
    # 'diccionario_datos/Catalogos_0412.xlsx', previously converted to csv).
    cat = (pd.read_csv(dir_input + 'diccionario_datos/catalogo_entidades.csv')
           .set_index('CLAVE_ENTIDAD')['ENTIDAD_FEDERATIVA']
           .to_dict())
    # Switch the all-caps official state names to title case.
    entidades = {key: val.title() for (key, val) in cat.items()}
    # One output file per processing function, in matching order
    # (dfs[i] below is produced by funciones[i] and written to files[i]).
    files = ['covid19_mex_confirmados.csv',
             'covid19_mex_negativos.csv',
             'covid19_mex_pendientes.csv',
             'covid19_mex_pruebas-totales.csv',
             'covid19_mex_muertes.csv',
             'covid19_mex_hospitalizados.csv',
             'covid19_mex_uci.csv',
             'covid19_mex_ambulatorios.csv']
    funciones = [confirmados_diarios_por_estado,
                 negativos_diarios_por_estado,
                 pruebas_pendientes_diarias_por_estado,
                 pruebas_totales_diarias_por_estado,
                 defunciones_diarias_por_estado,
                 hospitalizados_diarios_por_estado,
                 uci_diarios_por_estado,
                 ambulatorios_diarios_por_estado]
    dfs = [func(datos_abiertos_df, entidades) for func in funciones]
    # Daily series go to nuevos/, their running totals to acumulados/.
    for f, df in zip(files, dfs):
        df.to_csv(f'{dir_series_dge}/nuevos/{f}')
        df.cumsum().to_csv(f'{dir_series_dge}/acumulados/{f}')
    ## Static time series (only the last row is appended to each file) ##
    # unix-style csv dialect without quoting
    csv.register_dialect('unixnq', delimiter=',', lineterminator='\n',
                         quoting=csv.QUOTE_NONE)
    # Cumulative confirmed cases per state
    totales_file = dir_series + 'covid19_mex_casos_totales.csv'
    fila_totales = dfs[0].cumsum().tail(1)  # confirmados_diarios_por_estado
    with open(totales_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_totales.values[0].tolist())
    # New cases in the last 24h: difference of the last two cumulative rows
    nuevos_file = dir_series + 'covid19_mex_casos_nuevos.csv'
    totales_df = pd.read_csv(totales_file)
    fila_nuevos = (totales_df.iloc[-1, 1:] - totales_df.iloc[-2, 1:]).astype(int)
    with open(nuevos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_nuevos.values.tolist())  # a series
    # Cumulative deaths per state
    muertes_file = dir_series + 'covid19_mex_muertes.csv'
    fila_muertes = dfs[4].cumsum().tail(1)  # defunciones_diarias_por_estado
    with open(muertes_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_muertes.values[0].tolist())
    # New deaths per state (last two cumulative rows)
    muertes_nuevas_file = dir_series + 'covid19_mex_muertes_nuevas.csv'
    muertes_df = pd.read_csv(muertes_file)
    fila_nuevas = (muertes_df.iloc[-1, 1:] - muertes_df.iloc[-2, 1:]).astype(int)
    with open(muertes_nuevas_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_nuevas.values.tolist())  # a series
    # Cumulative suspected (pending) cases per state
    sospechosos_file = dir_series + 'covid19_mex_sospechosos.csv'
    # pruebas_pendientes_diarias_por_estado
    fila_sospechosos = dfs[2].cumsum().tail(1)
    with open(sospechosos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_sospechosos.values[0].tolist())
    # Cumulative negatives per state (comment previously mislabeled "Sospechosos")
    negativos_file = dir_series + 'covid19_mex_negativos.csv'
    fila_negativos = dfs[1].cumsum().tail(1)  # negativos_diarios_por_estado
    with open(negativos_file, 'a') as f:
        writer = csv.writer(f, 'unixnq')
        writer.writerow([date_iso] + fila_negativos.values[0].tolist())
    ## Per-state totals written into the geojson map file ##
    geojson_file = dir_geo + 'mexico.geojson'
    edos_hoy_file = dir_datos + 'estados_hoy.csv'
    updated_file = dir_datos + 'last_updated.csv'
    gdf = gpd.read_file(geojson_file).set_index('name')
    # NOTE(review): attribute-style assignment only updates columns that
    # already exist in the geojson; verify the file always ships them.
    gdf.totales = fila_totales.drop('Nacional', axis=1).squeeze()
    gdf.nuevos = fila_nuevos.drop('Nacional').squeeze()  # series
    gdf.muertes = fila_muertes.drop('Nacional', axis=1).squeeze()
    gdf.muertes_nuevas = fila_nuevas.drop('Nacional').squeeze()  # series
    gdf.sospechosos = fila_sospechosos.drop('Nacional', axis=1).squeeze()
    gdf.negativos = fila_negativos.drop('Nacional', axis=1).squeeze()
    gdf.totales_100k = gdf.totales * 100000 / gdf.population
    gdf.muertes_100k = gdf.muertes * 100000 / gdf.population
    gdf.updated_at = str(update_time).replace(' ', 'T')
    gdf = gdf.reset_index()
    # sanity check on the expected geojson schema before overwriting it
    assert gdf.shape[1] == 14
    gdf.to_file(geojson_file, driver='GeoJSON')
    gdf.loc[0:0, ['updated_at']].to_csv(updated_file, index=False)
    ### Per-state summary for today ###
    cols_edos_hoy = ['name', 'totales', 'nuevos',
                     'muertes', 'muertes_nuevas', 'sospechosos', 'negativos']
    map_cols = {'name': 'Estado',
                'totales': 'Confirmados totales',
                'nuevos': 'Confirmados nuevos',
                'muertes': 'Defunciones',
                'muertes_nuevas': 'Defunciones nuevas',
                'sospechosos': 'Sospechosos totales',
                'negativos': 'Negativos totales'}
    edos_hoy_df = gdf[cols_edos_hoy].rename(columns=map_cols)
    edos_hoy_df.to_csv(edos_hoy_file, index=False)
    print(f'Se procesaron exitosamente los datos abiertos de {input_filename}')
| 35.802703 | 84 | 0.658111 | import os
import csv
import pandas as pd
import geopandas as gpd
from datetime import datetime, timedelta
r_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 1]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def negativos_diarios_por_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 2]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def pruebas_pendientes_diarias_por_estado(datos, entidades):
series = (datos[datos['RESULTADO'] == 3]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def pruebas_totales_diarias_por_estado(datos, entidades):
series = (datos
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def defunciones_diarias_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['FECHA_DEF'] != '9999-99-99')
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_DEF'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def hospitalizados_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 2)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def ambulatorios_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['TIPO_PACIENTE'] == 1)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
def uci_diarios_por_estado(datos, entidades):
idx = (datos['RESULTADO'] == 1) & (datos['UCI'] == 1)
series = (datos[idx]
.groupby(['ENTIDAD_UM', 'FECHA_INGRESO'])
.count()['ORIGEN'])
return get_formato_series(series, entidades)
(series, entidades):
diccionario_cambio_edos = {'Ciudad De México': 'Ciudad de México',
'Coahuila De Zaragoza': 'Coahuila',
'Michoacán De Ocampo': 'Michoacán',
'Veracruz De Ignacio De La Llave': 'Veracruz'}
series = series.unstack(level=0).fillna(0).astype('int')
series.index.name = 'Fecha'
series.index = pd.to_datetime(series.index)
series = series.rename(columns=entidades)
series = series.rename(columns=diccionario_cambio_edos)
series = series.reindex(sorted(series.columns), axis=1)
series.loc[:, 'Nacional'] = series.sum(axis=1)
cols = list(series.columns)
cols = cols[-1:] + cols[:-1]
series = series[cols]
idx = pd.date_range(series.index.min(), series.index.max())
series = series.reindex(idx, fill_value=0)
series.index.name = 'Fecha'
return series
if __name__ == '__main__':
update_time = datetime.now() - timedelta(hours=6)
date = datetime.now() - timedelta(days=1)
date_filename = date.strftime('%Y%m%d')
date_iso = date.strftime('%Y-%m-%d')
repo = '..'
dir_datos_abiertos = os.path.join(repo, 'datos_abiertos', '')
dir_datos = os.path.join(repo, 'datos', '')
dir_geo = os.path.join(dir_datos, 'geograficos', '')
dir_demograficos = os.path.join(dir_datos, 'demograficos_variables', '')
dir_series_dge = os.path.join(dir_datos_abiertos, 'series_de_tiempo', '')
dir_series = os.path.join(dir_datos, 'series_de_tiempo', '')
dir_input = os.path.join(dir_datos_abiertos, 'raw', '')
input_filename = dir_input + f'datos_abiertos_{date_filename}.zip'
s_abiertos_df = pd.read_csv(input_filename, compression='zip')
cat = (pd.read_csv(dir_input + 'diccionario_datos/catalogo_entidades.csv')
.set_index('CLAVE_ENTIDAD')['ENTIDAD_FEDERATIVA']
.to_dict())
# cambia mayúsculas de estados por formato título
entidades = {key: val.title() for (key, val) in cat.items()}
# Datos abiertos
files = ['covid19_mex_confirmados.csv',
'covid19_mex_negativos.csv',
'covid19_mex_pendientes.csv',
'covid19_mex_pruebas-totales.csv',
'covid19_mex_muertes.csv',
'covid19_mex_hospitalizados.csv',
'covid19_mex_uci.csv',
'covid19_mex_ambulatorios.csv']
funciones = [confirmados_diarios_por_estado,
negativos_diarios_por_estado,
pruebas_pendientes_diarias_por_estado,
pruebas_totales_diarias_por_estado,
defunciones_diarias_por_estado,
hospitalizados_diarios_por_estado,
uci_diarios_por_estado,
ambulatorios_diarios_por_estado]
dfs = [func(datos_abiertos_df, entidades) for func in funciones]
for f, df in zip(files, dfs):
df.to_csv(f'{dir_series_dge}/nuevos/{f}')
df.cumsum().to_csv(f'{dir_series_dge}/acumulados/{f}')
## Series de tiempo estaticas (solo actualiza ultima fila) ##
# Formato unix sin quotes
csv.register_dialect('unixnq', delimiter=',', lineterminator='\n',
quoting=csv.QUOTE_NONE)
# Totales por estado
totales_file = dir_series + 'covid19_mex_casos_totales.csv'
fila_totales = dfs[0].cumsum().tail(1) # confirmados_diarios_por_estado
with open(totales_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_totales.values[0].tolist())
# Casos ultimas 24h
nuevos_file = dir_series + 'covid19_mex_casos_nuevos.csv'
totales_df = pd.read_csv(totales_file)
fila_nuevos = (totales_df.iloc[-1, 1:] - totales_df.iloc[-2, 1:]).astype(int)
with open(nuevos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_nuevos.values.tolist()) # a series
# Muertes por estado
muertes_file = dir_series + 'covid19_mex_muertes.csv'
fila_muertes = dfs[4].cumsum().tail(1) # defunciones_diarias_por_estado
with open(muertes_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_muertes.values[0].tolist())
# Muertes nuevas por estado
muertes_nuevas_file = dir_series + 'covid19_mex_muertes_nuevas.csv'
muertes_df = pd.read_csv(muertes_file)
fila_nuevas = (muertes_df.iloc[-1, 1:] - muertes_df.iloc[-2, 1:]).astype(int)
with open(muertes_nuevas_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_nuevas.values.tolist()) # a series
# Sospechosos por estado
sospechosos_file = dir_series + 'covid19_mex_sospechosos.csv'
# pruebas_pendientes_diarias_por_estado
fila_sospechosos = dfs[2].cumsum().tail(1)
with open(sospechosos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_sospechosos.values[0].tolist())
# Sospechosos por estado
negativos_file = dir_series + 'covid19_mex_negativos.csv'
fila_negativos = dfs[1].cumsum().tail(1) # negativos_diarios_por_estado
with open(negativos_file, 'a') as f:
writer = csv.writer(f, 'unixnq')
writer.writerow([date_iso] + fila_negativos.values[0].tolist())
## Totales por estado en el archivo geojson ##
geojson_file = dir_geo + 'mexico.geojson'
edos_hoy_file = dir_datos + 'estados_hoy.csv'
updated_file = dir_datos + 'last_updated.csv'
gdf = gpd.read_file(geojson_file).set_index('name')
gdf.totales = fila_totales.drop('Nacional', axis=1).squeeze()
gdf.nuevos = fila_nuevos.drop('Nacional').squeeze() # series
gdf.muertes = fila_muertes.drop('Nacional', axis=1).squeeze()
gdf.muertes_nuevas = fila_nuevas.drop('Nacional').squeeze() # series
gdf.sospechosos = fila_sospechosos.drop('Nacional', axis=1).squeeze()
gdf.negativos = fila_negativos.drop('Nacional', axis=1).squeeze()
gdf.totales_100k = gdf.totales * 100000 / gdf.population
gdf.muertes_100k = gdf.muertes * 100000 / gdf.population
gdf.updated_at = str(update_time).replace(' ', 'T')
gdf = gdf.reset_index()
assert gdf.shape[1] == 14
gdf.to_file(geojson_file, driver='GeoJSON')
gdf.loc[0:0, ['updated_at']].to_csv(updated_file, index=False)
### Estados hoy ###
cols_edos_hoy = ['name', 'totales', 'nuevos',
'muertes', 'muertes_nuevas', 'sospechosos', 'negativos']
map_cols = {'name': 'Estado',
'totales': 'Confirmados totales',
'nuevos': 'Confirmados nuevos',
'muertes': 'Defunciones',
'muertes_nuevas': 'Defunciones nuevas',
'sospechosos': 'Sospechosos totales',
'negativos': 'Negativos totales'}
edos_hoy_df = gdf[cols_edos_hoy].rename(columns=map_cols)
edos_hoy_df.to_csv(edos_hoy_file, index=False)
print(f'Se procesaron exitosamente los datos abiertos de {input_filename}')
| true | true |
f71443471e33b1d928697eb1bc2dc49d6db4519d | 14,277 | py | Python | lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/community/network/plugins/modules/pn_trunk.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
""" PN CLI trunk-create/trunk-delete/trunk-modify """
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
short_description: CLI command to create/delete/modify a trunk.
deprecated:
removed_in: 2.0.0 # was Ansible 2.12
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
type: bool
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
    - Specify the LACP time out as slow (30 seconds) or fast (4 seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
- Specify the LACP fallback mode as bundles or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
type: bool
pn_pause:
description:
- Specify if pause frames are sent.
type: bool
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
type: bool
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
type: bool
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
type: bool
pn_host:
description:
- Host facing port control setting.
type: bool
'''
EXAMPLES = """
- name: Create trunk
community.network.pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: Delete trunk
community.network.pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
    """
    Build the base Netvisor CLI invocation string.

    Reads the optional login credentials and the target switch from the
    module parameters and returns the command prefix that every trunk
    operation is appended to.

    :param module: The Ansible module carrying pn_cliusername,
        pn_clipassword and pn_cliswitch parameters.
    :return: The CLI prefix string for further processing.
    """
    params = module.params
    user = params['pn_cliusername']
    password = params['pn_clipassword']
    switch = params['pn_cliswitch']
    # Credentials are only injected when both halves were supplied;
    # otherwise the CLI runs as the invoking (root) user.
    if user and password:
        base = '/usr/bin/cli --quiet --user %s:%s ' % (user, password)
    else:
        base = '/usr/bin/cli --quiet '
    # 'local' targets the switch we are connected to; any other value is
    # treated as a remote switch name.
    suffix = ' switch-local ' if switch == 'local' else ' switch ' + switch
    return base + suffix
def check_cli(module, cli):
    """
    Idempotency check using the trunk-show command.

    Sets the module-level flag TRUNK_EXISTS to True when a trunk with the
    configured name already exists on the switch, False otherwise.

    :param module: The Ansible module to fetch input parameters.
    :param cli: The CLI prefix string built by pn_cli().
    """
    global TRUNK_EXISTS
    trunk_name = module.params['pn_name']
    show_cmd = shlex.split(
        cli + ' trunk-show format switch,name no-show-headers')
    # run_command() returns (rc, stdout, stderr); only stdout is needed.
    output = module.run_command(show_cmd)[1]
    # Output is whitespace-separated switch/name tokens, so a simple
    # membership test on the split tokens is sufficient.
    TRUNK_EXISTS = trunk_name in output.split()
def run_cli(module, cli):
    """
    Execute the assembled CLI command on the target node(s) and exit the
    module with a JSON response describing the outcome.

    :param module: The Ansible module (used for run_command/exit_json).
    :param cli: The complete CLI string to be executed.
    """
    switch = module.params['pn_cliswitch']
    action = get_command_from_state(module.params['state'])
    rc, out, err = module.run_command(shlex.split(cli))
    # Report only the text after the first occurrence of the switch name,
    # which keeps the credential portion of the command line out of the
    # response.
    shown = cli.split(switch)[1]
    if rc != 0:
        # Non-zero return code: surface stderr and mark as unchanged.
        module.exit_json(
            command=shown,
            stderr=err.strip(),
            msg="%s operation failed" % action,
            changed=False
        )
    if out:
        module.exit_json(
            command=shown,
            stdout=out.strip(),
            msg="%s operation completed" % action,
            changed=True
        )
    else:
        module.exit_json(
            command=shown,
            msg="%s operation completed" % action,
            changed=True
        )
def get_command_from_state(state):
    """
    Map a module state to the corresponding trunk CLI command name.

    :param state: One of 'present', 'absent' or 'update'.
    :return: The command name, or None for an unrecognized state.
    """
    state_to_command = {
        'present': 'trunk-create',
        'absent': 'trunk-delete',
        'update': 'trunk-modify',
    }
    return state_to_command.get(state)
def main():
    """Module entry point: parse arguments and run the trunk CLI command.

    Builds a trunk-create/trunk-delete/trunk-modify command string from the
    module parameters, performs an idempotency check against existing
    trunks, then delegates execution (and module exit) to run_cli().
    """
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliusername=dict(required=False, type='str'),
            pn_clipassword=dict(required=False, type='str', no_log=True),
            pn_cliswitch=dict(required=False, type='str', default='local'),
            state=dict(required=True, type='str',
                       choices=['present', 'absent', 'update']),
            pn_name=dict(required=True, type='str'),
            pn_ports=dict(type='str'),
            pn_speed=dict(type='str',
                          choices=['disable', '10m', '100m', '1g', '2.5g',
                                   '10g', '40g']),
            pn_egress_rate_limit=dict(type='str'),
            pn_jumbo=dict(type='bool'),
            pn_lacp_mode=dict(type='str', choices=[
                'off', 'passive', 'active']),
            pn_lacp_priority=dict(type='int'),
            pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
            pn_lacp_fallback=dict(type='str', choices=[
                'bundle', 'individual']),
            pn_lacp_fallback_timeout=dict(type='str'),
            pn_edge_switch=dict(type='bool'),
            pn_pause=dict(type='bool'),
            pn_description=dict(type='str'),
            pn_loopback=dict(type='bool'),
            pn_mirror_receive=dict(type='bool'),
            pn_unknown_ucast_level=dict(type='str'),
            pn_unknown_mcast_level=dict(type='str'),
            pn_broadcast_level=dict(type='str'),
            pn_port_macaddr=dict(type='str'),
            pn_loopvlans=dict(type='str'),
            pn_routing=dict(type='bool'),
            pn_host=dict(type='bool')
        ),
        required_if=(
            ["state", "present", ["pn_name", "pn_ports"]],
            ["state", "absent", ["pn_name"]],
            ["state", "update", ["pn_name"]]
        )
    )
    # Accessing the arguments
    state = module.params['state']
    name = module.params['pn_name']
    ports = module.params['pn_ports']
    speed = module.params['pn_speed']
    egress_rate_limit = module.params['pn_egress_rate_limit']
    jumbo = module.params['pn_jumbo']
    lacp_mode = module.params['pn_lacp_mode']
    lacp_priority = module.params['pn_lacp_priority']
    lacp_timeout = module.params['pn_lacp_timeout']
    lacp_fallback = module.params['pn_lacp_fallback']
    lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
    edge_switch = module.params['pn_edge_switch']
    pause = module.params['pn_pause']
    description = module.params['pn_description']
    loopback = module.params['pn_loopback']
    mirror_receive = module.params['pn_mirror_receive']
    unknown_ucast_level = module.params['pn_unknown_ucast_level']
    unknown_mcast_level = module.params['pn_unknown_mcast_level']
    broadcast_level = module.params['pn_broadcast_level']
    port_macaddr = module.params['pn_port_macaddr']
    loopvlans = module.params['pn_loopvlans']
    routing = module.params['pn_routing']
    host = module.params['pn_host']
    command = get_command_from_state(state)
    # Building the CLI command string
    cli = pn_cli(module)
    if command == 'trunk-delete':
        # Deleting a trunk that does not exist is a no-op: skip instead of
        # letting the CLI fail.
        check_cli(module, cli)
        if TRUNK_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='Trunk with name %s does not exist' % name
            )
        cli += ' %s name %s ' % (command, name)
    else:
        if command == 'trunk-create':
            # Creating a trunk that already exists is likewise a no-op.
            check_cli(module, cli)
            if TRUNK_EXISTS is True:
                module.exit_json(
                    skipped=True,
                    msg='Trunk with name %s already exists' % name
                )
        cli += ' %s name %s ' % (command, name)
        # Appending options
        if ports:
            cli += ' ports ' + ports
        if speed:
            cli += ' speed ' + speed
        if egress_rate_limit:
            cli += ' egress-rate-limit ' + egress_rate_limit
        # Tri-state booleans: None means "leave unchanged", so True and
        # False are tested explicitly throughout.
        if jumbo is True:
            cli += ' jumbo '
        if jumbo is False:
            cli += ' no-jumbo '
        if lacp_mode:
            cli += ' lacp-mode ' + lacp_mode
        if lacp_priority:
            # pn_lacp_priority is declared type='int'; convert it before
            # concatenation (the previous code raised TypeError here).
            cli += ' lacp-priority ' + str(lacp_priority)
        if lacp_timeout:
            cli += ' lacp-timeout ' + lacp_timeout
        if lacp_fallback:
            cli += ' lacp-fallback ' + lacp_fallback
        if lacp_fallback_timeout:
            cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
        if edge_switch is True:
            cli += ' edge-switch '
        if edge_switch is False:
            cli += ' no-edge-switch '
        if pause is True:
            cli += ' pause '
        if pause is False:
            cli += ' no-pause '
        if description:
            cli += ' description ' + description
        if loopback is True:
            cli += ' loopback '
        if loopback is False:
            cli += ' no-loopback '
        if mirror_receive is True:
            cli += ' mirror-receive-only '
        if mirror_receive is False:
            cli += ' no-mirror-receive-only '
        if unknown_ucast_level:
            cli += ' unknown-ucast-level ' + unknown_ucast_level
        if unknown_mcast_level:
            cli += ' unknown-mcast-level ' + unknown_mcast_level
        if broadcast_level:
            cli += ' broadcast-level ' + broadcast_level
        if port_macaddr:
            cli += ' port-mac-address ' + port_macaddr
        if loopvlans:
            cli += ' loopvlans ' + loopvlans
        if routing is True:
            cli += ' routing '
        if routing is False:
            cli += ' no-routing '
        if host is True:
            cli += ' host-enable '
        if host is False:
            cli += ' host-disable '
    run_cli(module, cli)
if __name__ == '__main__':
main()
| 30.835853 | 81 | 0.62254 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
---
module: pn_trunk
author: "Pluribus Networks (@amitsi)"
short_description: CLI command to create/delete/modify a trunk.
deprecated:
removed_in: 2.0.0 # was Ansible 2.12
why: Doesn't support latest Pluribus Networks netvisor
alternative: Latest modules will be pushed in Ansible future versions.
description:
- Execute trunk-create or trunk-delete command.
- Trunks can be used to aggregate network links at Layer 2 on the local
switch. Use this command to create a new trunk.
options:
pn_cliusername:
description:
- Provide login username if user is not root.
required: False
pn_clipassword:
description:
- Provide login password if user is not root.
required: False
pn_cliswitch:
description:
- Target switch(es) to run the cli on.
required: False
default: 'local'
state:
description:
- State the action to perform. Use 'present' to create trunk,
'absent' to delete trunk and 'update' to modify trunk.
required: True
choices: ['present', 'absent', 'update']
pn_name:
description:
- Specify the name for the trunk configuration.
required: true
pn_ports:
description:
- Specify the port number(s) for the link(s) to aggregate into the trunk.
- Required for trunk-create.
pn_speed:
description:
- Specify the port speed or disable the port.
choices: ['disable', '10m', '100m', '1g', '2.5g', '10g', '40g']
pn_egress_rate_limit:
description:
- Specify an egress port data rate limit for the configuration.
pn_jumbo:
description:
- Specify if the port can receive jumbo frames.
type: bool
pn_lacp_mode:
description:
- Specify the LACP mode for the configuration.
choices: ['off', 'passive', 'active']
pn_lacp_priority:
description:
- Specify the LACP priority. This is a number between 1 and 65535 with a
default value of 32768.
pn_lacp_timeout:
description:
- Specify the LACP time out as slow (30 seconds) or fast (4seconds).
The default value is slow.
choices: ['slow', 'fast']
pn_lacp_fallback:
description:
- Specify the LACP fallback mode as bundles or individual.
choices: ['bundle', 'individual']
pn_lacp_fallback_timeout:
description:
- Specify the LACP fallback timeout in seconds. The range is between 30
and 60 seconds with a default value of 50 seconds.
pn_edge_switch:
description:
- Specify if the switch is an edge switch.
type: bool
pn_pause:
description:
- Specify if pause frames are sent.
type: bool
pn_description:
description:
- Specify a description for the trunk configuration.
pn_loopback:
description:
- Specify loopback if you want to use loopback.
type: bool
pn_mirror_receive:
description:
- Specify if the configuration receives mirrored traffic.
type: bool
pn_unknown_ucast_level:
description:
- Specify an unknown unicast level in percent. The default value is 100%.
pn_unknown_mcast_level:
description:
- Specify an unknown multicast level in percent. The default value is 100%.
pn_broadcast_level:
description:
- Specify a broadcast level in percent. The default value is 100%.
pn_port_macaddr:
description:
- Specify the MAC address of the port.
pn_loopvlans:
description:
- Specify a list of looping vlans.
pn_routing:
description:
- Specify if the port participates in routing on the network.
type: bool
pn_host:
description:
- Host facing port control setting.
type: bool
'''
EXAMPLES = """
- name: Create trunk
community.network.pn_trunk:
state: 'present'
pn_name: 'spine-to-leaf'
pn_ports: '11,12,13,14'
- name: Delete trunk
community.network.pn_trunk:
state: 'absent'
pn_name: 'spine-to-leaf'
"""
RETURN = """
command:
description: The CLI command run on the target node(s).
returned: always
type: str
stdout:
description: The set of responses from the trunk command.
returned: always
type: list
stderr:
description: The set of error responses from the trunk command.
returned: on error
type: list
changed:
description: Indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
import shlex
# Ansible boiler-plate
from ansible.module_utils.basic import AnsibleModule
TRUNK_EXISTS = None
def pn_cli(module):
username = module.params['pn_cliusername']
password = module.params['pn_clipassword']
cliswitch = module.params['pn_cliswitch']
if username and password:
cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)
else:
cli = '/usr/bin/cli --quiet '
if cliswitch == 'local':
cli += ' switch-local '
else:
cli += ' switch ' + cliswitch
return cli
def check_cli(module, cli):
name = module.params['pn_name']
show = cli + ' trunk-show format switch,name no-show-headers'
show = shlex.split(show)
out = module.run_command(show)[1]
out = out.split()
# Global flags
global TRUNK_EXISTS
if name in out:
TRUNK_EXISTS = True
else:
TRUNK_EXISTS = False
def run_cli(module, cli):
cliswitch = module.params['pn_cliswitch']
state = module.params['state']
command = get_command_from_state(state)
cmd = shlex.split(cli)
# 'out' contains the output
# 'err' contains the error messages
result, out, err = module.run_command(cmd)
print_cli = cli.split(cliswitch)[1]
# Response in JSON format
if result != 0:
module.exit_json(
command=print_cli,
stderr=err.strip(),
msg="%s operation failed" % command,
changed=False
)
if out:
module.exit_json(
command=print_cli,
stdout=out.strip(),
msg="%s operation completed" % command,
changed=True
)
else:
module.exit_json(
command=print_cli,
msg="%s operation completed" % command,
changed=True
)
def get_command_from_state(state):
command = None
if state == 'present':
command = 'trunk-create'
if state == 'absent':
command = 'trunk-delete'
if state == 'update':
command = 'trunk-modify'
return command
def main():
module = AnsibleModule(
argument_spec=dict(
pn_cliusername=dict(required=False, type='str'),
pn_clipassword=dict(required=False, type='str', no_log=True),
pn_cliswitch=dict(required=False, type='str', default='local'),
state=dict(required=True, type='str',
choices=['present', 'absent', 'update']),
pn_name=dict(required=True, type='str'),
pn_ports=dict(type='str'),
pn_speed=dict(type='str',
choices=['disable', '10m', '100m', '1g', '2.5g',
'10g', '40g']),
pn_egress_rate_limit=dict(type='str'),
pn_jumbo=dict(type='bool'),
pn_lacp_mode=dict(type='str', choices=[
'off', 'passive', 'active']),
pn_lacp_priority=dict(type='int'),
pn_lacp_timeout=dict(type='str', choices=['slow', 'fast']),
pn_lacp_fallback=dict(type='str', choices=[
'bundle', 'individual']),
pn_lacp_fallback_timeout=dict(type='str'),
pn_edge_switch=dict(type='bool'),
pn_pause=dict(type='bool'),
pn_description=dict(type='str'),
pn_loopback=dict(type='bool'),
pn_mirror_receive=dict(type='bool'),
pn_unknown_ucast_level=dict(type='str'),
pn_unknown_mcast_level=dict(type='str'),
pn_broadcast_level=dict(type='str'),
pn_port_macaddr=dict(type='str'),
pn_loopvlans=dict(type='str'),
pn_routing=dict(type='bool'),
pn_host=dict(type='bool')
),
required_if=(
["state", "present", ["pn_name", "pn_ports"]],
["state", "absent", ["pn_name"]],
["state", "update", ["pn_name"]]
)
)
# Accessing the arguments
state = module.params['state']
name = module.params['pn_name']
ports = module.params['pn_ports']
speed = module.params['pn_speed']
egress_rate_limit = module.params['pn_egress_rate_limit']
jumbo = module.params['pn_jumbo']
lacp_mode = module.params['pn_lacp_mode']
lacp_priority = module.params['pn_lacp_priority']
lacp_timeout = module.params['pn_lacp_timeout']
lacp_fallback = module.params['pn_lacp_fallback']
lacp_fallback_timeout = module.params['pn_lacp_fallback_timeout']
edge_switch = module.params['pn_edge_switch']
pause = module.params['pn_pause']
description = module.params['pn_description']
loopback = module.params['pn_loopback']
mirror_receive = module.params['pn_mirror_receive']
unknown_ucast_level = module.params['pn_unknown_ucast_level']
unknown_mcast_level = module.params['pn_unknown_mcast_level']
broadcast_level = module.params['pn_broadcast_level']
port_macaddr = module.params['pn_port_macaddr']
loopvlans = module.params['pn_loopvlans']
routing = module.params['pn_routing']
host = module.params['pn_host']
command = get_command_from_state(state)
# Building the CLI command string
cli = pn_cli(module)
if command == 'trunk-delete':
check_cli(module, cli)
if TRUNK_EXISTS is False:
module.exit_json(
skipped=True,
msg='Trunk with name %s does not exist' % name
)
cli += ' %s name %s ' % (command, name)
else:
if command == 'trunk-create':
check_cli(module, cli)
if TRUNK_EXISTS is True:
module.exit_json(
skipped=True,
msg='Trunk with name %s already exists' % name
)
cli += ' %s name %s ' % (command, name)
# Appending options
if ports:
cli += ' ports ' + ports
if speed:
cli += ' speed ' + speed
if egress_rate_limit:
cli += ' egress-rate-limit ' + egress_rate_limit
if jumbo is True:
cli += ' jumbo '
if jumbo is False:
cli += ' no-jumbo '
if lacp_mode:
cli += ' lacp-mode ' + lacp_mode
if lacp_priority:
cli += ' lacp-priority ' + lacp_priority
if lacp_timeout:
cli += ' lacp-timeout ' + lacp_timeout
if lacp_fallback:
cli += ' lacp-fallback ' + lacp_fallback
if lacp_fallback_timeout:
cli += ' lacp-fallback-timeout ' + lacp_fallback_timeout
if edge_switch is True:
cli += ' edge-switch '
if edge_switch is False:
cli += ' no-edge-switch '
if pause is True:
cli += ' pause '
if pause is False:
cli += ' no-pause '
if description:
cli += ' description ' + description
if loopback is True:
cli += ' loopback '
if loopback is False:
cli += ' no-loopback '
if mirror_receive is True:
cli += ' mirror-receive-only '
if mirror_receive is False:
cli += ' no-mirror-receive-only '
if unknown_ucast_level:
cli += ' unknown-ucast-level ' + unknown_ucast_level
if unknown_mcast_level:
cli += ' unknown-mcast-level ' + unknown_mcast_level
if broadcast_level:
cli += ' broadcast-level ' + broadcast_level
if port_macaddr:
cli += ' port-mac-address ' + port_macaddr
if loopvlans:
cli += ' loopvlans ' + loopvlans
if routing is True:
cli += ' routing '
if routing is False:
cli += ' no-routing '
if host is True:
cli += ' host-enable '
if host is False:
cli += ' host-disable '
run_cli(module, cli)
if __name__ == '__main__':
main()
| true | true |
f7144496800e55420ec75dde8d365a87524ea74a | 37 | py | Python | rssdldmng/__init__.py | alexpayne482/rssdldmng | 4428f10171902861702fc0f528d3d9576923541a | [
"MIT"
] | null | null | null | rssdldmng/__init__.py | alexpayne482/rssdldmng | 4428f10171902861702fc0f528d3d9576923541a | [
"MIT"
] | 1 | 2019-11-25T15:54:02.000Z | 2019-11-25T15:54:02.000Z | rssdldmng/__init__.py | alexpayne482/rssdldmng | 4428f10171902861702fc0f528d3d9576923541a | [
"MIT"
] | null | null | null | """Init file for RSS downloader."""
| 18.5 | 36 | 0.648649 | true | true | |
f71444d8f4c578982eaf1f4ddd50ab20ff8817b7 | 4,618 | py | Python | test/python/testconsole.py | malywonsz/txtai | ace1b04161062430887eb2153961abcd819a5afb | [
"Apache-2.0"
] | null | null | null | test/python/testconsole.py | malywonsz/txtai | ace1b04161062430887eb2153961abcd819a5afb | [
"Apache-2.0"
] | 47 | 2021-10-02T22:48:03.000Z | 2021-12-29T02:36:20.000Z | test/python/testconsole.py | malywonsz/txtai | ace1b04161062430887eb2153961abcd819a5afb | [
"Apache-2.0"
] | null | null | null | """
Console module tests
"""
import contextlib
import io
import os
import tempfile
import unittest
from txtai.console import Console
from txtai.embeddings import Embeddings
APPLICATION = """
path: %s
workflow:
test:
tasks:
- task: console
"""
class TestConsole(unittest.TestCase):
    """
    Console tests.

    Exercises the txtai interactive console against both a saved embeddings
    index and a YAML application configuration, capturing stdout to verify
    command output.
    """
    @classmethod
    def setUpClass(cls):
        """
        Initialize test data.

        Builds an embeddings index over a small list of text, saves it to
        the temp directory, writes an application YAML pointing at that
        saved index and creates the Console instance shared by all tests.
        """
        cls.data = [
            "US tops 5 million confirmed virus cases",
            "Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
            "Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
            "The National Park Service warns against sacrificing slower friends in a bear attack",
            "Maine man wins $1M from $25 lottery ticket",
            "Make huge profits without work, earn up to $100,000 a day",
        ]
        # Create embeddings model, backed by sentence-transformers & transformers
        cls.embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2", "content": True})
        # Create an index for the list of text
        cls.embeddings.index([(uid, text, None) for uid, text in enumerate(cls.data)])
        # Create app paths
        cls.apppath = os.path.join(tempfile.gettempdir(), "console.yml")
        cls.embedpath = os.path.join(tempfile.gettempdir(), "embeddings.console")
        # Create app.yml
        with open(cls.apppath, "w", encoding="utf-8") as out:
            out.write(APPLICATION % cls.embedpath)
        # Save index
        cls.embeddings.save(cls.embedpath)
        # Create console
        cls.console = Console(cls.embedpath)
    def testApplication(self):
        """
        Test application

        Loads the YAML application, limits results to one and checks that a
        semantic query returns the expected document.
        """
        self.assertIn("console.yml", self.command(f".load {self.apppath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))
    def testConfig(self):
        """
        Test .config command
        """
        self.assertIn("tasks", self.command(".config"))
    def testEmbeddings(self):
        """
        Test embeddings index

        Loads the saved index directly (no application YAML) and runs the
        same query flow as testApplication.
        """
        self.assertIn("embeddings", self.command(f".load {self.embedpath}"))
        self.assertIn("1", self.command(".limit 1"))
        self.assertIn("Maine man wins", self.command("feel good story"))
    def testEmbeddingsNoDatabase(self):
        """
        Test embeddings with no database/content

        Without content storage, search results surface document ids, so
        the assertion checks for id "4" ("Maine man wins...").
        """
        console = Console()
        # Create embeddings model, backed by sentence-transformers & transformers
        embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
        # Create an index for the list of text
        embeddings.index([(uid, text, None) for uid, text in enumerate(self.data)])
        # Set embeddings on console
        console.app = embeddings
        self.assertIn("4", self.command("feel good story", console))
    def testEmpty(self):
        """
        Test empty console instance

        Searching with nothing loaded should surface an AttributeError in
        the command output.
        """
        console = Console()
        self.assertIn("AttributeError", self.command("search", console))
    def testHighlight(self):
        """
        Test .highlight command
        """
        self.assertIn("highlight", self.command(".highlight"))
        self.assertIn("wins", self.command("feel good story"))
        self.assertIn("Taiwan", self.command("asia"))
    def testPreloop(self):
        """
        Test preloop
        """
        self.assertIn("txtai console", self.preloop())
    def testWorkflow(self):
        """
        Test .workflow command
        """
        self.command(f".load {self.apppath}")
        self.assertIn("echo", self.command(".workflow test echo"))
    def command(self, command, console=None):
        """
        Runs a console command and captures its stdout.

        Args:
            command: command to run
            console: console instance, defaults to self.console

        Returns:
            command output
        """
        # Run info
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            if not console:
                console = self.console
            console.onecmd(command)
        return output.getvalue()
    def preloop(self):
        """
        Runs console.preloop and captures its stdout.

        Returns:
            preloop output
        """
        # Run info
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            self.console.preloop()
        return output.getvalue()
| 26.54023 | 109 | 0.591815 |
import contextlib
import io
import os
import tempfile
import unittest
from txtai.console import Console
from txtai.embeddings import Embeddings
APPLICATION = """
path: %s
workflow:
test:
tasks:
- task: console
"""
class TestConsole(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = [
"US tops 5 million confirmed virus cases",
"Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
"Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
"The National Park Service warns against sacrificing slower friends in a bear attack",
"Maine man wins $1M from $25 lottery ticket",
"Make huge profits without work, earn up to $100,000 a day",
]
# Create embeddings model, backed by sentence-transformers & transformers
cls.embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2", "content": True})
# Create an index for the list of text
cls.embeddings.index([(uid, text, None) for uid, text in enumerate(cls.data)])
# Create app paths
cls.apppath = os.path.join(tempfile.gettempdir(), "console.yml")
cls.embedpath = os.path.join(tempfile.gettempdir(), "embeddings.console")
# Create app.yml
with open(cls.apppath, "w", encoding="utf-8") as out:
out.write(APPLICATION % cls.embedpath)
# Save index
cls.embeddings.save(cls.embedpath)
# Create console
cls.console = Console(cls.embedpath)
def testApplication(self):
self.assertIn("console.yml", self.command(f".load {self.apppath}"))
self.assertIn("1", self.command(".limit 1"))
self.assertIn("Maine man wins", self.command("feel good story"))
def testConfig(self):
self.assertIn("tasks", self.command(".config"))
def testEmbeddings(self):
self.assertIn("embeddings", self.command(f".load {self.embedpath}"))
self.assertIn("1", self.command(".limit 1"))
self.assertIn("Maine man wins", self.command("feel good story"))
def testEmbeddingsNoDatabase(self):
console = Console()
# Create embeddings model, backed by sentence-transformers & transformers
embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
# Create an index for the list of text
embeddings.index([(uid, text, None) for uid, text in enumerate(self.data)])
# Set embeddings on console
console.app = embeddings
self.assertIn("4", self.command("feel good story", console))
def testEmpty(self):
console = Console()
self.assertIn("AttributeError", self.command("search", console))
def testHighlight(self):
self.assertIn("highlight", self.command(".highlight"))
self.assertIn("wins", self.command("feel good story"))
self.assertIn("Taiwan", self.command("asia"))
def testPreloop(self):
self.assertIn("txtai console", self.preloop())
def testWorkflow(self):
self.command(f".load {self.apppath}")
self.assertIn("echo", self.command(".workflow test echo"))
def command(self, command, console=None):
# Run info
output = io.StringIO()
with contextlib.redirect_stdout(output):
if not console:
console = self.console
console.onecmd(command)
return output.getvalue()
def preloop(self):
# Run info
output = io.StringIO()
with contextlib.redirect_stdout(output):
self.console.preloop()
return output.getvalue()
| true | true |
f7144505df291fdcf3ff246068f77eaa76ee3d0a | 7,859 | py | Python | django/apps/config.py | DrMeers/django | 83a3add4bed8d8d49f93b30c817c66908b0a26ba | [
"BSD-3-Clause"
] | 1 | 2019-02-10T19:33:27.000Z | 2019-02-10T19:33:27.000Z | django/apps/config.py | avkryukov/django | f90be002d9d3c10b87c74741986e2cbf9f2b858e | [
"BSD-3-Clause"
] | null | null | null | django/apps/config.py | avkryukov/django | f90be002d9d3c10b87c74741986e2cbf9f2b858e | [
"BSD-3-Clause"
] | null | null | null | from importlib import import_module
import os
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initally set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must load the app config class
# located at <mod_path>.<cls_name>.
# Avoid django.utils.module_loading.import_by_path because it
# masks errors -- it reraises ImportError as ImproperlyConfigured.
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
# Emulate the error that "from <mod_path> import <cls_name>"
# would raise when <mod_path> exists but not <cls_name>, with
# more context (Python just says "cannot import name ...").
raise ImportError(
"cannot import name '%s' from '%s'" % (cls_name, mod_path))
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
if self.models is None:
raise LookupError(
"App '%s' doesn't have any models." % self.label)
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| 40.096939 | 81 | 0.618908 | from importlib import import_module
import os
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from django.utils._os import upath
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
def __init__(self, app_name, app_module):
self.name = app_name
self.module = app_module
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
self.models_module = None
# Mapping of lower case model names to model classes. Initally set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must load the app config class
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
raise ImportError(
"cannot import name '%s' from '%s'" % (cls_name, mod_path))
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def get_model(self, model_name):
if self.models is None:
raise LookupError(
"App '%s' doesn't have any models." % self.label)
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
| true | true |
f7144537f8fc87d001f4ac40dde7224820902c65 | 632 | py | Python | django-server/climate_commander/jobs/migrations/0003_job_run_time.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | null | null | null | django-server/climate_commander/jobs/migrations/0003_job_run_time.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | 1 | 2016-08-03T21:49:58.000Z | 2016-08-03T21:49:58.000Z | django-server/climate_commander/jobs/migrations/0003_job_run_time.py | jrising/climate-commander | 123cf5a07b87eb1a3bdb44378ee27712b6563ec3 | [
"MIT"
] | 1 | 2016-07-13T18:19:56.000Z | 2016-07-13T18:19:56.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-19 05:37
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('jobs', '0002_remove_job_run_time'),
]
operations = [
migrations.AddField(
model_name='job',
name='run_time',
field=models.DateTimeField(default=datetime.datetime(2016, 8, 19, 5, 37, 14, 816610, tzinfo=utc), verbose_name='Time of the Last Run'),
preserve_default=False,
),
]
| 26.333333 | 147 | 0.647152 |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('jobs', '0002_remove_job_run_time'),
]
operations = [
migrations.AddField(
model_name='job',
name='run_time',
field=models.DateTimeField(default=datetime.datetime(2016, 8, 19, 5, 37, 14, 816610, tzinfo=utc), verbose_name='Time of the Last Run'),
preserve_default=False,
),
]
| true | true |
f714453f736bf7e39cc67b173d03bf9106ffd006 | 4,152 | py | Python | benchmark/startQiskit2375.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit2375.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit2375.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.cx(input_qubit[3],input_qubit[0]) # number=37
prog.z(input_qubit[3]) # number=38
prog.cx(input_qubit[3],input_qubit[0]) # number=39
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.cx(input_qubit[0],input_qubit[2]) # number=20
prog.x(input_qubit[2]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2375.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.890756 | 140 | 0.651734 |
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.x(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cx(input_qubit[3],input_qubit[0])
prog.z(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.cx(input_qubit[2],input_qubit[0])
prog.h(input_qubit[0])
prog.h(input_qubit[1])
prog.cz(input_qubit[2],input_qubit[0])
prog.h(input_qubit[0])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.x(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
prog.cx(input_qubit[0],input_qubit[2])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('qasm_simulator')
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit2375.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f7144556651053589116ff8ee6290dc79a7bff13 | 1,851 | py | Python | rdmo/questions/migrations/0038_rename_de_to_lang2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 77 | 2016-08-09T11:40:20.000Z | 2022-03-06T11:03:26.000Z | rdmo/questions/migrations/0038_rename_de_to_lang2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 377 | 2016-07-01T13:59:36.000Z | 2022-03-30T13:53:19.000Z | rdmo/questions/migrations/0038_rename_de_to_lang2.py | Raspeanut/rdmo | 9f785010a499c372a2f8368ccf76d2ea4150adcb | [
"Apache-2.0"
] | 47 | 2016-06-23T11:32:19.000Z | 2022-03-01T11:34:37.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-29 16:22
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0037_rename_en_to_lang1'),
]
operations = [
migrations.RenameField(
model_name='catalog',
old_name='title_de',
new_name='title_lang2',
),
migrations.RenameField(
model_name='question',
old_name='help_de',
new_name='help_lang2',
),
migrations.RenameField(
model_name='question',
old_name='text_de',
new_name='text_lang2',
),
migrations.RenameField(
model_name='question',
old_name='verbose_name_de',
new_name='verbose_name_lang2',
),
migrations.RenameField(
model_name='question',
old_name='verbose_name_plural_de',
new_name='verbose_name_plural_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='help_de',
new_name='help_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='title_de',
new_name='title_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='verbose_name_de',
new_name='verbose_name_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='verbose_name_plural_de',
new_name='verbose_name_plural_lang2',
),
migrations.RenameField(
model_name='section',
old_name='title_de',
new_name='title_lang2',
),
]
| 28.045455 | 49 | 0.553755 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0037_rename_en_to_lang1'),
]
operations = [
migrations.RenameField(
model_name='catalog',
old_name='title_de',
new_name='title_lang2',
),
migrations.RenameField(
model_name='question',
old_name='help_de',
new_name='help_lang2',
),
migrations.RenameField(
model_name='question',
old_name='text_de',
new_name='text_lang2',
),
migrations.RenameField(
model_name='question',
old_name='verbose_name_de',
new_name='verbose_name_lang2',
),
migrations.RenameField(
model_name='question',
old_name='verbose_name_plural_de',
new_name='verbose_name_plural_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='help_de',
new_name='help_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='title_de',
new_name='title_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='verbose_name_de',
new_name='verbose_name_lang2',
),
migrations.RenameField(
model_name='questionset',
old_name='verbose_name_plural_de',
new_name='verbose_name_plural_lang2',
),
migrations.RenameField(
model_name='section',
old_name='title_de',
new_name='title_lang2',
),
]
| true | true |
f71445882aac3e35cd2d41b9696c200ce10affe8 | 3,635 | py | Python | examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py | CoMind-Technologies/deepinterpolation | 2f583c4fdde4ed92139e40eb8076dd5b129d29d9 | [
"Unlicense"
] | 178 | 2020-10-16T19:51:21.000Z | 2022-03-11T01:25:22.000Z | examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py | CoMind-Technologies/deepinterpolation | 2f583c4fdde4ed92139e40eb8076dd5b129d29d9 | [
"Unlicense"
] | 46 | 2020-10-17T14:28:23.000Z | 2022-02-18T18:09:12.000Z | examples/paper_generation_code/2020-07-31-local_fmri_training_mouse.py | CoMind-Technologies/deepinterpolation | 2f583c4fdde4ed92139e40eb8076dd5b129d29d9 | [
"Unlicense"
] | 40 | 2020-10-18T19:01:27.000Z | 2022-03-17T15:49:54.000Z | import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")
training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}
steps_per_epoch = 10
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 2
generator_test_param["pre_post_z"] = 3
generator_test_param[
"train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 0
generator_test_param["end_frame"] = 100
generator_test_param["total_nb_block"] = 10000
generator_test_param["steps_per_epoch"] = steps_per_epoch
generator_param["type"] = "generator"
generator_param["name"] = "FmriGenerator"
generator_param["pre_post_x"] = 3
generator_param["pre_post_y"] = 2
generator_param["pre_post_z"] = 3
generator_param[
"train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_param["batch_size"] = 1000
generator_param["start_frame"] = 0
generator_param["end_frame"] = 400
generator_param["total_nb_block"] = 5000000
generator_param["steps_per_epoch"] = steps_per_epoch
network_param["type"] = "network"
network_param["name"] = "fmri_volume_dense_denoiser"
training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 10
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
network_param["name"]
+ "_"
+ training_param["loss"]
+ "_"
+ training_param["run_uid"]
)
jobdir = (
"//Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/trained_fmri_models/"
+ training_param["model_string"]
+ "_"
+ run_uid
)
training_param["output_dir"] = jobdir
try:
os.mkdir(jobdir)
except:
print("folder already exists")
path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)
path_generator = os.path.join(jobdir, "generator.json")
json_obj = JsonSaver(generator_param)
json_obj.save_json(path_generator)
path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)
path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)
generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)
train_generator = generator_obj.find_and_build()(path_generator)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)
training_class = trainer_obj.find_and_build()(
train_generator, test_generator, network_callback, path_training
)
training_class.run()
training_class.finalize()
| 30.041322 | 148 | 0.784869 | import deepinterpolation as de
import sys
from shutil import copyfile
import os
from deepinterpolation.generic import JsonSaver, ClassLoader
import datetime
from typing import Any, Dict
now = datetime.datetime.now()
run_uid = now.strftime("%Y_%m_%d_%H_%M")
training_param = {}
generator_param = {}
network_param = {}
generator_test_param = {}
steps_per_epoch = 10
generator_test_param["type"] = "generator"
generator_test_param["name"] = "FmriGenerator"
generator_test_param["pre_post_x"] = 3
generator_test_param["pre_post_y"] = 2
generator_test_param["pre_post_z"] = 3
generator_test_param[
"train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_test_param["batch_size"] = 1000
generator_test_param["start_frame"] = 0
generator_test_param["end_frame"] = 100
generator_test_param["total_nb_block"] = 10000
generator_test_param["steps_per_epoch"] = steps_per_epoch
generator_param["type"] = "generator"
generator_param["name"] = "FmriGenerator"
generator_param["pre_post_x"] = 3
generator_param["pre_post_y"] = 2
generator_param["pre_post_z"] = 3
generator_param[
"train_path"
] = "/Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/mouse/sub-106-ses-1-func-sub-106_ses-1_task-rest_acq-EPI_bold.nii"
generator_param["batch_size"] = 1000
generator_param["start_frame"] = 0
generator_param["end_frame"] = 400
generator_param["total_nb_block"] = 5000000
generator_param["steps_per_epoch"] = steps_per_epoch
network_param["type"] = "network"
network_param["name"] = "fmri_volume_dense_denoiser"
training_param["type"] = "trainer"
training_param["name"] = "core_trainer"
training_param["run_uid"] = run_uid
training_param["batch_size"] = generator_test_param["batch_size"]
training_param["steps_per_epoch"] = steps_per_epoch
training_param["period_save"] = 10
training_param["nb_gpus"] = 0
training_param["apply_learning_decay"] = 0
training_param["nb_times_through_data"] = 1
training_param["learning_rate"] = 0.0001
training_param["loss"] = "mean_absolute_error"
training_param["model_string"] = (
network_param["name"]
+ "_"
+ training_param["loss"]
+ "_"
+ training_param["run_uid"]
)
jobdir = (
"//Users/jeromel/Documents/Work documents/Allen Institute/Projects/Deep2P/fMRI/trained_fmri_models/"
+ training_param["model_string"]
+ "_"
+ run_uid
)
training_param["output_dir"] = jobdir
try:
os.mkdir(jobdir)
except:
print("folder already exists")
path_training = os.path.join(jobdir, "training.json")
json_obj = JsonSaver(training_param)
json_obj.save_json(path_training)
path_generator = os.path.join(jobdir, "generator.json")
json_obj = JsonSaver(generator_param)
json_obj.save_json(path_generator)
path_test_generator = os.path.join(jobdir, "test_generator.json")
json_obj = JsonSaver(generator_test_param)
json_obj.save_json(path_test_generator)
path_network = os.path.join(jobdir, "network.json")
json_obj = JsonSaver(network_param)
json_obj.save_json(path_network)
generator_obj = ClassLoader(path_generator)
generator_test_obj = ClassLoader(path_test_generator)
network_obj = ClassLoader(path_network)
trainer_obj = ClassLoader(path_training)
train_generator = generator_obj.find_and_build()(path_generator)
test_generator = generator_test_obj.find_and_build()(path_test_generator)
network_callback = network_obj.find_and_build()(path_network)
training_class = trainer_obj.find_and_build()(
train_generator, test_generator, network_callback, path_training
)
training_class.run()
training_class.finalize()
| true | true |
f71445884d094696a2b319a9793ec87601132945 | 1,030 | py | Python | code/Ex02.py | mariolpantunes/ml-deti | a47fdb5df70e3f6fda5768be14f97462dfe057fb | [
"MIT"
] | 8 | 2016-04-25T22:36:35.000Z | 2016-10-29T16:47:34.000Z | code/Ex02.py | mariolpantunes/ml-deti | a47fdb5df70e3f6fda5768be14f97462dfe057fb | [
"MIT"
] | null | null | null | code/Ex02.py | mariolpantunes/ml-deti | a47fdb5df70e3f6fda5768be14f97462dfe057fb | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import arff
import numpy as np
from sklearn import linear_model
# Load dataset
dataset = arff.load(open('dataset/dataset01.arff', 'r'))
data = np.array(dataset['data'])
# Reshape vector
X1 = data[:, 0].reshape(-1, 1)
X2 = np.multiply(X1, X1)
X = np.concatenate((X1, X2), axis=1)
Y = data[:, 1].reshape(-1, 1)
# Plot points
plt.scatter(X1, Y, color='black')
plt.xticks(())
plt.yticks(())
plt.show()
# Create linear regression object
model = linear_model.LinearRegression()
# Train the model using X and Y
model.fit(X, Y)
# The coefficients
print("Y = %.2fX^2 + %.2fX + %.2f" % (model.coef_[0][0], model.coef_[0][1], model.intercept_))
# The mean square error
print("Residual sum of squares: %.2f" % np.mean((model.predict(X) - Y) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % model.score(X, Y))
# Plot outputs
plt.scatter(X1, Y, color='black')
plt.plot(X1, model.predict(X), color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| 23.409091 | 94 | 0.67767 | import matplotlib.pyplot as plt
import arff
import numpy as np
from sklearn import linear_model
dataset = arff.load(open('dataset/dataset01.arff', 'r'))
data = np.array(dataset['data'])
X1 = data[:, 0].reshape(-1, 1)
X2 = np.multiply(X1, X1)
X = np.concatenate((X1, X2), axis=1)
Y = data[:, 1].reshape(-1, 1)
plt.scatter(X1, Y, color='black')
plt.xticks(())
plt.yticks(())
plt.show()
model = linear_model.LinearRegression()
model.fit(X, Y)
print("Y = %.2fX^2 + %.2fX + %.2f" % (model.coef_[0][0], model.coef_[0][1], model.intercept_))
print("Residual sum of squares: %.2f" % np.mean((model.predict(X) - Y) ** 2))
print('Variance score: %.2f' % model.score(X, Y))
plt.scatter(X1, Y, color='black')
plt.plot(X1, model.predict(X), color='blue', linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| true | true |
f714468f5d55c957348c0992aa36ec674a65a747 | 291 | py | Python | test/test_fit.py | malyvsen/unifit | 4cd6eceb9fa0dda31a742bd34b22f70a80464bef | [
"MIT"
] | null | null | null | test/test_fit.py | malyvsen/unifit | 4cd6eceb9fa0dda31a742bd34b22f70a80464bef | [
"MIT"
] | null | null | null | test/test_fit.py | malyvsen/unifit | 4cd6eceb9fa0dda31a742bd34b22f70a80464bef | [
"MIT"
] | null | null | null | import scipy.stats
import unifit
class TestFit:
data = scipy.stats.cauchy.rvs(size=256)
def test_basic(self):
unifit.fit(self.data)
def test_unnamed(self):
unifit.fit(
self.data,
distributions=unifit.distributions.values()
)
| 16.166667 | 55 | 0.611684 | import scipy.stats
import unifit
class TestFit:
data = scipy.stats.cauchy.rvs(size=256)
def test_basic(self):
unifit.fit(self.data)
def test_unnamed(self):
unifit.fit(
self.data,
distributions=unifit.distributions.values()
)
| true | true |
f71446a08f9ffe05ef9b5e466dd97b0725d3771b | 60,170 | py | Python | nim.py | FauveNoir/allumette | e5b90aa795c1d4001e3bfcf88056a215337fd70e | [
"OML"
] | 1 | 2017-02-09T16:42:09.000Z | 2017-02-09T16:42:09.000Z | nim.py | FauveNoir/allumette | e5b90aa795c1d4001e3bfcf88056a215337fd70e | [
"OML"
] | null | null | null | nim.py | FauveNoir/allumette | e5b90aa795c1d4001e3bfcf88056a215337fd70e | [
"OML"
] | 3 | 2017-02-04T02:17:46.000Z | 2017-12-20T11:02:36.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import random
import sys
import time
import re
import copy
from optparse import OptionParser
import pygame
from pygame.locals import *
version = "0.1"
usage = "usage: %prog [ --lvl [0-5] | ]"
parser = OptionParser(usage=usage, version="%prog 0.1")
parser.add_option("-m", help="Number of match",
default=0, action="store", dest="numberOfMatch")
parser.add_option("-v", help="The variant of Nim",
default=0, action="store", dest="varient")
parser.add_option("-w", help="Mode, there is two values possibles “ttl” and “ltl”",
default=0, action="store", dest="varient")
(options, args) = parser.parse_args()
if not options.numberOfMatch:
# If no lelvel was explicitly choosen by the user, it is automatically set
# to 0.
options.numberOfMatch = 15
innitialNumberOfMatch = int(options.numberOfMatch)
currentNumberOfMatch = int(innitialNumberOfMatch)
class borderSize:
def __init__(self):
self.top = 0
self.bototm = 0
self.right = 0
self.left = 0
class surfaceInformations:
def __init__(self):
self.width = 0
self.height = 0
self.y = 0
self.x = 0
self.top = 0
self.bototm = 0
self.right = 0
self.left = 0
if self.y != 0:
self.ratio = self.x / self.y
class whatToDo:
def __init__(self):
self.programHaveToContinue = True
self.variant = "trivial"
self.number = numberOfInitialMatch
self.wtw = "ttl"
print("This is Nim " + version + "\n")
mainDir = os.path.dirname(os.path.realpath(__file__))
# Colour deffinitions
background_colour = (144, 124, 106)
text_zone_colour = (81, 69, 58)
history_area_colour = (69, 59, 49)
indicator_colour = (70, 60, 50)
prompt_colour = (25, 21, 18)
creme_colour = (236, 228, 217)
yellow_colour = (205, 153, 29)
winingMainText_colour = (236, 232, 228)
purple_colour = (133, 0, 58)
red = (225, 0, 0)
class variants:
def __init__(self):
self.name = ""
self.number = 15
self.wtw = "ttl"
trivial = variants()
trivial.name = "Trivial"
trivial.number = 15
trivial.wtw = "ttl"
marienbad = variants()
marienbad.name = "Marienbad"
marienbad.number = 5
marienbad.wtw = "ttl"
knowenVarients = [trivial, marienbad]
viarentNames = []
for varientRow in knowenVarients:
viarentNames.append(varientRow.name)
# Sizes deffinitions
xSize = 640
ySize = 480
textZoneHeigh = 16
maxPaddingBetwenMatch = 3
matchPicRatio = 6.925
numberOfInitialMatch = innitialNumberOfMatch
historyAreaWidth = 67
circleRadius = 10
gameAreaDim = [0, 0]
matchAreaDim = [0, 0]
matchAreaPos = [0, 0]
indicatorDim = [127, 55]
matchAreaBorder = borderSize()
matchAreaBorder.top = 40
matchAreaBorder.bottom = 80
matchAreaBorder.left = 40
matchAreaBorder.right = 40
trianglePromptWidth = 7
textUserInput = []
normaUserInput = []
textUserInput = []
normalUserInput = []
exMode = False
normalMode = True
textToAnalyse = ""
normalTextToAnalyse = ""
allowedMatchDel = ["1", "2", "3"]
pygame.init()
screen = pygame.display.set_mode((xSize, ySize), RESIZABLE)
charInputed = [K_TAB, K_SPACE, K_EXCLAIM, K_QUOTEDBL, K_HASH, K_DOLLAR, K_AMPERSAND, K_QUOTE, K_LEFTPAREN, K_RIGHTPAREN, K_ASTERISK, K_PLUS, K_COMMA, K_MINUS, K_PERIOD, K_SLASH, K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_COLON, K_SEMICOLON, K_LESS, K_EQUALS, K_GREATER, K_QUESTION,
K_AT, K_LEFTBRACKET, K_BACKSLASH, K_RIGHTBRACKET, K_CARET, K_UNDERSCORE, K_BACKQUOTE, K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z, K_KP_PERIOD, K_KP_DIVIDE, K_KP_MULTIPLY, K_KP_MINUS, K_KP_PLUS, K_KP_EQUALS]
def makeTextZone(nameToDisplay, secondName):
# Redifining variables
xSize, ySize = screen.get_size()
# Textzone deffinition
textZone = pygame.Surface((xSize, textZoneHeigh))
textZone.fill(text_zone_colour)
heighTextZonePosition = ySize - textZoneHeigh
promptFont = pygame.font.SysFont("monospace", 14, bold=True)
# Option title deffinition
secondPromptZone = pygame.Surface((1, 1))
secondPromptZoneInfo = surfaceInformations()
secondEcart = 0
secondLittleEcart = 0
secondPromptZoneInfo.width = 0
if secondName != None:
textSecondSizeWidth, textSecondSizeHeight = promptFont.size(secondName)
secondPromptZoneInfo.width = textSecondSizeWidth + 8
secondPromptZoneInfo.heigh = textZoneHeigh
secondPromptZone = pygame.Surface((secondPromptZoneInfo.width, secondPromptZoneInfo.heigh))
secondPromptZone.fill(yellow_colour)
secondPromptText = promptFont.render(secondName, 1, prompt_colour)
secondTextSizeWidth, secondTextSizeHeight = promptFont.size(secondName)
secondPromptTriangle = pygame.draw.polygon(screen, prompt_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
secondEcart = secondPromptZoneInfo.width + trianglePromptWidth
secondLittleEcart = trianglePromptWidth
# promptzone deffinition
textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
promptZoneInfo = surfaceInformations()
promptZoneInfo.width = textSizeWidth + 8
promptZoneInfo.heigh = textZoneHeigh
promptZone = pygame.Surface((promptZoneInfo.width + secondLittleEcart, promptZoneInfo.heigh))
promptZone.fill(prompt_colour)
promptText = promptFont.render(nameToDisplay, 1, (205, 153, 29))
textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
# initialize font; must be called after 'pygame.init()' to avoid 'Font not
# Initialized' error
myfont = pygame.font.SysFont("monospace", 14)
# render text
label = myfont.render("".join(textUserInput), 1, (255, 255, 255))
#bliting cascade
screen.blit(textZone, (0, heighTextZonePosition))
screen.blit(promptZone, (0 + secondPromptZoneInfo.width, heighTextZonePosition))
promptTriangle = pygame.draw.polygon(screen, prompt_colour, [[promptZoneInfo.width + secondEcart, ySize - textZoneHeigh], [
promptZoneInfo.width + secondEcart, ySize], [promptZoneInfo.width + secondEcart + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
screen.blit(promptText, (4 + secondEcart, heighTextZonePosition + 1))
if secondName != None:
screen.blit(secondPromptZone, (0, heighTextZonePosition))
screen.blit(secondPromptText, (4, heighTextZonePosition + 1))
secondPromptTriangle = pygame.draw.polygon(screen, yellow_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
screen.blit(label, (promptZoneInfo.width +
trianglePromptWidth + 4, heighTextZonePosition))
finalNormalUserInput = ""
def analyseTyping(variant, numberOfInitialMatch, wtw):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global screen
global finalNormalUserInput
global generalState
keyboardInput = dict()
keyboardInput["mode"] = "normal"
keyboardInput["content"] = ""
functionHaveToContinue = True
for event in pygame.event.get():
if event.type == VIDEORESIZE:
screen = pygame.display.set_mode(event.size, RESIZABLE)
if event.type == QUIT:
programHaveToContinue = False
if event.type == KEYDOWN:
if (event.unicode == ":") and ("".join(normalUserInput) == ""):
exMode = True
normalMode = False
if exMode == True:
if event.key is K_ESCAPE:
exMode = False
normalMode = True
textUserInput = []
elif event.key in charInputed:
textUserInput.append(event.unicode)
elif event.key == K_BACKSPACE and textUserInput != []:
del textUserInput[-1]
if len(textUserInput) == 1:
exMode = False
normalMode = True
del textUserInput[-1]
elif event.key in [K_RETURN, K_KP_ENTER]:
textToAnalyse = "".join(textUserInput[1:])
textUserInput = []
exMode = False
if textUserInput == []:
exMode = False
normalMode = True
elif normalMode == True:
if (event.key is K_ESCAPE) and (normalUserInput != []):
normalUserInput = []
elif event.key == K_p:
normalUserInput = []
keyboardInput["mode"] = "pause"
elif (event.key is K_ESCAPE) and (normalUserInput == []):
normalUserInput = []
keyboardInput["mode"] = "escape"
elif (event.key not in [K_RETURN, K_KP_ENTER, K_ESCAPE]):
normalUserInput.append(event.unicode)
elif (event.key in [K_RETURN, K_KP_ENTER]):
finalNormalUserInput = "".join(normalUserInput)
normalUserInput = []
if textToAnalyse == "about":
textToAnalyse = ""
aboutScreen(screen)
elif textToAnalyse in ["quit", "q"]:
textToAnalyse = ""
programHaveToContinue = False
# elif textToAnalyse in ["new", "n"]:
#elif re.match("n(ew| *)?$", textToAnalyse) is not None:
elif re.match("n(ew)?( +((trivial)|(marienbad)))?( +[0-9]+)?( +(((ttl)|(take-the-last))|((ltl)|(let-the-last))))? *$", textToAnalyse) is not None:
programHaveToContinue = True
functionHaveToContinue = False
syntaxToExtractOptions = "n(ew)?( +(?P<variente>(trivial|marienbad)))?( +(?P<number>[0-9]+))?( +(?P<wtw>((ttl)|(ltl))))?"
newGameOptions = re.match(syntaxToExtractOptions,textToAnalyse)
textToAnalyse = ""
if (newGameOptions.group("variente") == None) :
generalState.variant = variant
else:
generalState.variant = newGameOptions.group("variente")
if ( newGameOptions.group("number") == None) :
generalState.number = numberOfInitialMatch
else:
generalState.number = int(newGameOptions.group("number"))
if ( newGameOptions.group("wtw") == None) :
generalState.wtw = wtw
else:
generalState.wtw = newGameOptions.group("wtw")
print("New " + str(generalState.variant) + ";" + str(generalState.number) + ";" + str(generalState.wtw) + " game.")
elif keyboardInput["mode"] == "escape":
keyboardInput["mode"] = "escape"
elif keyboardInput["mode"] == "pause":
keyboardInput["mode"] = "pause"
else:
keyboardInput["mode"] = "ex"
keyboardInput["content"] = textToAnalyse
if normalUserInput != []:
keyboardInput["mode"] = "normal"
keyboardInput["content"] = normalUserInput
return functionHaveToContinue, keyboardInput
def makeAPause(variant, numberOfInitialMatch, wtw, beginingOfGame):
global winingMainText_colour
global indicator_colour
global programHaveToContinue
resumeMainText_colour = (163, 143, 125)
pauseMainText_colour = winingMainText_colour
pauseTextInfo = surfaceInformations()
resumeTextInfo = surfaceInformations()
timeBeforePause = int(time.time()) - beginingOfGame
timeOfEndOfGame = int(time.time()) - beginingOfGame
functionHaveToContinue = True
while functionHaveToContinue and programHaveToContinue:
xSize, ySize = screen.get_size()
functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
screen.fill(indicator_colour)
if textToanalyse["mode"] == "escape":
functionHaveToContinue = False
# Bliting the text "PAUSE"
pauseTextContent = "Pause".upper()
pauseFont = pygame.font.SysFont("CMU Typewriter Text", 112, bold=True)
pauseText = pauseFont.render(pauseTextContent, 1, pauseMainText_colour)
pauseTextInfo.width, pauseTextInfo.height = pauseFont.size(pauseTextContent)
pauseTextInfo.x = (xSize - pauseTextInfo.width) / 2
pauseTextInfo.y = (ySize/2) - pauseTextInfo.height
screen.blit(pauseText, (pauseTextInfo.x, pauseTextInfo.y))
# Bliting the text resume text
resumeTextContent = "Type Escape key to continue."
resumeFont = pygame.font.SysFont("CMU Typewriter Text", 14, bold=True)
resumeText = resumeFont.render(resumeTextContent, 1, resumeMainText_colour)
resumeTextInfo.width, resumeTextInfo.height = resumeFont.size(resumeTextContent)
resumeTextInfo.x = (xSize - resumeTextInfo.width) / 2
resumeTextInfo.y = (ySize- 14) - resumeTextInfo.height - 30
screen.blit(resumeText, (resumeTextInfo.x, resumeTextInfo.y))
makeTextZone(variant,"Pause")
#####################
pygame.display.flip()
#####################
timeToReturn = int(time.time()) - timeBeforePause
return timeToReturn
def makeTimetZone(beginingOfGame):
timeZoneInformation = surfaceInformations()
timeZoneBackground = surfaceInformations()
timeZoneInformation.left = 2
timeZoneInformation.right = 2
xSize, ySize = screen.get_size()
myfont = pygame.font.SysFont("monospace", 14)
secondSinceBegining = int(time.time()) - beginingOfGame
m, s = divmod(secondSinceBegining, 60)
h, m = divmod(m, 60)
timePassed = "%02d:%02d" % (m, s)
heighTextZonePosition = ySize - textZoneHeigh
timeZoneText = myfont.render(timePassed, 1, (0, 0, 0))
timeZoneInformation.width, timeZoneInformation.height = myfont.size(
timePassed)
timeZoneInformation.x = xSize - timeZoneInformation.width - timeZoneInformation.left
timeZoneInformation.y = ySize - textZoneHeigh
timeZoneBackground.width = timeZoneInformation.width + \
(timeZoneInformation.left + timeZoneInformation.right)
timeZoneBackground.height = textZoneHeigh
timeZoneBackground.y = heighTextZonePosition
timeZoneBackground.x = timeZoneInformation.x - 2
timeZoneBackgroundSurface = pygame.Surface(
(timeZoneBackground.width, timeZoneBackground.height))
timeZoneBackgroundSurface.fill(creme_colour)
screen.blit(timeZoneBackgroundSurface,
(timeZoneBackground.x, timeZoneBackground.y))
screen.blit(timeZoneText, (timeZoneInformation.x, timeZoneInformation.y))
timeZoneBorder = pygame.draw.polygon(screen, yellow_colour, [[timeZoneBackground.x, timeZoneBackground.y], [timeZoneBackground.x, timeZoneBackground.y + timeZoneBackground.height - 2], [
timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y + timeZoneBackground.height - 2], [timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y]], 2)
return timeZoneBackground.width
normalUserInput = []
def aboutScreen(screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
functionHaveToContinue = True
keyboardInput = dict()
keyboardInput["mode"] = "normal"
keyboardInput["content"] = ""
while functionHaveToContinue and programHaveToContinue:
functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
if textToanalyse["mode"] == "escape":
functionHaveToContinue = False
# Appling variables
screen.fill(background_colour)
xSize, ySize = screen.get_size()
# Illustartion deffinition
illustrationInformation = surfaceInformations()
illustration = pygame.image.load(
mainDir + "/" + "about-illustration.png").convert_alpha()
illustrationInformation.width, illustrationInformation.height = illustration.get_size()
illustrationInformationRatio = illustrationInformation.width / \
illustrationInformation.height
if illustrationInformation.width > xSize:
illustrationInformation.width = xSize * (3 / 4)
illustrationInformation.height = illustrationInformation.width / \
illustrationInformationRatio
if illustrationInformation.height > ySize:
illustrationInformation.height = ySize * (3 / 4)
illustrationInformation.width = illustrationInformation.height * \
illustrationInformationRatio
illustrationInformation.y = (
ySize - illustrationInformation.height) / 2
illustrationInformation.x = (xSize - illustrationInformation.width) / 2
illustration = pygame.transform.scale(illustration, (int(
illustrationInformation.width), int(illustrationInformation.height)))
screen.blit(illustration, (illustrationInformation.x,
illustrationInformation.y))
makeTextZone("About", None)
#####################
pygame.display.flip()
#####################
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def playTrivial(currentMatchNumber,wtw):
if wtw == "ttl":
modulator = 0
elif wtw == "ltl":
modulator = 1
if currentMatchNumber != 0:
if ((currentMatchNumber - 1) % 4) == modulator:
answer = 1
elif ((currentMatchNumber - 2) % 4) == modulator:
answer = 2
elif ((currentMatchNumber - 3) % 4) == modulator:
answer = 3
else:
answer = random.randint(1, 3)
else:
answer = 0
return answer
def trivialAnalysis(currentMatchNumber, initialMatchNumber, wtw, userInput):
if currentMatchNumber != 0:
numberOfMatchToDel = 0
if currentMatchNumber >= 3:
authorisedNumbers = [3, 2, 1]
elif currentMatchNumber == 2:
authorisedNumbers = [2, 1]
elif currentMatchNumber == 1:
authorisedNumbers = [1]
if list(userInput)[0] == "=":
action = "application"
stringToEvaluate = userInput[1:]
elif list(userInput)[0] == "-":
action = "soustraction"
stringToEvaluate = userInput[1:]
else:
action = "soustraction"
stringToEvaluate = userInput
if representsInt(stringToEvaluate):
if action == "soustraction":
numberOfMatchToDel = int(stringToEvaluate)
elif action == "application":
numberOfMatchToDel = currentMatchNumber - int(stringToEvaluate)
else:
answer = [False, "“" + userInput + "” is not a valid syntax."]
if numberOfMatchToDel != 0:
if numberOfMatchToDel in authorisedNumbers:
numberLetByUser = initialMatchNumber - numberOfMatchToDel
answer = [True, numberLetByUser, numberOfMatchToDel]
else:
answer = [False, "“" +
str(numberOfMatchToDel) + "” is too big."]
elif (numberOfMatchToDel == 0):
answer = [False, "“0” is not a valid answer."]
else:
answer = [True, 0, 0]
return answer
def winingFallingScreenMatchExplosion(winer, variant, numberOfInitialMatch, time):
xSize, ySize = screen.get_size()
if winer == True:
matchInformation = surfaceInformations()
matchS = []
match = 0
while match < 1000:
matchS.append(pygame.image.load(
mainDir + "/" + "match-animation.png").convert_alpha())
matchInformation.heigh = random.randint(0, ySize)
matchInformation.weight = random.randint(0, xSize)
rotation = random.randint(0, 360)
matchS[match] = pygame.transform.rotate(matchS[match], rotation)
screen.blit(
matchS[match], (matchInformation.weight, matchInformation.heigh))
match = match + 1
elif winer == False:
print("machin")
def formateSecondToDotedTime(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
if h == 0:
formatedTime = "%02d:%02d" % (m, s)
else:
formatedTime = "%02d:%02d:%02d" % (h, m, s)
return formatedTime
def winingFallingScreen(winer, variant, numberOfInitialMatch, time):
global indicator_colour
global winingMainText_colour
global purple_colour
lineSeparationColor = (205, 153, 29)
helpText_color = (163, 143, 125)
fallingMainText_colour = winingMainText_colour
xSize, ySize = screen.get_size()
time = formateSecondToDotedTime(time)
if winer == True:
winingTextInfo = surfaceInformations()
winingTimeTextInfo = surfaceInformations()
winingHelpTextInfo = surfaceInformations()
screen.fill(indicator_colour)
# Bliting the text "You win"
winingFont = pygame.font.SysFont("CMU Typewriter Text", 44, bold=True)
winingText = winingFont.render("You win!", 1, winingMainText_colour)
winingTextInfo.width, winingTextInfo.height = winingFont.size("You win!")
winingTextInfo.x = (xSize - winingTextInfo.width) / 2
winingTextInfo.y = 40
screen.blit(winingText, (winingTextInfo.x, winingTextInfo.y))
# Bliting the time passed
winingTimeFont = pygame.font.SysFont("CMU Typewriter Text", 137, bold=True)
winingTimeText = winingTimeFont.render(time, 1, lineSeparationColor)
winingTimeTextInfo.width, winingTimeTextInfo.height = winingTimeFont.size(time)
winingTimeTextInfo.x = (xSize - winingTimeTextInfo.width) / 2
winingTimeTextInfo.y = 90
screen.blit(winingTimeText, (winingTimeTextInfo.x, winingTimeTextInfo.y))
# Bliting help text
helpText = "Type :new to begin new game or :help for more options."
winingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
winingHelpText = winingHelpFont.render(helpText, 1, helpText_color)
winingHelpTextInfo.width, winingHelpTextInfo.height = winingHelpFont.size(helpText)
winingHelpTextInfo.x = (xSize - winingHelpTextInfo.width) / 2
winingHelpTextInfo.y = ySize-90
screen.blit(winingHelpText, (winingHelpTextInfo.x, winingHelpTextInfo.y))
elif winer == False:
fallingTextInfo = surfaceInformations()
fallingTimeTextInfo = surfaceInformations()
fallingHelpTextInfo = surfaceInformations()
screen.fill(purple_colour)
# Bliting the text "You win"
fallingTextContent = "You loose!"
fallingFont = pygame.font.SysFont("CMU Typewriter Text", 52, bold=True)
fallingText = fallingFont.render(fallingTextContent, 1, fallingMainText_colour)
fallingTextInfo.width, fallingTextInfo.height = fallingFont.size(fallingTextContent)
fallingTextInfo.x = (xSize - fallingTextInfo.width) / 2
fallingTextInfo.y = (ySize/2) - fallingTextInfo.height
screen.blit(fallingText, (fallingTextInfo.x, fallingTextInfo.y))
# Bliting help text
helpText = "Type :new to begin new game or :help for more options."
fallingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
fallingHelpText = fallingHelpFont.render(helpText, 1, helpText_color)
fallingHelpTextInfo.width, fallingHelpTextInfo.height = fallingHelpFont.size(helpText)
fallingHelpTextInfo.x = (xSize - fallingHelpTextInfo.width) / 2
fallingHelpTextInfo.y = ySize-90
screen.blit(fallingHelpText, (fallingHelpTextInfo.x, fallingHelpTextInfo.y))
def printMarienbadListOfTry(screen, listOfTry):
global historyAreaWidth
historyFont = pygame.font.SysFont("monospace", 14, bold=True)
pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
pageUpDownColor = (220, 36, 4)
lineSeparationColor = (205, 153, 29)
realLineSeparationPlayed = (54,46,38)
xSize, ySize = screen.get_size()
arrowBackground = []
row = 0
arrowPosX = 40
delledNumberPosX = 53
scroowlingHistory = 0
rightHistoryAreaWidth = 0
for aTryGame in listOfTry:
tempSizeWidth, tempSizeHeigh = historyFont.size(aTryGame)
if tempSizeWidth > rightHistoryAreaWidth:
rightHistoryAreaWidth=tempSizeWidth
rightHistoryAreaWidth=rightHistoryAreaWidth+2
historyAreaWidth = rightHistoryAreaWidth + 35 + 20
historyZone = pygame.Surface((historyAreaWidth, ySize))
historyZone.fill(history_area_colour)
screen.blit(historyZone, (0, 0))
while row < len(listOfTry):
if (row % 2 == 0): # even
row_coulour = (234, 226, 215)
arrowSign = "←"
else: # odd
row_coulour = (207, 194, 184)
arrowSign = "→"
arrowBackground.append(pygame.Surface(
(historyAreaWidth, textZoneHeigh)))
arrowBackground[row].fill(row_coulour)
rowPosY = ySize - textZoneHeigh - \
(len(listOfTry) - row) * textZoneHeigh
historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
numberDelledText = historyFont.render(
str(listOfTry[row]), 1, (0, 0, 0))
screen.blit(arrowBackground[row], (0, rowPosY))
screen.blit(historyNumberText, (2, rowPosY + 2))
screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
row = row + 1
realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
lineHistorySeparation = pygame.Surface((1, ySize))
lineHistorySeparation.fill(lineSeparationColor)
screen.blit(lineHistorySeparation, (35, 0))
realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
realLineHistorySeparation.fill(realLineSeparationPlayed)
screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
if realHistoryHeigh > ySize:
pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
screen.blit(pageUpText, (historyAreaWidth + 8, 4))
shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
screen.blit(shadowTop, (0, 0))
def printListOfTry(screen, listOfTry):
historyFont = pygame.font.SysFont("monospace", 14, bold=True)
pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
pageUpDownColor = (220, 36, 4)
lineSeparationColor = (205, 153, 29)
realLineSeparationPlayed = (54,46,38)
xSize, ySize = screen.get_size()
arrowBackground = []
row = 0
arrowPosX = 40
delledNumberPosX = 53
historyZone = pygame.Surface((historyAreaWidth, ySize))
historyZone.fill(history_area_colour)
screen.blit(historyZone, (0, 0))
scroowlingHistory = 0
while row < len(listOfTry):
if (row % 2 == 0): # even
row_coulour = (234, 226, 215)
arrowSign = "←"
else: # odd
row_coulour = (207, 194, 184)
arrowSign = "→"
if listOfTry[row] == 1:
numberToDelColor = (0, 126, 223)
if listOfTry[row] == 2:
numberToDelColor = (40, 149, 0)
if listOfTry[row] == 3:
numberToDelColor = (215, 0, 95)
print("This row: " + str(row))
arrowBackground.append(pygame.Surface(
(historyAreaWidth, textZoneHeigh)))
print(len(arrowBackground))
arrowBackground[row].fill(row_coulour)
rowPosY = ySize - textZoneHeigh - \
(len(listOfTry) - row) * textZoneHeigh
historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
numberDelledText = historyFont.render(
str(listOfTry[row]), 1, numberToDelColor)
screen.blit(arrowBackground[row], (0, rowPosY))
screen.blit(historyNumberText, (2, rowPosY + 2))
screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
row = row + 1
print("It success")
realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
lineHistorySeparation = pygame.Surface((1, ySize))
lineHistorySeparation.fill(lineSeparationColor)
screen.blit(lineHistorySeparation, (35, 0))
realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
realLineHistorySeparation.fill(realLineSeparationPlayed)
screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
if realHistoryHeigh > ySize:
pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
screen.blit(pageUpText, (historyAreaWidth + 8, 4))
shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
screen.blit(shadowTop, (0, 0))
def showVariant(screen, wtw, posX):
yellow_colour = (205, 153, 29)
xSize, ySize = screen.get_size()
variantFont = pygame.font.SysFont("monospace", 14, bold=True)
wtwText = variantFont.render(wtw, 1, (225, 225, 225))
# Size deffinition
variantBackgroundInformation = surfaceInformations()
variantBackgroundInformation.left = 2
variantBackgroundInformation.right = 2
variantBackgroundInformation.height = textZoneHeigh
variantBackgroundInformation.y = ySize - textZoneHeigh
variantTextInformation = surfaceInformations()
variantTextInformation.width, variantTextInformation.height = variantFont.size(wtw)
variantBackgroundInformation.width = variantTextInformation.width
variantBackgroundInformation.width = variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
variantBackgroundInformation.x = xSize - variantBackgroundInformation.width - posX
variantTextInformation.x = variantBackgroundInformation.x + 1 + variantBackgroundInformation.left
variantTextInformation.y = variantBackgroundInformation.y + 1
#creation
variantBackground = pygame.Surface(
(variantBackgroundInformation.width, variantBackgroundInformation.height))
variantBackground.fill(yellow_colour)
#Blitting
screen.blit(variantBackground, (variantBackgroundInformation.x, variantBackgroundInformation.y))
screen.blit(wtwText, (variantTextInformation.x, variantTextInformation.y))
#Ending
return variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
def trivial(numberOfInitialMatch, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
allowedEntry = ["1", "2", "3"]
beginingOfGame = int(time.time())
currentNumberOfMatch = numberOfInitialMatch
normalTextInformation = surfaceInformations()
indicatorTextInformation = surfaceInformations()
listOfTry = []
functionHaveToContinue = True
myfont = pygame.font.SysFont("monospace", 14)
errorToDisplay = False
weHaveAWiner = False
winer = None
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
beginingOfGame = makeAPause("Trivial", numberOfInitialMatch, wtw, beginingOfGame)
# Redifining variables
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
# indicator area variables
indicatorPosition = ((historyAreaWidth + ((xSize - historyAreaWidth) -
indicatorDim[0]) / 2), ySize - textZoneHeigh - indicatorDim[1])
indicatorArea = pygame.Surface((indicatorDim[0], indicatorDim[1]))
# Appling variables
screen.fill(background_colour)
if weHaveAWiner == False:
printListOfTry(screen, listOfTry)
# Indicator area deffinition
indicatorArea.fill(indicator_colour)
screen.blit(indicatorArea, (indicatorPosition[
0], indicatorPosition[1]))
indicatorBorderPositionLeft = (
int(indicatorPosition[0] + circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionLeft[
0], indicatorBorderPositionLeft[1]), circleRadius)
indicatorBorderPositionRight = (int(
indicatorPosition[0] + indicatorDim[0] - circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionRight[
0], indicatorBorderPositionRight[1]), circleRadius)
indicatorRadiusCompleterPosition = (
indicatorPosition[0] + circleRadius, indicatorPosition[1] - circleRadius)
indicatorRadiusCompleterDim = (
indicatorDim[0] - 2 * circleRadius, circleRadius)
indicatorRadiusCompleterArea = pygame.Surface(
(indicatorRadiusCompleterDim[0], indicatorRadiusCompleterDim[1]))
indicatorRadiusCompleterArea.fill(indicator_colour)
screen.blit(indicatorRadiusCompleterArea, (indicatorRadiusCompleterPosition[
0], indicatorRadiusCompleterPosition[1]))
# Matchs deffinition
maxMatchAreaDim = [xSize - historyAreaWidth - (2 * matchAreaBorder.right), ySize - textZoneHeigh - indicatorDim[
1] - matchAreaBorder.top - matchAreaBorder.bottom]
maxMatchDim = [0, 0]
maxMatchDim[0] = maxMatchAreaDim[0] / (numberOfInitialMatch * 1.5)
maxMatchDim[1] = maxMatchDim[0] * matchPicRatio
if maxMatchDim[1] > maxMatchAreaDim[1]:
matchDim = [int(maxMatchAreaDim[1] / matchPicRatio),
int(maxMatchAreaDim[1])]
else:
matchDim = [int(maxMatchDim[0]), int(
maxMatchDim[0] * matchPicRatio)]
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
matchMaxWidth, matchMaxHeight = tempImageMatch.get_rect().size
if matchDim[0] > matchMaxWidth:
matchDim[0] = matchMaxWidth
matchDim[1] = matchMaxHeight
matchAreaDim = [matchDim[0] * numberOfInitialMatch, matchDim[1]]
matchAreaPos = [historyAreaWidth + matchAreaBorder.left + (
(maxMatchAreaDim[0] - matchAreaDim[0]) / 2), (ySize - indicatorDim[1] - matchDim[1]) / 2]
secondMatchAreaPos = [matchAreaPos[
0] + (matchAreaDim[0] - (numberOfInitialMatch * 1.5) * matchDim[0]) / 2, matchAreaPos[1]]
matchRessizing = matchMaxWidth/matchDim[0]
if wtw == "ttl":
lastBurnedMatch = [1, 2, 3]
elif wtw == "ltl":
lastBurnedMatch = [2, 3, 4]
i = 0
matchS = []
while i < numberOfInitialMatch:
if i < currentNumberOfMatch:
if currentNumberOfMatch in lastBurnedMatch:
initialSignDistanceToMatch = matchDim[1]/7
if i+1 in lastBurnedMatch:
matchS.append(pygame.image.load(
mainDir + "/" + "match-burned.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
initialSignDistanceToMatch = matchDim[1]/24
if i >= (currentNumberOfMatch - 3):
matchS.append(pygame.image.load(
mainDir + "/" + "match-allowed.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match-void.png").convert_alpha())
matchLeftVoid = 0
if i != 0:
matchLeftVoid = matchDim[0] / 2
currentMatchPos = [secondMatchAreaPos[
0] + i * (matchLeftVoid + matchDim[0]), secondMatchAreaPos[1]]
matchS[i] = pygame.transform.scale(
matchS[i], (matchDim[0], matchDim[1]))
screen.blit(
matchS[i], (currentMatchPos[0], currentMatchPos[1]))
if i == 0:
#adding crown or warning sign
initialSignPos = [0,0]
initialSignPos[1] = currentMatchPos[1] - initialSignDistanceToMatch
if wtw == "ttl":
initialSign = pygame.image.load(mainDir + "/" + "crown.png").convert_alpha()
if wtw == "ltl":
initialSign = pygame.image.load(mainDir + "/" + "skull.png").convert_alpha()
initialSignSize = initialSign.get_rect().size
initialSignSize = [int(initialSignSize[0]/matchRessizing),int(initialSignSize[1]/matchRessizing)]
initialSign = pygame.transform.scale(initialSign, (initialSignSize[0], initialSignSize[1]))
initialSignPos[0] = (currentMatchPos[0]+(matchDim[0]/2)) - (initialSignSize[0]/2)
screen.blit(initialSign, (initialSignPos[0], initialSignPos[1]))
i = i + 1
indicatorFont = pygame.font.SysFont("monospace", 34)
indicatorTextContent = str(
currentNumberOfMatch) + "/" + str(numberOfInitialMatch)
indicatorText = indicatorFont.render(
indicatorTextContent, 1, (255, 255, 255))
indicatorTextInformation.width, indicatorTextInformation.height = indicatorFont.size(
indicatorTextContent)
indicatorTextInformation.x = indicatorPosition[
0] + (indicatorDim[0] - indicatorTextInformation.width) / 2
indicatorTextInformation.y = indicatorPosition[1] + 5
screen.blit(indicatorText, (indicatorTextInformation.x,
indicatorTextInformation.y))
if finalNormalUserInput:
getFromAnalysis = trivialAnalysis(
currentNumberOfMatch, numberOfInitialMatch, wtw, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
userPlayed = getFromAnalysis[2]
listOfTry.append(userPlayed)
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playTrivial(
currentNumberOfMatch - userPlayed,wtw)
listOfTry.append(computerPlayed)
currentNumberOfMatch = currentNumberOfMatch - userPlayed
if ((currentNumberOfMatch == 0) and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = True
else:
currentNumberOfMatch = currentNumberOfMatch - computerPlayed
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = False
numberOfMatchDelled = numberOfInitialMatch - currentNumberOfMatch
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
weHaveAWiner = True
timeOfEndOfGame = int(time.time()) - beginingOfGame
else:
print("we have a winer")
timeOfEndOfGame = int(time.time()) - beginingOfGame
if textToanalyse in allowedEntry:
normalTextZone = myfont.render(
"".join(textToanalyse), 1, (255, 255, 255))
screen.blit(normalTextZone, (100, 100))
makeTextZone("Trivial", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = myfont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = myfont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
# testSurface = pygame.Surface((indicatorTextInformation.width, indicatorTextInformation.height))
# testSurface.fill(red)
# screen.blit(testSurface, (indicatorTextInformation.x,indicatorTextInformation.y))
#####################
pygame.display.flip()
#####################
while functionHaveToContinue and programHaveToContinue:
winingFallingScreen(
winer, wtw, numberOfInitialMatch, timeOfEndOfGame)
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
makeTextZone("Trivial", None)
#####################
pygame.display.flip()
#####################
return False
def marienbadInitialColumns(numberOfLines):
    """Build the starting marienbad layout as per-column match counts.

    The board is a pyramid of 2*numberOfLines - 1 columns whose heights
    rise from 1 up to numberOfLines and fall back down to 1,
    e.g. numberOfLines=3 -> [1, 2, 3, 2, 1].
    """
    ascending = list(range(1, numberOfLines + 1))
    descending = list(range(numberOfLines - 1, 0, -1))
    return ascending + descending
def marienbadIsItAWinerSituation(matchMatrix, wtw):
    """Return the list of non-empty column indexes when the position is a
    terminal ("winning") one for the given win rule, otherwise False.

    matchMatrix -- per-column match counts.
    wtw         -- "ttl" (take the last) or "ltl" (let the last); any other
                   value yields False.
    """
    occupied = [index for index, count in enumerate(matchMatrix) if count != 0]
    if wtw == "ttl":
        # take-the-last: terminal once a single column still holds matches
        return occupied if len(occupied) == 1 else False
    if wtw == "ltl":
        # let-the-last: one column holding several matches, or exactly two
        # columns holding one match each
        if len(occupied) == 1 and matchMatrix[occupied[0]] > 1:
            return occupied
        if (len(occupied) == 2 and matchMatrix[occupied[0]] == 1
                and matchMatrix[occupied[1]] == 1):
            return occupied
        return False
    return False
def getNimSum(matchMatrix):
    """Return, for each pyramid line j, how many columns hold more than j
    matches (a per-line occupancy count — not a true binary nim-sum,
    despite the name).

    matchMatrix -- per-column match counts (length 2*lines - 1 for a full
                   board).

    Fixes: drops the unused ``columns``/``i`` locals of the original, and
    no longer raises IndexError when a column is taller than the computed
    number of lines.
    """
    numberOfLines = (len(matchMatrix) + 1) // 2
    # lineSums[j] = number of columns still taller than line j.
    return [sum(1 for column in matchMatrix if column > j)
            for j in range(numberOfLines)]
def playMarienbad(matchMatrix, wtw):
    """Pick the column the computer removes one match from.

    matchMatrix -- current per-column match counts (caller mutates it).
    wtw         -- win rule; unused here but kept for interface
                   compatibility with the trivial variant's player.

    Returns a column index (int).

    Bug fix: on the "no winning move" branch the original returned a
    [column, numberOfMatch] list, while the caller indexes
    matchMatrix[computerPlayed] with the return value — which raised
    TypeError.  Both branches now return a plain int.
    """
    # Columns that still hold at least one match.
    allowdedColumnToPlay = [i for i, column in enumerate(matchMatrix) if column > 0]
    # Digit-parity heuristic over the binary line sums (original algorithm):
    # a winning move exists when the decimal sum of the binary line sums
    # contains an odd digit.
    lineSumsBinari = calculateLineSumsBinari(getNimSum(matchMatrix))
    listOfDigits = list(str(sum(lineSumsBinari)))
    itIsPossibleToWin = any(int(aDigit) % 2 == 1 for aDigit in listOfDigits)
    if itIsPossibleToWin:
        # Play in the column the parity analysis designates.
        return marienbadWitchMatchLineContainOdd(matchMatrix)
    # No winning move detected: play any column that still has matches.
    return random.choice(allowdedColumnToPlay)
def marienbadWitchColumnIsOdd(listOfDigits):
    """Return the index of the first odd digit in listOfDigits.

    listOfDigits -- sequence of one-character digit strings.

    Returns None when every digit is even; callers must handle that case.
    (Rewritten with enumerate instead of the range(len(...)) loop.)
    """
    for index, digit in enumerate(listOfDigits):
        if int(digit) % 2 == 1:
            return index
    return None
def calculateLineSumsBinari(lineSums):
    """Convert each decimal count in lineSums into its binary digit string
    read back as a decimal int (e.g. 5 -> 101, 3 -> 11).

    lineSums -- list of non-negative ints (output of getNimSum).

    Fix: replaces the manual append loop with a comprehension and drops the
    unused ``i`` counter.
    """
    return [int("{0:b}".format(decimalNum)) for decimalNum in lineSums]
def marienbadWitchMatchLineContainOdd(matchMatrix):
    """Locate the column the computer should play, based on the digit-parity
    analysis of the binary line sums.

    matchMatrix -- per-column match counts.

    Returns a column index (int).  NOTE(review): if no odd digit exists,
    theSumColumnContainingTheOddDigit is None and indexing below raises; if
    no column height equals higherMatchLine, theColumn is never bound
    (NameError) — confirm callers only reach this when a winning move
    exists.  Also prints debug output to stdout.
    """
    lineSums = getNimSum(matchMatrix)
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    finalSum = sum(lineSumsBinari)
    listOfDigits=list(str(finalSum))
    # Position (within the decimal sum's digits) of the first odd digit.
    theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
    # Convert LineSums to Binary representation
    lineSumsBinari = []
    i = 0
    for decimalNum in lineSums:
        lineSumsBinari.append(int("{0:b}".format(decimalNum)))
    # Normalise non-sinificative zeros
    # (left-pad every binary string to the width of the longest one).
    i = 0
    maxLen = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        if tempLen > maxLen:
            maxLen = tempLen
        i=i+1
    i = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        howZeroToAdd = maxLen - tempLen
        if howZeroToAdd > 0:
            for j in range(1,howZeroToAdd+1):
                lineSumsBinari[i] = "0" + str(lineSumsBinari[i])
        else:
            lineSumsBinari[i] = str(lineSumsBinari[i])
        i=i+1
    #Only let the theSumColumnContainingTheOddDigitNTH digit in each binaryNum
    octetsOfDesiredColumn = []
    i = 0
    for binaryNum in lineSumsBinari:
        extractedOctet = list(str(binaryNum))[theSumColumnContainingTheOddDigit]
        octetsOfDesiredColumn.append(extractedOctet)
        i=i+1
    # Search the lines containing 1
    i = 0
    linesImpliyingOdd = []
    for i in range(0,len(octetsOfDesiredColumn)):
        if octetsOfDesiredColumn[i] == "1":
            linesImpliyingOdd.append(i)
        i=i+1
    # Highest line whose extracted bit is 1.
    higherMatchLine = linesImpliyingOdd[-1]
    # Search the column matching the lines.
    # (Last column whose height equals higherMatchLine wins the scan.)
    i = 0
    for match in matchMatrix:
        if match == higherMatchLine:
            theColumn=i
        i=i+1
    print("matchMatrix: " + str(matchMatrix))
    print("lineSums: " + str(lineSums))
    print("higherMatchLine: " + str(higherMatchLine))
    print("Là ↓")
    print(theColumn)
    return(theColumn)
def marienbadAnalysis(matchMatrix, userInput):
    """Parse and apply a player move for the marienbad variant.

    Accepted syntax: "<column>-<n>" removes n matches from the column,
    "<column>=<n>" leaves exactly n matches in the column (spaces allowed).

    matchMatrix -- per-column match counts, mutated in place on success.
    userInput   -- raw text typed by the player.

    Returns:
      [True, matchMatrix, "col-n"] on success,
      [False, message] on an invalid move,
      [False, 0] when no column has matches left (nothing to play).

    Fixes vs. original: the pattern is matched once instead of twice, the
    stray debug print("True") is gone, and the dead locals (including a
    numberOfLines computed with a formula inconsistent with
    marienbadInitialColumns) are removed.
    """
    allowedColumns = range(len(matchMatrix))
    # The game is only playable while at least one column holds a match.
    if not any(column != 0 for column in matchMatrix):
        return [False, 0]
    # One regex both validates the syntax and extracts the operands.
    syntaxToExtractOptions = "^ *(?P<column>[0-9]+) *(?P<operator>(=|-)) *(?P<numberOfMatchUsed>[0-9]+) *$"
    deletingMatchOparation = re.match(syntaxToExtractOptions, userInput)
    if deletingMatchOparation is None:
        return [False, "“" + userInput + "” is not a valid syntax."]
    columnToDelOnIt = int(deletingMatchOparation.group("column"))
    numberOfMatchUsed = int(deletingMatchOparation.group("numberOfMatchUsed"))
    delletingOperator = deletingMatchOparation.group("operator")
    if columnToDelOnIt not in allowedColumns:
        return [False, "“" + deletingMatchOparation.group("column") + "” is not in valid range."]
    if numberOfMatchUsed == 0 and delletingOperator == "-":
        return [False, "You can not del no match!"]
    if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
        if delletingOperator == "=":
            return [False, "You can not set a number higher than content."]
        return [False, "You can not use a number higher than content."]
    if delletingOperator == "=":
        # "=n": keep n matches, i.e. remove (current - n).
        numberOfMatchsToDel = matchMatrix[columnToDelOnIt] - numberOfMatchUsed
    else:
        # "-n": remove n matches.
        numberOfMatchsToDel = numberOfMatchUsed
    matchMatrix[columnToDelOnIt] = matchMatrix[columnToDelOnIt] - numberOfMatchsToDel
    return [True, matchMatrix, str(columnToDelOnIt) + "-" + str(numberOfMatchsToDel)]
def marienbad(numberOfLines, wtw, screen):
    """Run the marienbad (multi-column Nim) game loop on the given screen.

    numberOfLines -- size of the pyramid (2*numberOfLines - 1 columns).
    wtw           -- win rule, "ttl" or "ltl".
    screen        -- the pygame display surface.

    Reads/writes many module-level globals (input buffers, layout metrics).
    Returns False when the loop ends.  NOTE(review): the else-branch at the
    bottom pairs with the inner "if weHaveAWiner == False" and uses
    ``winer``/``numberOfInitialMatch`` that are never updated here — the
    win-detection path looks unfinished; confirm.
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    global finalNormalUserInput
    global historyAreaWidth
    maximumMatchMatrix = marienbadInitialColumns(numberOfLines)
    currentMatchMatrix = copy.deepcopy(maximumMatchMatrix)
    numberOfColumns = numberOfLines*2 - 1
    # Initialisation
    beginingOfGame = int(time.time())
    listOfTry = []
    functionHaveToContinue = True
    errorToDisplay = False
    weHaveAWiner = False
    winer = None
    while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
        userPlayed = 0
        computerPlayed = 0
        if weHaveAWiner == False:
            # Poll keyboard/window events and the current input-mode state.
            functionHaveToContinue, textToanalyse = analyseTyping("marienbad", numberOfLines, wtw)
            if textToanalyse["mode"] == "pause":
                print("In pause")
                beginingOfGame = makeAPause("Marienbad", numberOfInitialMatch, wtw, beginingOfGame)
            # Redifining variables
            xSize, ySize = screen.get_size()
            gameAreaDim[0] = xSize - historyAreaWidth
            # loading images
            tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
            # Creatiing surface information
            gameAreaInfo = surfaceInformations()
            realGameAreaInfo = surfaceInformations()
            matchInfo = surfaceInformations()
            maxMatchInfo = surfaceInformations()
            matchAreaInfo = surfaceInformations()
            normalTextInformation = surfaceInformations()
            wtwZoneInfo = surfaceInformations()
            columnNumberInfo = surfaceInformations()
            matchHorizontalSeparation = 0
            # Fixing constants
            matchInfo.top = 10
            realGameAreaInfo.top = 20
            realGameAreaInfo.bottom = 30
            realGameAreaInfo.left = 30
            realGameAreaInfo.right = 30
            # Calculatiing element’s size
            realGameAreaInfo.height = ySize - textZoneHeigh - realGameAreaInfo.top - realGameAreaInfo.bottom
            realGameAreaInfo.width = xSize - historyAreaWidth - realGameAreaInfo.left - realGameAreaInfo.right
            maxMatchInfo.width, maxMatchInfo.height = tempImageMatch.get_rect().size
            matchInfo.height = realGameAreaInfo.height / (numberOfLines*1.2)
            matchInfo.top = matchInfo.height*0.2
            # Never scale a match sprite above its native size.
            if matchInfo.height >= maxMatchInfo.height:
                matchInfo.height = maxMatchInfo.height
                matchInfo.width = maxMatchInfo.width
            else:
                matchInfo.width = matchInfo.height / matchPicRatio
            matchHorizontalSeparation = (realGameAreaInfo.width - (matchInfo.width*numberOfColumns)) / (numberOfColumns-1)
            if matchHorizontalSeparation > matchInfo.height*0.66:
                matchHorizontalSeparation = matchInfo.height*0.66
            # calculating positions
            matchAreaInfo.width = matchInfo.width*numberOfColumns + (numberOfColumns-1)*matchHorizontalSeparation
            realGameAreaInfo.x = historyAreaWidth + realGameAreaInfo.left + (realGameAreaInfo.width-matchAreaInfo.width)/2
            matchAreaInfo.height = matchInfo.height*numberOfLines + (numberOfLines-1)*matchInfo.top
            realGameAreaInfo.y = realGameAreaInfo.top + (realGameAreaInfo.height-matchAreaInfo.height)/2
            # One surfaceInformations per match, column by column, stacked
            # bottom-up within each column.
            matchPositions = []
            i = 0
            for numberOfMatchInAColumn in maximumMatchMatrix:
                j = 0
                matchPositions.append([])
                cumuledX = matchInfo.width + matchHorizontalSeparation
                while j < numberOfMatchInAColumn:
                    matchPositions[i].append(surfaceInformations())
                    cumuledY = matchInfo.height + matchInfo.top
                    matchPositions[i][j].x = realGameAreaInfo.x + i*cumuledX
                    matchPositions[i][j].y = ySize-textZoneHeigh - realGameAreaInfo.y - (j+1)*cumuledY
                    j=j+1
                i = i+1
            # Bliting first interface
            screen.fill(background_colour)
            printMarienbadListOfTry(screen, listOfTry)
            # Treating normal imput
            if finalNormalUserInput:
                getFromAnalysis = marienbadAnalysis(currentMatchMatrix, finalNormalUserInput)
                finalNormalUserInput = False
                if getFromAnalysis[0] == True:
                    currentMatchMatrix = getFromAnalysis[1]
                    listOfTry.append(getFromAnalysis[2])
                else:
                    errorToDisplay = getFromAnalysis[1]
                # After a valid player move, the computer answers immediately.
                if getFromAnalysis[0] == True:
                    computerPlayed = playMarienbad(currentMatchMatrix,wtw)
                    listOfTry.append(str(computerPlayed) + "-" + "1")
                    currentMatchMatrix[computerPlayed] = currentMatchMatrix[computerPlayed]-1
            # Defining if we are in wining position
            winingColumn = marienbadIsItAWinerSituation(currentMatchMatrix, wtw)
            # Bliting the game
            columnNumberFont = pygame.font.SysFont("monospace", 18, bold=True)
            i = 0
            for column in matchPositions:
                j = 0
                for match in column:
                    # Removed matches render as "void"; in a terminal position
                    # the remaining matches render as "burned".
                    if (currentMatchMatrix[i] < maximumMatchMatrix[i]) and (j+1 > currentMatchMatrix[i]):
                        visualMatch = pygame.image.load(mainDir + "/" + "match-void.png").convert_alpha()
                    else:
                        if winingColumn:
                            visualMatch = pygame.image.load(mainDir + "/" + "match-burned.png").convert_alpha()
                        else:
                            visualMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
                    visualMatch = pygame.transform.scale(visualMatch, (int(matchInfo.width), int(matchInfo.height)))
                    screen.blit(visualMatch, (match.x, match.y))
                    j=j+1
                # Column index label under each column.
                columnNumberImage = columnNumberFont.render(str(i), 1, (0, 0,0))
                columnNumberInfo.width, columnNumberInfo.height = columnNumberImage.get_size()
                columnNumberInfo.x = column[0].x + (column[0].width/2) - (columnNumberInfo.width/2)
                screen.blit(columnNumberImage, (columnNumberInfo.x, column[0].y+matchInfo.height+12))
                i = i+1
            # Bliting second interface
            makeTextZone("Marienbad", None)
            timeZoneWidth = makeTimetZone(beginingOfGame)
            wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
            # Display normal mode text
            normalFont = pygame.font.SysFont("monospace", 14)
            if textToanalyse["mode"] == "normal":
                errorToDisplay = False
                normalText = normalFont.render(
                    "".join(textToanalyse["content"]), 1, (255, 255, 255))
                normalTextInformation.width, normalTextInformation.height = normalText.get_size()
                normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
                normalTextInformation.y = ySize - textZoneHeigh
                screen.blit(normalText, (normalTextInformation.x,
                                         normalTextInformation.y))
            if errorToDisplay != False:
                normalText = normalFont.render(errorToDisplay, 1, red)
                normalTextInformation.width, normalTextInformation.height = normalText.get_size()
                normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
                normalTextInformation.y = ySize - textZoneHeigh
                screen.blit(normalText, (normalTextInformation.x,
                                         normalTextInformation.y))
            #####################
            pygame.display.flip()
            #####################
        else:
            print("we have a winer")
            timeOfEndOfGame = int(time.time()) - beginingOfGame
            while functionHaveToContinue and programHaveToContinue:
                winingFallingScreen(
                    winer, wtw, numberOfInitialMatch, timeOfEndOfGame)
                functionHaveToContinue, textToanalyse = analyseTyping(
                    "marienbad", numberOfInitialMatch, wtw)
                makeTextZone("Marienbad", None)
                #####################
                pygame.display.flip()
                #####################
    return False
# Module state: the main loop keeps running while programHaveToContinue is
# True; generalState carries the variant/number/wtw requested by the player
# (updated by the "new ..." ex command in analyseTyping).
programHaveToContinue = True
variant = None
generalState = whatToDo()
def main(variant="trivial", number=numberOfInitialMatch, wtw="ttl"):
    """Top-level game loop: repeatedly launch the variant currently selected
    in generalState until the player quits.

    variant -- variant name ("trivial" or "marienbad")
    number  -- initial number of matches / pyramid lines
    wtw     -- win rule, "ttl" (take the last) or "ltl" (let the last)
    """
    global generalState
    global programHaveToContinue
    while programHaveToContinue:
        # NOTE(review): these conditions look inverted (a truthy argument is
        # immediately overwritten from generalState), but re-reading
        # generalState each pass is what lets the "new ..." command restart
        # with fresh settings — confirm intent.
        if variant not in [0, None, ""]:
            variant = generalState.variant
        if number not in [0, None, ""]:
            number = generalState.number
        if wtw not in [0, None, ""]:
            wtw = generalState.wtw
        if variant == "trivial":
            trivial(number, wtw, screen)
        elif variant == "marienbad":
            marienbad(number, wtw, screen)
# Entry point: start with the default variant and settings.
main("trivial", numberOfInitialMatch, "ttl")
| 38.76933 | 307 | 0.627372 |
import os
import random
import sys
import time
import re
import copy
from optparse import OptionParser
import pygame
from pygame.locals import *
# Command-line interface: -m number of matches, -v variant, -w win rule.
version = "0.1"
usage = "usage: %prog [ --lvl [0-5] | ]"
parser = OptionParser(usage=usage, version="%prog 0.1")
parser.add_option("-m", help="Number of match",
                  default=0, action="store", dest="numberOfMatch")
parser.add_option("-v", help="The variant of Nim",
                  default=0, action="store", dest="varient")
# NOTE(review): "-w" reuses dest="varient", so it clobbers whatever "-v"
# parsed; it was presumably meant to store into its own dest (e.g. "wtw").
parser.add_option("-w", help="Mode, there is two values possibles “ttl” and “ltl”",
                  default=0, action="store", dest="varient")
(options, args) = parser.parse_args()
# Fall back to 15 matches when -m was not given (the default is the falsy 0).
if not options.numberOfMatch:
    options.numberOfMatch = 15
innitialNumberOfMatch = int(options.numberOfMatch)
currentNumberOfMatch = int(innitialNumberOfMatch)
class borderSize:
    """Record of the four border widths around a surface (all start at 0)."""

    def __init__(self):
        self.top = 0
        # The original only defined the misspelled "bototm"; both spellings
        # are kept so any existing attribute access stays valid.
        self.bototm = 0
        self.bottom = 0
        self.right = 0
        self.left = 0
class surfaceInformations:
    """Mutable record describing a blittable surface: size, position and
    border widths, all initialised to zero."""

    def __init__(self):
        self.width = 0
        self.height = 0
        self.y = 0
        self.x = 0
        self.top = 0
        # Keep the original misspelling alongside the fixed name so both
        # attribute spellings keep working.
        self.bototm = 0
        self.bottom = 0
        self.right = 0
        self.left = 0
        # The original guarded "if self.y != 0: self.ratio = self.x / self.y",
        # which was always False right after y was set to 0, so .ratio never
        # existed; give it a defined default instead.
        self.ratio = 0
class whatToDo:
    """Game configuration requested by the player: which variant to run
    next, its size, and the win rule ("ttl" = take the last,
    "ltl" = let the last)."""

    def __init__(self):
        self.programHaveToContinue = True
        self.variant = "trivial"
        # numberOfInitialMatch is a module-level global defined further down
        # the file; it is resolved at instantiation time, not class time.
        self.number = numberOfInitialMatch
        self.wtw = "ttl"
print("This is Nim " + version + "\n")
# Directory containing this script; used to locate the bundled images.
mainDir = os.path.dirname(os.path.realpath(__file__))
# UI palette (RGB tuples).
background_colour = (144, 124, 106)
text_zone_colour = (81, 69, 58)
history_area_colour = (69, 59, 49)
indicator_colour = (70, 60, 50)
prompt_colour = (25, 21, 18)
creme_colour = (236, 228, 217)
yellow_colour = (205, 153, 29)
winingMainText_colour = (236, 232, 228)
purple_colour = (133, 0, 58)
red = (225, 0, 0)
class variants:
    """Description of a Nim variant: display name, default size, win rule.

    Generalized: the constructor now accepts the values directly while the
    no-argument form keeps the original defaults, so existing callers that
    mutate the instance afterwards still work.
    """

    def __init__(self, name="", number=15, wtw="ttl"):
        self.name = name      # display name of the variant
        self.number = number  # default number of matches / pyramid lines
        self.wtw = wtw        # "ttl" (take the last) or "ltl" (let the last)
# Pre-built descriptors of the known variants.
# NOTE(review): the names "trivial" and "marienbad" are later rebound to the
# game functions of the same name further down the file — confirm intended.
trivial = variants()
trivial.name = "Trivial"
trivial.number = 15
trivial.wtw = "ttl"
marienbad = variants()
marienbad.name = "Marienbad"
marienbad.number = 5
marienbad.wtw = "ttl"
knowenVarients = [trivial, marienbad]
viarentNames = []
for varientRow in knowenVarients:
    viarentNames.append(varientRow.name)
# Window and layout constants.
xSize = 640
ySize = 480
textZoneHeigh = 16
maxPaddingBetwenMatch = 3
# Height/width ratio of the match sprite.
matchPicRatio = 6.925
numberOfInitialMatch = innitialNumberOfMatch
historyAreaWidth = 67
circleRadius = 10
gameAreaDim = [0, 0]
matchAreaDim = [0, 0]
matchAreaPos = [0, 0]
indicatorDim = [127, 55]
matchAreaBorder = borderSize()
matchAreaBorder.top = 40
matchAreaBorder.bottom = 80
matchAreaBorder.left = 40
matchAreaBorder.right = 40
trianglePromptWidth = 7
# Input-mode state shared with analyseTyping().
# NOTE(review): "normaUserInput" looks like a typo of "normalUserInput" and
# both buffers are initialised twice — only the last assignments matter.
textUserInput = []
normaUserInput = []
textUserInput = []
normalUserInput = []
exMode = False
normalMode = True
textToAnalyse = ""
normalTextToAnalyse = ""
allowedMatchDel = ["1", "2", "3"]
pygame.init()
screen = pygame.display.set_mode((xSize, ySize), RESIZABLE)
# Keys accepted as printable input in ex mode.
charInputed = [K_TAB, K_SPACE, K_EXCLAIM, K_QUOTEDBL, K_HASH, K_DOLLAR, K_AMPERSAND, K_QUOTE, K_LEFTPAREN, K_RIGHTPAREN, K_ASTERISK, K_PLUS, K_COMMA, K_MINUS, K_PERIOD, K_SLASH, K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9, K_COLON, K_SEMICOLON, K_LESS, K_EQUALS, K_GREATER, K_QUESTION,
               K_AT, K_LEFTBRACKET, K_BACKSLASH, K_RIGHTBRACKET, K_CARET, K_UNDERSCORE, K_BACKQUOTE, K_a, K_b, K_c, K_d, K_e, K_f, K_g, K_h, K_i, K_j, K_k, K_l, K_m, K_n, K_o, K_p, K_q, K_r, K_s, K_t, K_u, K_v, K_w, K_x, K_y, K_z, K_KP_PERIOD, K_KP_DIVIDE, K_KP_MULTIPLY, K_KP_MINUS, K_KP_PLUS, K_KP_EQUALS]
def makeTextZone(nameToDisplay, secondName):
    """Draw the bottom status bar: prompt flag(s) plus the current ex-mode
    input buffer.

    nameToDisplay -- main prompt label (e.g. the variant name).
    secondName    -- optional secondary label (e.g. "Pause"); pass None to
                     draw only the main prompt.
    """
    xSize, ySize = screen.get_size()
    textZone = pygame.Surface((xSize, textZoneHeigh))
    textZone.fill(text_zone_colour)
    heighTextZonePosition = ySize - textZoneHeigh
    promptFont = pygame.font.SysFont("monospace", 14, bold=True)
    secondPromptZone = pygame.Surface((1, 1))
    secondPromptZoneInfo = surfaceInformations()
    secondEcart = 0
    secondLittleEcart = 0
    secondPromptZoneInfo.width = 0
    if secondName != None:
        # Size and prepare the secondary (yellow) prompt flag.
        textSecondSizeWidth, textSecondSizeHeight = promptFont.size(secondName)
        secondPromptZoneInfo.width = textSecondSizeWidth + 8
        secondPromptZoneInfo.heigh = textZoneHeigh
        secondPromptZone = pygame.Surface((secondPromptZoneInfo.width, secondPromptZoneInfo.heigh))
        secondPromptZone.fill(yellow_colour)
        secondPromptText = promptFont.render(secondName, 1, prompt_colour)
        secondTextSizeWidth, secondTextSizeHeight = promptFont.size(secondName)
        secondPromptTriangle = pygame.draw.polygon(screen, prompt_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
        secondEcart = secondPromptZoneInfo.width + trianglePromptWidth
        secondLittleEcart = trianglePromptWidth
    # Main prompt flag.
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    promptZoneInfo = surfaceInformations()
    promptZoneInfo.width = textSizeWidth + 8
    promptZoneInfo.heigh = textZoneHeigh
    promptZone = pygame.Surface((promptZoneInfo.width + secondLittleEcart, promptZoneInfo.heigh))
    promptZone.fill(prompt_colour)
    promptText = promptFont.render(nameToDisplay, 1, (205, 153, 29))
    textSizeWidth, textSizeHeight = promptFont.size(nameToDisplay)
    # Render the current ex-mode input buffer with the default font.
    myfont = pygame.font.SysFont("monospace", 14)
    label = myfont.render("".join(textUserInput), 1, (255, 255, 255))
    screen.blit(textZone, (0, heighTextZonePosition))
    screen.blit(promptZone, (0 + secondPromptZoneInfo.width, heighTextZonePosition))
    promptTriangle = pygame.draw.polygon(screen, prompt_colour, [[promptZoneInfo.width + secondEcart, ySize - textZoneHeigh], [
        promptZoneInfo.width + secondEcart, ySize], [promptZoneInfo.width + secondEcart + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(promptText, (4 + secondEcart, heighTextZonePosition + 1))
    if secondName != None:
        # Blit the secondary flag over the left edge of the bar.
        screen.blit(secondPromptZone, (0, heighTextZonePosition))
        screen.blit(secondPromptText, (4, heighTextZonePosition + 1))
        secondPromptTriangle = pygame.draw.polygon(screen, yellow_colour, [[secondPromptZoneInfo.width, ySize - textZoneHeigh], [
            secondPromptZoneInfo.width, ySize], [secondPromptZoneInfo.width + trianglePromptWidth, ySize - (textZoneHeigh / 2)]], 0)
    screen.blit(label, (promptZoneInfo.width +
                        trianglePromptWidth + 4, heighTextZonePosition))
# Last committed "normal mode" input line (set by analyseTyping on Enter).
finalNormalUserInput = ""
def analyseTyping(variant, numberOfInitialMatch, wtw):
    """Poll pygame events and translate them into a keyboard command.

    Handles window resize/quit, a vi-like ":" command ("ex") mode, and the
    plain ("normal") input mode used to play moves.  The current
    variant/number/wtw are used as defaults when the player types a
    "new ..." command with options omitted.

    Returns (functionHaveToContinue, keyboardInput) where keyboardInput is a
    dict with "mode" in {"normal", "ex", "escape", "pause"} and "content"
    holding the text typed so far (or the committed ex command).
    """
    global programHaveToContinue
    global textUserInput
    global normalUserInput
    global exMode
    global normalMode
    global textToAnalyse
    global normalTextToAnalyse
    global screen
    global finalNormalUserInput
    global generalState
    keyboardInput = dict()
    keyboardInput["mode"] = "normal"
    keyboardInput["content"] = ""
    functionHaveToContinue = True
    for event in pygame.event.get():
        if event.type == VIDEORESIZE:
            screen = pygame.display.set_mode(event.size, RESIZABLE)
        if event.type == QUIT:
            programHaveToContinue = False
        if event.type == KEYDOWN:
            # ":" at the start of an empty line enters ex (command) mode.
            if (event.unicode == ":") and ("".join(normalUserInput) == ""):
                exMode = True
                normalMode = False
            if exMode == True:
                if event.key is K_ESCAPE:
                    exMode = False
                    normalMode = True
                    textUserInput = []
                elif event.key in charInputed:
                    textUserInput.append(event.unicode)
                elif event.key == K_BACKSPACE and textUserInput != []:
                    del textUserInput[-1]
                    # Deleting back to the lone ":" leaves ex mode entirely.
                    if len(textUserInput) == 1:
                        exMode = False
                        normalMode = True
                        del textUserInput[-1]
                elif event.key in [K_RETURN, K_KP_ENTER]:
                    # Commit the command, dropping the leading ":".
                    textToAnalyse = "".join(textUserInput[1:])
                    textUserInput = []
                    exMode = False
                if textUserInput == []:
                    exMode = False
                    normalMode = True
            elif normalMode == True:
                if (event.key is K_ESCAPE) and (normalUserInput != []):
                    normalUserInput = []
                elif event.key == K_p:
                    normalUserInput = []
                    keyboardInput["mode"] = "pause"
                elif (event.key is K_ESCAPE) and (normalUserInput == []):
                    normalUserInput = []
                    keyboardInput["mode"] = "escape"
                elif (event.key not in [K_RETURN, K_KP_ENTER, K_ESCAPE]):
                    normalUserInput.append(event.unicode)
                elif (event.key in [K_RETURN, K_KP_ENTER]):
                    # Commit the move text for the game loop to analyse.
                    finalNormalUserInput = "".join(normalUserInput)
                    normalUserInput = []
    # Interpret a committed ex command, if any.
    if textToAnalyse == "about":
        textToAnalyse = ""
        aboutScreen(screen)
    elif textToAnalyse in ["quit", "q"]:
        textToAnalyse = ""
        programHaveToContinue = False
    elif re.match("n(ew)?( +((trivial)|(marienbad)))?( +[0-9]+)?( +(((ttl)|(take-the-last))|((ltl)|(let-the-last))))? *$", textToAnalyse) is not None:
        # "new [variant] [number] [wtw]" — missing options fall back to the
        # values this function was called with.
        programHaveToContinue = True
        functionHaveToContinue = False
        syntaxToExtractOptions = "n(ew)?( +(?P<variente>(trivial|marienbad)))?( +(?P<number>[0-9]+))?( +(?P<wtw>((ttl)|(ltl))))?"
        newGameOptions = re.match(syntaxToExtractOptions,textToAnalyse)
        textToAnalyse = ""
        if (newGameOptions.group("variente") == None) :
            generalState.variant = variant
        else:
            generalState.variant = newGameOptions.group("variente")
        if ( newGameOptions.group("number") == None) :
            generalState.number = numberOfInitialMatch
        else:
            generalState.number = int(newGameOptions.group("number"))
        if ( newGameOptions.group("wtw") == None) :
            generalState.wtw = wtw
        else:
            generalState.wtw = newGameOptions.group("wtw")
        print("New " + str(generalState.variant) + ";" + str(generalState.number) + ";" + str(generalState.wtw) + " game.")
    elif keyboardInput["mode"] == "escape":
        keyboardInput["mode"] = "escape"
    elif keyboardInput["mode"] == "pause":
        keyboardInput["mode"] = "pause"
    else:
        keyboardInput["mode"] = "ex"
        keyboardInput["content"] = textToAnalyse
    # Pending normal-mode text takes precedence for display purposes.
    if normalUserInput != []:
        keyboardInput["mode"] = "normal"
        keyboardInput["content"] = normalUserInput
    return functionHaveToContinue, keyboardInput
def makeAPause(variant, numberOfInitialMatch, wtw, beginingOfGame):
global winingMainText_colour
global indicator_colour
global programHaveToContinue
resumeMainText_colour = (163, 143, 125)
pauseMainText_colour = winingMainText_colour
pauseTextInfo = surfaceInformations()
resumeTextInfo = surfaceInformations()
timeBeforePause = int(time.time()) - beginingOfGame
timeOfEndOfGame = int(time.time()) - beginingOfGame
functionHaveToContinue = True
while functionHaveToContinue and programHaveToContinue:
xSize, ySize = screen.get_size()
functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
screen.fill(indicator_colour)
if textToanalyse["mode"] == "escape":
functionHaveToContinue = False
pauseTextContent = "Pause".upper()
pauseFont = pygame.font.SysFont("CMU Typewriter Text", 112, bold=True)
pauseText = pauseFont.render(pauseTextContent, 1, pauseMainText_colour)
pauseTextInfo.width, pauseTextInfo.height = pauseFont.size(pauseTextContent)
pauseTextInfo.x = (xSize - pauseTextInfo.width) / 2
pauseTextInfo.y = (ySize/2) - pauseTextInfo.height
screen.blit(pauseText, (pauseTextInfo.x, pauseTextInfo.y))
resumeTextContent = "Type Escape key to continue."
resumeFont = pygame.font.SysFont("CMU Typewriter Text", 14, bold=True)
resumeText = resumeFont.render(resumeTextContent, 1, resumeMainText_colour)
resumeTextInfo.width, resumeTextInfo.height = resumeFont.size(resumeTextContent)
resumeTextInfo.x = (xSize - resumeTextInfo.width) / 2
resumeTextInfo.y = (ySize- 14) - resumeTextInfo.height - 30
screen.blit(resumeText, (resumeTextInfo.x, resumeTextInfo.y))
makeTextZone(variant,"Pause")
nceBegining = int(time.time()) - beginingOfGame
m, s = divmod(secondSinceBegining, 60)
h, m = divmod(m, 60)
timePassed = "%02d:%02d" % (m, s)
heighTextZonePosition = ySize - textZoneHeigh
timeZoneText = myfont.render(timePassed, 1, (0, 0, 0))
timeZoneInformation.width, timeZoneInformation.height = myfont.size(
timePassed)
timeZoneInformation.x = xSize - timeZoneInformation.width - timeZoneInformation.left
timeZoneInformation.y = ySize - textZoneHeigh
timeZoneBackground.width = timeZoneInformation.width + \
(timeZoneInformation.left + timeZoneInformation.right)
timeZoneBackground.height = textZoneHeigh
timeZoneBackground.y = heighTextZonePosition
timeZoneBackground.x = timeZoneInformation.x - 2
timeZoneBackgroundSurface = pygame.Surface(
(timeZoneBackground.width, timeZoneBackground.height))
timeZoneBackgroundSurface.fill(creme_colour)
screen.blit(timeZoneBackgroundSurface,
(timeZoneBackground.x, timeZoneBackground.y))
screen.blit(timeZoneText, (timeZoneInformation.x, timeZoneInformation.y))
timeZoneBorder = pygame.draw.polygon(screen, yellow_colour, [[timeZoneBackground.x, timeZoneBackground.y], [timeZoneBackground.x, timeZoneBackground.y + timeZoneBackground.height - 2], [
timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y + timeZoneBackground.height - 2], [timeZoneBackground.x + timeZoneBackground.width - 2, timeZoneBackground.y]], 2)
return timeZoneBackground.width
# Reset the normal-mode input buffer.
normalUserInput = []
def aboutScreen(screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
functionHaveToContinue = True
keyboardInput = dict()
keyboardInput["mode"] = "normal"
keyboardInput["content"] = ""
while functionHaveToContinue and programHaveToContinue:
functionHaveToContinue, textToanalyse = analyseTyping(None, None, None)
if textToanalyse["mode"] == "escape":
functionHaveToContinue = False
screen.fill(background_colour)
xSize, ySize = screen.get_size()
illustrationInformation = surfaceInformations()
illustration = pygame.image.load(
mainDir + "/" + "about-illustration.png").convert_alpha()
illustrationInformation.width, illustrationInformation.height = illustration.get_size()
illustrationInformationRatio = illustrationInformation.width / \
illustrationInformation.height
if illustrationInformation.width > xSize:
illustrationInformation.width = xSize * (3 / 4)
illustrationInformation.height = illustrationInformation.width / \
illustrationInformationRatio
if illustrationInformation.height > ySize:
illustrationInformation.height = ySize * (3 / 4)
illustrationInformation.width = illustrationInformation.height * \
illustrationInformationRatio
illustrationInformation.y = (
ySize - illustrationInformation.height) / 2
illustrationInformation.x = (xSize - illustrationInformation.width) / 2
illustration = pygame.transform.scale(illustration, (int(
illustrationInformation.width), int(illustrationInformation.height)))
screen.blit(illustration, (illustrationInformation.x,
illustrationInformation.y))
makeTextZone("About", None)
chNumber - 2) % 4) == modulator:
answer = 2
elif ((currentMatchNumber - 3) % 4) == modulator:
answer = 3
else:
answer = random.randint(1, 3)
else:
answer = 0
return answer
def trivialAnalysis(currentMatchNumber, initialMatchNumber, wtw, userInput):
    """Parse the player's move for the trivial Nim variant.

    currentMatchNumber -- matches still on the table (1-3 may be taken).
    initialMatchNumber -- matches at the start of the game.
    wtw                -- win rule; unused here, kept for interface parity.
    userInput          -- "N" or "-N" removes N matches; "=N" leaves N.

    Returns [True, remaining, taken] on a valid move, [False, message] on an
    invalid one, or [True, 0, 0] when no match is left.

    Fixes vs. original: an unparsable input now reports the syntax error
    instead of having it silently overwritten by the "“0” is not a valid
    answer." branch, and empty input no longer raises IndexError.  The
    missing-elsewhere representsInt() helper is replaced by a direct
    int() parse with the same accept/reject behaviour.
    """
    if currentMatchNumber == 0:
        # Game already over: nothing to analyse.
        return [True, 0, 0]
    # Legal amounts depend on how many matches are left (at most 3).
    if currentMatchNumber >= 3:
        authorisedNumbers = [3, 2, 1]
    elif currentMatchNumber == 2:
        authorisedNumbers = [2, 1]
    else:
        authorisedNumbers = [1]
    if not userInput:
        # Bug fix: the original indexed list(userInput)[0] and crashed here.
        return [False, "“" + userInput + "” is not a valid syntax."]
    if userInput[0] == "=":
        action = "application"
        stringToEvaluate = userInput[1:]
    elif userInput[0] == "-":
        action = "soustraction"
        stringToEvaluate = userInput[1:]
    else:
        action = "soustraction"
        stringToEvaluate = userInput
    try:
        value = int(stringToEvaluate)
    except ValueError:
        # Bug fix: report the syntax error immediately instead of falling
        # through to the numberOfMatchToDel == 0 branch.
        return [False, "“" + userInput + "” is not a valid syntax."]
    if action == "soustraction":
        numberOfMatchToDel = value
    else:
        # "=N": the player states how many matches should remain.
        numberOfMatchToDel = currentMatchNumber - value
    if numberOfMatchToDel == 0:
        return [False, "“0” is not a valid answer."]
    if numberOfMatchToDel not in authorisedNumbers:
        return [False, "“" + str(numberOfMatchToDel) + "” is too big."]
    # NOTE(review): the remaining count is computed from the *initial*
    # number of matches, exactly as in the original; callers only consume
    # indexes 0 and 2 of the answer, so the formula is preserved as-is.
    return [True, initialMatchNumber - numberOfMatchToDel, numberOfMatchToDel]
def winingFallingScreenMatchExplosion(winer, variant, numberOfInitialMatch, time):
    """Scatter 1000 randomly placed and rotated match sprites over the
    screen as a win celebration.

    winer -- True draws the explosion; False only prints a placeholder
        (the losing animation is not implemented yet).
    variant, numberOfInitialMatch, time -- accepted for interface
        symmetry with the other end-of-game helpers; unused here.
    """
    xSize, ySize = screen.get_size()
    if winer == True:
        matchInformation = surfaceInformations()
        # Fix: load the sprite ONCE instead of re-reading the same file
        # from disk 1000 times.  pygame.transform.rotate returns a new
        # Surface, so every copy can still be rotated independently.
        baseMatch = pygame.image.load(
            mainDir + "/" + "match-animation.png").convert_alpha()
        matchS = []
        match = 0
        while match < 1000:
            matchS.append(baseMatch)
            # Random position anywhere on screen, random orientation.
            matchInformation.heigh = random.randint(0, ySize)
            matchInformation.weight = random.randint(0, xSize)
            rotation = random.randint(0, 360)
            matchS[match] = pygame.transform.rotate(matchS[match], rotation)
            screen.blit(
                matchS[match], (matchInformation.weight, matchInformation.heigh))
            match = match + 1
    elif winer == False:
        # Placeholder: losing animation not implemented.
        print("machin")
def formateSecondToDotedTime(seconds):
    """Format a duration given in seconds as "MM:SS", switching to
    "HH:MM:SS" once the duration reaches one hour."""
    minutes, secs = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, secs)
    return "%02d:%02d:%02d" % (hours, minutes, secs)
def winingFallingScreen(winer, variant, numberOfInitialMatch, time):
    # Render the end-of-game screen: a "You win!" banner plus the elapsed
    # time, or a "You loose!" banner, each followed by a hint line about
    # the :new / :help commands.
    #
    # winer -- True for a player win, False for a loss.
    # variant, numberOfInitialMatch -- unused here; kept so all
    #     end-of-game helpers share one signature.
    # time -- elapsed game time in seconds (formatted below).
    #
    # NOTE(review): leading indentation was lost in this file as stored;
    # the nesting below is the straightforward reconstruction (one flat
    # body per winer branch) — confirm against version control.
    global indicator_colour
    global winingMainText_colour
    global purple_colour
    lineSeparationColor = (205, 153, 29)
    helpText_color = (163, 143, 125)
    fallingMainText_colour = winingMainText_colour
    xSize, ySize = screen.get_size()
    time = formateSecondToDotedTime(time)
    if winer == True:
        winingTextInfo = surfaceInformations()
        winingTimeTextInfo = surfaceInformations()
        winingHelpTextInfo = surfaceInformations()
        screen.fill(indicator_colour)
        # "You win!" headline, horizontally centred near the top.
        winingFont = pygame.font.SysFont("CMU Typewriter Text", 44, bold=True)
        winingText = winingFont.render("You win!", 1, winingMainText_colour)
        winingTextInfo.width, winingTextInfo.height = winingFont.size("You win!")
        winingTextInfo.x = (xSize - winingTextInfo.width) / 2
        winingTextInfo.y = 40
        screen.blit(winingText, (winingTextInfo.x, winingTextInfo.y))
        # Elapsed time in a very large font below the headline.
        winingTimeFont = pygame.font.SysFont("CMU Typewriter Text", 137, bold=True)
        winingTimeText = winingTimeFont.render(time, 1, lineSeparationColor)
        winingTimeTextInfo.width, winingTimeTextInfo.height = winingTimeFont.size(time)
        winingTimeTextInfo.x = (xSize - winingTimeTextInfo.width) / 2
        winingTimeTextInfo.y = 90
        screen.blit(winingTimeText, (winingTimeTextInfo.x, winingTimeTextInfo.y))
        # Hint line pinned near the bottom of the screen.
        helpText = "Type :new to begin new game or :help for more options."
        winingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        winingHelpText = winingHelpFont.render(helpText, 1, helpText_color)
        winingHelpTextInfo.width, winingHelpTextInfo.height = winingHelpFont.size(helpText)
        winingHelpTextInfo.x = (xSize - winingHelpTextInfo.width) / 2
        winingHelpTextInfo.y = ySize-90
        screen.blit(winingHelpText, (winingHelpTextInfo.x, winingHelpTextInfo.y))
    elif winer == False:
        fallingTextInfo = surfaceInformations()
        fallingTimeTextInfo = surfaceInformations()
        fallingHelpTextInfo = surfaceInformations()
        screen.fill(purple_colour)
        # "You loose!" headline, centred on both axes.
        fallingTextContent = "You loose!"
        fallingFont = pygame.font.SysFont("CMU Typewriter Text", 52, bold=True)
        fallingText = fallingFont.render(fallingTextContent, 1, fallingMainText_colour)
        fallingTextInfo.width, fallingTextInfo.height = fallingFont.size(fallingTextContent)
        fallingTextInfo.x = (xSize - fallingTextInfo.width) / 2
        fallingTextInfo.y = (ySize/2) - fallingTextInfo.height
        screen.blit(fallingText, (fallingTextInfo.x, fallingTextInfo.y))
        # Same hint line as on the win screen.
        helpText = "Type :new to begin new game or :help for more options."
        fallingHelpFont = pygame.font.SysFont("CMU Typewriter Text", 23, bold=True)
        fallingHelpText = fallingHelpFont.render(helpText, 1, helpText_color)
        fallingHelpTextInfo.width, fallingHelpTextInfo.height = fallingHelpFont.size(helpText)
        fallingHelpTextInfo.x = (xSize - fallingHelpTextInfo.width) / 2
        fallingHelpTextInfo.y = ySize-90
        screen.blit(fallingHelpText, (fallingHelpTextInfo.x, fallingHelpTextInfo.y))
def printMarienbadListOfTry(screen, listOfTry):
    # Draw the move-history side bar for the Marienbad variant.
    #
    # Each entry of listOfTry is a "column-removed" string.  Rows
    # alternate background colour; the arrow marks whose move it was.
    # The bar width adapts to the widest entry and is published through
    # the module global historyAreaWidth so the game area can lay itself
    # out around it.
    #
    # NOTE(review): indentation was lost in this file as stored; the
    # nesting below is a reconstruction — confirm against version control
    # (in particular whether the top shadow is drawn unconditionally).
    global historyAreaWidth
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54,46,38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    row = 0
    arrowPosX = 40
    delledNumberPosX = 53
    scroowlingHistory = 0
    # Fit the bar to the widest rendered history entry (+2 px slack,
    # plus the 35 px number gutter and 20 px right margin).
    rightHistoryAreaWidth = 0
    for aTryGame in listOfTry:
        tempSizeWidth, tempSizeHeigh = historyFont.size(aTryGame)
        if tempSizeWidth > rightHistoryAreaWidth:
            rightHistoryAreaWidth=tempSizeWidth
    rightHistoryAreaWidth=rightHistoryAreaWidth+2
    historyAreaWidth = rightHistoryAreaWidth + 35 + 20
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    while row < len(listOfTry):
        # Alternate row colours; "←" rows and "→" rows alternate to show
        # whose move each entry records.
        if (row % 2 == 0):
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        # Rows are stacked upward from just above the text zone.
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, (0, 0, 0))
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
        row = row + 1
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Vertical separators at x=35: a full-height line plus a darker one
    # covering only the rows actually played.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize-realHistoryHeigh))
    if realHistoryHeigh > ySize:
        # More rows than fit on screen: show the page-up marker.
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    # Decorative shadow along the top edge of the history bar.
    shadowTop = pygame.image.load(mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def printListOfTry(screen, listOfTry):
    """Draw the move-history side bar for the "trivial" variant.

    Each entry of listOfTry is the number of matches removed on that
    turn (expected 1-3); the number is colour-coded per value.  Rows
    alternate background colour, and the arrow marks whose move it was.
    Uses the module globals historyAreaWidth, textZoneHeigh,
    history_area_colour, mainDir.
    """
    historyFont = pygame.font.SysFont("monospace", 14, bold=True)
    pageUpDownFont = pygame.font.SysFont("monospace", 18, bold=True)
    pageUpDownColor = (220, 36, 4)
    lineSeparationColor = (205, 153, 29)
    realLineSeparationPlayed = (54, 46, 38)
    xSize, ySize = screen.get_size()
    arrowBackground = []
    row = 0
    arrowPosX = 40
    delledNumberPosX = 53
    historyZone = pygame.Surface((historyAreaWidth, ySize))
    historyZone.fill(history_area_colour)
    screen.blit(historyZone, (0, 0))
    while row < len(listOfTry):
        # Alternate row colours; "←" rows and "→" rows alternate to show
        # whose move each entry records.
        if row % 2 == 0:
            row_coulour = (234, 226, 215)
            arrowSign = "←"
        else:
            row_coulour = (207, 194, 184)
            arrowSign = "→"
        # Colour-code the removed-match count.  Fix: default to black so
        # a value outside 1-3 can no longer raise NameError below.
        numberToDelColor = (0, 0, 0)
        if listOfTry[row] == 1:
            numberToDelColor = (0, 126, 223)
        if listOfTry[row] == 2:
            numberToDelColor = (40, 149, 0)
        if listOfTry[row] == 3:
            numberToDelColor = (215, 0, 95)
        arrowBackground.append(pygame.Surface(
            (historyAreaWidth, textZoneHeigh)))
        arrowBackground[row].fill(row_coulour)
        # Rows are stacked upward from just above the text zone.
        rowPosY = ySize - textZoneHeigh - \
            (len(listOfTry) - row) * textZoneHeigh
        historyNumberText = historyFont.render(str(row), 1, (0, 0, 0))
        historyArrowText = historyFont.render(arrowSign, 1, (0, 0, 0))
        numberDelledText = historyFont.render(
            str(listOfTry[row]), 1, numberToDelColor)
        screen.blit(arrowBackground[row], (0, rowPosY))
        screen.blit(historyNumberText, (2, rowPosY + 2))
        screen.blit(historyArrowText, (arrowPosX, rowPosY + 2))
        screen.blit(numberDelledText, (delledNumberPosX, rowPosY + 2))
        row = row + 1
    realHistoryHeigh = (len(listOfTry) + 1) * textZoneHeigh
    # Vertical separators at x=35: a full-height line plus a darker one
    # covering only the rows actually played.
    lineHistorySeparation = pygame.Surface((1, ySize))
    lineHistorySeparation.fill(lineSeparationColor)
    screen.blit(lineHistorySeparation, (35, 0))
    realLineHistorySeparation = pygame.Surface((1, realHistoryHeigh))
    realLineHistorySeparation.fill(realLineSeparationPlayed)
    screen.blit(realLineHistorySeparation, (35, ySize - realHistoryHeigh))
    if realHistoryHeigh > ySize:
        # More rows than fit on screen: show the page-up marker.
        pageUpText = pageUpDownFont.render("⇈", 1, pageUpDownColor)
        screen.blit(pageUpText, (historyAreaWidth + 8, 4))
    # Decorative shadow along the top edge of the history bar.
    shadowTop = pygame.image.load(
        mainDir + "/" + "history-top-shadow.png").convert_alpha()
    shadowTop = pygame.transform.scale(shadowTop, (historyAreaWidth, 8))
    screen.blit(shadowTop, (0, 0))
def showVariant(screen, wtw, posX):
    # Draw the variant badge (the wtw string, e.g. "ttl"/"ltl") in the
    # bottom status bar, immediately left of the zone(s) already placed.
    #
    # posX -- horizontal space already occupied at the right edge (e.g.
    #     the clock zone width).
    # Returns the horizontal space consumed, so the next status-bar
    # element can position itself.
    # NOTE(review): the return value adds left/right padding to a width
    # that already includes them (counted twice) — confirm intended.
    yellow_colour = (205, 153, 29)
    xSize, ySize = screen.get_size()
    variantFont = pygame.font.SysFont("monospace", 14, bold=True)
    wtwText = variantFont.render(wtw, 1, (225, 225, 225))
    variantBackgroundInformation = surfaceInformations()
    variantBackgroundInformation.left = 2       # inner padding, pixels
    variantBackgroundInformation.right = 2
    variantBackgroundInformation.height = textZoneHeigh
    variantBackgroundInformation.y = ySize - textZoneHeigh
    variantTextInformation = surfaceInformations()
    variantTextInformation.width, variantTextInformation.height = variantFont.size(wtw)
    # Badge width = rendered text width + left/right padding.
    variantBackgroundInformation.width = variantTextInformation.width
    variantBackgroundInformation.width = variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
    variantBackgroundInformation.x = xSize - variantBackgroundInformation.width - posX
    variantTextInformation.x = variantBackgroundInformation.x + 1 + variantBackgroundInformation.left
    variantTextInformation.y = variantBackgroundInformation.y + 1
    variantBackground = pygame.Surface(
        (variantBackgroundInformation.width, variantBackgroundInformation.height))
    variantBackground.fill(yellow_colour)
    screen.blit(variantBackground, (variantBackgroundInformation.x, variantBackgroundInformation.y))
    screen.blit(wtwText, (variantTextInformation.x, variantTextInformation.y))
    return variantBackgroundInformation.width + variantBackgroundInformation.left + variantBackgroundInformation.right
# Main loop of the "trivial" (single-row) Nim variant: polls keyboard
# input via analyseTyping, redraws the board (history bar, rounded
# indicator pill, the row of match sprites, status bar), applies the
# player's move through trivialAnalysis, answers with playTrivial, and
# detects the end of game for both "ttl" and "ltl" variants.
#
# NOTE(review): ALL leading indentation of this function was stripped in
# the file as stored; the statements below are kept verbatim (flat).
# Restore the nesting from version control — a behavior-preserving
# reconstruction is not possible from this text alone.
def trivial(numberOfInitialMatch, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
allowedEntry = ["1", "2", "3"]
beginingOfGame = int(time.time())
currentNumberOfMatch = numberOfInitialMatch
normalTextInformation = surfaceInformations()
indicatorTextInformation = surfaceInformations()
listOfTry = []
functionHaveToContinue = True
myfont = pygame.font.SysFont("monospace", 14)
errorToDisplay = False
weHaveAWiner = False
winer = None
# Main event/draw loop: one iteration per frame until quit or game over.
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
functionHaveToContinue, textToanalyse = analyseTyping(
"trivial", numberOfInitialMatch, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
beginingOfGame = makeAPause("Trivial", numberOfInitialMatch, wtw, beginingOfGame)
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
# Rounded "pill" indicator: a rectangle plus two circles and a filler
# strip to round the top corners.
indicatorPosition = ((historyAreaWidth + ((xSize - historyAreaWidth) -
indicatorDim[0]) / 2), ySize - textZoneHeigh - indicatorDim[1])
indicatorArea = pygame.Surface((indicatorDim[0], indicatorDim[1]))
screen.fill(background_colour)
if weHaveAWiner == False:
printListOfTry(screen, listOfTry)
indicatorArea.fill(indicator_colour)
screen.blit(indicatorArea, (indicatorPosition[
0], indicatorPosition[1]))
indicatorBorderPositionLeft = (
int(indicatorPosition[0] + circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionLeft[
0], indicatorBorderPositionLeft[1]), circleRadius)
indicatorBorderPositionRight = (int(
indicatorPosition[0] + indicatorDim[0] - circleRadius), int(indicatorPosition[1]))
pygame.draw.circle(screen, indicator_colour, (indicatorBorderPositionRight[
0], indicatorBorderPositionRight[1]), circleRadius)
indicatorRadiusCompleterPosition = (
indicatorPosition[0] + circleRadius, indicatorPosition[1] - circleRadius)
indicatorRadiusCompleterDim = (
indicatorDim[0] - 2 * circleRadius, circleRadius)
indicatorRadiusCompleterArea = pygame.Surface(
(indicatorRadiusCompleterDim[0], indicatorRadiusCompleterDim[1]))
indicatorRadiusCompleterArea.fill(indicator_colour)
screen.blit(indicatorRadiusCompleterArea, (indicatorRadiusCompleterPosition[
0], indicatorRadiusCompleterPosition[1]))
# Compute match sprite size so all matches fit the game area while
# keeping the source picture's aspect ratio (matchPicRatio), capped at
# the native sprite size.
maxMatchAreaDim = [xSize - historyAreaWidth - (2 * matchAreaBorder.right), ySize - textZoneHeigh - indicatorDim[
1] - matchAreaBorder.top - matchAreaBorder.bottom]
maxMatchDim = [0, 0]
maxMatchDim[0] = maxMatchAreaDim[0] / (numberOfInitialMatch * 1.5)
maxMatchDim[1] = maxMatchDim[0] * matchPicRatio
if maxMatchDim[1] > maxMatchAreaDim[1]:
matchDim = [int(maxMatchAreaDim[1] / matchPicRatio),
int(maxMatchAreaDim[1])]
else:
matchDim = [int(maxMatchDim[0]), int(
maxMatchDim[0] * matchPicRatio)]
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
matchMaxWidth, matchMaxHeight = tempImageMatch.get_rect().size
if matchDim[0] > matchMaxWidth:
matchDim[0] = matchMaxWidth
matchDim[1] = matchMaxHeight
matchAreaDim = [matchDim[0] * numberOfInitialMatch, matchDim[1]]
matchAreaPos = [historyAreaWidth + matchAreaBorder.left + (
(maxMatchAreaDim[0] - matchAreaDim[0]) / 2), (ySize - indicatorDim[1] - matchDim[1]) / 2]
secondMatchAreaPos = [matchAreaPos[
0] + (matchAreaDim[0] - (numberOfInitialMatch * 1.5) * matchDim[0]) / 2, matchAreaPos[1]]
matchRessizing = matchMaxWidth/matchDim[0]
# "ttl": taking the last match wins; "ltl": taking it loses — different
# sets of "danger" matches are highlighted as burned.
if wtw == "ttl":
lastBurnedMatch = [1, 2, 3]
elif wtw == "ltl":
lastBurnedMatch = [2, 3, 4]
i = 0
matchS = []
# Draw every match slot: burned / allowed / normal / void sprite
# depending on game state, plus the crown/skull marker over match 0.
while i < numberOfInitialMatch:
if i < currentNumberOfMatch:
if currentNumberOfMatch in lastBurnedMatch:
initialSignDistanceToMatch = matchDim[1]/7
if i+1 in lastBurnedMatch:
matchS.append(pygame.image.load(
mainDir + "/" + "match-burned.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
initialSignDistanceToMatch = matchDim[1]/24
if i >= (currentNumberOfMatch - 3):
matchS.append(pygame.image.load(
mainDir + "/" + "match-allowed.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match.png").convert_alpha())
else:
matchS.append(pygame.image.load(
mainDir + "/" + "match-void.png").convert_alpha())
matchLeftVoid = 0
if i != 0:
matchLeftVoid = matchDim[0] / 2
currentMatchPos = [secondMatchAreaPos[
0] + i * (matchLeftVoid + matchDim[0]), secondMatchAreaPos[1]]
matchS[i] = pygame.transform.scale(
matchS[i], (matchDim[0], matchDim[1]))
screen.blit(
matchS[i], (currentMatchPos[0], currentMatchPos[1]))
if i == 0:
initialSignPos = [0,0]
initialSignPos[1] = currentMatchPos[1] - initialSignDistanceToMatch
if wtw == "ttl":
initialSign = pygame.image.load(mainDir + "/" + "crown.png").convert_alpha()
if wtw == "ltl":
initialSign = pygame.image.load(mainDir + "/" + "skull.png").convert_alpha()
initialSignSize = initialSign.get_rect().size
initialSignSize = [int(initialSignSize[0]/matchRessizing),int(initialSignSize[1]/matchRessizing)]
initialSign = pygame.transform.scale(initialSign, (initialSignSize[0], initialSignSize[1]))
initialSignPos[0] = (currentMatchPos[0]+(matchDim[0]/2)) - (initialSignSize[0]/2)
screen.blit(initialSign, (initialSignPos[0], initialSignPos[1]))
i = i + 1
# "remaining/initial" counter inside the indicator pill.
indicatorFont = pygame.font.SysFont("monospace", 34)
indicatorTextContent = str(
currentNumberOfMatch) + "/" + str(numberOfInitialMatch)
indicatorText = indicatorFont.render(
indicatorTextContent, 1, (255, 255, 255))
indicatorTextInformation.width, indicatorTextInformation.height = indicatorFont.size(
indicatorTextContent)
indicatorTextInformation.x = indicatorPosition[
0] + (indicatorDim[0] - indicatorTextInformation.width) / 2
indicatorTextInformation.y = indicatorPosition[1] + 5
screen.blit(indicatorText, (indicatorTextInformation.x,
indicatorTextInformation.y))
# Apply the player's validated move, then the computer's answer, and
# test for end-of-game after each half-move.
if finalNormalUserInput:
getFromAnalysis = trivialAnalysis(
currentNumberOfMatch, numberOfInitialMatch, wtw, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
userPlayed = getFromAnalysis[2]
listOfTry.append(userPlayed)
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playTrivial(
currentNumberOfMatch - userPlayed,wtw)
listOfTry.append(computerPlayed)
currentNumberOfMatch = currentNumberOfMatch - userPlayed
if ((currentNumberOfMatch == 0) and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = True
else:
currentNumberOfMatch = currentNumberOfMatch - computerPlayed
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
winer = False
numberOfMatchDelled = numberOfInitialMatch - currentNumberOfMatch
if (currentNumberOfMatch == 0 and (wtw == "ttl")) or ((currentNumberOfMatch == 1) and (wtw == "ltl")):
weHaveAWiner = True
timeOfEndOfGame = int(time.time()) - beginingOfGame
else:
print("we have a winer")
timeOfEndOfGame = int(time.time()) - beginingOfGame
if textToanalyse in allowedEntry:
normalTextZone = myfont.render(
"".join(textToanalyse), 1, (255, 255, 255))
screen.blit(normalTextZone, (100, 100))
# Status bar: title zone, clock, variant badge, then either the text
# being typed or a red error message.
makeTextZone("Trivial", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = myfont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = myfont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
chMatrix:
if row != 0:
columnWithMatch.append(i)
i=i+1
if wtw == "ttl":
if len(columnWithMatch)==1:
winingColumn=columnWithMatch
else:
winingColumn=False
elif wtw == "ltl":
if (len(columnWithMatch)==1) and (matchMatrix[columnWithMatch[0]] > 1):
winingColumn=columnWithMatch
elif (len(columnWithMatch) == 2 ) and (matchMatrix[columnWithMatch[0]] == 1) and (matchMatrix[columnWithMatch[1]] == 1):
winingColumn=columnWithMatch
else:
winingColumn=False
else:
winingColumn=False
return winingColumn
def getNimSum(matchMatrix):
    """Count matches per row of the Marienbad pyramid.

    matchMatrix holds one column height per column; the pyramid has
    (len+1)//2 rows.  Element j of the result is the number of columns
    that still hold more than j matches.
    """
    numberOfRows = int((len(matchMatrix) + 1) / 2)
    rowCounts = [0] * numberOfRows
    for columnHeight in matchMatrix:
        for rowIndex in range(columnHeight):
            rowCounts[rowIndex] += 1
    return rowCounts
def playMarienbad(matchMatrix,wtw):
    # Choose the computer's move for the Marienbad variant using the
    # digit-wise parity of the binary row counts (a nim-sum-like test).
    #
    # Returns a bare column index from marienbadWitchMatchLineContainOdd
    # when a "winning" reduction exists, otherwise [column, count] picked
    # at random.  NOTE(review): the two return shapes differ, and the
    # caller always removes exactly one match from the returned value —
    # confirm the [column, count] branch is ever honoured as intended.
    #
    # NOTE(review): indentation was lost in this file as stored; the
    # nesting below is a reconstruction — confirm against version control.
    columns = len(matchMatrix)
    numberOfLines = int((columns+1)/2)
    lineSums = getNimSum(matchMatrix)
    # Columns that still hold at least one match (legal to play in).
    allowdedColumnToPlay = []
    i=0
    for column in matchMatrix:
        if column > 0:
            allowdedColumnToPlay.append(i)
        i=i+1
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    print(lineSumsBinari)
    finalSum = sum(lineSumsBinari)
    listOfDigits=list(str(finalSum))
    print(listOfDigits)
    # An odd digit in the summed binary representations means a winning
    # reduction is available.
    itIsPossibleToWin = False
    for aDigit in listOfDigits:
        if (int(aDigit)%2 == 1):
            itIsPossibleToWin = True
    matchLineContainingOdd = None
    if itIsPossibleToWin == False:
        # No winning move: remove a random legal amount from a random
        # non-empty column.
        columnToPlay = random.sample(allowdedColumnToPlay, 1)[0]
        maxNumberInTheColumn=matchMatrix[columnToPlay]
        numberOfMatchToPlay = random.randint(1,maxNumberInTheColumn)
        whatComputerWillPlay = [columnToPlay,numberOfMatchToPlay]
        columnToPlay = whatComputerWillPlay
    else:
        theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
        matchLineContainingOdd = marienbadWitchMatchLineContainOdd(matchMatrix)
        columnToPlay = matchLineContainingOdd
    return columnToPlay
def marienbadWitchColumnIsOdd(listOfDigits):
    """Return the index of the first odd digit in listOfDigits (a list of
    one-character digit strings), or None when every digit is even."""
    for position, digit in enumerate(listOfDigits):
        if int(digit) % 2:
            return position
    return None
def calculateLineSumsBinari(lineSums):
    """Map each decimal count to the integer whose decimal digits spell
    its binary representation (e.g. 5 -> 101, 3 -> 11).

    Summing these integers and inspecting digit parity is how the AI
    approximates the nim-sum of the position.
    """
    return [int(format(decimalNum, "b")) for decimalNum in lineSums]
def marienbadWitchMatchLineContainOdd(matchMatrix):
    # Locate the column the AI should play to cancel the odd digit in the
    # summed binary row counts (see calculateLineSumsBinari).
    #
    # NOTE(review): indentation was lost in this file as stored; the
    # nesting below is a reconstruction — confirm against version control.
    lineSums = getNimSum(matchMatrix)
    lineSumsBinari = calculateLineSumsBinari(lineSums)
    finalSum = sum(lineSumsBinari)
    listOfDigits=list(str(finalSum))
    theSumColumnContainingTheOddDigit = marienbadWitchColumnIsOdd(listOfDigits)
    # Rebuild the binary row counts as zero-padded strings of a common
    # width so digits can be compared position by position.
    lineSumsBinari = []
    i = 0
    for decimalNum in lineSums:
        lineSumsBinari.append(int("{0:b}".format(decimalNum)))
    i = 0
    maxLen = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        if tempLen > maxLen:
            maxLen = tempLen
        i=i+1
    i = 0
    for binaryNum in lineSumsBinari:
        tempLen = len(str(binaryNum))
        howZeroToAdd = maxLen - tempLen
        if howZeroToAdd > 0:
            for j in range(1,howZeroToAdd+1):
                lineSumsBinari[i] = "0" + str(lineSumsBinari[i])
        else:
            lineSumsBinari[i] = str(lineSumsBinari[i])
        i=i+1
    # Extract, for every row, the digit sitting in the "odd" position.
    octetsOfDesiredColumn = []
    i = 0
    for binaryNum in lineSumsBinari:
        extractedOctet = list(str(binaryNum))[theSumColumnContainingTheOddDigit]
        octetsOfDesiredColumn.append(extractedOctet)
        i=i+1
    i = 0
    # Rows whose binary digit at that position is 1; take the highest.
    linesImpliyingOdd = []
    for i in range(0,len(octetsOfDesiredColumn)):
        if octetsOfDesiredColumn[i] == "1":
            linesImpliyingOdd.append(i)
        i=i+1
    higherMatchLine = linesImpliyingOdd[-1]
    # Find a column holding exactly higherMatchLine matches.
    # NOTE(review): theColumn is only bound when such a column exists;
    # otherwise the final return raises NameError — confirm this case is
    # unreachable.
    i = 0
    for match in matchMatrix:
        if match == higherMatchLine:
            theColumn=i
        i=i+1
    print("matchMatrix: " + str(matchMatrix))
    print("lineSums: " + str(lineSums))
    print("higherMatchLine: " + str(higherMatchLine))
    print("Là ↓")
    print(theColumn)
    return(theColumn)
def marienbadAnalysis(matchMatrix, userInput):
    """Parse, validate and apply the player's move for the Marienbad
    (multi-column Nim) variant.

    A move is "<column> - <n>" (remove n matches from that column) or
    "<column> = <n>" (leave exactly n matches in it); spaces around the
    tokens are tolerated.  On success matchMatrix is mutated in place.

    Returns
    -------
    list
        [True, matchMatrix, "column-removed"] on a legal move,
        [False, errorMessage] on an invalid one, and [False, 0] when the
        board is already empty.
    """
    # Fix: dropped the dead call to marienbadInitialColumns (its result
    # was never used) and the debug print, and flattened the nested ifs
    # into guard clauses.
    allowedColumns = range(len(matchMatrix))

    # The game is over when no column holds a match any more.
    if not any(column != 0 for column in matchMatrix):
        return [False, 0]

    syntaxToExtractOptions = \
        r"^ *(?P<column>[0-9]+) *(?P<operator>(=|-)) *(?P<numberOfMatchUsed>[0-9]+) *$"
    deletingMatchOperation = re.match(syntaxToExtractOptions, userInput)
    if deletingMatchOperation is None:
        return [False, "“" + userInput + "” is not a valid syntax."]

    columnToDelOnIt = int(deletingMatchOperation.group("column"))
    numberOfMatchUsed = int(deletingMatchOperation.group("numberOfMatchUsed"))
    delletingOperator = deletingMatchOperation.group("operator")

    if columnToDelOnIt not in allowedColumns:
        return [False, "“" + deletingMatchOperation.group("column") +
                "” is not in valid range."]
    # "c-0" would be a no-op move; "c=0" is legal (empties the column).
    if numberOfMatchUsed == 0 and delletingOperator == "-":
        return [False, "You can not del no match!"]

    if delletingOperator == "=":
        if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
            return [False, "You can not set a number higher than content."]
        numberOfMatchsToDel = matchMatrix[columnToDelOnIt] - numberOfMatchUsed
    else:  # "-": remove exactly that many matches
        if numberOfMatchUsed > matchMatrix[columnToDelOnIt]:
            return [False, "You can not use a number higher than content."]
        numberOfMatchsToDel = numberOfMatchUsed

    matchMatrix[columnToDelOnIt] = matchMatrix[columnToDelOnIt] - numberOfMatchsToDel
    return [True, matchMatrix, str(columnToDelOnIt) + "-" + str(numberOfMatchsToDel)]
# Main loop of the Marienbad (multi-column Nim) variant: polls keyboard
# input, lays out and draws the pyramid of match sprites, applies the
# player's move through marienbadAnalysis, answers with playMarienbad,
# and highlights the board when marienbadIsItAWinerSituation fires.
#
# NOTE(review): ALL leading indentation of this function was stripped in
# the file as stored; the statements below are kept verbatim (flat).
# Restore the nesting from version control — a behavior-preserving
# reconstruction is not possible from this text alone.
def marienbad(numberOfLines, wtw, screen):
global programHaveToContinue
global textUserInput
global normalUserInput
global exMode
global normalMode
global textToAnalyse
global normalTextToAnalyse
global finalNormalUserInput
global historyAreaWidth
maximumMatchMatrix = marienbadInitialColumns(numberOfLines)
currentMatchMatrix = copy.deepcopy(maximumMatchMatrix)
numberOfColumns = numberOfLines*2 - 1
beginingOfGame = int(time.time())
listOfTry = []
functionHaveToContinue = True
errorToDisplay = False
weHaveAWiner = False
winer = None
# Main event/draw loop: one iteration per frame until quit or game over.
while functionHaveToContinue and programHaveToContinue and (weHaveAWiner == False):
userPlayed = 0
computerPlayed = 0
if weHaveAWiner == False:
functionHaveToContinue, textToanalyse = analyseTyping("marienbad", numberOfLines, wtw)
if textToanalyse["mode"] == "pause":
print("In pause")
# NOTE(review): numberOfInitialMatch is not defined in this function
# (the parameter is numberOfLines) — this pause path raises NameError.
beginingOfGame = makeAPause("Marienbad", numberOfInitialMatch, wtw, beginingOfGame)
xSize, ySize = screen.get_size()
gameAreaDim[0] = xSize - historyAreaWidth
tempImageMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
gameAreaInfo = surfaceInformations()
realGameAreaInfo = surfaceInformations()
matchInfo = surfaceInformations()
maxMatchInfo = surfaceInformations()
matchAreaInfo = surfaceInformations()
normalTextInformation = surfaceInformations()
wtwZoneInfo = surfaceInformations()
columnNumberInfo = surfaceInformations()
# Layout: size one match so numberOfLines rows (with 20% spacing) fit
# the available game area, capped at the native sprite size.
matchHorizontalSeparation = 0
matchInfo.top = 10
realGameAreaInfo.top = 20
realGameAreaInfo.bottom = 30
realGameAreaInfo.left = 30
realGameAreaInfo.right = 30
realGameAreaInfo.height = ySize - textZoneHeigh - realGameAreaInfo.top - realGameAreaInfo.bottom
realGameAreaInfo.width = xSize - historyAreaWidth - realGameAreaInfo.left - realGameAreaInfo.right
maxMatchInfo.width, maxMatchInfo.height = tempImageMatch.get_rect().size
matchInfo.height = realGameAreaInfo.height / (numberOfLines*1.2)
matchInfo.top = matchInfo.height*0.2
if matchInfo.height >= maxMatchInfo.height:
matchInfo.height = maxMatchInfo.height
matchInfo.width = maxMatchInfo.width
else:
matchInfo.width = matchInfo.height / matchPicRatio
matchHorizontalSeparation = (realGameAreaInfo.width - (matchInfo.width*numberOfColumns)) / (numberOfColumns-1)
if matchHorizontalSeparation > matchInfo.height*0.66:
matchHorizontalSeparation = matchInfo.height*0.66
matchAreaInfo.width = matchInfo.width*numberOfColumns + (numberOfColumns-1)*matchHorizontalSeparation
realGameAreaInfo.x = historyAreaWidth + realGameAreaInfo.left + (realGameAreaInfo.width-matchAreaInfo.width)/2
matchAreaInfo.height = matchInfo.height*numberOfLines + (numberOfLines-1)*matchInfo.top
realGameAreaInfo.y = realGameAreaInfo.top + (realGameAreaInfo.height-matchAreaInfo.height)/2
# Precompute the on-screen position of every match slot, column by
# column, stacking upward from the bottom of the game area.
matchPositions = []
i = 0
for numberOfMatchInAColumn in maximumMatchMatrix:
j = 0
matchPositions.append([])
cumuledX = matchInfo.width + matchHorizontalSeparation
while j < numberOfMatchInAColumn:
matchPositions[i].append(surfaceInformations())
cumuledY = matchInfo.height + matchInfo.top
matchPositions[i][j].x = realGameAreaInfo.x + i*cumuledX
matchPositions[i][j].y = ySize-textZoneHeigh - realGameAreaInfo.y - (j+1)*cumuledY
j=j+1
i = i+1
screen.fill(background_colour)
printMarienbadListOfTry(screen, listOfTry)
# Apply the player's validated move, then the computer's answer (which
# always removes one match from the column playMarienbad returns).
if finalNormalUserInput:
getFromAnalysis = marienbadAnalysis(currentMatchMatrix, finalNormalUserInput)
finalNormalUserInput = False
if getFromAnalysis[0] == True:
currentMatchMatrix = getFromAnalysis[1]
listOfTry.append(getFromAnalysis[2])
else:
errorToDisplay = getFromAnalysis[1]
if getFromAnalysis[0] == True:
computerPlayed = playMarienbad(currentMatchMatrix,wtw)
listOfTry.append(str(computerPlayed) + "-" + "1")
currentMatchMatrix[computerPlayed] = currentMatchMatrix[computerPlayed]-1
winingColumn = marienbadIsItAWinerSituation(currentMatchMatrix, wtw)
columnNumberFont = pygame.font.SysFont("monospace", 18, bold=True)
# Draw every slot: void for removed matches, burned when the position
# is a decided one, normal otherwise; plus the column number beneath.
i = 0
for column in matchPositions:
j = 0
for match in column:
if (currentMatchMatrix[i] < maximumMatchMatrix[i]) and (j+1 > currentMatchMatrix[i]):
visualMatch = pygame.image.load(mainDir + "/" + "match-void.png").convert_alpha()
else:
if winingColumn:
visualMatch = pygame.image.load(mainDir + "/" + "match-burned.png").convert_alpha()
else:
visualMatch = pygame.image.load(mainDir + "/" + "match.png").convert_alpha()
visualMatch = pygame.transform.scale(visualMatch, (int(matchInfo.width), int(matchInfo.height)))
screen.blit(visualMatch, (match.x, match.y))
j=j+1
columnNumberImage = columnNumberFont.render(str(i), 1, (0, 0,0))
columnNumberInfo.width, columnNumberInfo.height = columnNumberImage.get_size()
columnNumberInfo.x = column[0].x + (column[0].width/2) - (columnNumberInfo.width/2)
screen.blit(columnNumberImage, (columnNumberInfo.x, column[0].y+matchInfo.height+12))
i = i+1
# Status bar: title zone, clock, variant badge, then either the text
# being typed or a red error message.
makeTextZone("Marienbad", None)
timeZoneWidth = makeTimetZone(beginingOfGame)
wtwZoneWidth = showVariant(screen, wtw, timeZoneWidth)
normalFont = pygame.font.SysFont("monospace", 14)
if textToanalyse["mode"] == "normal":
errorToDisplay = False
normalText = normalFont.render(
"".join(textToanalyse["content"]), 1, (255, 255, 255))
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
if errorToDisplay != False:
normalText = normalFont.render(errorToDisplay, 1, red)
normalTextInformation.width, normalTextInformation.height = normalText.get_size()
normalTextInformation.x = xSize - normalTextInformation.width - 5 - wtwZoneWidth - timeZoneWidth
normalTextInformation.y = ySize - textZoneHeigh
screen.blit(normalText, (normalTextInformation.x,
normalTextInformation.y))
makeTextZone("Marienbad", None)
number = generalState.number
if wtw not in [0, None, ""]:
wtw = generalState.wtw
if variant == "trivial":
trivial(number, wtw, screen)
elif variant == "marienbad":
marienbad(number, wtw, screen)
main("trivial", numberOfInitialMatch, "ttl")
| true | true |
f7144816b85989a438082526f2dc145b5a22fa38 | 470 | py | Python | animalid/random_id.py | Alphadelta14/animalid | 0b97a84ead34be2de623de1258aae16c4e8d83d2 | [
"MIT"
] | 5 | 2016-12-15T14:56:15.000Z | 2022-02-15T13:32:33.000Z | animalid/random_id.py | Alphadelta14/animalid | 0b97a84ead34be2de623de1258aae16c4e8d83d2 | [
"MIT"
] | 1 | 2016-10-06T17:37:39.000Z | 2016-10-06T17:37:39.000Z | animalid/random_id.py | Alphadelta14/animalid | 0b97a84ead34be2de623de1258aae16c4e8d83d2 | [
"MIT"
] | 1 | 2020-12-10T16:05:29.000Z | 2020-12-10T16:05:29.000Z | """Where the magic happens."""
import random
from animalid import alloys, animals, colors, fabrics, opinions, origins, shapes, sizes
FIRST_ADJECTIVES = opinions + shapes + sizes
SECOND_ADJECTIVES = alloys + colors + fabrics + origins
def generate_animal_id():
    """Build a random identifier of the form <adjective>_<adjective>_<animal>.

    The first adjective comes from the opinion/shape/size word lists, the
    second from the alloy/colour/fabric/origin lists, joined with
    underscores (e.g. "tiny_copper_otter").  Uses the module-level
    random generator, so the result is not suitable as a
    security-sensitive token.
    """
    return "_".join(
        [
            random.choice(FIRST_ADJECTIVES),
            random.choice(SECOND_ADJECTIVES),
            random.choice(animals),
        ]
    )
| 23.5 | 87 | 0.644681 | import random
from animalid import alloys, animals, colors, fabrics, opinions, origins, shapes, sizes
FIRST_ADJECTIVES = opinions + shapes + sizes
SECOND_ADJECTIVES = alloys + colors + fabrics + origins
def generate_animal_id():
return "_".join(
[
random.choice(FIRST_ADJECTIVES),
random.choice(SECOND_ADJECTIVES),
random.choice(animals),
]
)
| true | true |
f71448ef2c575cc60ec1ec5c6f6dc91a3603fb16 | 6,237 | py | Python | rcnn/lib/python3.6/site-packages/sphinx/make_mode.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | 1 | 2019-01-12T13:17:32.000Z | 2019-01-12T13:17:32.000Z | rcnn/lib/python3.6/site-packages/sphinx/make_mode.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | rcnn/lib/python3.6/site-packages/sphinx/make_mode.py | dreamingweaver/making_passportImage | 68f23411780ff82abe934dfae5fc04acb80f2c49 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
sphinx.make_mode
~~~~~~~~~~~~~~~~
sphinx-build -M command-line handling.
This replaces the old, platform-dependent and once-generated content
of Makefile / make.bat.
This is in its own module so that importing it is fast. It should not
import the main Sphinx modules (like sphinx.applications, sphinx.builders).
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import subprocess
import sys
from os import path
import sphinx
from sphinx import cmdline
from sphinx.util.console import color_terminal, nocolor, bold, blue # type: ignore
from sphinx.util.osutil import cd, rmtree
if False:
# For type annotation
from typing import List # NOQA
# Display name for the project; overridable through the SPHINXPROJ env var.
# NOTE(review): appears unused in this module — presumably kept for template
# compatibility; confirm before removing.
proj_name = os.getenv('SPHINXPROJ', '<project>')

# (os restriction, builder name, help text) triples. An empty os restriction
# means the builder is offered on every platform; otherwise it is only listed
# when os.name matches (see Make.build_help).
BUILDERS = [
    ("",      "html",        "to make standalone HTML files"),
    ("",      "dirhtml",     "to make HTML files named index.html in directories"),
    ("",      "singlehtml",  "to make a single large HTML file"),
    ("",      "pickle",      "to make pickle files"),
    ("",      "json",        "to make JSON files"),
    ("",      "htmlhelp",    "to make HTML files and an HTML help project"),
    ("",      "qthelp",      "to make HTML files and a qthelp project"),
    ("",      "devhelp",     "to make HTML files and a Devhelp project"),
    ("",      "epub",        "to make an epub"),
    ("",      "latex",       "to make LaTeX files, you can set PAPER=a4 or PAPER=letter"),
    ("posix", "latexpdf",    "to make LaTeX and PDF files (default pdflatex)"),
    ("posix", "latexpdfja",  "to make LaTeX files and run them through platex/dvipdfmx"),
    ("",      "text",        "to make text files"),
    ("",      "man",         "to make manual pages"),
    ("",      "texinfo",     "to make Texinfo files"),
    ("posix", "info",        "to make Texinfo files and run them through makeinfo"),
    ("",      "gettext",     "to make PO message catalogs"),
    ("",      "changes",     "to make an overview of all changed/added/deprecated items"),
    ("",      "xml",         "to make Docutils-native XML files"),
    ("",      "pseudoxml",   "to make pseudoxml-XML files for display purposes"),
    ("",      "linkcheck",   "to check all external links for integrity"),
    ("",      "doctest",     "to run all doctests embedded in the documentation "
                             "(if enabled)"),
    ("",      "coverage",    "to run coverage check of the documentation (if enabled)"),
]
class Make(object):
    """Implements the targets of ``sphinx-build -M``.

    Replaces the old generated Makefile / make.bat: each ``build_<target>``
    method handles one special target, and any other target name falls
    through to :meth:`run_generic_build`.
    """

    def __init__(self, srcdir, builddir, opts):
        # type: (unicode, unicode, List[unicode]) -> None
        self.srcdir = srcdir
        self.builddir = builddir
        self.opts = opts
        self.makecmd = os.environ.get('MAKE', 'make')  # refer $MAKE to determine make command

    def builddir_join(self, *comps):
        # type: (unicode) -> unicode
        """Join path components onto the build directory."""
        return path.join(self.builddir, *comps)

    def build_clean(self):
        # type: () -> int
        """Delete everything below the build directory; return an exit code."""
        if not path.exists(self.builddir):
            return 0
        elif not path.isdir(self.builddir):
            print("Error: %r is not a directory!" % self.builddir)
            return 1
        print("Removing everything under %r..." % self.builddir)
        for item in os.listdir(self.builddir):
            rmtree(self.builddir_join(item))
        return 0

    def build_help(self):
        # type: () -> None
        """Print the builders available on this platform."""
        if not color_terminal():
            nocolor()
        print(bold("Sphinx v%s" % sphinx.__display_version__))
        print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2))  # type: ignore # NOQA
        for osname, bname, description in BUILDERS:
            # An empty osname marks a builder that works on every platform.
            if not osname or os.name == osname:
                print(' %s %s' % (blue(bname.ljust(10)), description))

    def build_latexpdf(self):
        # type: () -> int
        """Build LaTeX sources, then run the latex Makefile to produce a PDF."""
        if self.run_generic_build('latex') > 0:
            return 1
        try:
            with cd(self.builddir_join('latex')):
                return subprocess.call([self.makecmd, 'all-pdf'])
        except OSError:
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_latexpdfja(self):
        # type: () -> int
        """Build LaTeX sources, then produce a PDF via platex/dvipdfmx."""
        if self.run_generic_build('latex') > 0:
            return 1
        try:
            with cd(self.builddir_join('latex')):
                return subprocess.call([self.makecmd, 'all-pdf-ja'])
        except OSError:
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_info(self):
        # type: () -> int
        """Build Texinfo sources, then run makeinfo on them."""
        if self.run_generic_build('texinfo') > 0:
            return 1
        try:
            with cd(self.builddir_join('texinfo')):
                return subprocess.call([self.makecmd, 'info'])
        except OSError:
            print('Error: Failed to run: %s' % self.makecmd)
            return 1

    def build_gettext(self):
        # type: () -> int
        """Build gettext catalogs, keeping doctrees inside the output dir."""
        dtdir = self.builddir_join('gettext', '.doctrees')
        if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
            return 1
        return 0

    def run_generic_build(self, builder, doctreedir=None):
        # type: (unicode, unicode) -> int
        """Run sphinx-build for *builder* and return its exit status."""
        # compatibility with old Makefile
        papersize = os.getenv('PAPER', '')
        # Work on a copy: extending self.opts in place (as before) leaked the
        # papersize -D option into every later build using this Make instance.
        opts = list(self.opts)
        if papersize in ('a4', 'letter'):
            opts.extend(['-D', 'latex_elements.papersize=' + papersize + 'paper'])
        if doctreedir is None:
            doctreedir = self.builddir_join('doctrees')
        args = ['-b', builder,
                '-d', doctreedir,
                self.srcdir,
                self.builddir_join(builder)]
        return cmdline.main(args + opts)
def run_make_mode(args):
    # type: (List[unicode]) -> int
    """Dispatch a ``-M`` invocation; args = [target, srcdir, builddir, *opts]."""
    if len(args) < 3:
        print('Error: at least 3 arguments (builder, source '
              'dir, build dir) are required.', file=sys.stderr)
        return 1
    target, srcdir, builddir = args[0], args[1], args[2]
    make = Make(srcdir, builddir, args[3:])
    handler = getattr(make, 'build_' + target, None)
    if handler is not None:
        return handler()
    return make.run_generic_build(target)
| 37.125 | 106 | 0.568062 |
from __future__ import print_function
import os
import subprocess
import sys
from os import path
import sphinx
from sphinx import cmdline
from sphinx.util.console import color_terminal, nocolor, bold, blue
from sphinx.util.osutil import cd, rmtree
if False:
from typing import List
proj_name = os.getenv('SPHINXPROJ', '<project>')
BUILDERS = [
("", "html", "to make standalone HTML files"),
("", "dirhtml", "to make HTML files named index.html in directories"),
("", "singlehtml", "to make a single large HTML file"),
("", "pickle", "to make pickle files"),
("", "json", "to make JSON files"),
("", "htmlhelp", "to make HTML files and an HTML help project"),
("", "qthelp", "to make HTML files and a qthelp project"),
("", "devhelp", "to make HTML files and a Devhelp project"),
("", "epub", "to make an epub"),
("", "latex", "to make LaTeX files, you can set PAPER=a4 or PAPER=letter"),
("posix", "latexpdf", "to make LaTeX and PDF files (default pdflatex)"),
("posix", "latexpdfja", "to make LaTeX files and run them through platex/dvipdfmx"),
("", "text", "to make text files"),
("", "man", "to make manual pages"),
("", "texinfo", "to make Texinfo files"),
("posix", "info", "to make Texinfo files and run them through makeinfo"),
("", "gettext", "to make PO message catalogs"),
("", "changes", "to make an overview of all changed/added/deprecated items"),
("", "xml", "to make Docutils-native XML files"),
("", "pseudoxml", "to make pseudoxml-XML files for display purposes"),
("", "linkcheck", "to check all external links for integrity"),
("", "doctest", "to run all doctests embedded in the documentation "
"(if enabled)"),
("", "coverage", "to run coverage check of the documentation (if enabled)"),
]
class Make(object):
def __init__(self, srcdir, builddir, opts):
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make')
def builddir_join(self, *comps):
return path.join(self.builddir, *comps)
def build_clean(self):
if not path.exists(self.builddir):
return 0
elif not path.isdir(self.builddir):
print("Error: %r is not a directory!" % self.builddir)
return 1
print("Removing everything under %r..." % self.builddir)
for item in os.listdir(self.builddir):
rmtree(self.builddir_join(item))
return 0
def build_help(self):
if not color_terminal():
nocolor()
print(bold("Sphinx v%s" % sphinx.__display_version__))
print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2)) # type: ignore # NOQA
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(' %s %s' % (blue(bname.ljust(10)), description))
def build_latexpdf(self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_latexpdfja(self):
# type: () -> int
if self.run_generic_build('latex') > 0:
return 1
try:
with cd(self.builddir_join('latex')):
return subprocess.call([self.makecmd, 'all-pdf-ja'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_info(self):
# type: () -> int
if self.run_generic_build('texinfo') > 0:
return 1
try:
with cd(self.builddir_join('texinfo')):
return subprocess.call([self.makecmd, 'info'])
except OSError:
print('Error: Failed to run: %s' % self.makecmd)
return 1
def build_gettext(self):
# type: () -> int
dtdir = self.builddir_join('gettext', '.doctrees')
if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
return 1
return 0
def run_generic_build(self, builder, doctreedir=None):
# type: (unicode, unicode) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
if papersize in ('a4', 'letter'):
opts.extend(['-D', 'latex_elements.papersize=' + papersize + 'paper'])
if doctreedir is None:
doctreedir = self.builddir_join('doctrees')
args = ['-b', builder,
'-d', doctreedir,
self.srcdir,
self.builddir_join(builder)]
return cmdline.main(args + opts)
def run_make_mode(args):
# type: (List[unicode]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
return 1
make = Make(args[1], args[2], args[3:])
run_method = 'build_' + args[0]
if hasattr(make, run_method):
return getattr(make, run_method)()
return make.run_generic_build(args[0])
| true | true |
f7144910af197a90e026161df30c5200d7a0dd17 | 1,623 | py | Python | greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py | dhwalters423/iot-reference-architectures | cb966fec51b73c4403744b0e8a6060f05fe92013 | [
"MIT-0"
] | 1 | 2022-01-20T12:26:42.000Z | 2022-01-20T12:26:42.000Z | greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py | dhwalters423/iot-reference-architectures | cb966fec51b73c4403744b0e8a6060f05fe92013 | [
"MIT-0"
] | null | null | null | greengrass-v2/poll-api/artifacts/com.greengrass.FakeApi/1.0.0/app.py | dhwalters423/iot-reference-architectures | cb966fec51b73c4403744b0e8a6060f05fe92013 | [
"MIT-0"
] | null | null | null | #!/usr/bin/env python3
import json
import time
from random import gauss
from flask import Flask
# How many fake devices /data reports on every request.
number_of_devices = 10
# Target number of generated readings per device for each elapsed second.
number_of_values_per_second = 2
# Millisecond timestamp of the previous /data request (None before the first).
last_request = None
app = Flask(__name__)
@app.route('/')
def index():
    """Health-check endpoint: confirms the fake API server is up."""
    return 'Server is running'
def get_time_ms():
    """Current wall-clock time, in whole milliseconds."""
    return int(1000 * time.time())


def generate_one_device(device_number, number_of_values, time_between_values):
    """Fabricate ``number_of_values`` random readings for one device.

    Each reading is ``[timestamp_ms, "datum", text_value, numeric_value, source]``
    with the timestamp jittered around ``time_between_values`` seconds in the
    past.  Returns ``{"device_<n>": [reading, ...]}``.
    """
    now = get_time_ms()
    readings = []
    for _ in range(number_of_values):
        reading = gauss(5, 2)
        jitter = gauss(1000 * time_between_values, 500)
        readings.append([int(now - jitter), "datum", str(reading), reading, 0])
    return {"device_{}".format(device_number): readings}
@app.route('/data')
def data():
    """Return freshly generated readings for every fake device, as JSON."""
    global last_request
    now = get_time_ms()
    if last_request is None:
        # First request ever: pretend the previous poll was 10 s ago.
        last_request = get_time_ms() - 10000
    # Scale the batch size to the time elapsed since the previous request.
    elapsed_seconds = (now - last_request) / 1000
    number_of_values = int(elapsed_seconds * number_of_values_per_second)
    if number_of_values == 0:
        return json.dumps({})
    last_request = now
    points = {}
    for device_index in range(number_of_devices):
        points.update(generate_one_device(device_index, number_of_values, 1))
    payload = {
        "device_data": {
            "descriptions": [
                "timestamp",
                "name",
                "text_value",
                "numeric_value",
                "source"
            ],
            "points": points
        }
    }
    return json.dumps(payload)
| 23.521739 | 95 | 0.557609 |
import json
import time
from random import gauss
from flask import Flask
number_of_devices = 10
number_of_values_per_second = 2
last_request = None
app = Flask(__name__)
@app.route('/')
def index():
return 'Server is running'
def get_time_ms():
return int(time.time() * 1000)
def generate_one_device(device_number, number_of_values, time_between_values):
temp_data = []
now = get_time_ms()
for i in range(number_of_values):
value = gauss(5, 2)
temp_data.append(
[int(now - gauss(1000 * time_between_values, 500)), "datum", str(value), value, 0])
return {f"device_{device_number}": temp_data}
@app.route('/data')
def data():
global last_request
now = get_time_ms()
if last_request is None:
last_request = get_time_ms() - 10000
number_of_values = int((now - last_request) / 1000 * number_of_values_per_second)
if number_of_values == 0:
return json.dumps({})
last_request = now
temp_data = {}
for i in range(number_of_devices):
temp_data.update(generate_one_device(i, number_of_values, 1))
return json.dumps({"device_data": {
"descriptions": [
"timestamp",
"name",
"text_value",
"numeric_value",
"source"
],
"points": temp_data
}
})
| true | true |
f714494bfe0ecac22c74155fa6c7a76f477af690 | 2,430 | py | Python | src/stats/intro_stats.py | JacobEkedahl/detect-intros-from-video | 9b2bac1c7209558711072f967a3359d2ca698cd4 | [
"MIT"
] | 5 | 2020-06-05T05:10:25.000Z | 2022-03-10T05:12:14.000Z | src/stats/intro_stats.py | JacobEkedahl/detect-intros-from-video | 9b2bac1c7209558711072f967a3359d2ca698cd4 | [
"MIT"
] | null | null | null | src/stats/intro_stats.py | JacobEkedahl/detect-intros-from-video | 9b2bac1c7209558711072f967a3359d2ca698cd4 | [
"MIT"
] | 3 | 2020-06-06T13:21:23.000Z | 2021-03-08T22:24:18.000Z | import matplotlib.pyplot as plt
import utils.extractor as extractor
import utils.file_handler as file_handler
import utils.time_handler as time_handler
def plot_intros():
    """Scatter intro start time (x) against intro length (y), in seconds."""
    intros = extractor.get_intros_from_data()
    valid_intros = [intro for intro in intros if intro["end"] != "00:00:00"]
    start_times = [get_start_time_seconds(intro) for intro in valid_intros]
    lengths = [get_size_from_intro(intro) for intro in valid_intros]
    plt.xlabel('Start time of intro (Seconds)')
    plt.ylabel('Length of intro (Seconds)')
    plt.grid(True)
    plt.scatter(start_times, lengths)
    plt.show()
def _plot_histogram(values, xlabel, bins):
    """Shared renderer: frequency histogram of *values* using *bins* bins."""
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.grid(True)
    plt.hist(values, bins=bins)
    plt.show()


def plot_hist_sizes():
    """Histogram of intro lengths (seconds) over all valid intros."""
    intros = extractor.get_intros_from_data()
    only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
    _plot_histogram(list(map(get_size_from_intro, only_valid_intros)),
                    'Length of intro (Seconds)', 40)


def plot_hist_frequency():
    """Histogram of intro start times (seconds) over all valid intros."""
    intros = extractor.get_intros_from_data()
    only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
    _plot_histogram(list(map(get_start_time_seconds, only_valid_intros)),
                    'Start time of intro (Seconds)', 60)
def plot_all_intros():
    """Side-by-side histograms of intro start times and intro lengths."""
    x_titles = ['Start time of intro (Seconds)', 'Length of intro (Seconds)']
    y_title = 'Frequency'
    colors = ['blue', 'blue']
    bins = [60, 40]
    intros = extractor.get_intros_from_data()
    only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
    x_size = list(map(get_size_from_intro, only_valid_intros))
    x_start = list(map(get_start_time_seconds, only_valid_intros))
    x_data = [x_start, x_size]
    # The figure handle is unused; only the axes are configured.
    _, axs = plt.subplots(1, 2)
    axs = axs.ravel()
    for idx, ax in enumerate(axs):
        ax.hist(x_data[idx], bins=bins[idx], fc=colors[idx])
        ax.set_xlabel(x_titles[idx])
        ax.set_ylabel(y_title)
        ax.grid()
    plt.tight_layout()
    plt.show()
def get_size_from_intro(intro):
    """Length of an intro in seconds (always non-negative)."""
    start_seconds = time_handler.timestamp(intro["start"]) / 1000
    end_seconds = time_handler.timestamp(intro["end"]) / 1000
    return abs(end_seconds - start_seconds)


def get_start_time_seconds(intro):
    """Start offset of an intro, in seconds."""
    start_millis = time_handler.timestamp(intro["start"])
    return start_millis / 1000
| 33.75 | 77 | 0.676955 | import matplotlib.pyplot as plt
import utils.extractor as extractor
import utils.file_handler as file_handler
import utils.time_handler as time_handler
def plot_intros():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = map(get_start_time_seconds, only_valid_intros)
y_data = map(get_size_from_intro, only_valid_intros)
plt.xlabel('Start time of intro (Seconds)')
plt.ylabel('Length of intro (Seconds)')
plt.grid(True)
plt.scatter(list(x_data), list(y_data))
plt.show()
def plot_hist_sizes():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = list(map(get_size_from_intro, only_valid_intros))
plt.xlabel('Length of intro (Seconds)')
plt.ylabel('Frequency')
plt.grid(True)
plt.hist(x_data, bins=40)
plt.show()
def plot_hist_frequency():
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_data = list(map(get_start_time_seconds, only_valid_intros))
plt.xlabel('Start time of intro (Seconds)')
plt.ylabel('Frequency')
plt.grid(True)
plt.hist(x_data, bins=60)
plt.show()
def plot_all_intros():
x_titles = ['Start time of intro (Seconds)', 'Length of intro (Seconds)']
y_title = 'Frequency'
titles = ['Start times of intros','Lengths of intros']
colors = ['blue', 'blue']
bins = [60, 40]
intros = extractor.get_intros_from_data()
only_valid_intros = [x for x in intros if not x["end"] == "00:00:00"]
x_size = list(map(get_size_from_intro, only_valid_intros))
x_start = list(map(get_start_time_seconds, only_valid_intros))
x_data = [x_start, x_size]
fig, axs = plt.subplots(1, 2)
axs = axs.ravel()
for idx, ax in enumerate(axs):
ax.hist(x_data[idx], bins=bins[idx], fc=colors[idx])
ax.set_xlabel(x_titles[idx])
ax.set_ylabel(y_title)
ax.grid()
plt.tight_layout()
plt.show()
def get_size_from_intro(intro):
start = time_handler.timestamp(intro["start"]) / 1000
end = time_handler.timestamp(intro["end"]) / 1000
return abs(start - end)
def get_start_time_seconds(intro):
return time_handler.timestamp(intro["start"]) / 1000
| true | true |
f7144a1dcf2877b5b9556bdaf5f3fa28830fe1f3 | 930 | py | Python | perfkitbenchmarker/linux_packages/libpng.py | xiaolihope/PerfKitBenchmarker-1.7.0 | 7699b1073a80d7a92fd3db93da742b93a2ecf900 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/libpng.py | xiaolihope/PerfKitBenchmarker-1.7.0 | 7699b1073a80d7a92fd3db93da742b93a2ecf900 | [
"Apache-2.0"
] | null | null | null | perfkitbenchmarker/linux_packages/libpng.py | xiaolihope/PerfKitBenchmarker-1.7.0 | 7699b1073a80d7a92fd3db93da742b93a2ecf900 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing libpng installation and cleanup functions."""
def YumInstall(vm):
  """Installs the libpng runtime and development packages on the VM."""
  for package in ('libpng', 'libpng-devel'):
    vm.InstallPackages(package)
def AptInstall(vm):
  """Installs the libpng runtime and development packages on the VM."""
  packages = 'libpng3 libpng12-dev'
  vm.InstallPackages(packages)
| 33.214286 | 74 | 0.754839 |
def YumInstall(vm):
vm.InstallPackages('libpng')
vm.InstallPackages('libpng-devel')
def AptInstall(vm):
vm.InstallPackages('libpng3 libpng12-dev')
| true | true |
f7144a6156a81fa3ee7667295196b9e059922910 | 1,860 | py | Python | tests/test_strings.py | idanmoradarthas/DataScienceUtils | be4806ebcb9ab0e2cdd189842227bd242f0c8910 | [
"MIT"
] | 19 | 2019-12-26T15:44:58.000Z | 2021-06-14T00:36:24.000Z | tests/test_strings.py | federicodecillia/DataScienceUtils | be4806ebcb9ab0e2cdd189842227bd242f0c8910 | [
"MIT"
] | 2 | 2019-12-06T12:32:41.000Z | 2020-11-27T11:54:15.000Z | tests/test_strings.py | federicodecillia/DataScienceUtils | be4806ebcb9ab0e2cdd189842227bd242f0c8910 | [
"MIT"
] | 3 | 2021-01-16T09:08:15.000Z | 2021-01-29T10:57:11.000Z | import pandas
from ds_utils.strings import append_tags_to_frame, extract_significant_terms_from_subset
def test_append_tags_to_frame():
    """One-hot tag columns are appended consistently to train and test frames."""
    train_input = pandas.DataFrame(
        [{"article_name": "1", "article_tags": "ds,ml,dl"},
         {"article_name": "2", "article_tags": "ds,ml"}])
    test_input = pandas.DataFrame(
        [{"article_name": "3", "article_tags": "ds,ml,py"}])

    expected_columns = ["article_name", "tag_dl", "tag_ds", "tag_ml"]
    expected_train = pandas.DataFrame(
        [{"article_name": "1", "tag_ds": 1, "tag_ml": 1, "tag_dl": 1},
         {"article_name": "2", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
        columns=expected_columns)
    expected_test = pandas.DataFrame(
        [{"article_name": "3", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
        columns=expected_columns)

    actual_train, actual_test = append_tags_to_frame(
        train_input, test_input, "article_tags", "tag_")

    pandas.testing.assert_frame_equal(expected_train, actual_train, check_like=True)
    pandas.testing.assert_frame_equal(expected_test, actual_test, check_like=True)
def test_significant_terms():
    """Subset-significance scores match the expected per-term ratios."""
    documents = ['This is the first document.',
                 'This document is the second document.',
                 'And this is the third one.',
                 'Is this the first document?']
    data_frame = pandas.DataFrame(documents, columns=["content"])
    subset_data_frame = data_frame[data_frame.index > 1]

    terms = extract_significant_terms_from_subset(
        data_frame, subset_data_frame, "content")

    two_thirds = 2 / 3
    expected = pandas.Series(
        [1.0, 1.0, 1.0, two_thirds, two_thirds, two_thirds, 0.5, 0.25, 0.0],
        index=['third', 'one', 'and', 'this', 'the', 'is',
               'first', 'document', 'second'])
    pandas.testing.assert_series_equal(expected, terms)
| 54.705882 | 115 | 0.64086 | import pandas
from ds_utils.strings import append_tags_to_frame, extract_significant_terms_from_subset
def test_append_tags_to_frame():
x_train = pandas.DataFrame([{"article_name": "1", "article_tags": "ds,ml,dl"},
{"article_name": "2", "article_tags": "ds,ml"}])
x_test = pandas.DataFrame([{"article_name": "3", "article_tags": "ds,ml,py"}])
x_train_expected = pandas.DataFrame([{"article_name": "1", "tag_ds": 1, "tag_ml": 1, "tag_dl": 1},
{"article_name": "2", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
x_test_expected = pandas.DataFrame([{"article_name": "3", "tag_ds": 1, "tag_ml": 1, "tag_dl": 0}],
columns=["article_name", "tag_dl", "tag_ds", "tag_ml"])
x_train_with_tags, x_test_with_tags = append_tags_to_frame(x_train, x_test, "article_tags", "tag_")
pandas.testing.assert_frame_equal(x_train_expected, x_train_with_tags, check_like=True)
pandas.testing.assert_frame_equal(x_test_expected, x_test_with_tags, check_like=True)
def test_significant_terms():
corpus = ['This is the first document.', 'This document is the second document.', 'And this is the third one.',
'Is this the first document?']
data_frame = pandas.DataFrame(corpus, columns=["content"])
subset_data_frame = data_frame[data_frame.index > 1]
terms = extract_significant_terms_from_subset(data_frame, subset_data_frame, "content")
expected = pandas.Series(
[1.0, 1.0, 1.0, 0.6666666666666666, 0.6666666666666666, 0.6666666666666666, 0.5, 0.25, 0.0],
index=['third', 'one', 'and', 'this', 'the', 'is', 'first', 'document', 'second'])
pandas.testing.assert_series_equal(expected, terms)
| true | true |
f7144b8fa809f715ad4be47d6e7cdd7ba4be43fd | 63 | py | Python | caravaggio_rest_api/haystack/__init__.py | xalperte/django-caravaggio-rest-api | 36fcdc6b77982fc7fd2462f2c8997911f14047c4 | [
"MIT"
] | null | null | null | caravaggio_rest_api/haystack/__init__.py | xalperte/django-caravaggio-rest-api | 36fcdc6b77982fc7fd2462f2c8997911f14047c4 | [
"MIT"
] | null | null | null | caravaggio_rest_api/haystack/__init__.py | xalperte/django-caravaggio-rest-api | 36fcdc6b77982fc7fd2462f2c8997911f14047c4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*
# Copyright (c) 2018 PreSeries Tech, SL
| 21 | 39 | 0.619048 | true | true | |
f7144c3823d0412a188fb793a469ea4fa0b57caf | 139 | py | Python | examples/container.py | hugovk/Cyberbrain | 3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed | [
"MIT"
] | 2,440 | 2019-09-21T04:21:55.000Z | 2022-03-30T09:47:47.000Z | examples/container.py | hugovk/Cyberbrain | 3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed | [
"MIT"
] | 103 | 2019-09-21T15:19:59.000Z | 2022-03-28T06:27:40.000Z | examples/container.py | hugovk/Cyberbrain | 3b3789a7c23edf95c7f7bac94c2d165e9aaf86ed | [
"MIT"
] | 162 | 2019-07-16T08:03:18.000Z | 2022-03-30T02:51:21.000Z | from cyberbrain import trace
@trace
def container():
    """Build and return the list [0, 1, ..., 999] under cyberbrain tracing."""
    # Keep the intermediate assignment: the traced local is the point of the demo.
    values = list(range(1000))
    return values


if __name__ == "__main__":
    container()
| 11.583333 | 28 | 0.647482 | from cyberbrain import trace
@trace
def container():
x = list(range(1000))
return x
if __name__ == "__main__":
container()
| true | true |
f7144d1cbcf7cf787868c444942d133284af243b | 7,461 | py | Python | lib/kubernetes/client/models/v1_lease_spec.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 7 | 2019-12-21T00:14:14.000Z | 2021-03-11T14:51:37.000Z | lib/kubernetes/client/models/v1_lease_spec.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 29 | 2019-10-09T11:16:21.000Z | 2020-06-23T09:32:09.000Z | lib/kubernetes/client/models/v1_lease_spec.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | [
"Apache-2.0"
] | 1 | 2021-05-07T10:13:31.000Z | 2021-05-07T10:13:31.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1LeaseSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Model for the spec of a Kubernetes v1 Lease object; every field is
    optional and defaults to None.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger type name (drives to_dict serialization).
    swagger_types = {
        'acquire_time': 'datetime',
        'holder_identity': 'str',
        'lease_duration_seconds': 'int',
        'lease_transitions': 'int',
        'renew_time': 'datetime'
    }
    # Python attribute name -> JSON field name in the API definition.
    attribute_map = {
        'acquire_time': 'acquireTime',
        'holder_identity': 'holderIdentity',
        'lease_duration_seconds': 'leaseDurationSeconds',
        'lease_transitions': 'leaseTransitions',
        'renew_time': 'renewTime'
    }
    def __init__(self, acquire_time=None, holder_identity=None, lease_duration_seconds=None, lease_transitions=None, renew_time=None):
        """
        V1LeaseSpec - a model defined in Swagger
        """
        self._acquire_time = None
        self._holder_identity = None
        self._lease_duration_seconds = None
        self._lease_transitions = None
        self._renew_time = None
        # Always None here; generated models use this for polymorphic dispatch.
        self.discriminator = None
        if acquire_time is not None:
          self.acquire_time = acquire_time
        if holder_identity is not None:
          self.holder_identity = holder_identity
        if lease_duration_seconds is not None:
          self.lease_duration_seconds = lease_duration_seconds
        if lease_transitions is not None:
          self.lease_transitions = lease_transitions
        if renew_time is not None:
          self.renew_time = renew_time
    @property
    def acquire_time(self):
        """
        Gets the acquire_time of this V1LeaseSpec.
        acquireTime is a time when the current lease was acquired.
        :return: The acquire_time of this V1LeaseSpec.
        :rtype: datetime
        """
        return self._acquire_time
    @acquire_time.setter
    def acquire_time(self, acquire_time):
        """
        Sets the acquire_time of this V1LeaseSpec.
        acquireTime is a time when the current lease was acquired.
        :param acquire_time: The acquire_time of this V1LeaseSpec.
        :type: datetime
        """
        self._acquire_time = acquire_time
    @property
    def holder_identity(self):
        """
        Gets the holder_identity of this V1LeaseSpec.
        holderIdentity contains the identity of the holder of a current lease.
        :return: The holder_identity of this V1LeaseSpec.
        :rtype: str
        """
        return self._holder_identity
    @holder_identity.setter
    def holder_identity(self, holder_identity):
        """
        Sets the holder_identity of this V1LeaseSpec.
        holderIdentity contains the identity of the holder of a current lease.
        :param holder_identity: The holder_identity of this V1LeaseSpec.
        :type: str
        """
        self._holder_identity = holder_identity
    @property
    def lease_duration_seconds(self):
        """
        Gets the lease_duration_seconds of this V1LeaseSpec.
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.
        :return: The lease_duration_seconds of this V1LeaseSpec.
        :rtype: int
        """
        return self._lease_duration_seconds
    @lease_duration_seconds.setter
    def lease_duration_seconds(self, lease_duration_seconds):
        """
        Sets the lease_duration_seconds of this V1LeaseSpec.
        leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.
        :param lease_duration_seconds: The lease_duration_seconds of this V1LeaseSpec.
        :type: int
        """
        self._lease_duration_seconds = lease_duration_seconds
    @property
    def lease_transitions(self):
        """
        Gets the lease_transitions of this V1LeaseSpec.
        leaseTransitions is the number of transitions of a lease between holders.
        :return: The lease_transitions of this V1LeaseSpec.
        :rtype: int
        """
        return self._lease_transitions
    @lease_transitions.setter
    def lease_transitions(self, lease_transitions):
        """
        Sets the lease_transitions of this V1LeaseSpec.
        leaseTransitions is the number of transitions of a lease between holders.
        :param lease_transitions: The lease_transitions of this V1LeaseSpec.
        :type: int
        """
        self._lease_transitions = lease_transitions
    @property
    def renew_time(self):
        """
        Gets the renew_time of this V1LeaseSpec.
        renewTime is a time when the current holder of a lease has last updated the lease.
        :return: The renew_time of this V1LeaseSpec.
        :rtype: datetime
        """
        return self._renew_time
    @renew_time.setter
    def renew_time(self, renew_time):
        """
        Sets the renew_time of this V1LeaseSpec.
        renewTime is a time when the current holder of a lease has last updated the lease.
        :param renew_time: The renew_time of this V1LeaseSpec.
        :type: datetime
        """
        self._renew_time = renew_time
    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1LeaseSpec):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 31.217573 | 162 | 0.600456 |
from pprint import pformat
from six import iteritems
import re
class V1LeaseSpec(object):
swagger_types = {
'acquire_time': 'datetime',
'holder_identity': 'str',
'lease_duration_seconds': 'int',
'lease_transitions': 'int',
'renew_time': 'datetime'
}
attribute_map = {
'acquire_time': 'acquireTime',
'holder_identity': 'holderIdentity',
'lease_duration_seconds': 'leaseDurationSeconds',
'lease_transitions': 'leaseTransitions',
'renew_time': 'renewTime'
}
def __init__(self, acquire_time=None, holder_identity=None, lease_duration_seconds=None, lease_transitions=None, renew_time=None):
self._acquire_time = None
self._holder_identity = None
self._lease_duration_seconds = None
self._lease_transitions = None
self._renew_time = None
self.discriminator = None
if acquire_time is not None:
self.acquire_time = acquire_time
if holder_identity is not None:
self.holder_identity = holder_identity
if lease_duration_seconds is not None:
self.lease_duration_seconds = lease_duration_seconds
if lease_transitions is not None:
self.lease_transitions = lease_transitions
if renew_time is not None:
self.renew_time = renew_time
@property
def acquire_time(self):
return self._acquire_time
@acquire_time.setter
def acquire_time(self, acquire_time):
self._acquire_time = acquire_time
@property
def holder_identity(self):
return self._holder_identity
@holder_identity.setter
def holder_identity(self, holder_identity):
self._holder_identity = holder_identity
@property
def lease_duration_seconds(self):
return self._lease_duration_seconds
@lease_duration_seconds.setter
def lease_duration_seconds(self, lease_duration_seconds):
self._lease_duration_seconds = lease_duration_seconds
@property
def lease_transitions(self):
return self._lease_transitions
@lease_transitions.setter
def lease_transitions(self, lease_transitions):
self._lease_transitions = lease_transitions
@property
def renew_time(self):
return self._renew_time
@renew_time.setter
def renew_time(self, renew_time):
self._renew_time = renew_time
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, V1LeaseSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f7144fc6d2b714a04c1490bde3b0de182a0d41aa | 874 | py | Python | setup.py | estanislaoledesma/genper | 5996b8bc199d8cecc74b7f6d03b67a4c356b4beb | [
"MIT"
] | 2 | 2021-09-24T20:10:40.000Z | 2021-12-23T21:03:16.000Z | setup.py | estanislaoledesma/genper | 5996b8bc199d8cecc74b7f6d03b67a4c356b4beb | [
"MIT"
] | 4 | 2021-09-24T19:25:38.000Z | 2021-12-22T00:49:07.000Z | setup.py | estanislaoledesma/genper | 5996b8bc199d8cecc74b7f6d03b67a4c356b4beb | [
"MIT"
] | null | null | null | # coding: utf-8
import os
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from setuptools import setup, find_packages
setup(
name = "genper",
version = "1.0.0",
author = "Estanislao Ledesma",
author_email = "estanislaomledesma@gmail.com",
description = ("Software de tomografía por microondas"),
license = "MIT",
keywords = "genper tomografía microondas",
packages = find_packages(),
long_description = read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
) | 31.214286 | 79 | 0.663616 |
import os
# README file and 2) it's easier to type in the README file than to put a raw
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
from setuptools import setup, find_packages
setup(
name = "genper",
version = "1.0.0",
author = "Estanislao Ledesma",
author_email = "estanislaomledesma@gmail.com",
description = ("Software de tomografía por microondas"),
license = "MIT",
keywords = "genper tomografía microondas",
packages = find_packages(),
long_description = read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
) | true | true |
f714504d98c7f4400644588df8c63bfbda6d348d | 6,903 | py | Python | spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null | spiketoolkit/validation/quality_metric_classes/parameter_dictionaries.py | seankmartin/spiketoolkit | 38261d95045b1cd689363579c10ab3aa0a1ab7c0 | [
"MIT"
] | null | null | null |
from collections import OrderedDict
recording_params_dict = OrderedDict([('apply_filter', True), ('freq_min',300.0), ('freq_max',6000.0)])
#Defining GUI Params
keys = list(recording_params_dict.keys())
types = [type(recording_params_dict[key]) for key in keys]
values = [recording_params_dict[key] for key in keys]
recording_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If True, apply filter"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "High-pass frequency"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Low-pass frequency"}]
feature_params_dict = OrderedDict([('max_spikes_per_unit',300), ('recompute_info',False), ('save_features_props',True)])
#Defining GUI Params
keys = list(feature_params_dict.keys())
types = [type(feature_params_dict[key]) for key in keys]
values = [feature_params_dict[key] for key in keys]
feature_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "The maximum number of spikes to extract per unit to compute features."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If True, will always re-extract waveforms."},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "If true, it will save the features in the sorting extractor."}]
amplitude_params_dict = OrderedDict([('amp_method',"absolute"), ('amp_peak',"both"), ('amp_frames_before',3), ('amp_frames_after',3)])
#Defining GUI Params
keys = list(amplitude_params_dict.keys())
types = [type(amplitude_params_dict[key]) for key in keys]
values = [amplitude_params_dict[key] for key in keys]
amplitude_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Frames before peak to compute amplitude"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Frames after peak to compute amplitude"}]
pca_scores_params_dict = OrderedDict([('n_comp',3), ('ms_before',1.0), ('ms_after',2.0), ('dtype',None), ('max_spikes_for_pca',100000)])
#Defining GUI Params
keys = list(pca_scores_params_dict.keys())
types = [type(pca_scores_params_dict[key]) for key in keys]
values = [pca_scores_params_dict[key] for key in keys]
pca_scores_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "n_compFeatures in template-gui format"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Time period in ms to cut waveforms before the spike events"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Time period in ms to cut waveforms after the spike events"},
{'name': keys[3], 'type': 'dtype', 'value': values[3], 'default': values[3], 'title': "The numpy dtype of the waveforms"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "The maximum number of spikes to use to compute PCA."}]
epoch_params_dict =OrderedDict([('epoch_tuples',None), ('epoch_names',None)])
def get_recording_params():
return recording_params_dict.copy()
def get_amplitude_params():
return amplitude_params_dict.copy()
def get_pca_scores_params():
return pca_scores_params_dict.copy()
def get_epoch_params():
return epoch_params_dict.copy()
def get_feature_params():
return feature_params_dict.copy()
def get_recording_gui_params():
return recording_gui_params.copy()
def get_amplitude_gui_params():
return amplitude_gui_params.copy()
def get_pca_scores_gui_params():
return pca_scores_gui_params.copy()
def get_feature_gui_params():
return feature_gui_params.copy()
def update_param_dicts(recording_params=None, amplitude_params=None,
pca_scores_params=None, epoch_params=None,
feature_params=None):
param_dicts = []
if recording_params is not None:
if not set(recording_params.keys()).issubset(
set(get_recording_params().keys())
):
raise ValueError("Improper parameter entered into the recording param dict.")
else:
recording_params = OrderedDict(get_recording_params(), **recording_params)
param_dicts.append(recording_params)
if amplitude_params is not None:
if not set(amplitude_params.keys()).issubset(
set(get_amplitude_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
amplitude_params = OrderedDict(get_amplitude_params(), **amplitude_params)
param_dicts.append(amplitude_params)
if pca_scores_params is not None:
if not set(pca_scores_params.keys()).issubset(
set(get_pca_scores_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
pca_scores_params = OrderedDict(get_pca_scores_params(), **pca_scores_params)
param_dicts.append(pca_scores_params)
if epoch_params is not None:
if not set(epoch_params.keys()).issubset(
set(get_epoch_params().keys())
):
raise ValueError("Improper parameter entered into the epoch params dict")
else:
epoch_params = OrderedDict(get_epoch_params(), **epoch_params)
param_dicts.append(epoch_params)
if feature_params is not None:
if not set(feature_params.keys()).issubset(
set(get_feature_params().keys())
):
raise ValueError("Improper parameter entered into the feature param dict.")
else:
feature_params = OrderedDict(get_feature_params(), **feature_params)
param_dicts.append(feature_params)
return param_dicts
| 56.121951 | 310 | 0.653774 |
from collections import OrderedDict
recording_params_dict = OrderedDict([('apply_filter', True), ('freq_min',300.0), ('freq_max',6000.0)])
keys = list(recording_params_dict.keys())
types = [type(recording_params_dict[key]) for key in keys]
values = [recording_params_dict[key] for key in keys]
recording_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If True, apply filter"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "High-pass frequency"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Low-pass frequency"}]
feature_params_dict = OrderedDict([('max_spikes_per_unit',300), ('recompute_info',False), ('save_features_props',True)])
keys = list(feature_params_dict.keys())
types = [type(feature_params_dict[key]) for key in keys]
values = [feature_params_dict[key] for key in keys]
feature_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "The maximum number of spikes to extract per unit to compute features."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If True, will always re-extract waveforms."},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "If true, it will save the features in the sorting extractor."}]
amplitude_params_dict = OrderedDict([('amp_method',"absolute"), ('amp_peak',"both"), ('amp_frames_before',3), ('amp_frames_after',3)])
keys = list(amplitude_params_dict.keys())
types = [type(amplitude_params_dict[key]) for key in keys]
values = [amplitude_params_dict[key] for key in keys]
amplitude_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "If 'absolute' (default), amplitudes are absolute amplitudes in uV are returned. If 'relative', amplitudes are returned as ratios between waveform amplitudes and template amplitudes."},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "If maximum channel has to be found among negative peaks ('neg'), positive ('pos') or both ('both' - default)"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Frames before peak to compute amplitude"},
{'name': keys[3], 'type': str(types[3].__name__), 'value': values[3], 'default': values[3], 'title': "Frames after peak to compute amplitude"}]
pca_scores_params_dict = OrderedDict([('n_comp',3), ('ms_before',1.0), ('ms_after',2.0), ('dtype',None), ('max_spikes_for_pca',100000)])
keys = list(pca_scores_params_dict.keys())
types = [type(pca_scores_params_dict[key]) for key in keys]
values = [pca_scores_params_dict[key] for key in keys]
pca_scores_gui_params = [{'name': keys[0], 'type': str(types[0].__name__), 'value': values[0], 'default': values[0], 'title': "n_compFeatures in template-gui format"},
{'name': keys[1], 'type': str(types[1].__name__), 'value': values[1], 'default': values[1], 'title': "Time period in ms to cut waveforms before the spike events"},
{'name': keys[2], 'type': str(types[2].__name__), 'value': values[2], 'default': values[2], 'title': "Time period in ms to cut waveforms after the spike events"},
{'name': keys[3], 'type': 'dtype', 'value': values[3], 'default': values[3], 'title': "The numpy dtype of the waveforms"},
{'name': keys[4], 'type': str(types[4].__name__), 'value': values[4], 'default': values[4], 'title': "The maximum number of spikes to use to compute PCA."}]
epoch_params_dict =OrderedDict([('epoch_tuples',None), ('epoch_names',None)])
def get_recording_params():
return recording_params_dict.copy()
def get_amplitude_params():
return amplitude_params_dict.copy()
def get_pca_scores_params():
return pca_scores_params_dict.copy()
def get_epoch_params():
return epoch_params_dict.copy()
def get_feature_params():
return feature_params_dict.copy()
def get_recording_gui_params():
return recording_gui_params.copy()
def get_amplitude_gui_params():
return amplitude_gui_params.copy()
def get_pca_scores_gui_params():
return pca_scores_gui_params.copy()
def get_feature_gui_params():
return feature_gui_params.copy()
def update_param_dicts(recording_params=None, amplitude_params=None,
pca_scores_params=None, epoch_params=None,
feature_params=None):
param_dicts = []
if recording_params is not None:
if not set(recording_params.keys()).issubset(
set(get_recording_params().keys())
):
raise ValueError("Improper parameter entered into the recording param dict.")
else:
recording_params = OrderedDict(get_recording_params(), **recording_params)
param_dicts.append(recording_params)
if amplitude_params is not None:
if not set(amplitude_params.keys()).issubset(
set(get_amplitude_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
amplitude_params = OrderedDict(get_amplitude_params(), **amplitude_params)
param_dicts.append(amplitude_params)
if pca_scores_params is not None:
if not set(pca_scores_params.keys()).issubset(
set(get_pca_scores_params().keys())
):
raise ValueError("Improper parameter entered into the amplitude param dict.")
else:
pca_scores_params = OrderedDict(get_pca_scores_params(), **pca_scores_params)
param_dicts.append(pca_scores_params)
if epoch_params is not None:
if not set(epoch_params.keys()).issubset(
set(get_epoch_params().keys())
):
raise ValueError("Improper parameter entered into the epoch params dict")
else:
epoch_params = OrderedDict(get_epoch_params(), **epoch_params)
param_dicts.append(epoch_params)
if feature_params is not None:
if not set(feature_params.keys()).issubset(
set(get_feature_params().keys())
):
raise ValueError("Improper parameter entered into the feature param dict.")
else:
feature_params = OrderedDict(get_feature_params(), **feature_params)
param_dicts.append(feature_params)
return param_dicts
| true | true |
f71450750ea8e5b04888fcc1b9d0708bbc947036 | 1,243 | py | Python | test/proj4/proj-regression-EPSG-3857-20.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 7 | 2019-03-19T09:32:41.000Z | 2022-02-07T13:20:33.000Z | test/proj4/proj-regression-EPSG-3857-20.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 2 | 2021-03-30T05:37:20.000Z | 2021-08-17T13:58:04.000Z | test/proj4/proj-regression-EPSG-3857-20.py | dvuckovic/magics-test | bd8baf97b0db986f6adf63700d3cf77bbcbad2f2 | [
"Apache-2.0"
] | 5 | 2019-03-19T10:43:46.000Z | 2021-09-09T14:28:39.000Z | from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
#Setting output
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
#Setting the geographical area
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
#Setting the coastlines
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
#Picking the grib metadata
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
#Plotting
plot(png,area,background,title,)
plot_area("EPSG:3857", -19.537526614209707, 21.73608176192727, 45.466740592414304, 81.98066721424705 ) | 28.906977 | 103 | 0.631537 | from Magics.macro import *
import os
def plot_area(epsg, llx, lly, urx, ury):
img = os.path.basename(__file__).split('.')[0]
title = "Projection {} : [{:.2f}, {:.2f}, {:.2f}, {:.2f}]".format(epsg, llx, lly, urx, ury)
png = output(
output_formats = ['png'],
output_name = img,
output_name_first_page_number = 'off')
area = mmap(
subpage_lower_left_latitude = lly,
subpage_lower_left_longitude = llx,
subpage_map_projection = epsg,
subpage_upper_right_latitude = ury,
subpage_upper_right_longitude = urx,
subpage_map_area_definition = "corners"
)
background = mcoast(
map_coastline_land_shade = 'on',
map_coastline_resolution = "medium",
map_coastline_land_shade_colour = 'cream')
title = mtext(
text_lines = [title],
text_justification = 'left',
text_font_size = 0.6,
text_colour = 'charcoal')
plot(png,area,background,title,)
plot_area("EPSG:3857", -19.537526614209707, 21.73608176192727, 45.466740592414304, 81.98066721424705 ) | true | true |
f71451d02fba81b192946c47e4158536448a5bed | 40 | py | Python | test-django-project/testapp/urls.py | rhenter/django-utils | 7e2901ac1efc3db47977b98e45754e40bfef6891 | [
"MIT"
] | 20 | 2021-01-21T13:04:44.000Z | 2022-03-26T22:03:19.000Z | test-django-project/testapp/urls.py | rhenter/django-utils | 7e2901ac1efc3db47977b98e45754e40bfef6891 | [
"MIT"
] | 4 | 2019-03-15T18:13:49.000Z | 2019-03-20T00:06:46.000Z | test-django-project/testapp/urls.py | rhenter/django-utils | 7e2901ac1efc3db47977b98e45754e40bfef6891 | [
"MIT"
] | 6 | 2021-01-21T13:27:45.000Z | 2022-03-26T21:28:22.000Z |
app_name = 'testapp'
urlpatterns = [
]
| 8 | 20 | 0.65 |
app_name = 'testapp'
urlpatterns = [
]
| true | true |
f714526286c921b2969c494f081547696e8bff4f | 8,039 | py | Python | python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 11 | 2016-08-29T07:43:26.000Z | 2016-08-29T07:51:24.000Z | python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/dist_fleet_heter_pipeline_ctr.py | L-Net-1992/Paddle | 4d0ca02ba56760b456f3d4b42a538555b9b6c307 | [
"Apache-2.0"
] | 1 | 2021-12-09T08:59:17.000Z | 2021-12-09T08:59:17.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Distribute CTR model for test fleet api
"""
from __future__ import print_function
import shutil
import tempfile
import time
import paddle
import paddle.fluid as fluid
import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_heter_base import runtime_main, FleetDistHeterRunnerBase
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
paddle.enable_static()
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
"""
For test CTR model, using Fleet api
"""
def net(self, args, batch_size=4, lr=0.01):
"""
network definition
Args:
batch_size(int): the size of mini-batch for training
lr(float): learning rate of training
Returns:
avg_cost: LoDTensor of cost.
"""
dnn_input_dim, lr_input_dim = int(1e5), int(1e5)
with fluid.device_guard("cpu"):
dnn_data = fluid.layers.data(name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
lr_data = fluid.layers.data(name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
label = fluid.layers.data(name="click",
shape=[-1, 1],
dtype="float32",
lod_level=0,
append_batch_size=False)
datas = [dnn_data, lr_data, label]
# build dnn model
dnn_layer_dims = [128, 64, 32, 1]
dnn_embedding = fluid.layers.embedding(
is_distributed=False,
input=dnn_data,
size=[dnn_input_dim, dnn_layer_dims[0]],
param_attr=fluid.ParamAttr(
name="deep_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding,
pool_type="sum")
dnn_out = dnn_pool
# build lr model
lr_embbding = fluid.layers.embedding(
is_distributed=False,
input=lr_data,
size=[lr_input_dim, 1],
param_attr=fluid.ParamAttr(
name="wide_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding,
pool_type="sum")
with fluid.device_guard("gpu"):
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = fluid.layers.fc(
input=dnn_out,
size=dim,
act="relu",
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)),
name='dnn-fc-%d' % i)
dnn_out = fc
with fluid.device_guard("cpu"):
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
label = fluid.layers.cast(label, dtype="int64")
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
fluid.layers.Print(avg_cost, message="avg_cost")
self.feeds = datas
self.train_file_path = ["fake1", "fake2"]
self.avg_cost = avg_cost
self.predict = predict
return avg_cost
def check_model_right(self, dirname):
model_filename = os.path.join(dirname, "__model__")
with open(model_filename, "rb") as f:
program_desc_str = f.read()
program = fluid.Program.parse_from_string(program_desc_str)
with open(os.path.join(dirname, "__model__.proto"), "w") as wn:
wn.write(str(program))
def do_dataset_training(self, fleet):
train_file_list = ctr_dataset_reader.prepare_fake_data()
exe = fluid.Executor(fluid.CPUPlace())
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
exe.run(fluid.default_startup_program())
fleet.init_worker()
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
filelist = fleet.util.get_file_shard(train_file_list)
print("filelist: {}".format(filelist))
# config dataset
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_batch_size(batch_size)
dataset.set_use_var(self.feeds)
pipe_command = 'python3 ctr_dataset_reader.py'
dataset.set_pipe_command(pipe_command)
dataset.set_filelist(filelist)
dataset.set_thread(thread_num)
for epoch_id in range(1):
pass_start = time.time()
dataset.set_filelist(filelist)
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset,
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
pass_time = time.time() - pass_start
print("do_dataset_training done. using time {}".format(pass_time))
exe.close()
def do_dataset_heter_training(self, fleet):
exe = fluid.Executor()
exe.run(fluid.default_startup_program())
fleet.init_worker()
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
pass_start = time.time()
exe.train_from_dataset(program=fluid.default_main_program(),
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
exe.close()
pass_time = time.time() - pass_start
print("do_dataset_heter_training done. using time {}".format(pass_time))
#for epoch_id in range(1):
# pass_start = time.time()
# dataset.set_filelist(filelist)
# exe.train_from_dataset(
# program=fluid.default_main_program(),
# dataset=dataset,
# fetch_list=[self.avg_cost],
# fetch_info=["cost"],
# print_period=2,
# debug=int(os.getenv("Debug", "0")))
# pass_time = time.time() - pass_start
# print("do_dataset_heter_training done. using time {}".format(pass_time))
if __name__ == "__main__":
runtime_main(TestHeterPipelinePsCTR2x2)
| 36.876147 | 85 | 0.559771 |
from __future__ import print_function
import shutil
import tempfile
import time
import paddle
import paddle.fluid as fluid
import os
import numpy as np
import ctr_dataset_reader
from test_dist_fleet_heter_base import runtime_main, FleetDistHeterRunnerBase
from dist_fleet_ctr import TestDistCTR2x2, fake_ctr_reader
paddle.enable_static()
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase):
def net(self, args, batch_size=4, lr=0.01):
dnn_input_dim, lr_input_dim = int(1e5), int(1e5)
with fluid.device_guard("cpu"):
dnn_data = fluid.layers.data(name="dnn_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
lr_data = fluid.layers.data(name="lr_data",
shape=[-1, 1],
dtype="int64",
lod_level=1,
append_batch_size=False)
label = fluid.layers.data(name="click",
shape=[-1, 1],
dtype="float32",
lod_level=0,
append_batch_size=False)
datas = [dnn_data, lr_data, label]
dnn_layer_dims = [128, 64, 32, 1]
dnn_embedding = fluid.layers.embedding(
is_distributed=False,
input=dnn_data,
size=[dnn_input_dim, dnn_layer_dims[0]],
param_attr=fluid.ParamAttr(
name="deep_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
dnn_pool = fluid.layers.sequence_pool(input=dnn_embedding,
pool_type="sum")
dnn_out = dnn_pool
lr_embbding = fluid.layers.embedding(
is_distributed=False,
input=lr_data,
size=[lr_input_dim, 1],
param_attr=fluid.ParamAttr(
name="wide_embedding",
initializer=fluid.initializer.Constant(value=0.01)),
is_sparse=True)
lr_pool = fluid.layers.sequence_pool(input=lr_embbding,
pool_type="sum")
with fluid.device_guard("gpu"):
for i, dim in enumerate(dnn_layer_dims[1:]):
fc = fluid.layers.fc(
input=dnn_out,
size=dim,
act="relu",
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.01)),
name='dnn-fc-%d' % i)
dnn_out = fc
with fluid.device_guard("cpu"):
merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1)
label = fluid.layers.cast(label, dtype="int64")
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax')
cost = fluid.layers.cross_entropy(input=predict, label=label)
avg_cost = fluid.layers.mean(x=cost)
fluid.layers.Print(avg_cost, message="avg_cost")
self.feeds = datas
self.train_file_path = ["fake1", "fake2"]
self.avg_cost = avg_cost
self.predict = predict
return avg_cost
def check_model_right(self, dirname):
model_filename = os.path.join(dirname, "__model__")
with open(model_filename, "rb") as f:
program_desc_str = f.read()
program = fluid.Program.parse_from_string(program_desc_str)
with open(os.path.join(dirname, "__model__.proto"), "w") as wn:
wn.write(str(program))
def do_dataset_training(self, fleet):
train_file_list = ctr_dataset_reader.prepare_fake_data()
exe = fluid.Executor(fluid.CPUPlace())
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
exe.run(fluid.default_startup_program())
fleet.init_worker()
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
filelist = fleet.util.get_file_shard(train_file_list)
print("filelist: {}".format(filelist))
dataset = fluid.DatasetFactory().create_dataset()
dataset.set_batch_size(batch_size)
dataset.set_use_var(self.feeds)
pipe_command = 'python3 ctr_dataset_reader.py'
dataset.set_pipe_command(pipe_command)
dataset.set_filelist(filelist)
dataset.set_thread(thread_num)
for epoch_id in range(1):
pass_start = time.time()
dataset.set_filelist(filelist)
exe.train_from_dataset(program=fluid.default_main_program(),
dataset=dataset,
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
pass_time = time.time() - pass_start
print("do_dataset_training done. using time {}".format(pass_time))
exe.close()
def do_dataset_heter_training(self, fleet):
exe = fluid.Executor()
exe.run(fluid.default_startup_program())
fleet.init_worker()
real_program = fluid.default_main_program(
)._heter_pipeline_opt["section_program"]
print(real_program)
thread_num = int(os.getenv("CPU_NUM", 2))
batch_size = 128
pass_start = time.time()
exe.train_from_dataset(program=fluid.default_main_program(),
fetch_list=[self.avg_cost],
fetch_info=["cost"],
print_period=2,
debug=int(os.getenv("Debug", "0")))
exe.close()
pass_time = time.time() - pass_start
print("do_dataset_heter_training done. using time {}".format(pass_time))
if __name__ == "__main__":
runtime_main(TestHeterPipelinePsCTR2x2)
| true | true |
f71452cf2e938c16778cf2d6bdada38cde5b86ec | 716 | py | Python | tests/builder/model/test_model_builder.py | shfshf/deliverable_model | d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264 | [
"Apache-2.0"
] | null | null | null | tests/builder/model/test_model_builder.py | shfshf/deliverable_model | d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264 | [
"Apache-2.0"
] | null | null | null | tests/builder/model/test_model_builder.py | shfshf/deliverable_model | d1f34c4a719bd392033f3f9c9ccb2dbbcf6ec264 | [
"Apache-2.0"
] | null | null | null | import filecmp
from deliverable_model.builder.model.model_builder import ModelBuilder
def test_build(datadir, tmpdir):
model_builder = ModelBuilder()
model_builder.add_keras_h5_model(datadir / "fixture" / "keras_h5_model")
model_builder.save()
config = model_builder.serialize(tmpdir)
assert config == {
"converter_for_request": "converter_for_request",
"converter_for_response": "converter_for_response",
"custom_object_dependency": [],
"type": "keras_h5_model",
"version": "1.0",
}
dircmp_obj = filecmp.dircmp(datadir / "expected", tmpdir)
assert not dircmp_obj.diff_files
assert model_builder.get_dependency() == ["tensorflow"]
| 26.518519 | 76 | 0.702514 | import filecmp
from deliverable_model.builder.model.model_builder import ModelBuilder
def test_build(datadir, tmpdir):
model_builder = ModelBuilder()
model_builder.add_keras_h5_model(datadir / "fixture" / "keras_h5_model")
model_builder.save()
config = model_builder.serialize(tmpdir)
assert config == {
"converter_for_request": "converter_for_request",
"converter_for_response": "converter_for_response",
"custom_object_dependency": [],
"type": "keras_h5_model",
"version": "1.0",
}
dircmp_obj = filecmp.dircmp(datadir / "expected", tmpdir)
assert not dircmp_obj.diff_files
assert model_builder.get_dependency() == ["tensorflow"]
| true | true |
f71452f12de9bbaccc936c3a7641f37ccf59fe6e | 92,701 | py | Python | python/ccxt/ndax.py | allunderone/ccxt | b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da | [
"MIT"
] | 1 | 2018-08-20T09:38:13.000Z | 2018-08-20T09:38:13.000Z | python/ccxt/ndax.py | allunderone/ccxt | b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da | [
"MIT"
] | null | null | null | python/ccxt/ndax.py | allunderone/ccxt | b9e62462ad27a83ba6b0ec0ebd567357fdb7f2da | [
"MIT"
] | 1 | 2019-01-02T01:32:45.000Z | 2019-01-02T01:32:45.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ndax(Exchange):
    def describe(self):
        """Return the static exchange description(capabilities, endpoints,
        fees, error mappings and default options), merged over the base
        Exchange description."""
        return self.deep_extend(super(ndax, self).describe(), {
            'id': 'ndax',
            'name': 'NDAX',
            'countries': ['US'],  # United States
            'rateLimit': 1000,
            'pro': True,
            # unified methods implemented by self exchange class
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'editOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'signIn': True,
            },
            # unified timeframe -> candle interval in seconds
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
                '1M': '2419200',
                '4M': '9676800',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/108623144-67a3ef00-744e-11eb-8140-75c6b851e945.jpg',
                'test': {
                    'public': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                    'private': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                },
                'api': {
                    'public': 'https://api.ndax.io:8443/AP',
                    'private': 'https://api.ndax.io:8443/AP',
                },
                'www': 'https://ndax.io',
                'doc': [
                    'https://apidoc.ndax.io/',
                ],
                'fees': 'https://ndax.io/fees',
                'referral': 'https://one.ndax.io/bfQiSL',
            },
            # raw endpoint names; the base class generates camelCase methods
            # such as publicGetGetLevel1 / privatePostSendOrder from these
            'api': {
                'public': {
                    'get': [
                        'Activate2FA',
                        'Authenticate2FA',
                        'AuthenticateUser',
                        'GetL2Snapshot',
                        'GetLevel1',
                        'GetValidate2FARequiredEndpoints',
                        'LogOut',
                        'GetTickerHistory',
                        'GetProduct',
                        'GetProducts',
                        'GetInstrument',
                        'GetInstruments',
                        'Ping',
                        'trades',  # undocumented
                        'GetLastTrades',  # undocumented
                        'SubscribeLevel1',
                        'SubscribeLevel2',
                        'SubscribeTicker',
                        'SubscribeTrades',
                        'SubscribeBlockTrades',
                        'UnsubscribeBlockTrades',
                        'UnsubscribeLevel1',
                        'UnsubscribeLevel2',
                        'UnsubscribeTicker',
                        'UnsubscribeTrades',
                        'Authenticate',  # undocumented
                    ],
                },
                'private': {
                    'get': [
                        'GetUserAccountInfos',
                        'GetUserAccounts',
                        'GetUserAffiliateCount',
                        'GetUserAffiliateTag',
                        'GetUserConfig',
                        'GetAllUnredactedUserConfigsForUser',
                        'GetUnredactedUserConfigByKey',
                        'GetUserDevices',
                        'GetUserReportTickets',
                        'GetUserReportWriterResultRecords',
                        'GetAccountInfo',
                        'GetAccountPositions',
                        'GetAllAccountConfigs',
                        'GetTreasuryProductsForAccount',
                        'GetAccountTrades',
                        'GetAccountTransactions',
                        'GetOpenTradeReports',
                        'GetAllOpenTradeReports',
                        'GetTradesHistory',
                        'GetOpenOrders',
                        'GetOpenQuotes',
                        'GetOrderFee',
                        'GetOrderHistory',
                        'GetOrdersHistory',
                        'GetOrderStatus',
                        'GetOmsFeeTiers',
                        'GetAccountDepositTransactions',
                        'GetAccountWithdrawTransactions',
                        'GetAllDepositRequestInfoTemplates',
                        'GetDepositInfo',
                        'GetDepositRequestInfoTemplate',
                        'GetDeposits',
                        'GetDepositTicket',
                        'GetDepositTickets',
                        'GetOMSWithdrawFees',
                        'GetWithdrawFee',
                        'GetWithdraws',
                        'GetWithdrawTemplate',
                        'GetWithdrawTemplateTypes',
                        'GetWithdrawTicket',
                        'GetWithdrawTickets',
                    ],
                    'post': [
                        'AddUserAffiliateTag',
                        'CancelUserReport',
                        'RegisterNewDevice',
                        'SubscribeAccountEvents',
                        'UpdateUserAffiliateTag',
                        'GenerateTradeActivityReport',
                        'GenerateTransactionActivityReport',
                        'GenerateTreasuryActivityReport',
                        'ScheduleTradeActivityReport',
                        'ScheduleTransactionActivityReport',
                        'ScheduleTreasuryActivityReport',
                        'CancelAllOrders',
                        'CancelOrder',
                        'CancelQuote',
                        'CancelReplaceOrder',
                        'CreateQuote',
                        'ModifyOrder',
                        'SendOrder',
                        'SubmitBlockTrade',
                        'UpdateQuote',
                        'CancelWithdraw',
                        'CreateDepositTicket',
                        'CreateWithdrawTicket',
                        'SubmitDepositTicketComment',
                        'SubmitWithdrawTicketComment',
                        'GetOrderHistoryByOrderId',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.25 / 100,
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
                # these credentials are required for signIn() and withdraw()
                # 'login': True,
                # 'password': True,
                # 'twofa': True,
            },
            # precision values above are tick sizes, not decimal digit counts
            'precisionMode': TICK_SIZE,
            'exceptions': {
                'exact': {
                    'Not_Enough_Funds': InsufficientFunds,  # {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101}
                    'Server Error': ExchangeError,  # {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null}
                    'Resource Not Found': OrderNotFound,  # {"result":false,"errormsg":"Resource Not Found","errorcode":104,"detail":null}
                },
                'broad': {
                    'Invalid InstrumentId': BadSymbol,  # {"result":false,"errormsg":"Invalid InstrumentId: 10000","errorcode":100,"detail":null}
                    'This endpoint requires 2FACode along with the payload': AuthenticationError,
                },
            },
            'options': {
                # the Order Management System id, constant for self deployment
                'omsId': 1,
                # unified order type name -> NDAX numeric order type code
                'orderTypes': {
                    'Market': 1,
                    'Limit': 2,
                    'StopMarket': 3,
                    'StopLimit': 4,
                    'TrailingStopMarket': 5,
                    'TrailingStopLimit': 6,
                    'BlockTrade': 7,
                },
            },
        })
    def sign_in(self, params={}):
        """
        Authenticate a session with login/password credentials and, when the
        account has 2FA enabled, complete the second-factor challenge.
        Stores the resulting session token in self.options['sessionToken'].
        Requires exchange.login, exchange.password and exchange.twofa in
        addition to the API credentials.
        """
        self.check_required_credentials()
        if self.login is None or self.password is None or self.twofa is None:
            raise AuthenticationError(self.id + ' signIn() requires exchange.login, exchange.password and exchange.twofa credentials')
        request = {
            'grant_type': 'client_credentials',  # the only supported value
        }
        response = self.publicGetAuthenticate(self.extend(request, params))
        #
        # {
        #     "Authenticated":true,
        #     "Requires2FA":true,
        #     "AuthType":"Google",
        #     "AddtlInfo":"",
        #     "Pending2FaToken": "6f5c4e66-f3ee-493e-9227-31cc0583b55f"
        # }
        #
        sessionToken = self.safe_string(response, 'SessionToken')
        if sessionToken is not None:
            # 2FA not required - the first call already yielded a session token
            self.options['sessionToken'] = sessionToken
            return response
        pending2faToken = self.safe_string(response, 'Pending2FaToken')
        if pending2faToken is not None:
            # 2FA challenge pending - answer it with the current TOTP code
            self.options['pending2faToken'] = pending2faToken
            request = {
                'Code': self.oath(),  # TOTP code derived from self.twofa
            }
            response = self.publicGetAuthenticate2FA(self.extend(request, params))
            #
            # {
            #     "Authenticated": True,
            #     "UserId":57765,
            #     "SessionToken":"4a2a5857-c4e5-4fac-b09e-2c4c30b591a0"
            # }
            #
            sessionToken = self.safe_string(response, 'SessionToken')
            self.options['sessionToken'] = sessionToken
            return response
        return response  # neither a session token nor a 2FA challenge was returned
def fetch_currencies(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetProducts(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "ProductId":1,
# "Product":"BTC",
# "ProductFullName":"Bitcoin",
# "ProductType":"CryptoCurrency",
# "DecimalPlaces":8,
# "TickSize":0.0000000100000000000000000000,
# "NoFees":false,
# "IsDisabled":false,
# "MarginEnabled":false
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'ProductId')
name = self.safe_string(currency, 'ProductFullName')
type = self.safe_string(currency, 'ProductType')
code = self.safe_currency_code(self.safe_string(currency, 'Product'))
precision = self.safe_number(currency, 'TickSize')
isDisabled = self.safe_value(currency, 'IsDisabled')
active = not isDisabled
result[code] = {
'id': id,
'name': name,
'code': code,
'type': type,
'precision': precision,
'info': currency,
'active': active,
'fee': None,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetInstruments(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "InstrumentId":3,
# "Symbol":"LTCBTC",
# "Product1":3,
# "Product1Symbol":"LTC",
# "Product2":1,
# "Product2Symbol":"BTC",
# "InstrumentType":"Standard",
# "VenueInstrumentId":3,
# "VenueId":1,
# "SortIndex":0,
# "SessionStatus":"Running",
# "PreviousSessionStatus":"Stopped",
# "SessionStatusDateTime":"2020-11-25T19:42:15.245Z",
# "SelfTradePrevention":true,
# "QuantityIncrement":0.0000000100000000000000000000,
# "PriceIncrement":0.0000000100000000000000000000,
# "MinimumQuantity":0.0100000000000000000000000000,
# "MinimumPrice":0.0000010000000000000000000000,
# "VenueSymbol":"LTCBTC",
# "IsDisable":false,
# "MasterDataId":0,
# "PriceCollarThreshold":0.0000000000000000000000000000,
# "PriceCollarPercent":0.0000000000000000000000000000,
# "PriceCollarEnabled":false,
# "PriceFloorLimit":0.0000000000000000000000000000,
# "PriceFloorLimitEnabled":false,
# "PriceCeilingLimit":0.0000000000000000000000000000,
# "PriceCeilingLimitEnabled":false,
# "CreateWithMarketRunning":true,
# "AllowOnlyMarketMakerCounterParty":false,
# "PriceCollarIndexDifference":0.0000000000000000000000000000,
# "PriceCollarConvertToOtcEnabled":false,
# "PriceCollarConvertToOtcClientUserId":0,
# "PriceCollarConvertToOtcAccountId":0,
# "PriceCollarConvertToOtcThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeThreshold":0.0000000000000000000000000000,
# "OtcConvertSizeEnabled":false,
# "OtcTradesPublic":true,
# "PriceTier":0
# },
# ]
#
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'InstrumentId')
# lowercaseId = self.safe_string_lower(market, 'symbol')
baseId = self.safe_string(market, 'Product1')
quoteId = self.safe_string(market, 'Product2')
base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))
quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))
symbol = base + '/' + quote
precision = {
'amount': self.safe_number(market, 'QuantityIncrement'),
'price': self.safe_number(market, 'PriceIncrement'),
}
sessionStatus = self.safe_string(market, 'SessionStatus')
isDisable = self.safe_value(market, 'IsDisable')
sessionRunning = (sessionStatus == 'Running')
active = True if (sessionRunning and not isDisable) else False
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'MinimumQuantity'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'MinimumPrice'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=6, amountKey=8):
nonce = None
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(orderbook)):
level = orderbook[i]
if timestamp is None:
timestamp = self.safe_integer(level, 2)
else:
newTimestamp = self.safe_integer(level, 2)
timestamp = max(timestamp, newTimestamp)
if nonce is None:
nonce = self.safe_integer(level, 0)
else:
newNonce = self.safe_integer(level, 0)
nonce = max(nonce, newNonce)
bidask = self.parse_bid_ask(level, priceKey, amountKey)
levelSide = self.safe_integer(level, 9)
side = asksKey if levelSide else bidsKey
result[side].append(bidask)
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
result['nonce'] = nonce
return result
def fetch_order_book(self, symbol, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
limit = 100 if (limit is None) else limit # default 100
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Depth': limit, # default 100
}
response = self.publicGetGetL2Snapshot(self.extend(request, params))
#
# [
# [
# 0, # 0 MDUpdateId
# 1, # 1 Number of Unique Accounts
# 123, # 2 ActionDateTime in Posix format X 1000
# 0, # 3 ActionType 0(New), 1(Update), 2(Delete)
# 0.0, # 4 LastTradePrice
# 0, # 5 Number of Orders
# 0.0, # 6 Price
# 0, # 7 ProductPairCode
# 0.0, # 8 Quantity
# 0, # 9 Side
# ],
# [97244115,1,1607456142963,0,19069.32,1,19069.31,8,0.140095,0],
# [97244115,0,1607456142963,0,19069.32,1,19068.64,8,0.0055,0],
# [97244115,0,1607456142963,0,19069.32,1,19068.26,8,0.021291,0],
# [97244115,1,1607456142964,0,19069.32,1,19069.32,8,0.099636,1],
# [97244115,0,1607456142964,0,19069.32,1,19069.98,8,0.1,1],
# [97244115,0,1607456142964,0,19069.32,1,19069.99,8,0.141604,1],
# ]
#
return self.parse_order_book(response, symbol)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "OMSId":1,
# "InstrumentId":8,
# "BestBid":19069.31,
# "BestOffer":19069.32,
# "LastTradedPx":19069.32,
# "LastTradedQty":0.0001,
# "LastTradeTime":1607040406424,
# "SessionOpen":19069.32,
# "SessionHigh":19069.32,
# "SessionLow":19069.32,
# "SessionClose":19069.32,
# "Volume":0.0001,
# "CurrentDayVolume":0.0001,
# "CurrentDayNotional":1.906932,
# "CurrentDayNumTrades":1,
# "CurrentDayPxChange":0.00,
# "Rolling24HrVolume":0.000000000000000000000000000,
# "Rolling24HrNotional":0.00000000000000000000000,
# "Rolling24NumTrades":0,
# "Rolling24HrPxChange":0,
# "TimeStamp":"1607040406425",
# "BidQty":0,
# "AskQty":0,
# "BidOrderCt":0,
# "AskOrderCt":0,
# "Rolling24HrPxChangePercent":0,
# }
#
timestamp = self.safe_integer(ticker, 'TimeStamp')
marketId = self.safe_string(ticker, 'InstrumentId')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'LastTradedPx')
percentage = self.safe_number(ticker, 'Rolling24HrPxChangePercent')
change = self.safe_number(ticker, 'Rolling24HrPxChange')
open = self.safe_number(ticker, 'SessionOpen')
average = None
if (last is not None) and (change is not None):
average = self.sum(last, open) / 2
baseVolume = self.safe_number(ticker, 'Rolling24HrVolume')
quoteVolume = self.safe_number(ticker, 'Rolling24HrNotional')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'SessionHigh'),
'low': self.safe_number(ticker, 'SessionLow'),
'bid': self.safe_number(ticker, 'BestBid'),
'bidVolume': None, # self.safe_number(ticker, 'BidQty'), always shows 0
'ask': self.safe_number(ticker, 'BestOffer'),
'askVolume': None, # self.safe_number(ticker, 'AskQty'), always shows 0
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
response = self.publicGetGetLevel1(self.extend(request, params))
#
# {
# "OMSId":1,
# "InstrumentId":8,
# "BestBid":19069.31,
# "BestOffer":19069.32,
# "LastTradedPx":19069.32,
# "LastTradedQty":0.0001,
# "LastTradeTime":1607040406424,
# "SessionOpen":19069.32,
# "SessionHigh":19069.32,
# "SessionLow":19069.32,
# "SessionClose":19069.32,
# "Volume":0.0001,
# "CurrentDayVolume":0.0001,
# "CurrentDayNotional":1.906932,
# "CurrentDayNumTrades":1,
# "CurrentDayPxChange":0.00,
# "Rolling24HrVolume":0.000000000000000000000000000,
# "Rolling24HrNotional":0.00000000000000000000000,
# "Rolling24NumTrades":0,
# "Rolling24HrPxChange":0,
# "TimeStamp":"1607040406425",
# "BidQty":0,
# "AskQty":0,
# "BidOrderCt":0,
# "AskOrderCt":0,
# "Rolling24HrPxChangePercent":0,
# }
#
return self.parse_ticker(response, market)
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1501603632000, # 0 DateTime
# 2700.33, # 1 High
# 2687.01, # 2 Low
# 2687.01, # 3 Open
# 2687.01, # 4 Close
# 24.86100992, # 5 Volume
# 0, # 6 Inside Bid Price
# 2870.95, # 7 Inside Ask Price
# 1 # 8 InstrumentId
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.milliseconds()
if since is None:
if limit is not None:
request['FromDate'] = self.ymdhms(now - duration * limit * 1000)
request['ToDate'] = self.ymdhms(now)
else:
request['FromDate'] = self.ymdhms(since)
if limit is None:
request['ToDate'] = self.ymdhms(now)
else:
request['ToDate'] = self.ymdhms(self.sum(since, duration * limit * 1000))
response = self.publicGetGetTickerHistory(self.extend(request, params))
#
# [
# [1607299260000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299200000],
# [1607299320000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299260000],
# [1607299380000,19069.32,19069.32,19069.32,19069.32,0,19069.31,19069.32,8,1607299320000],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# [
# 6913253, # 0 TradeId
# 8, # 1 ProductPairCode
# 0.03340802, # 2 Quantity
# 19116.08, # 3 Price
# 2543425077, # 4 Order1
# 2543425482, # 5 Order2
# 1606935922416, # 6 Tradetime
# 0, # 7 Direction
# 1, # 8 TakerSide
# 0, # 9 BlockTrade
# 0, # 10 Either Order1ClientId or Order2ClientId
# ]
#
# fetchMyTrades(private)
#
# {
# "OMSId":1,
# "ExecutionId":16916567,
# "TradeId":14476351,
# "OrderId":2543565231,
# "AccountId":449,
# "AccountName":"igor@ccxt.trade",
# "SubAccountId":0,
# "ClientOrderId":0,
# "InstrumentId":8,
# "Side":"Sell",
# "OrderType":"Market",
# "Quantity":0.1230000000000000000000000000,
# "RemainingQuantity":0.0000000000000000000000000000,
# "Price":19069.310000000000000000000000,
# "Value":2345.5251300000000000000000000,
# "CounterParty":"7",
# "OrderTradeRevision":1,
# "Direction":"NoChange",
# "IsBlockTrade":false,
# "Fee":1.1727625650000000000000000000,
# "FeeProductId":8,
# "OrderOriginator":446,
# "UserName":"igor@ccxt.trade",
# "TradeTimeMS":1607565031569,
# "MakerTaker":"Taker",
# "AdapterTradeId":0,
# "InsideBid":19069.310000000000000000000000,
# "InsideBidSize":0.2400950000000000000000000000,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "IsQuote":false,
# "CounterPartyClientUserId":1,
# "NotionalProductId":2,
# "NotionalRate":1.0000000000000000000000000000,
# "NotionalValue":2345.5251300000000000000000000,
# "NotionalHoldAmount":0,
# "TradeTime":637431618315686826
# }
#
# fetchOrderTrades
#
# {
# "Side":"Sell",
# "OrderId":2543565235,
# "Price":18600.000000000000000000000000,
# "Quantity":0.0000000000000000000000000000,
# "DisplayQuantity":0.0000000000000000000000000000,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"FullyExecuted",
# "ReceiveTime":1607585844956,
# "ReceiveTimeTicks":637431826449564182,
# "LastUpdatedTime":1607585844959,
# "LastUpdatedTimeTicks":637431826449593893,
# "OrigQuantity":0.1230000000000000000000000000,
# "QuantityExecuted":0.1230000000000000000000000000,
# "GrossValueExecuted":2345.3947500000000000000000000,
# "ExecutableValue":0.0000000000000000000000000000,
# "AvgPrice":19068.250000000000000000000000,
# "CounterPartyId":0,
# "ChangeReason":"Trade",
# "OrigOrderId":2543565235,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "InsideBid":19068.250000000000000000000000,
# "InsideBidSize":1.3300010000000000000000000000,
# "LastTradePrice":19068.250000000000000000000000,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"0",
# "UseMargin":false,
# "StopPrice":0.0000000000000000000000000000,
# "PegPriceType":"Unknown",
# "PegOffset":0.0000000000000000000000000000,
# "PegLimitOffset":0.0000000000000000000000000000,
# "IpAddress":"x.x.x.x",
# "ClientOrderIdUuid":null,
# "OMSId":1
# }
#
priceString = None
amountString = None
cost = None
timestamp = None
id = None
marketId = None
side = None
orderId = None
takerOrMaker = None
fee = None
type = None
if isinstance(trade, list):
priceString = self.safe_string(trade, 3)
amountString = self.safe_string(trade, 2)
timestamp = self.safe_integer(trade, 6)
id = self.safe_string(trade, 0)
marketId = self.safe_string(trade, 1)
takerSide = self.safe_value(trade, 8)
side = 'sell' if takerSide else 'buy'
orderId = self.safe_string(trade, 4)
else:
timestamp = self.safe_integer_2(trade, 'TradeTimeMS', 'ReceiveTime')
id = self.safe_string(trade, 'TradeId')
orderId = self.safe_string_2(trade, 'OrderId', 'OrigOrderId')
marketId = self.safe_string_2(trade, 'InstrumentId', 'Instrument')
priceString = self.safe_string(trade, 'Price')
amountString = self.safe_string(trade, 'Quantity')
cost = self.safe_number_2(trade, 'Value', 'GrossValueExecuted')
takerOrMaker = self.safe_string_lower(trade, 'MakerTaker')
side = self.safe_string_lower(trade, 'Side')
type = self.safe_string_lower(trade, 'OrderType')
feeCost = self.safe_number(trade, 'Fee')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'FeeProductId')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
if cost is None:
cost = self.parse_number(Precise.string_mul(priceString, amountString))
symbol = self.safe_symbol(marketId, market)
return {
'info': trade,
'id': id,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'order': orderId,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
if limit is not None:
request['Count'] = limit
response = self.publicGetGetLastTrades(self.extend(request, params))
#
# [
# [6913253,8,0.03340802,19116.08,2543425077,2543425482,1606935922416,0,1,0,0],
# [6913254,8,0.01391671,19117.42,2543427510,2543427811,1606935927998,1,1,0,0],
# [6913255,8,0.000006,19107.81,2543430495,2543430793,1606935933881,2,0,0,0],
# ]
#
return self.parse_trades(response, market, since, limit)
def fetch_accounts(self, params={}):
if not self.login:
raise AuthenticationError(self.id + ' fetchAccounts() requires exchange.login email credential')
omsId = self.safe_integer(self.options, 'omsId', 1)
self.check_required_credentials()
request = {
'omsId': omsId,
'UserId': self.uid,
'UserName': self.login,
}
response = self.privateGetGetUserAccounts(self.extend(request, params))
#
# [449] # comma-separated list of account ids
#
result = []
for i in range(0, len(response)):
accountId = self.safe_string(response, i)
result.append({
'id': accountId,
'type': None,
'currency': None,
'info': accountId,
})
return result
def fetch_balance(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetAccountPositions(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "AccountId":449,
# "ProductSymbol":"BTC",
# "ProductId":1,
# "Amount":10.000000000000000000000000000,
# "Hold":0,
# "PendingDeposits":0.0000000000000000000000000000,
# "PendingWithdraws":0.0000000000000000000000000000,
# "TotalDayDeposits":10.000000000000000000000000000,
# "TotalMonthDeposits":10.000000000000000000000000000,
# "TotalYearDeposits":10.000000000000000000000000000,
# "TotalDayDepositNotional":10.000000000000000000000000000,
# "TotalMonthDepositNotional":10.000000000000000000000000000,
# "TotalYearDepositNotional":10.000000000000000000000000000,
# "TotalDayWithdraws":0,
# "TotalMonthWithdraws":0,
# "TotalYearWithdraws":0,
# "TotalDayWithdrawNotional":0,
# "TotalMonthWithdrawNotional":0,
# "TotalYearWithdrawNotional":0,
# "NotionalProductId":8,
# "NotionalProductSymbol":"USDT",
# "NotionalValue":10.000000000000000000000000000,
# "NotionalHoldAmount":0,
# "NotionalRate":1
# },
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'ProductId')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'Amount')
account['used'] = self.safe_string(balance, 'Hold')
result[code] = account
return self.parse_balance(result)
def parse_ledger_entry_type(self, type):
types = {
'Trade': 'trade',
'Deposit': 'transaction',
'Withdraw': 'transaction',
'Transfer': 'transfer',
'OrderHold': 'trade',
'WithdrawHold': 'transaction',
'DepositHold': 'transaction',
'MarginHold': 'trade',
'ManualHold': 'trade',
'ManualEntry': 'trade',
'MarginAcquisition': 'trade',
'MarginRelinquish': 'trade',
'MarginQuoteHold': 'trade',
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """Parse one GetAccountTransactions record into a unified ledger entry."""
        #
        # {
        #     "TransactionId":2663709493,
        #     "ReferenceId":68,
        #     "OMSId":1,
        #     "AccountId":449,
        #     "CR":10.000000000000000000000000000,
        #     "DR":0.0000000000000000000000000000,
        #     "Counterparty":3,
        #     "TransactionType":"Other",
        #     "ReferenceType":"Deposit",
        #     "ProductId":1,
        #     "Balance":10.000000000000000000000000000,
        #     "TimeStamp":1607532331591
        # }
        #
        id = self.safe_string(item, 'TransactionId')
        account = self.safe_string(item, 'AccountId')
        referenceId = self.safe_string(item, 'ReferenceId')
        referenceAccount = self.safe_string(item, 'Counterparty')
        type = self.parse_ledger_entry_type(self.safe_string(item, 'ReferenceType'))
        currencyId = self.safe_string(item, 'ProductId')
        code = self.safe_currency_code(currencyId, currency)
        credit = self.safe_number(item, 'CR')  # credited amount, 0 for debits
        debit = self.safe_number(item, 'DR')  # debited amount, 0 for credits
        amount = None
        direction = None
        # exactly one of CR/DR is expected to be positive
        # NOTE(review): if both fields were missing, credit/debit would be
        # None and the comparisons below would raise on Python 3 - assumed
        # always present in the payload; confirm against the API
        if credit > 0:
            amount = credit
            direction = 'in'
        elif debit > 0:
            amount = debit
            direction = 'out'
        timestamp = self.safe_integer(item, 'TimeStamp')
        before = None
        after = self.safe_number(item, 'Balance')
        # reconstruct the pre-transaction balance from the post-transaction
        # balance; the 'in' case is clamped at zero
        if direction == 'out':
            before = self.sum(after, amount)
        elif direction == 'in':
            before = max(0, after - amount)
        status = 'ok'
        return {
            'info': item,
            'id': id,
            'direction': direction,
            'account': account,
            'referenceId': referenceId,
            'referenceAccount': referenceAccount,
            'type': type,
            'currency': code,
            'amount': amount,
            'before': before,
            'after': after,
            'status': status,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': None,
        }
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetAccountTransactions(self.extend(request, params))
#
# [
# {
# "TransactionId":2663709493,
# "ReferenceId":68,
# "OMSId":1,
# "AccountId":449,
# "CR":10.000000000000000000000000000,
# "DR":0.0000000000000000000000000000,
# "Counterparty":3,
# "TransactionType":"Other",
# "ReferenceType":"Deposit",
# "ProductId":1,
# "Balance":10.000000000000000000000000000,
# "TimeStamp":1607532331591
# },
# ]
#
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_ledger(response, currency, since, limit)
def parse_order_status(self, status):
statuses = {
'Accepted': 'open',
'Rejected': 'rejected',
'Working': 'open',
'Canceled': 'canceled',
'Expired': 'expired',
'FullyExecuted': 'closed',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "status":"Accepted",
# "errormsg":"",
# "OrderId": 2543565231
# }
#
# editOrder
#
# {
# "ReplacementOrderId": 1234,
# "ReplacementClOrdId": 1561,
# "OrigOrderId": 5678,
# "OrigClOrdId": 91011,
# }
#
# fetchOpenOrders, fetchClosedOrders
#
# {
# "Side":"Buy",
# "OrderId":2543565233,
# "Price":19010,
# "Quantity":0.345,
# "DisplayQuantity":0.345,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"Working",
# "ReceiveTime":1607579326003,
# "ReceiveTimeTicks":637431761260028981,
# "LastUpdatedTime":1607579326005,
# "LastUpdatedTimeTicks":637431761260054714,
# "OrigQuantity":0.345,
# "QuantityExecuted":0,
# "GrossValueExecuted":0,
# "ExecutableValue":0,
# "AvgPrice":0,
# "CounterPartyId":0,
# "ChangeReason":"NewInputAccepted",
# "OrigOrderId":2543565233,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.32,
# "InsideAskSize":0.099736,
# "InsideBid":19068.25,
# "InsideBidSize":1.330001,
# "LastTradePrice":19068.25,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"AddedToBook",
# "UseMargin":false,
# "StopPrice":0,
# "PegPriceType":"Unknown",
# "PegOffset":0,
# "PegLimitOffset":0,
# "IpAddress":null,
# "ClientOrderIdUuid":null,
# "OMSId":1
# }
#
id = self.safe_string_2(order, 'ReplacementOrderId', 'OrderId')
timestamp = self.safe_integer(order, 'ReceiveTime')
lastTradeTimestamp = self.safe_integer(order, 'LastUpdatedTime')
marketId = self.safe_string(order, 'Instrument')
symbol = self.safe_symbol(marketId, market)
side = self.safe_string_lower(order, 'Side')
type = self.safe_string_lower(order, 'OrderType')
clientOrderId = self.safe_string_2(order, 'ReplacementClOrdId', 'ClientOrderId')
price = self.safe_number(order, 'Price', 0.0)
price = price if (price > 0.0) else None
amount = self.safe_number(order, 'OrigQuantity')
filled = self.safe_number(order, 'QuantityExecuted')
cost = self.safe_number(order, 'GrossValueExecuted')
average = self.safe_number(order, 'AvgPrice', 0.0)
average = average if (average > 0) else None
stopPrice = self.safe_number(order, 'StopPrice', 0.0)
stopPrice = stopPrice if (stopPrice > 0.0) else None
timeInForce = None
status = self.parse_order_status(self.safe_string(order, 'OrderState'))
fee = None
trades = None
return self.safe_order({
'id': id,
'clientOrderId': clientOrderId,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': None,
'fee': fee,
'trades': trades,
})
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        Create an order via the SendOrder endpoint.

        :param str symbol: unified market symbol
        :param str type: 'market' or 'limit'(mapped to a numeric code via self.options['orderTypes'])
        :param str side: 'buy' or 'sell'
        :param float amount: order quantity in base currency
        :param float price: limit price; per the venue, for a market buy a supplied
            LimitPrice caps how far the order will execute
        :param dict params: extra endpoint parameters; accountId/AccountId and
            clientOrderId/ClientOrderId are extracted and handled here
        :returns dict: a unified order structure parsed from the raw response
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        # default to the first account associated with the credentials
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
        params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
        market = self.market(symbol)
        orderSide = 0 if (side == 'buy') else 1
        request = {
            'InstrumentId': int(market['id']),
            'omsId': omsId,
            'AccountId': accountId,
            'TimeInForce': 1,  # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled,  4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date
            # 'ClientOrderId': clientOrderId,  # defaults to 0
            # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call).
            # If order B executes, then order A created by self call is canceled.
            # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book.
            # See CancelReplaceOrder and ModifyOrder.
            # 'OrderIdOCO': 0,  # The order ID if One Cancels the Other.
            # 'UseDisplayQuantity': False,  # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True
            'Side': orderSide,  # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition
            'Quantity': float(self.amount_to_precision(symbol, amount)),
            'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),  # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade
            # 'PegPriceType': 3,  # 1 Last, 2 Bid, 3 Ask, 4 Midpoint
            # 'LimitPrice': float(self.price_to_precision(symbol, price)),
        }
        # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified
        if price is not None:
            request['LimitPrice'] = float(self.price_to_precision(symbol, price))
        if clientOrderId is not None:
            request['ClientOrderId'] = clientOrderId
        response = self.privatePostSendOrder(self.extend(request, params))
        #
        #     {
        #         "status":"Accepted",
        #         "errormsg":"",
        #         "OrderId": 2543565231
        #     }
        #
        return self.parse_order(response, market)
    def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
        """
        Replace an existing order via the CancelReplaceOrder endpoint.

        The original order identified by `id` is canceled and a replacement is
        created atomically by the venue.

        :param str id: the exchange order id to replace
        :param str symbol: unified market symbol
        :param str type: 'market' or 'limit'(mapped via self.options['orderTypes'])
        :param str side: 'buy' or 'sell'
        :param float amount: new order quantity in base currency
        :param float price: new limit price, optional
        :param dict params: extra endpoint parameters; accountId/AccountId and
            clientOrderId/ClientOrderId are extracted and handled here
        :returns dict: a unified order structure parsed from the raw response
        """
        omsId = self.safe_integer(self.options, 'omsId', 1)
        self.load_markets()
        self.load_accounts()
        # default to the first account associated with the credentials
        defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
        accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
        clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
        params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
        market = self.market(symbol)
        orderSide = 0 if (side == 'buy') else 1
        request = {
            'OrderIdToReplace': int(id),
            'InstrumentId': int(market['id']),
            'omsId': omsId,
            'AccountId': accountId,
            'TimeInForce': 1,  # 0 Unknown, 1 GTC by default, 2 OPG execute as close to opening price as possible, 3 IOC immediate or canceled,  4 FOK fill-or-kill, 5 GTX good 'til executed, 6 GTD good 'til date
            # 'ClientOrderId': clientOrderId,  # defaults to 0
            # If self order is order A, OrderIdOCO refers to the order ID of an order B(which is not the order being created by self call).
            # If order B executes, then order A created by self call is canceled.
            # You can also set up order B to watch order A in the same way, but that may require an update to order B to make it watch self one, which could have implications for priority in the order book.
            # See CancelReplaceOrder and ModifyOrder.
            # 'OrderIdOCO': 0,  # The order ID if One Cancels the Other.
            # 'UseDisplayQuantity': False,  # If you enter a Limit order with a reserve, you must set UseDisplayQuantity to True
            'Side': orderSide,  # 0 Buy, 1 Sell, 2 Short, 3 unknown an error condition
            'Quantity': float(self.amount_to_precision(symbol, amount)),
            'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),  # 0 Unknown, 1 Market, 2 Limit, 3 StopMarket, 4 StopLimit, 5 TrailingStopMarket, 6 TrailingStopLimit, 7 BlockTrade
            # 'PegPriceType': 3,  # 1 Last, 2 Bid, 3 Ask, 4 Midpoint
            # 'LimitPrice': float(self.price_to_precision(symbol, price)),
        }
        # If OrderType=1(Market), Side=0(Buy), and LimitPrice is supplied, the Market order will execute up to the value specified
        if price is not None:
            request['LimitPrice'] = float(self.price_to_precision(symbol, price))
        if clientOrderId is not None:
            request['ClientOrderId'] = clientOrderId
        response = self.privatePostCancelReplaceOrder(self.extend(request, params))
        #
        #     {
        #         "replacementOrderId": 1234,
        #         "replacementClOrdId": 1561,
        #         "origOrderId": 5678,
        #         "origClOrdId": 91011,
        #     }
        #
        return self.parse_order(response, market)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
# 'InstrumentId': market['id'],
# 'TradeId': 123, # If you specify TradeId, GetTradesHistory can return all states for a single trade
# 'OrderId': 456, # If specified, the call returns all trades associated with the order
# 'UserId': integer. The ID of the logged-in user. If not specified, the call returns trades associated with the users belonging to the default account for the logged-in user of self OMS.
# 'StartTimeStamp': long integer. The historical date and time at which to begin the trade report, in POSIX format. If not specified, reverts to the start date of self account on the trading venue.
# 'EndTimeStamp': long integer. Date at which to end the trade report, in POSIX format.
# 'Depth': integer. In self case, the count of trades to return, counting from the StartIndex. If Depth is not specified, returns all trades between BeginTimeStamp and EndTimeStamp, beginning at StartIndex.
# 'StartIndex': 0 # from the most recent trade 0 and moving backwards in time
# 'ExecutionId': 123, # The ID of the individual buy or sell execution. If not specified, returns all.
}
market = None
if symbol is not None:
market = self.market(symbol)
request['InstrumentId'] = market['id']
if since is not None:
request['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetTradesHistory(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "ExecutionId":16916567,
# "TradeId":14476351,
# "OrderId":2543565231,
# "AccountId":449,
# "AccountName":"igor@ccxt.trade",
# "SubAccountId":0,
# "ClientOrderId":0,
# "InstrumentId":8,
# "Side":"Sell",
# "OrderType":"Market",
# "Quantity":0.1230000000000000000000000000,
# "RemainingQuantity":0.0000000000000000000000000000,
# "Price":19069.310000000000000000000000,
# "Value":2345.5251300000000000000000000,
# "CounterParty":"7",
# "OrderTradeRevision":1,
# "Direction":"NoChange",
# "IsBlockTrade":false,
# "Fee":1.1727625650000000000000000000,
# "FeeProductId":8,
# "OrderOriginator":446,
# "UserName":"igor@ccxt.trade",
# "TradeTimeMS":1607565031569,
# "MakerTaker":"Taker",
# "AdapterTradeId":0,
# "InsideBid":19069.310000000000000000000000,
# "InsideBidSize":0.2400950000000000000000000000,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "IsQuote":false,
# "CounterPartyClientUserId":1,
# "NotionalProductId":2,
# "NotionalRate":1.0000000000000000000000000000,
# "NotionalValue":2345.5251300000000000000000000,
# "NotionalHoldAmount":0,
# "TradeTime":637431618315686826
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def cancel_all_orders(self, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if symbol is not None:
market = self.market(symbol)
request['IntrumentId'] = market['id']
response = self.privatePostCancelAllOrders(self.extend(request, params))
#
# {
# "result":true,
# "errormsg":null,
# "errorcode":0,
# "detail":null
# }
#
return response
def cancel_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
# defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
# accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
# params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
# 'AccountId': accountId,
}
clientOrderId = self.safe_integer_2(params, 'clientOrderId', 'ClOrderId')
if clientOrderId is not None:
request['ClOrderId'] = clientOrderId
else:
request['OrderId'] = int(id)
params = self.omit(params, ['clientOrderId', 'ClOrderId'])
response = self.privatePostCancelOrder(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'id': id,
'clientOrderId': clientOrderId,
})
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetOpenOrders(self.extend(request, params))
#
# [
# {
# "Side":"Buy",
# "OrderId":2543565233,
# "Price":19010,
# "Quantity":0.345,
# "DisplayQuantity":0.345,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"Working",
# "ReceiveTime":1607579326003,
# "ReceiveTimeTicks":637431761260028981,
# "LastUpdatedTime":1607579326005,
# "LastUpdatedTimeTicks":637431761260054714,
# "OrigQuantity":0.345,
# "QuantityExecuted":0,
# "GrossValueExecuted":0,
# "ExecutableValue":0,
# "AvgPrice":0,
# "CounterPartyId":0,
# "ChangeReason":"NewInputAccepted",
# "OrigOrderId":2543565233,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.32,
# "InsideAskSize":0.099736,
# "InsideBid":19068.25,
# "InsideBidSize":1.330001,
# "LastTradePrice":19068.25,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"AddedToBook",
# "UseMargin":false,
# "StopPrice":0,
# "PegPriceType":"Unknown",
# "PegOffset":0,
# "PegLimitOffset":0,
# "IpAddress":null,
# "ClientOrderIdUuid":null,
# "OMSId":1
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
# 'ClientOrderId': clientOrderId,
# 'OriginalOrderId': id,
# 'OriginalClientOrderId': long integer,
# 'UserId': integer,
# 'InstrumentId': market['id'],
# 'StartTimestamp': since,
# 'EndTimestamp': self.milliseconds(),
# 'Depth': limit,
# 'StartIndex': 0,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['InstrumentId'] = market['id']
if since is not None:
request['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetOrdersHistory(self.extend(request, params))
#
# [
# {
# "Side":"Buy",
# "OrderId":2543565233,
# "Price":19010.000000000000000000000000,
# "Quantity":0.0000000000000000000000000000,
# "DisplayQuantity":0.3450000000000000000000000000,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"Canceled",
# "ReceiveTime":1607579326003,
# "ReceiveTimeTicks":637431761260028981,
# "LastUpdatedTime":1607580965346,
# "LastUpdatedTimeTicks":637431777653463754,
# "OrigQuantity":0.3450000000000000000000000000,
# "QuantityExecuted":0.0000000000000000000000000000,
# "GrossValueExecuted":0.0000000000000000000000000000,
# "ExecutableValue":0.0000000000000000000000000000,
# "AvgPrice":0.0000000000000000000000000000,
# "CounterPartyId":0,
# "ChangeReason":"UserModified",
# "OrigOrderId":2543565233,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "InsideBid":19068.250000000000000000000000,
# "InsideBidSize":1.3300010000000000000000000000,
# "LastTradePrice":19068.250000000000000000000000,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"UserModified",
# "OrderFlag":"AddedToBook, RemovedFromBook",
# "UseMargin":false,
# "StopPrice":0.0000000000000000000000000000,
# "PegPriceType":"Unknown",
# "PegOffset":0.0000000000000000000000000000,
# "PegLimitOffset":0.0000000000000000000000000000,
# "IpAddress":"x.x.x.x",
# "ClientOrderIdUuid":null,
# "OMSId":1
# },
# ]
#
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
'OrderId': int(id),
}
response = self.privateGetGetOrderStatus(self.extend(request, params))
#
# {
# "Side":"Sell",
# "OrderId":2543565232,
# "Price":0.0000000000000000000000000000,
# "Quantity":0.0000000000000000000000000000,
# "DisplayQuantity":0.0000000000000000000000000000,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Market",
# "ClientOrderId":0,
# "OrderState":"FullyExecuted",
# "ReceiveTime":1607569475591,
# "ReceiveTimeTicks":637431662755912377,
# "LastUpdatedTime":1607569475596,
# "LastUpdatedTimeTicks":637431662755960902,
# "OrigQuantity":1.0000000000000000000000000000,
# "QuantityExecuted":1.0000000000000000000000000000,
# "GrossValueExecuted":19068.270478610000000000000000,
# "ExecutableValue":0.0000000000000000000000000000,
# "AvgPrice":19068.270478610000000000000000,
# "CounterPartyId":0,
# "ChangeReason":"Trade",
# "OrigOrderId":2543565232,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "InsideBid":19069.310000000000000000000000,
# "InsideBidSize":0.2400950000000000000000000000,
# "LastTradePrice":19069.310000000000000000000000,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"0",
# "UseMargin":false,
# "StopPrice":0.0000000000000000000000000000,
# "PegPriceType":"Unknown",
# "PegOffset":0.0000000000000000000000000000,
# "PegLimitOffset":0.0000000000000000000000000000,
# "IpAddress":"x.x.x.x",
# "ClientOrderIdUuid":null,
# "OMSId":1
# }
#
return self.parse_order(response, market)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
# defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
# accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
# params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'OMSId': int(omsId),
# 'AccountId': accountId,
'OrderId': int(id),
}
response = self.privatePostGetOrderHistoryByOrderId(self.extend(request, params))
#
# [
# {
# "Side":"Sell",
# "OrderId":2543565235,
# "Price":18600.000000000000000000000000,
# "Quantity":0.0000000000000000000000000000,
# "DisplayQuantity":0.0000000000000000000000000000,
# "Instrument":8,
# "Account":449,
# "AccountName":"igor@ccxt.trade",
# "OrderType":"Limit",
# "ClientOrderId":0,
# "OrderState":"FullyExecuted",
# "ReceiveTime":1607585844956,
# "ReceiveTimeTicks":637431826449564182,
# "LastUpdatedTime":1607585844959,
# "LastUpdatedTimeTicks":637431826449593893,
# "OrigQuantity":0.1230000000000000000000000000,
# "QuantityExecuted":0.1230000000000000000000000000,
# "GrossValueExecuted":2345.3947500000000000000000000,
# "ExecutableValue":0.0000000000000000000000000000,
# "AvgPrice":19068.250000000000000000000000,
# "CounterPartyId":0,
# "ChangeReason":"Trade",
# "OrigOrderId":2543565235,
# "OrigClOrdId":0,
# "EnteredBy":446,
# "UserName":"igor@ccxt.trade",
# "IsQuote":false,
# "InsideAsk":19069.320000000000000000000000,
# "InsideAskSize":0.0997360000000000000000000000,
# "InsideBid":19068.250000000000000000000000,
# "InsideBidSize":1.3300010000000000000000000000,
# "LastTradePrice":19068.250000000000000000000000,
# "RejectReason":"",
# "IsLockedIn":false,
# "CancelReason":"",
# "OrderFlag":"0",
# "UseMargin":false,
# "StopPrice":0.0000000000000000000000000000,
# "PegPriceType":"Unknown",
# "PegOffset":0.0000000000000000000000000000,
# "PegLimitOffset":0.0000000000000000000000000000,
# "IpAddress":"x.x.x.x",
# "ClientOrderIdUuid":null,
# "OMSId":1
# },
# ]
#
grouped = self.group_by(response, 'ChangeReason')
trades = self.safe_value(grouped, 'Trade', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'GenerateNewKey': False,
}
response = self.privateGetGetDepositInfo(self.extend(request, params))
#
# {
# "result":true,
# "errormsg":null,
# "statuscode":0,
# "AssetManagerId":1,
# "AccountId":57922,
# "AssetId":16,
# "ProviderId":23,
# "DepositInfo":"[\"0x8A27564b5c30b91C93B1591821642420F323a210\"]"
# }
#
return self.parse_deposit_address(response, currency)
def parse_deposit_address(self, depositAddress, currency=None):
#
# fetchDepositAddress, createDepositAddress
#
# {
# "result":true,
# "errormsg":null,
# "statuscode":0,
# "AssetManagerId":1,
# "AccountId":449,
# "AssetId":1,
# "ProviderId":1,
# "DepositInfo":"[\"r3e95RwVsLH7yCbnMfyh7SA8FdwUJCB4S2?memo=241452010\"]"
# }
#
depositInfoString = self.safe_string(depositAddress, 'DepositInfo')
depositInfo = json.loads(depositInfoString)
depositInfoLength = len(depositInfo)
lastString = self.safe_string(depositInfo, depositInfoLength - 1)
parts = lastString.split('?memo=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
code = None
if currency is not None:
code = currency['code']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def create_deposit_address(self, code, params={}):
request = {
'GenerateNewKey': True,
}
return self.fetch_deposit_address(code, self.extend(request, params))
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetDeposits(self.extend(request, params))
#
# [
# {
# "OMSId":1,
# "DepositId":44,
# "AccountId":449,
# "SubAccountId":0,
# "ProductId":4,
# "Amount":200.00000000000000000000000000,
# "LastUpdateTimeStamp":637431291261187806,
# "ProductType":"CryptoCurrency",
# "TicketStatus":"FullyProcessed",
# "DepositInfo":"{}",
# "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3",
# "TicketNumber":71,
# "NotionalProductId":13,
# "NotionalValue":200.00000000000000000000000000,
# "FeeAmount":0.0000000000000000000000000000,
# },
# ]
#
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetWithdraws(self.extend(request, params))
#
# [
# {
# "Amount": 0.0,
# "FeeAmount": 0.0,
# "NotionalValue": 0.0,
# "WithdrawId": 0,
# "AssetManagerId": 0,
# "AccountId": 0,
# "AssetId": 0,
# "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}",
# "TemplateFormType": "TetherRPCWithdraw",
# "omsId": 0,
# "TicketStatus": 0,
# "TicketNumber": 0,
# "WithdrawTransactionDetails": "",
# "WithdrawType": "",
# "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3",
# "AssetType": 0,
# "Reaccepted": True,
# "NotionalProductId": 0
# },
# ]
#
return self.parse_transactions(response, currency, since, limit)
def parse_transaction_status_by_type(self, status, type=None):
statusesByType = {
'deposit': {
'New': 'pending', # new ticket awaiting operator review
'AdminProcessing': 'pending', # an admin is looking at the ticket
'Accepted': 'pending', # an admin accepts the ticket
'Rejected': 'rejected', # admin rejects the ticket
'SystemProcessing': 'pending', # automatic processing; an unlikely status for a deposit
'FullyProcessed': 'ok', # the deposit has concluded
'Failed': 'failed', # the deposit has failed for some reason
'Pending': 'pending', # Account Provider has set status to pending
'Confirmed': 'pending', # Account Provider confirms the deposit
'AmlProcessing': 'pending', # anti-money-laundering process underway
'AmlAccepted': 'pending', # anti-money-laundering process successful
'AmlRejected': 'rejected', # deposit did not stand up to anti-money-laundering process
'AmlFailed': 'failed', # anti-money-laundering process failed/did not complete
'LimitsAccepted': 'pending', # deposit meets limits for fiat or crypto asset
'LimitsRejected': 'rejected', # deposit does not meet limits for fiat or crypto asset
},
'withdrawal': {
'New': 'pending', # awaiting operator review
'AdminProcessing': 'pending', # An admin is looking at the ticket
'Accepted': 'pending', # withdrawal will proceed
'Rejected': 'rejected', # admin or automatic rejection
'SystemProcessing': 'pending', # automatic processing underway
'FullyProcessed': 'ok', # the withdrawal has concluded
'Failed': 'failed', # the withdrawal failed for some reason
'Pending': 'pending', # the admin has placed the withdrawal in pending status
'Pending2Fa': 'pending', # user must click 2-factor authentication confirmation link
'AutoAccepted': 'pending', # withdrawal will be automatically processed
'Delayed': 'pending', # waiting for funds to be allocated for the withdrawal
'UserCanceled': 'canceled', # withdraw canceled by user or Superuser
'AdminCanceled': 'canceled', # withdraw canceled by Superuser
'AmlProcessing': 'pending', # anti-money-laundering process underway
'AmlAccepted': 'pending', # anti-money-laundering process complete
'AmlRejected': 'rejected', # withdrawal did not stand up to anti-money-laundering process
'AmlFailed': 'failed', # withdrawal did not complete anti-money-laundering process
'LimitsAccepted': 'pending', # withdrawal meets limits for fiat or crypto asset
'LimitsRejected': 'rejected', # withdrawal does not meet limits for fiat or crypto asset
'Submitted': 'pending', # withdrawal sent to Account Provider; awaiting blockchain confirmation
'Confirmed': 'pending', # Account Provider confirms that withdrawal is on the blockchain
'ManuallyConfirmed': 'pending', # admin has sent withdrawal via wallet or admin function directly; marks ticket as FullyProcessed; debits account
'Confirmed2Fa': 'pending', # user has confirmed withdraw via 2-factor authentication.
},
}
statuses = self.safe_value(statusesByType, type, {})
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "OMSId":1,
# "DepositId":44,
# "AccountId":449,
# "SubAccountId":0,
# "ProductId":4,
# "Amount":200.00000000000000000000000000,
# "LastUpdateTimeStamp":637431291261187806,
# "ProductType":"CryptoCurrency",
# "TicketStatus":"FullyProcessed",
# "DepositInfo":"{}",
# "DepositCode":"ab0e23d5-a9ce-4d94-865f-9ab464fb1de3",
# "TicketNumber":71,
# "NotionalProductId":13,
# "NotionalValue":200.00000000000000000000000000,
# "FeeAmount":0.0000000000000000000000000000,
# }
#
# fetchWithdrawals
#
# {
# "Amount": 0.0,
# "FeeAmount": 0.0,
# "NotionalValue": 0.0,
# "WithdrawId": 0,
# "AssetManagerId": 0,
# "AccountId": 0,
# "AssetId": 0,
# "TemplateForm": "{\"TemplateType\": \"TetherRPCWithdraw\",\"Comment\": \"TestWithdraw\",\"ExternalAddress\": \"ms6C3pKAAr8gRCcnVebs8VRkVrjcvqNYv3\"}",
# "TemplateFormType": "TetherRPCWithdraw",
# "omsId": 0,
# "TicketStatus": 0,
# "TicketNumber": 0,
# "WithdrawTransactionDetails": "",
# "WithdrawType": "",
# "WithdrawCode": "490b4fa3-53fc-44f4-bd29-7e16be86fba3",
# "AssetType": 0,
# "Reaccepted": True,
# "NotionalProductId": 0
# }
#
id = self.safe_string(transaction, 'DepositId')
txid = None
currencyId = self.safe_string(transaction, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
type = None
if 'DepositId' in transaction:
type = 'deposit'
elif 'WithdrawId' in transaction:
type = 'withdrawal'
templateFormString = self.safe_string(transaction, 'TemplateForm')
address = None
updated = self.safe_integer(transaction, 'LastUpdateTimeStamp')
if templateFormString is not None:
templateForm = json.loads(templateFormString)
address = self.safe_string(templateForm, 'ExternalAddress')
txid = self.safe_string(templateForm, 'TxId')
timestamp = self.safe_integer(templateForm, 'TimeSubmitted')
updated = self.safe_integer(templateForm, 'LastUpdated', updated)
addressTo = address
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'TicketStatus'), type)
amount = self.safe_number(transaction, 'Amount')
feeCost = self.safe_number(transaction, 'FeeAmount')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': addressTo,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
# self method required login, password and twofa key
sessionToken = self.safe_string(self.options, 'sessionToken')
if sessionToken is None:
raise AuthenticationError(self.id + ' call signIn() method to obtain a session token')
self.check_address(address)
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
withdrawTemplateTypesRequest = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
}
withdrawTemplateTypesResponse = self.privateGetGetWithdrawTemplateTypes(withdrawTemplateTypesRequest)
#
# {
# result: True,
# errormsg: null,
# statuscode: "0",
# TemplateTypes: [
# {AccountProviderId: "14", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: "BitgoRPC-BTC"},
# {AccountProviderId: "20", TemplateName: "ToExternalBitcoinAddress", AccountProviderName: "TrezorBTC"},
# {AccountProviderId: "31", TemplateName: "BTC", AccountProviderName: "BTC Fireblocks 1"}
# ]
# }
#
templateTypes = self.safe_value(withdrawTemplateTypesResponse, 'TemplateTypes', [])
firstTemplateType = self.safe_value(templateTypes, 0)
if firstTemplateType is None:
raise ExchangeError(self.id + ' withdraw() could not find a withdraw template type for ' + currency['code'])
templateName = self.safe_string(firstTemplateType, 'TemplateName')
withdrawTemplateRequest = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'TemplateType': templateName,
'AccountProviderId': firstTemplateType['AccountProviderId'],
}
withdrawTemplateResponse = self.privateGetGetWithdrawTemplate(withdrawTemplateRequest)
#
# {
# result: True,
# errormsg: null,
# statuscode: "0",
# Template: "{\"TemplateType\":\"ToExternalBitcoinAddress\",\"Comment\":\"\",\"ExternalAddress\":\"\"}"
# }
#
template = self.safe_string(withdrawTemplateResponse, 'Template')
if template is None:
raise ExchangeError(self.id + ' withdraw() could not find a withdraw template for ' + currency['code'])
withdrawTemplate = json.loads(template)
withdrawTemplate['ExternalAddress'] = address
if tag is not None:
if 'Memo' in withdrawTemplate:
withdrawTemplate['Memo'] = tag
withdrawPayload = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'TemplateForm': self.json(withdrawTemplate),
'TemplateType': templateName,
}
withdrawRequest = {
'TfaType': 'Google',
'TFaCode': self.oath(),
'Payload': self.json(withdrawPayload),
}
response = self.privatePostCreateWithdrawTicket(self.deep_extend(withdrawRequest, params))
return {
'info': response,
'id': self.safe_string(response, 'Id'),
}
    def nonce(self):
        # millisecond timestamp, used as the request nonce by sign()
        return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if path == 'Authenticate':
auth = self.login + ':' + self.password
auth64 = self.string_to_base64(auth)
headers = {
'Authorization': 'Basic ' + self.decode(auth64),
# 'Content-Type': 'application/json',
}
elif path == 'Authenticate2FA':
pending2faToken = self.safe_string(self.options, 'pending2faToken')
if pending2faToken is not None:
headers = {
'Pending2FaToken': pending2faToken,
# 'Content-Type': 'application/json',
}
query = self.omit(query, 'pending2faToken')
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
sessionToken = self.safe_string(self.options, 'sessionToken')
if sessionToken is None:
nonce = str(self.nonce())
auth = nonce + self.uid + self.apiKey
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers = {
'Nonce': nonce,
'APIKey': self.apiKey,
'Signature': signature,
'UserId': self.uid,
}
else:
headers = {
'APToken': sessionToken,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
else:
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 404:
raise AuthenticationError(self.id + ' ' + body)
if response is None:
return
#
# {"status":"Rejected","errormsg":"Not_Enough_Funds","errorcode":101}
# {"result":false,"errormsg":"Server Error","errorcode":102,"detail":null}
#
message = self.safe_string(response, 'errormsg')
if (message is not None) and (message != ''):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback)
| 44.503601 | 218 | 0.51199 |
ge import Exchange
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class ndax(Exchange):
    def describe(self):
        """Return the static exchange description merged over the base-class defaults.

        Declares capabilities('has'), supported timeframes, endpoint URLs, the
        REST API route lists, fee schedule, required credentials, precision
        mode, error mappings and exchange-specific options.
        """
        return self.deep_extend(super(ndax, self).describe(), {
            'id': 'ndax',
            'name': 'NDAX',
            'countries': ['US'],
            'rateLimit': 1000,
            'pro': True,
            # unified-API capabilities implemented by this class
            'has': {
                'cancelAllOrders': True,
                'cancelOrder': True,
                'createDepositAddress': True,
                'createOrder': True,
                'editOrder': True,
                'fetchAccounts': True,
                'fetchBalance': True,
                'fetchCurrencies': True,
                'fetchDepositAddress': True,
                'fetchDeposits': True,
                'fetchLedger': True,
                'fetchMarkets': True,
                'fetchMyTrades': True,
                'fetchOHLCV': True,
                'fetchOpenOrders': True,
                'fetchOrder': True,
                'fetchOrderBook': True,
                'fetchOrderTrades': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTrades': True,
                'fetchWithdrawals': True,
                'signIn': True,
            },
            # timeframe values are candle durations in seconds, passed as strings
            'timeframes': {
                '1m': '60',
                '5m': '300',
                '15m': '900',
                '30m': '1800',
                '1h': '3600',
                '2h': '7200',
                '4h': '14400',
                '6h': '21600',
                '12h': '43200',
                '1d': '86400',
                '1w': '604800',
                '1M': '2419200',
                '4M': '9676800',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/108623144-67a3ef00-744e-11eb-8140-75c6b851e945.jpg',
                'test': {
                    'public': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                    'private': 'https://ndaxmarginstaging.cdnhop.net:8443/AP',
                },
                'api': {
                    'public': 'https://api.ndax.io:8443/AP',
                    'private': 'https://api.ndax.io:8443/AP',
                },
                'www': 'https://ndax.io',
                'doc': [
                    'https://apidoc.ndax.io/',
                ],
                'fees': 'https://ndax.io/fees',
                'referral': 'https://one.ndax.io/bfQiSL',
            },
            # REST routes; method names like publicGetGetProducts are generated from these
            'api': {
                'public': {
                    'get': [
                        'Activate2FA',
                        'Authenticate2FA',
                        'AuthenticateUser',
                        'GetL2Snapshot',
                        'GetLevel1',
                        'GetValidate2FARequiredEndpoints',
                        'LogOut',
                        'GetTickerHistory',
                        'GetProduct',
                        'GetProducts',
                        'GetInstrument',
                        'GetInstruments',
                        'Ping',
                        'trades',
                        'GetLastTrades',
                        'SubscribeLevel1',
                        'SubscribeLevel2',
                        'SubscribeTicker',
                        'SubscribeTrades',
                        'SubscribeBlockTrades',
                        'UnsubscribeBlockTrades',
                        'UnsubscribeLevel1',
                        'UnsubscribeLevel2',
                        'UnsubscribeTicker',
                        'UnsubscribeTrades',
                        'Authenticate',
                    ],
                },
                'private': {
                    'get': [
                        'GetUserAccountInfos',
                        'GetUserAccounts',
                        'GetUserAffiliateCount',
                        'GetUserAffiliateTag',
                        'GetUserConfig',
                        'GetAllUnredactedUserConfigsForUser',
                        'GetUnredactedUserConfigByKey',
                        'GetUserDevices',
                        'GetUserReportTickets',
                        'GetUserReportWriterResultRecords',
                        'GetAccountInfo',
                        'GetAccountPositions',
                        'GetAllAccountConfigs',
                        'GetTreasuryProductsForAccount',
                        'GetAccountTrades',
                        'GetAccountTransactions',
                        'GetOpenTradeReports',
                        'GetAllOpenTradeReports',
                        'GetTradesHistory',
                        'GetOpenOrders',
                        'GetOpenQuotes',
                        'GetOrderFee',
                        'GetOrderHistory',
                        'GetOrdersHistory',
                        'GetOrderStatus',
                        'GetOmsFeeTiers',
                        'GetAccountDepositTransactions',
                        'GetAccountWithdrawTransactions',
                        'GetAllDepositRequestInfoTemplates',
                        'GetDepositInfo',
                        'GetDepositRequestInfoTemplate',
                        'GetDeposits',
                        'GetDepositTicket',
                        'GetDepositTickets',
                        'GetOMSWithdrawFees',
                        'GetWithdrawFee',
                        'GetWithdraws',
                        'GetWithdrawTemplate',
                        'GetWithdrawTemplateTypes',
                        'GetWithdrawTicket',
                        'GetWithdrawTickets',
                    ],
                    'post': [
                        'AddUserAffiliateTag',
                        'CancelUserReport',
                        'RegisterNewDevice',
                        'SubscribeAccountEvents',
                        'UpdateUserAffiliateTag',
                        'GenerateTradeActivityReport',
                        'GenerateTransactionActivityReport',
                        'GenerateTreasuryActivityReport',
                        'ScheduleTradeActivityReport',
                        'ScheduleTransactionActivityReport',
                        'ScheduleTreasuryActivityReport',
                        'CancelAllOrders',
                        'CancelOrder',
                        'CancelQuote',
                        'CancelReplaceOrder',
                        'CreateQuote',
                        'ModifyOrder',
                        'SendOrder',
                        'SubmitBlockTrade',
                        'UpdateQuote',
                        'CancelWithdraw',
                        'CreateDepositTicket',
                        'CreateWithdrawTicket',
                        'SubmitDepositTicketComment',
                        'SubmitWithdrawTicketComment',
                        'GetOrderHistoryByOrderId',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'maker': 0.2 / 100,
                    'taker': 0.25 / 100,
                },
            },
            'requiredCredentials': {
                'apiKey': True,
                'secret': True,
                'uid': True,
            },
            'precisionMode': TICK_SIZE,
            # error-message to exception mappings used by handle_errors()
            'exceptions': {
                'exact': {
                    'Not_Enough_Funds': InsufficientFunds,
                    'Server Error': ExchangeError,
                    'Resource Not Found': OrderNotFound,
                },
                'broad': {
                    'Invalid InstrumentId': BadSymbol,
                    'This endpoint requires 2FACode along with the payload': AuthenticationError,
                },
            },
            'options': {
                'omsId': 1,
                # numeric OrderType codes expected by the SendOrder endpoint
                'orderTypes': {
                    'Market': 1,
                    'Limit': 2,
                    'StopMarket': 3,
                    'StopLimit': 4,
                    'TrailingStopMarket': 5,
                    'TrailingStopLimit': 6,
                    'BlockTrade': 7,
                },
            },
        })
    def sign_in(self, params={}):
        """Authenticate and store a session token in self.options.

        Flow: call Authenticate with basic credentials; if the exchange answers
        with a 'SessionToken' we are done, otherwise a 'Pending2FaToken' means a
        second call(Authenticate2FA) with a TOTP code is required.

        :raises AuthenticationError: when login, password or twofa is missing
        :returns: the raw authentication response
        """
        self.check_required_credentials()
        if self.login is None or self.password is None or self.twofa is None:
            raise AuthenticationError(self.id + ' signIn() requires exchange.login, exchange.password and exchange.twofa credentials')
        request = {
            'grant_type': 'client_credentials',
        }
        response = self.publicGetAuthenticate(self.extend(request, params))
        sessionToken = self.safe_string(response, 'SessionToken')
        if sessionToken is not None:
            # direct success - cache the token for sign() to use
            self.options['sessionToken'] = sessionToken
            return response
        pending2faToken = self.safe_string(response, 'Pending2FaToken')
        if pending2faToken is not None:
            # 2FA round-trip: sign() sends the pending token as a header
            self.options['pending2faToken'] = pending2faToken
            request = {
                'Code': self.oath(),
            }
            response = self.publicGetAuthenticate2FA(self.extend(request, params))
            sessionToken = self.safe_string(response, 'SessionToken')
            self.options['sessionToken'] = sessionToken
            return response
        return response
def fetch_currencies(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetProducts(self.extend(request, params))
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'ProductId')
name = self.safe_string(currency, 'ProductFullName')
type = self.safe_string(currency, 'ProductType')
code = self.safe_currency_code(self.safe_string(currency, 'Product'))
precision = self.safe_number(currency, 'TickSize')
isDisabled = self.safe_value(currency, 'IsDisabled')
active = not isDisabled
result[code] = {
'id': id,
'name': name,
'code': code,
'type': type,
'precision': precision,
'info': currency,
'active': active,
'fee': None,
'limits': self.limits,
}
return result
def fetch_markets(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
request = {
'omsId': omsId,
}
response = self.publicGetGetInstruments(self.extend(request, params))
result = []
for i in range(0, len(response)):
market = response[i]
id = self.safe_string(market, 'InstrumentId')
baseId = self.safe_string(market, 'Product1')
quoteId = self.safe_string(market, 'Product2')
base = self.safe_currency_code(self.safe_string(market, 'Product1Symbol'))
quote = self.safe_currency_code(self.safe_string(market, 'Product2Symbol'))
symbol = base + '/' + quote
precision = {
'amount': self.safe_number(market, 'QuantityIncrement'),
'price': self.safe_number(market, 'PriceIncrement'),
}
sessionStatus = self.safe_string(market, 'SessionStatus')
isDisable = self.safe_value(market, 'IsDisable')
sessionRunning = (sessionStatus == 'Running')
active = True if (sessionRunning and not isDisable) else False
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_number(market, 'MinimumQuantity'),
'max': None,
},
'price': {
'min': self.safe_number(market, 'MinimumPrice'),
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='bids', asksKey='asks', priceKey=6, amountKey=8):
nonce = None
result = {
'symbol': symbol,
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(orderbook)):
level = orderbook[i]
if timestamp is None:
timestamp = self.safe_integer(level, 2)
else:
newTimestamp = self.safe_integer(level, 2)
timestamp = max(timestamp, newTimestamp)
if nonce is None:
nonce = self.safe_integer(level, 0)
else:
newNonce = self.safe_integer(level, 0)
nonce = max(nonce, newNonce)
bidask = self.parse_bid_ask(level, priceKey, amountKey)
levelSide = self.safe_integer(level, 9)
side = asksKey if levelSide else bidsKey
result[side].append(bidask)
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
result['nonce'] = nonce
return result
def fetch_order_book(self, symbol, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
limit = 100 if (limit is None) else limit
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Depth': limit,
}
response = self.publicGetGetL2Snapshot(self.extend(request, params))
bol)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'TimeStamp')
marketId = self.safe_string(ticker, 'InstrumentId')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'LastTradedPx')
percentage = self.safe_number(ticker, 'Rolling24HrPxChangePercent')
change = self.safe_number(ticker, 'Rolling24HrPxChange')
open = self.safe_number(ticker, 'SessionOpen')
average = None
if (last is not None) and (change is not None):
average = self.sum(last, open) / 2
baseVolume = self.safe_number(ticker, 'Rolling24HrVolume')
quoteVolume = self.safe_number(ticker, 'Rolling24HrNotional')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'SessionHigh'),
'low': self.safe_number(ticker, 'SessionLow'),
'bid': self.safe_number(ticker, 'BestBid'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'BestOffer'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_ticker(self, symbol, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
response = self.publicGetGetLevel1(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_ohlcv(self, ohlcv, market=None):
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
'Interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.milliseconds()
if since is None:
if limit is not None:
request['FromDate'] = self.ymdhms(now - duration * limit * 1000)
request['ToDate'] = self.ymdhms(now)
else:
request['FromDate'] = self.ymdhms(since)
if limit is None:
request['ToDate'] = self.ymdhms(now)
else:
request['ToDate'] = self.ymdhms(self.sum(since, duration * limit * 1000))
response = self.publicGetGetTickerHistory(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
    def parse_trade(self, trade, market=None):
        """Parse a trade into the unified trade structure.

        Handles two wire formats: public trades arrive as arrays
        (indices: 0 id, 1 instrument, 2 amount, 3 price, 4 order id,
        6 timestamp, 8 taker-side flag), private trades as dicts.
        """
        priceString = None
        amountString = None
        cost = None
        timestamp = None
        id = None
        marketId = None
        side = None
        orderId = None
        takerOrMaker = None
        fee = None
        type = None
        if isinstance(trade, list):
            # public trade: positional array format
            priceString = self.safe_string(trade, 3)
            amountString = self.safe_string(trade, 2)
            timestamp = self.safe_integer(trade, 6)
            id = self.safe_string(trade, 0)
            marketId = self.safe_string(trade, 1)
            takerSide = self.safe_value(trade, 8)
            # a truthy flag at index 8 marks the taker as the seller
            side = 'sell' if takerSide else 'buy'
            orderId = self.safe_string(trade, 4)
        else:
            # private trade: keyed dict format
            timestamp = self.safe_integer_2(trade, 'TradeTimeMS', 'ReceiveTime')
            id = self.safe_string(trade, 'TradeId')
            orderId = self.safe_string_2(trade, 'OrderId', 'OrigOrderId')
            marketId = self.safe_string_2(trade, 'InstrumentId', 'Instrument')
            priceString = self.safe_string(trade, 'Price')
            amountString = self.safe_string(trade, 'Quantity')
            cost = self.safe_number_2(trade, 'Value', 'GrossValueExecuted')
            takerOrMaker = self.safe_string_lower(trade, 'MakerTaker')
            side = self.safe_string_lower(trade, 'Side')
            type = self.safe_string_lower(trade, 'OrderType')
            feeCost = self.safe_number(trade, 'Fee')
            if feeCost is not None:
                feeCurrencyId = self.safe_string(trade, 'FeeProductId')
                feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
                fee = {
                    'cost': feeCost,
                    'currency': feeCurrencyCode,
                }
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        if cost is None:
            # derive cost with string-precision arithmetic when not reported
            cost = self.parse_number(Precise.string_mul(priceString, amountString))
        symbol = self.safe_symbol(marketId, market)
        return {
            'info': trade,
            'id': id,
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'order': orderId,
            'type': type,
            'side': side,
            'takerOrMaker': takerOrMaker,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
market = self.market(symbol)
request = {
'omsId': omsId,
'InstrumentId': market['id'],
}
if limit is not None:
request['Count'] = limit
response = self.publicGetGetLastTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_accounts(self, params={}):
if not self.login:
raise AuthenticationError(self.id + ' fetchAccounts() requires exchange.login email credential')
omsId = self.safe_integer(self.options, 'omsId', 1)
self.check_required_credentials()
request = {
'omsId': omsId,
'UserId': self.uid,
'UserName': self.login,
}
response = self.privateGetGetUserAccounts(self.extend(request, params))
for i in range(0, len(response)):
accountId = self.safe_string(response, i)
result.append({
'id': accountId,
'type': None,
'currency': None,
'info': accountId,
})
return result
def fetch_balance(self, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetAccountPositions(self.extend(request, params))
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'ProductId')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'Amount')
account['used'] = self.safe_string(balance, 'Hold')
result[code] = account
return self.parse_balance(result)
def parse_ledger_entry_type(self, type):
types = {
'Trade': 'trade',
'Deposit': 'transaction',
'Withdraw': 'transaction',
'Transfer': 'transfer',
'OrderHold': 'trade',
'WithdrawHold': 'transaction',
'DepositHold': 'transaction',
'MarginHold': 'trade',
'ManualHold': 'trade',
'ManualEntry': 'trade',
'MarginAcquisition': 'trade',
'MarginRelinquish': 'trade',
'MarginQuoteHold': 'trade',
}
return self.safe_string(types, type, type)
    def parse_ledger_entry(self, item, currency=None):
        """Parse one account transaction into a unified ledger entry.

        The entry carries either a credit('CR') or a debit('DR') amount; the
        direction and pre-transaction balance are derived from whichever is
        positive.  NOTE(review): `credit > 0` assumes 'CR'/'DR' are always
        present in the payload - `safe_number` would return None otherwise;
        confirm against the API.
        """
        id = self.safe_string(item, 'TransactionId')
        account = self.safe_string(item, 'AccountId')
        referenceId = self.safe_string(item, 'ReferenceId')
        referenceAccount = self.safe_string(item, 'Counterparty')
        type = self.parse_ledger_entry_type(self.safe_string(item, 'ReferenceType'))
        currencyId = self.safe_string(item, 'ProductId')
        code = self.safe_currency_code(currencyId, currency)
        credit = self.safe_number(item, 'CR')
        debit = self.safe_number(item, 'DR')
        amount = None
        direction = None
        if credit > 0:
            amount = credit
            direction = 'in'
        elif debit > 0:
            amount = debit
            direction = 'out'
        timestamp = self.safe_integer(item, 'TimeStamp')
        before = None
        # 'Balance' is the post-transaction balance; reconstruct the prior one
        after = self.safe_number(item, 'Balance')
        if direction == 'out':
            before = self.sum(after, amount)
        elif direction == 'in':
            before = max(0, after - amount)
        status = 'ok'
        return {
            'info': item,
            'id': id,
            'direction': direction,
            'account': account,
            'referenceId': referenceId,
            'referenceAccount': referenceAccount,
            'type': type,
            'currency': code,
            'amount': amount,
            'before': before,
            'after': after,
            'status': status,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'fee': None,
        }
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetAccountTransactions(self.extend(request, params))
currency = None
if code is not None:
currency = self.currency(code)
return self.parse_ledger(response, currency, since, limit)
def parse_order_status(self, status):
statuses = {
'Accepted': 'open',
'Rejected': 'rejected',
'Working': 'open',
'Canceled': 'canceled',
'Expired': 'expired',
'FullyExecuted': 'closed',
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Parse an exchange order payload into a unified order structure.

        The API reports 0.0 where a price/average/stop-price does not apply,
        so those fields are normalized to None.
        """
        # replacement ids take precedence over the original ids
        id = self.safe_string_2(order, 'ReplacementOrderId', 'OrderId')
        timestamp = self.safe_integer(order, 'ReceiveTime')
        lastTradeTimestamp = self.safe_integer(order, 'LastUpdatedTime')
        marketId = self.safe_string(order, 'Instrument')
        symbol = self.safe_symbol(marketId, market)
        side = self.safe_string_lower(order, 'Side')
        type = self.safe_string_lower(order, 'OrderType')
        clientOrderId = self.safe_string_2(order, 'ReplacementClOrdId', 'ClientOrderId')
        price = self.safe_number(order, 'Price', 0.0)
        price = price if (price > 0.0) else None
        amount = self.safe_number(order, 'OrigQuantity')
        filled = self.safe_number(order, 'QuantityExecuted')
        cost = self.safe_number(order, 'GrossValueExecuted')
        average = self.safe_number(order, 'AvgPrice', 0.0)
        average = average if (average > 0) else None
        stopPrice = self.safe_number(order, 'StopPrice', 0.0)
        stopPrice = stopPrice if (stopPrice > 0.0) else None
        timeInForce = None
        status = self.parse_order_status(self.safe_string(order, 'OrderState'))
        fee = None
        trades = None
        return self.safe_order({
            'id': id,
            'clientOrderId': clientOrderId,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'status': status,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': None,
            'side': side,
            'price': price,
            'stopPrice': stopPrice,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'average': average,
            'remaining': None,
            'fee': fee,
            'trades': trades,
        })
def create_order(self, symbol, type, side, amount, price=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
market = self.market(symbol)
orderSide = 0 if (side == 'buy') else 1
request = {
'InstrumentId': int(market['id']),
'omsId': omsId,
'AccountId': accountId,
'TimeInForce': 1,
'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),
if price is not None:
request['LimitPrice'] = float(self.price_to_precision(symbol, price))
if clientOrderId is not None:
request['ClientOrderId'] = clientOrderId
response = self.privatePostSendOrder(self.extend(request, params))
return self.parse_order(response, market)
def edit_order(self, id, symbol, type, side, amount, price=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
clientOrderId = self.safe_integer_2(params, 'ClientOrderId', 'clientOrderId')
params = self.omit(params, ['accountId', 'AccountId', 'clientOrderId', 'ClientOrderId'])
market = self.market(symbol)
orderSide = 0 if (side == 'buy') else 1
request = {
'OrderIdToReplace': int(id),
'InstrumentId': int(market['id']),
'omsId': omsId,
'AccountId': accountId,
'TimeInForce': 1,
'OrderType': self.safe_integer(self.options['orderTypes'], self.capitalize(type)),
if price is not None:
request['LimitPrice'] = float(self.price_to_precision(symbol, price))
if clientOrderId is not None:
request['ClientOrderId'] = clientOrderId
response = self.privatePostCancelReplaceOrder(self.extend(request, params))
return self.parse_order(response, market)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
quest['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetTradesHistory(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def cancel_all_orders(self, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
if symbol is not None:
market = self.market(symbol)
request['IntrumentId'] = market['id']
response = self.privatePostCancelAllOrders(self.extend(request, params))
return response
def cancel_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
}
clientOrderId = self.safe_integer_2(params, 'clientOrderId', 'ClOrderId')
if clientOrderId is not None:
request['ClOrderId'] = clientOrderId
else:
request['OrderId'] = int(id)
params = self.omit(params, ['clientOrderId', 'ClOrderId'])
response = self.privatePostCancelOrder(self.extend(request, params))
order = self.parse_order(response, market)
return self.extend(order, {
'id': id,
'clientOrderId': clientOrderId,
})
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetOpenOrders(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
request = {
'omsId': omsId,
'AccountId': accountId,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['InstrumentId'] = market['id']
if since is not None:
request['StartTimeStamp'] = int(since / 1000)
if limit is not None:
request['Depth'] = limit
response = self.privateGetGetOrdersHistory(self.extend(request, params))
return self.parse_orders(response, market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'omsId': omsId,
'AccountId': accountId,
'OrderId': int(id),
}
response = self.privateGetGetOrderStatus(self.extend(request, params))
return self.parse_order(response, market)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
market = None
if symbol is not None:
market = self.market(symbol)
request = {
'OMSId': int(omsId),
'OrderId': int(id),
}
response = self.privatePostGetOrderHistoryByOrderId(self.extend(request, params))
grouped = self.group_by(response, 'ChangeReason')
trades = self.safe_value(grouped, 'Trade', [])
return self.parse_trades(trades, market, since, limit)
def fetch_deposit_address(self, code, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'GenerateNewKey': False,
}
response = self.privateGetGetDepositInfo(self.extend(request, params))
return self.parse_deposit_address(response, currency)
def parse_deposit_address(self, depositAddress, currency=None):
depositInfoString = self.safe_string(depositAddress, 'DepositInfo')
depositInfo = json.loads(depositInfoString)
depositInfoLength = len(depositInfo)
lastString = self.safe_string(depositInfo, depositInfoLength - 1)
parts = lastString.split('?memo=')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
code = None
if currency is not None:
code = currency['code']
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def create_deposit_address(self, code, params={}):
request = {
'GenerateNewKey': True,
}
return self.fetch_deposit_address(code, self.extend(request, params))
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetDeposits(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = None
if code is not None:
currency = self.currency(code)
request = {
'omsId': omsId,
'AccountId': accountId,
}
response = self.privateGetGetWithdraws(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit)
    def parse_transaction_status_by_type(self, status, type=None):
        """Map an exchange TicketStatus to a unified transaction status.

        Deposits and withdrawals use different status vocabularies, so the
        lookup table is keyed by transaction type; unknown statuses pass
        through unchanged.
        """
        statusesByType = {
            'deposit': {
                'New': 'pending',
                'AdminProcessing': 'pending',
                'Accepted': 'pending',
                'Rejected': 'rejected',
                'SystemProcessing': 'pending',
                'FullyProcessed': 'ok',
                'Failed': 'failed',
                'Pending': 'pending',
                'Confirmed': 'pending',
                'AmlProcessing': 'pending',
                'AmlAccepted': 'pending',
                'AmlRejected': 'rejected',
                'AmlFailed': 'failed',
                'LimitsAccepted': 'pending',
                'LimitsRejected': 'rejected',
            },
            'withdrawal': {
                'New': 'pending',
                'AdminProcessing': 'pending',
                'Accepted': 'pending',
                'Rejected': 'rejected',
                'SystemProcessing': 'pending',
                'FullyProcessed': 'ok',
                'Failed': 'failed',
                'Pending': 'pending',
                'Pending2Fa': 'pending',
                'AutoAccepted': 'pending',
                'Delayed': 'pending',
                'UserCanceled': 'canceled',
                'AdminCanceled': 'canceled',
                'AmlProcessing': 'pending',
                'AmlAccepted': 'pending',
                'AmlRejected': 'rejected',
                'AmlFailed': 'failed',
                'LimitsAccepted': 'pending',
                'LimitsRejected': 'rejected',
                'Submitted': 'pending',
                'Confirmed': 'pending',
                'ManuallyConfirmed': 'pending',
                'Confirmed2Fa': 'pending',
            },
        }
        statuses = self.safe_value(statusesByType, type, {})
        return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
id = self.safe_string(transaction, 'DepositId')
txid = None
currencyId = self.safe_string(transaction, 'ProductId')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
type = None
if 'DepositId' in transaction:
type = 'deposit'
elif 'WithdrawId' in transaction:
type = 'withdrawal'
templateFormString = self.safe_string(transaction, 'TemplateForm')
address = None
updated = self.safe_integer(transaction, 'LastUpdateTimeStamp')
if templateFormString is not None:
templateForm = json.loads(templateFormString)
address = self.safe_string(templateForm, 'ExternalAddress')
txid = self.safe_string(templateForm, 'TxId')
timestamp = self.safe_integer(templateForm, 'TimeSubmitted')
updated = self.safe_integer(templateForm, 'LastUpdated', updated)
addressTo = address
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'TicketStatus'), type)
amount = self.safe_number(transaction, 'Amount')
feeCost = self.safe_number(transaction, 'FeeAmount')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': addressTo,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
sessionToken = self.safe_string(self.options, 'sessionToken')
if sessionToken is None:
raise AuthenticationError(self.id + ' call signIn() method to obtain a session token')
self.check_address(address)
omsId = self.safe_integer(self.options, 'omsId', 1)
self.load_markets()
self.load_accounts()
defaultAccountId = self.safe_integer_2(self.options, 'accountId', 'AccountId', int(self.accounts[0]['id']))
accountId = self.safe_integer_2(params, 'accountId', 'AccountId', defaultAccountId)
params = self.omit(params, ['accountId', 'AccountId'])
currency = self.currency(code)
withdrawTemplateTypesRequest = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
}
withdrawTemplateTypesResponse = self.privateGetGetWithdrawTemplateTypes(withdrawTemplateTypesRequest)
templateTypes = self.safe_value(withdrawTemplateTypesResponse, 'TemplateTypes', [])
firstTemplateType = self.safe_value(templateTypes, 0)
if firstTemplateType is None:
raise ExchangeError(self.id + ' withdraw() could not find a withdraw template type for ' + currency['code'])
templateName = self.safe_string(firstTemplateType, 'TemplateName')
withdrawTemplateRequest = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'TemplateType': templateName,
'AccountProviderId': firstTemplateType['AccountProviderId'],
}
withdrawTemplateResponse = self.privateGetGetWithdrawTemplate(withdrawTemplateRequest)
template = self.safe_string(withdrawTemplateResponse, 'Template')
if template is None:
raise ExchangeError(self.id + ' withdraw() could not find a withdraw template for ' + currency['code'])
withdrawTemplate = json.loads(template)
withdrawTemplate['ExternalAddress'] = address
if tag is not None:
if 'Memo' in withdrawTemplate:
withdrawTemplate['Memo'] = tag
withdrawPayload = {
'omsId': omsId,
'AccountId': accountId,
'ProductId': currency['id'],
'TemplateForm': self.json(withdrawTemplate),
'TemplateType': templateName,
}
withdrawRequest = {
'TfaType': 'Google',
'TFaCode': self.oath(),
'Payload': self.json(withdrawPayload),
}
response = self.privatePostCreateWithdrawTicket(self.deep_extend(withdrawRequest, params))
return {
'info': response,
'id': self.safe_string(response, 'Id'),
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if path == 'Authenticate':
auth = self.login + ':' + self.password
auth64 = self.string_to_base64(auth)
headers = {
'Authorization': 'Basic ' + self.decode(auth64),
}
elif path == 'Authenticate2FA':
pending2faToken = self.safe_string(self.options, 'pending2faToken')
if pending2faToken is not None:
headers = {
'Pending2FaToken': pending2faToken,
}
query = self.omit(query, 'pending2faToken')
if query:
url += '?' + self.urlencode(query)
elif api == 'private':
self.check_required_credentials()
sessionToken = self.safe_string(self.options, 'sessionToken')
if sessionToken is None:
nonce = str(self.nonce())
auth = nonce + self.uid + self.apiKey
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers = {
'Nonce': nonce,
'APIKey': self.apiKey,
'Signature': signature,
'UserId': self.uid,
}
else:
headers = {
'APToken': sessionToken,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
body = self.json(query)
else:
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if code == 404:
raise AuthenticationError(self.id + ' ' + body)
if response is None:
return
message = self.safe_string(response, 'errormsg')
if (message is not None) and (message != ''):
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback)
| true | true |
f714530361b2ac4050c9c8318d7b66818f4b64c0 | 546 | py | Python | symposion/conference/admin.py | priyanshuraj7829/symposion | 6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f | [
"BSD-3-Clause"
] | 147 | 2015-01-13T11:24:12.000Z | 2022-03-20T20:31:52.000Z | symposion/conference/admin.py | priyanshuraj7829/symposion | 6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f | [
"BSD-3-Clause"
] | 758 | 2015-03-18T13:39:25.000Z | 2022-03-31T13:14:09.000Z | symposion/conference/admin.py | priyanshuraj7829/symposion | 6b522f1f798d53cf0a481ecbac002dc4d0b5ab2f | [
"BSD-3-Clause"
] | 83 | 2015-01-16T04:46:54.000Z | 2020-10-02T07:45:48.000Z | from django.contrib import admin
from symposion.conference.models import Conference, Section
class SectionInline(admin.TabularInline):
model = Section
prepopulated_fields = {"slug": ("name",)}
extra = 1
class ConferenceAdmin(admin.ModelAdmin):
list_display = ("title", "start_date", "end_date")
inlines = [SectionInline, ]
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(
Section,
prepopulated_fields={"slug": ("name",)},
list_display=("name", "conference", "start_date", "end_date")
)
| 23.73913 | 65 | 0.710623 | from django.contrib import admin
from symposion.conference.models import Conference, Section
class SectionInline(admin.TabularInline):
model = Section
prepopulated_fields = {"slug": ("name",)}
extra = 1
class ConferenceAdmin(admin.ModelAdmin):
list_display = ("title", "start_date", "end_date")
inlines = [SectionInline, ]
admin.site.register(Conference, ConferenceAdmin)
admin.site.register(
Section,
prepopulated_fields={"slug": ("name",)},
list_display=("name", "conference", "start_date", "end_date")
)
| true | true |
f714542afbe3ce6340bb9e918a90bcd27446491c | 667 | py | Python | manage.py | mukhametdinovigor/where_to_go | 7374807a6b9bde3b0d2ec03f99f4c73718f7e63c | [
"MIT"
] | null | null | null | manage.py | mukhametdinovigor/where_to_go | 7374807a6b9bde3b0d2ec03f99f4c73718f7e63c | [
"MIT"
] | 2 | 2022-01-13T03:53:40.000Z | 2022-03-12T01:00:24.000Z | manage.py | mukhametdinovigor/where_to_go | 7374807a6b9bde3b0d2ec03f99f4c73718f7e63c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'where_to_go.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29 | 75 | 0.68066 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'where_to_go.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
f714546188fa61ae283a9f70bf3b94a702bd550e | 238 | py | Python | 2nd_minimum_no_list.py | Akshara2820/Python_Folder | 06782f88b45f907a4836e073c51f603bb19f9aa9 | [
"MIT"
] | null | null | null | 2nd_minimum_no_list.py | Akshara2820/Python_Folder | 06782f88b45f907a4836e073c51f603bb19f9aa9 | [
"MIT"
] | null | null | null | 2nd_minimum_no_list.py | Akshara2820/Python_Folder | 06782f88b45f907a4836e073c51f603bb19f9aa9 | [
"MIT"
] | null | null | null | num=[50,40,23,70,56,100,18,]
l=len(num)
a=0
mini1=num[a]
i=0
x=num
while i<l:
if x[i]<=mini1:
mini1=x[i]
i+=1
y=0
mini2=num[y]
a=0
c=num
m=0
while m<l:
if mini2>num[m]>mini1:
mini2=num[m]
m+=1
print(mini2)
| 11.333333 | 28 | 0.546218 | num=[50,40,23,70,56,100,18,]
l=len(num)
a=0
mini1=num[a]
i=0
x=num
while i<l:
if x[i]<=mini1:
mini1=x[i]
i+=1
y=0
mini2=num[y]
a=0
c=num
m=0
while m<l:
if mini2>num[m]>mini1:
mini2=num[m]
m+=1
print(mini2)
| true | true |
f71454f89dd5ee3d75234b5625c7636f0d8d8344 | 99 | py | Python | vecino/__init__.py | sniperkit/snk.fork.vecino | a140171795e68fb7c9e26a72a585bd6aeb4e35a9 | [
"Apache-2.0"
] | null | null | null | vecino/__init__.py | sniperkit/snk.fork.vecino | a140171795e68fb7c9e26a72a585bd6aeb4e35a9 | [
"Apache-2.0"
] | null | null | null | vecino/__init__.py | sniperkit/snk.fork.vecino | a140171795e68fb7c9e26a72a585bd6aeb4e35a9 | [
"Apache-2.0"
] | null | null | null | from vecino.similar_repositories import SimilarRepositories
from vecino.__main__ import initialize
| 33 | 59 | 0.89899 | from vecino.similar_repositories import SimilarRepositories
from vecino.__main__ import initialize
| true | true |
f7145511c4c4a602dc7d916f5a9d093870f5b3f0 | 40,188 | py | Python | research/object_detection/eval_util.py | slomrafgrav/models | daa6c0415e47bdc52ad6434dc2bdb5d8aeb4f7ce | [
"Apache-2.0"
] | 79 | 2019-03-02T17:40:25.000Z | 2021-08-17T13:22:03.000Z | research/object_detection/eval_util.py | ywy0318/models | 91a59c78e8c48e8a1b2fec37143e52dae3f066c1 | [
"Apache-2.0"
] | 8 | 2019-05-14T10:10:50.000Z | 2020-12-20T14:05:29.000Z | research/object_detection/eval_util.py | ywy0318/models | 91a59c78e8c48e8a1b2fec37143e52dae3f066c1 | [
"Apache-2.0"
] | 27 | 2019-02-04T01:45:48.000Z | 2021-03-18T02:39:28.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utility functions for evaluation."""
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
# Shorthand alias for the TF-Slim contrib module used in this file.
slim = tf.contrib.slim
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
}
# Metric key (from EVAL_METRICS_CLASS_DICT) to use when none is specified.
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
  """Write metrics to a summary directory.

  Each metric is emitted as a scalar tensorflow summary (tagged with the
  metric name) at the given global step, and also logged at INFO level.

  Args:
    metrics: A dictionary containing metric names and values.
    global_step: Global step at which the metrics are computed.
    summary_dir: Directory to write tensorflow summaries to.
  """
  tf.logging.info('Writing metrics to tf summary.')
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  # Iterate in sorted order so summaries/logs are emitted deterministically.
  for metric_name in sorted(metrics):
    metric_value = metrics[metric_name]
    scalar_summary = tf.Summary(
        value=[tf.Summary.Value(tag=metric_name, simple_value=metric_value)])
    summary_writer.add_summary(scalar_summary, global_step)
    tf.logging.info('%s: %f', metric_name, metric_value)
  tf.logging.info('Metrics written to tf summary.')
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Visualizes detection results and writes visualizations to image summaries.

  This function visualizes an image with its detected bounding boxes and writes
  to image summaries which can be viewed on tensorboard.  It optionally also
  writes images to a directory. In the case of missing entry in the label map,
  unknown class name in the visualization is shown as "N/A".

  Args:
    result_dict: a dictionary holding groundtruth and detection
      data corresponding to each image being evaluated.  The following keys
      are required:
        'original_image': a numpy array representing the image with shape
          [1, height, width, 3] or [1, height, width, 1]
        'detection_boxes': a numpy array of shape [N, 4]
        'detection_scores': a numpy array of shape [N]
        'detection_classes': a numpy array of shape [N]
      The following keys are optional:
        'groundtruth_boxes': a numpy array of shape [N, 4]
        'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score and for
      display, and we assume that scores are probabilities between 0 and 1.
    tag: tensorboard tag (string) to associate with image.
    global_step: global step at which the visualization are generated.
    categories: a list of dictionaries representing all possible categories.
      Each dict in this list has the following keys:
          'id': (required) an integer id uniquely identifying this category
          'name': (required) string representing category name
            e.g., 'cat', 'dog', 'pizza'
          'supercategory': (optional) string representing the supercategory
            e.g., 'animal', 'vehicle', 'food', etc
    summary_dir: the output directory to which the image summaries are written.
    export_dir: the output directory to which images are written.  If this is
      empty (default), then images are not exported.
    agnostic_mode: boolean (default: False) controlling whether to evaluate in
      class-agnostic mode or not.
    show_groundtruth: boolean (default: False) controlling whether to show
      groundtruth boxes in addition to detected boxes
    groundtruth_box_visualization_color: box color for visualizing groundtruth
      boxes
    min_score_thresh: minimum score threshold for a box to be visualized
    max_num_predictions: maximum number of detections to visualize
    skip_scores: whether to skip score when drawing a single detection
    skip_labels: whether to skip label when drawing a single detection
    keep_image_id_for_visualization_export: whether to keep image identifier in
      filename when exported to export_dir
  Raises:
    ValueError: if result_dict does not contain the expected keys (i.e.,
      'original_image', 'detection_boxes', 'detection_scores',
      'detection_classes')
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  # Fail fast if any of the four required result_dict keys is missing.
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)
  # Drop the leading batch dimension (batch size is assumed to be 1).
  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:  # If one channel image, repeat in RGB.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  # Optional detection outputs; each is None when absent from result_dict.
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
  # Plot groundtruth underneath detections
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    # classes/scores are None so groundtruth boxes are drawn without labels.
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  # Draw detections on top (of groundtruth, if it was drawn above).
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)
  # Optionally export the annotated image to disk as a PNG.
  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)
  # Write the annotated image as an image summary for tensorboard.
  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)
  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None,
                         eval_export_path=None):
  """Evaluates metrics defined in evaluators and returns summaries.

  This function loads the latest checkpoint in checkpoint_dirs and evaluates
  all metrics defined in evaluators. The metrics are processed in batch by the
  batch_processor.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    evaluators: a list of object of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
      To skip an image, it suffices to return an empty dictionary in place of
      result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used --
        a DetectionModel
      will be instantiated directly. Not used if restore_fn is set.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: None, or a function that takes a tf.Session object and correctly
      restores all necessary variables from the correct checkpoint file. If
      None, attempts to restore from the first directory in checkpoint_dirs.
    num_batches: the number of batches to use for evaluation.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
    save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a json file that contains the detection
      results in json format.

  Returns:
    global_step: the count of global steps.
    all_evaluator_metrics: A dictionary containing metric names and values.

  Raises:
    ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
      one element.
    ValueError: if save_graph is True and save_graph_dir is not defined.
  """
  if save_graph and not save_graph_dir:
    raise ValueError('`save_graph_dir` must be defined.')
  sess = tf.Session(master, graph=tf.get_default_graph())
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())
  sess.run(tf.tables_initializer())
  # Restore model weights: either via the caller-supplied restore_fn, or from
  # the latest checkpoint found in the first checkpoint directory.
  if restore_fn:
    restore_fn(sess)
  else:
    if not checkpoint_dirs:
      raise ValueError('`checkpoint_dirs` must have at least one entry.')
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_file)
  if save_graph:
    tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
  counters = {'skipped': 0, 'success': 0}
  aggregate_result_losses_dict = collections.defaultdict(list)
  with tf.contrib.slim.queues.QueueRunners(sess):
    try:
      for batch in range(int(num_batches)):
        if (batch + 1) % 100 == 0:
          tf.logging.info('Running eval ops batch %d/%d', batch + 1,
                          num_batches)
        if not batch_processor:
          try:
            if not losses_dict:
              losses_dict = {}
            result_dict, result_losses_dict = sess.run([tensor_dict,
                                                        losses_dict])
            counters['success'] += 1
          except tf.errors.InvalidArgumentError:
            # Skip images that fail to run (e.g. malformed inputs) rather
            # than aborting the whole evaluation.
            tf.logging.info('Skipping image')
            counters['skipped'] += 1
            result_dict = {}
        else:
          result_dict, result_losses_dict = batch_processor(
              tensor_dict, sess, batch, counters, losses_dict=losses_dict)
        if not result_dict:
          continue
        for key, value in iter(result_losses_dict.items()):
          aggregate_result_losses_dict[key].append(value)
        for evaluator in evaluators:
          # TODO(b/65130867): Use image_id tensor once we fix the input data
          # decoders to return correct image_id.
          # TODO(akuznetsa): result_dict contains batches of images, while
          # add_single_ground_truth_image_info expects a single image. Fix
          if (isinstance(result_dict, dict) and
              fields.InputDataFields.key in result_dict and
              result_dict[fields.InputDataFields.key]):
            image_id = result_dict[fields.InputDataFields.key]
          else:
            # Fall back to the batch index when no image key is available.
            image_id = batch
          evaluator.add_single_ground_truth_image_info(
              image_id=image_id, groundtruth_dict=result_dict)
          evaluator.add_single_detected_image_info(
              image_id=image_id, detections_dict=result_dict)
      tf.logging.info('Running eval batches done.')
    except tf.errors.OutOfRangeError:
      tf.logging.info('Done evaluating -- epoch limit reached')
    finally:
      # When done, ask the threads to stop.
      tf.logging.info('# success: %d', counters['success'])
      tf.logging.info('# skipped: %d', counters['skipped'])
      all_evaluator_metrics = {}
      # Simplified from `eval_export_path and eval_export_path is not None`:
      # a truthy value is necessarily not None, so the second test was
      # redundant.
      if eval_export_path:
        for evaluator in evaluators:
          if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
              isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
            tf.logging.info('Started dumping to json file.')
            evaluator.dump_detections_to_json_file(
                json_output_path=eval_export_path)
            tf.logging.info('Finished dumping to json file.')
      for evaluator in evaluators:
        metrics = evaluator.evaluate()
        evaluator.clear()
        if any(key in all_evaluator_metrics for key in metrics):
          raise ValueError('Metric names between evaluators must not collide.')
        all_evaluator_metrics.update(metrics)
      global_step = tf.train.global_step(sess, tf.train.get_global_step())
      for key, value in iter(aggregate_result_losses_dict.items()):
        all_evaluator_metrics['Losses/' + key] = np.mean(value)
  sess.close()
  return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
                            summary_dir,
                            evaluators,
                            batch_processor=None,
                            checkpoint_dirs=None,
                            variables_to_restore=None,
                            restore_fn=None,
                            num_batches=1,
                            eval_interval_secs=120,
                            max_number_of_evaluations=None,
                            master='',
                            save_graph=False,
                            save_graph_dir='',
                            losses_dict=None,
                            eval_export_path=None):
  """Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.

  This function repeatedly loads a checkpoint and evaluates a desired
  set of tensors (provided by tensor_dict) and hands the resulting numpy
  arrays to a function result_processor which can be used to further
  process/save/visualize the results.

  Args:
    tensor_dict: a dictionary holding tensors representing a batch of detections
      and corresponding groundtruth annotations.
    summary_dir: a directory to write metrics summaries.
    evaluators: a list of object of type DetectionEvaluator to be used for
      evaluation. Note that the metric names produced by different evaluators
      must be unique.
    batch_processor: a function taking three arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      By default, batch_processor is None, which defaults to running:
        return sess.run(tensor_dict)
    checkpoint_dirs: list of directories to load into a DetectionModel or an
      EnsembleModel if restore_fn isn't set. Also used to determine when to run
      next evaluation. Must have at least one element.
    variables_to_restore: None, or a dictionary mapping variable names found in
      a checkpoint to model variables. The dictionary would normally be
      generated by creating a tf.train.ExponentialMovingAverage object and
      calling its variables_to_restore() method. Not used if restore_fn is set.
    restore_fn: a function that takes a tf.Session object and correctly restores
      all necessary variables from the correct checkpoint file.
    num_batches: the number of batches to use for evaluation.
    eval_interval_secs: the number of seconds between each evaluation run.
    max_number_of_evaluations: the max number of iterations of the evaluation.
      If the value is left as None the evaluation continues indefinitely.
    master: the location of the Tensorflow session.
    save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
    save_graph_dir: where to save on disk the Tensorflow graph. If store_graph
      is True this must be non-empty.
    losses_dict: optional dictionary of scalar detection losses.
    eval_export_path: Path for saving a json file that contains the detection
      results in json format.

  Returns:
    metrics: A dictionary containing metric names and values in the latest
      evaluation.

  Raises:
    ValueError: if max_num_of_evaluations is not None or a positive number.
    ValueError: if checkpoint_dirs doesn't have at least one element.
  """
  if max_number_of_evaluations and max_number_of_evaluations <= 0:
    raise ValueError(
        '`number_of_steps` must be either None or a positive number.')
  if not checkpoint_dirs:
    raise ValueError('`checkpoint_dirs` must have at least one entry.')
  # Track the last checkpoint evaluated so the same one is not re-evaluated.
  last_evaluated_model_path = None
  number_of_evaluations = 0
  # Poll for new checkpoints every eval_interval_secs until the evaluation
  # budget (max_number_of_evaluations) is exhausted, or forever if it is None.
  while True:
    start = time.time()
    tf.logging.info('Starting evaluation at ' + time.strftime(
        '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
    if not model_path:
      # No checkpoint has been written yet; wait for the next poll.
      tf.logging.info('No model found in %s. Will try again in %d seconds',
                      checkpoint_dirs[0], eval_interval_secs)
    elif model_path == last_evaluated_model_path:
      tf.logging.info('Found already evaluated checkpoint. Will try again in '
                      '%d seconds', eval_interval_secs)
    else:
      last_evaluated_model_path = model_path
      global_step, metrics = _run_checkpoint_once(
          tensor_dict,
          evaluators,
          batch_processor,
          checkpoint_dirs,
          variables_to_restore,
          restore_fn,
          num_batches,
          master,
          save_graph,
          save_graph_dir,
          losses_dict=losses_dict,
          eval_export_path=eval_export_path)
      write_metrics(metrics, global_step, summary_dir)
      number_of_evaluations += 1
      if (max_number_of_evaluations and
          number_of_evaluations >= max_number_of_evaluations):
        tf.logging.info('Finished evaluation!')
        break
    # Sleep only for the remainder of the interval not already spent
    # evaluating; skip sleeping entirely if the evaluation ran long.
    time_to_next_eval = start + eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)
  return metrics
def _scale_box_to_absolute(args):
  """Converts normalized boxes to absolute (pixel) coordinates.

  Args:
    args: A tuple of (boxes, image_shape) where `boxes` holds normalized box
      coordinates and `image_shape` holds the image height at index 0 and
      width at index 1.

  Returns:
    A tensor of boxes in absolute coordinates.
  """
  normalized_boxes, image_shape = args
  height = image_shape[0]
  width = image_shape[1]
  absolute_boxlist = box_list_ops.to_absolute_coordinates(
      box_list.BoxList(normalized_boxes), height, width)
  return absolute_boxlist.get()
def _resize_detection_masks(args):
  """Reframes per-box detection masks to full-image masks and binarizes them.

  Args:
    args: A tuple of (detection_boxes, detection_masks, image_shape) where
      `image_shape` holds the image height at index 0 and width at index 1.

  Returns:
    A uint8 tensor of full-image binary masks (values 0 or 1).
  """
  boxes, masks, image_shape = args
  image_height = image_shape[0]
  image_width = image_shape[1]
  reframed_masks = ops.reframe_box_masks_to_image_masks(
      masks, boxes, image_height, image_width)
  # Threshold soft mask values at 0.5 to obtain binary masks.
  binary_masks = tf.greater(reframed_masks, 0.5)
  return tf.cast(binary_masks, tf.uint8)
def _resize_groundtruth_masks(args):
  """Resizes groundtruth instance masks to the given image shape.

  Args:
    args: A tuple of (mask, image_shape) where `mask` is a stack of instance
      masks and `image_shape` is the target (height, width).

  Returns:
    A uint8 tensor of masks resized with nearest-neighbor interpolation.
  """
  masks, image_shape = args
  # Add a trailing channel dimension, as required by tf.image.resize_images.
  expanded_masks = tf.expand_dims(masks, 3)
  resized_masks = tf.image.resize_images(
      expanded_masks,
      image_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  # Drop the channel dimension again before casting.
  squeezed_masks = tf.squeeze(resized_masks, 3)
  return tf.cast(squeezed_masks, tf.uint8)
def _scale_keypoint_to_absolute(args):
  """Scales normalized keypoints to absolute (pixel) coordinates.

  Args:
    args: A tuple of (keypoints, image_shape) where `image_shape` holds the
      image height at index 0 and width at index 1.

  Returns:
    A tensor of keypoints in absolute coordinates.
  """
  normalized_keypoints, image_shape = args
  height = image_shape[0]
  width = image_shape[1]
  return keypoint_ops.scale(normalized_keypoints, height, width)
def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.

  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.

  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
        normalized coordinates.
      'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
      'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
      'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
      'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
      'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
      'groundtruth_instance_masks': 3D int64 tensor of instance masks
        (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.

  Returns:
    A dictionary with the same fields as `result_dict_for_batched_example`,
    but with the batch dimension squeezed out of every entry except
    'original_image'.
  """
  # Bug fix: max_gt_boxes was previously only assigned inside the
  # `if groundtruth:` branch, so calling with the documented default
  # groundtruth=None raised a NameError at the call below.
  max_gt_boxes = None
  if groundtruth:
    max_gt_boxes = tf.shape(
        groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
    for gt_key in groundtruth:
      # expand groundtruth dict along the batch dimension.
      groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
  for detection_key in detections:
    detections[detection_key] = tf.expand_dims(
        detections[detection_key][0], axis=0)
  batched_output_dict = result_dict_for_batched_example(
      image,
      tf.expand_dims(key, 0),
      detections,
      groundtruth,
      class_agnostic,
      scale_to_absolute,
      max_gt_boxes=max_gt_boxes)
  # These keys are either kept batched ('original_image') or dropped from the
  # single-example result entirely.
  exclude_keys = [
      fields.InputDataFields.original_image,
      fields.DetectionResultFields.num_detections,
      fields.InputDataFields.num_groundtruth_boxes
  ]
  output_dict = {
      fields.InputDataFields.original_image:
          batched_output_dict[fields.InputDataFields.original_image]
  }
  for key in batched_output_dict:
    # remove the batch dimension.
    if key not in exclude_keys:
      output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
  return output_dict
def result_dict_for_batched_example(images,
                                    keys,
                                    detections,
                                    groundtruth=None,
                                    class_agnostic=False,
                                    scale_to_absolute=False,
                                    original_image_spatial_shapes=None,
                                    true_image_shapes=None,
                                    max_gt_boxes=None):
  """Merges all detection and groundtruth information for a batch of examples.

  Note that evaluation tools require classes that are 1-indexed, and so this
  function performs the offset. If `class_agnostic` is True, all output classes
  have label 1.

  Args:
    images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
    keys: A [batch_size] string tensor with image identifier.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of groundtruth items, with fields:
      'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
        of boxes, in normalized coordinates.
      'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
        1-indexed classes.
      'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
        bbox area. (Optional)
      'groundtruth_is_crowd': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
        tensor. (Optional)
      'groundtruth_instance_masks': 4D int64 tensor of instance
        masks (Optional).
    class_agnostic: Boolean indicating whether the detections are class-agnostic
      (i.e. binary). Default False.
    scale_to_absolute: Boolean indicating whether boxes and keypoints should be
      scaled to absolute coordinates. Note that for IoU based evaluations, it
      does not matter whether boxes are expressed in absolute or relative
      coordinates. Default False.
    original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
      used to resize the image. When set to None, the image size is retained.
    true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
      containing the size of the unpadded original_image.
    max_gt_boxes: [batch_size] tensor representing the maximum number of
      groundtruth boxes to pad.

  Returns:
    A dictionary with detection fields ('detection_boxes', 'detection_scores',
    'detection_classes', 'num_detections', optionally 'detection_masks' and
    'detection_keypoints'), input fields ('original_image', 'key',
    'original_image_spatial_shape', 'true_image_shape'), and, when
    `groundtruth` is provided, the groundtruth fields plus
    'num_groundtruth_boxes'.

  Raises:
    ValueError: if original_image_spatial_shapes is not a 2D tensor of shape
      [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D tensor of shape
      [batch_size, 3].
  """
  label_id_offset = 1  # Applying label id offset (b/63711816)
  input_data_fields = fields.InputDataFields
  if original_image_spatial_shapes is None:
    original_image_spatial_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:3], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Bug fix: the original combined these checks with `and`, so the error
    # only fired when *both* conditions held — a rank-2 tensor with a wrong
    # second dimension (or a wrong-rank tensor with a matching dimension)
    # slipped through silently.
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
      raise ValueError(
          '`original_image_spatial_shape` should be a 2D tensor of shape '
          '[batch_size, 2].')
  if true_image_shapes is None:
    true_image_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:4], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Same `and` -> `or` fix as above.
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
      raise ValueError('`true_image_shapes` should be a 2D tensor of '
                       'shape [batch_size, 3].')
  output_dict = {
      input_data_fields.original_image:
          images,
      input_data_fields.key:
          keys,
      input_data_fields.original_image_spatial_shape: (
          original_image_spatial_shapes),
      input_data_fields.true_image_shape:
          true_image_shapes
  }
  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes]
  detection_scores = detections[detection_fields.detection_scores]
  num_detections = tf.to_int32(detections[detection_fields.num_detections])
  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    # Evaluation tools expect 1-indexed classes.
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes]) +
        label_id_offset)
  if scale_to_absolute:
    output_dict[detection_fields.detection_boxes] = (
        shape_utils.static_or_dynamic_map_fn(
            _scale_box_to_absolute,
            elems=[detection_boxes, original_image_spatial_shapes],
            dtype=tf.float32))
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores
  output_dict[detection_fields.num_detections] = num_detections
  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks]
    # TODO(rathodv): This should be done in model's postprocess
    # function ideally.
    output_dict[detection_fields.detection_masks] = (
        shape_utils.static_or_dynamic_map_fn(
            _resize_detection_masks,
            elems=[detection_boxes, detection_masks,
                   original_image_spatial_shapes],
            dtype=tf.uint8))
  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      output_dict[detection_fields.detection_keypoints] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_keypoint_to_absolute,
              elems=[detection_keypoints, original_image_spatial_shapes],
              dtype=tf.float32))
  if groundtruth:
    if max_gt_boxes is None:
      if input_data_fields.num_groundtruth_boxes in groundtruth:
        max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
      else:
        raise ValueError(
            'max_gt_boxes must be provided when processing batched examples.')
    if input_data_fields.groundtruth_instance_masks in groundtruth:
      masks = groundtruth[input_data_fields.groundtruth_instance_masks]
      groundtruth[input_data_fields.groundtruth_instance_masks] = (
          shape_utils.static_or_dynamic_map_fn(
              _resize_groundtruth_masks,
              elems=[masks, original_image_spatial_shapes],
              dtype=tf.uint8))
    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      output_dict[input_data_fields.groundtruth_boxes] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_box_to_absolute,
              elems=[groundtruth_boxes, original_image_spatial_shapes],
              dtype=tf.float32))
    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
    output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
  return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
  """Returns the evaluator class according to eval_config, valid for categories.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name e.g., 'cat', 'dog'.
    evaluator_options: A dictionary of metric names (see
      EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
      keyword arguments. For example:
        evaluator_options = {
          'coco_detection_metrics': {'include_metrics_per_category': True}
        }

  Returns:
    A list of instances of DetectionEvaluator.

  Raises:
    ValueError: if metric is not in the metric class dictionary.
  """
  evaluator_options = evaluator_options or {}
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    # dict.get replaces the original membership-test conditional expression.
    kwargs_dict = evaluator_options.get(eval_metric_fn_key, {})
    evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
        categories,
        **kwargs_dict))
  return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
                                       categories,
                                       eval_dict):
  """Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each of which has the following keys -
      'id': (required) an integer id uniquely identifying this category.
      'name': (required) string representing category name e.g., 'cat', 'dog'.
    eval_dict: An evaluation dictionary, returned from
      result_dict_for_single_example().

  Returns:
    A dictionary of metric names to tuple of value_op and update_op that can be
    used as eval metric ops in tf.EstimatorSpec.
  """
  options = evaluator_options_from_eval_config(eval_config)
  metric_ops = {}
  for evaluator in get_evaluators(eval_config, categories, options):
    metric_ops.update(evaluator.get_estimator_eval_metric_ops(eval_dict))
  return metric_ops
def evaluator_options_from_eval_config(eval_config):
  """Produces a dictionary of evaluation options for each eval metric.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.

  Returns:
    evaluator_options: A dictionary of metric names (see
      EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
      keyword arguments. For example:
        evaluator_options = {
          'coco_detection_metrics': {'include_metrics_per_category': True}
        }
  """
  # Only the COCO evaluators accept the per-category flag.
  per_category_metrics = ('coco_detection_metrics', 'coco_mask_metrics')
  options = {}
  for metric_key in eval_config.metrics_set:
    if metric_key in per_category_metrics:
      options[metric_key] = {
          'include_metrics_per_category':
              eval_config.include_metrics_per_category
      }
  return options
| 44.259912 | 80 | 0.700956 |
import collections
import os
import time
import numpy as np
import tensorflow as tf
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
slim = tf.contrib.slim
# Maps eval_config.metrics_set names to their DetectionEvaluator classes;
# get_evaluators() below looks metric names up here.
EVAL_METRICS_CLASS_DICT = {
    'coco_detection_metrics':
        coco_evaluation.CocoDetectionEvaluator,
    'coco_mask_metrics':
        coco_evaluation.CocoMaskEvaluator,
    'oid_challenge_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
    'pascal_voc_detection_metrics':
        object_detection_evaluation.PascalDetectionEvaluator,
    'weighted_pascal_voc_detection_metrics':
        object_detection_evaluation.WeightedPascalDetectionEvaluator,
    'pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.PascalInstanceSegmentationEvaluator,
    'weighted_pascal_voc_instance_segmentation_metrics':
        object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
    'oid_V2_detection_metrics':
        object_detection_evaluation.OpenImagesDetectionEvaluator,
}
# Metric used when eval_config.metrics_set is left empty.
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
  """Writes each metric as a scalar tf summary under `summary_dir`.

  Args:
    metrics: A dictionary mapping metric names to numeric values.
    global_step: Global step at which the metrics were computed.
    summary_dir: Directory the tf summaries are written to.
  """
  tf.logging.info('Writing metrics to tf summary.')
  writer = tf.summary.FileWriterCache.get(summary_dir)
  for name in sorted(metrics):
    value = metrics[name]
    scalar_summary = tf.Summary(
        value=[tf.Summary.Value(tag=name, simple_value=value)])
    writer.add_summary(scalar_summary, global_step)
    tf.logging.info('%s: %f', name, value)
  tf.logging.info('Metrics written to tf summary.')
def visualize_detection_results(result_dict,
                                tag,
                                global_step,
                                categories,
                                summary_dir='',
                                export_dir='',
                                agnostic_mode=False,
                                show_groundtruth=False,
                                groundtruth_box_visualization_color='black',
                                min_score_thresh=.5,
                                max_num_predictions=20,
                                skip_scores=False,
                                skip_labels=False,
                                keep_image_id_for_visualization_export=False):
  """Draws detections (and optionally groundtruth) on an image and logs it.

  The annotated image is written as a tf image summary under `summary_dir`
  and, if `export_dir` is set, also saved to disk as a PNG.

  Args:
    result_dict: A dictionary holding at least 'original_image' (a
      [1, H, W, C] uint8 array), 'detection_boxes', 'detection_scores' and
      'detection_classes'; may also contain masks, keypoints, boundaries and
      groundtruth boxes.
    tag: Summary tag (also used in the exported PNG file name).
    global_step: Global step the summary is recorded at.
    categories: A list of category dicts, as used by
      label_map_util.create_category_index.
    summary_dir: Directory whose cached FileWriter receives the summary.
    export_dir: If non-empty, directory where the PNG is saved.
    agnostic_mode: Whether detections are class-agnostic.
    show_groundtruth: Whether to also draw 'groundtruth_boxes'.
    groundtruth_box_visualization_color: Color for groundtruth boxes.
    min_score_thresh: Minimum detection score to draw.
    max_num_predictions: Maximum number of detections to draw.
    skip_scores: Whether to omit scores from box labels.
    skip_labels: Whether to omit class names from box labels.
    keep_image_id_for_visualization_export: Whether to embed the image key in
      the exported file name.

  Raises:
    ValueError: if required keys are missing from `result_dict`, or if
      `show_groundtruth` is set without 'groundtruth_boxes' being present.
  """
  detection_fields = fields.DetectionResultFields
  input_fields = fields.InputDataFields
  if not set([
      input_fields.original_image,
      detection_fields.detection_boxes,
      detection_fields.detection_scores,
      detection_fields.detection_classes,
  ]).issubset(set(result_dict.keys())):
    raise ValueError('result_dict does not contain all expected keys.')
  if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
    raise ValueError('If show_groundtruth is enabled, result_dict must contain '
                     'groundtruth_boxes.')
  tf.logging.info('Creating detection visualizations.')
  category_index = label_map_util.create_category_index(categories)
  # Drop the leading batch dimension of the [1, H, W, C] image.
  image = np.squeeze(result_dict[input_fields.original_image], axis=0)
  if image.shape[2] == 1:
    # Grayscale input: replicate the single channel into RGB for drawing.
    image = np.tile(image, [1, 1, 3])
  detection_boxes = result_dict[detection_fields.detection_boxes]
  detection_scores = result_dict[detection_fields.detection_scores]
  detection_classes = np.int32((result_dict[
      detection_fields.detection_classes]))
  # Optional fields; None when absent.
  detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
  detection_masks = result_dict.get(detection_fields.detection_masks)
  detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
  if show_groundtruth:
    groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
    groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
    # Draw groundtruth first so detections are rendered on top of it.
    vis_utils.visualize_boxes_and_labels_on_image_array(
        image=image,
        boxes=groundtruth_boxes,
        classes=None,
        scores=None,
        category_index=category_index,
        keypoints=groundtruth_keypoints,
        use_normalized_coordinates=False,
        max_boxes_to_draw=None,
        groundtruth_box_visualization_color=groundtruth_box_visualization_color)
  vis_utils.visualize_boxes_and_labels_on_image_array(
      image,
      detection_boxes,
      detection_classes,
      detection_scores,
      category_index,
      instance_masks=detection_masks,
      instance_boundaries=detection_boundaries,
      keypoints=detection_keypoints,
      use_normalized_coordinates=False,
      max_boxes_to_draw=max_num_predictions,
      min_score_thresh=min_score_thresh,
      agnostic_mode=agnostic_mode,
      skip_scores=skip_scores,
      skip_labels=skip_labels)
  if export_dir:
    if keep_image_id_for_visualization_export and result_dict[fields.
                                                              InputDataFields()
                                                              .key]:
      export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
          tag, result_dict[fields.InputDataFields().key]))
    else:
      export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
    vis_utils.save_image_array_as_png(image, export_path)
  summary = tf.Summary(value=[
      tf.Summary.Value(
          tag=tag,
          image=tf.Summary.Image(
              encoded_image_string=vis_utils.encode_image_array_as_png_str(
                  image)))
  ])
  summary_writer = tf.summary.FileWriterCache.get(summary_dir)
  summary_writer.add_summary(summary, global_step)
  tf.logging.info('Detection visualizations written to summary with tag %s.',
                  tag)
def _run_checkpoint_once(tensor_dict,
                         evaluators=None,
                         batch_processor=None,
                         checkpoint_dirs=None,
                         variables_to_restore=None,
                         restore_fn=None,
                         num_batches=1,
                         master='',
                         save_graph=False,
                         save_graph_dir='',
                         losses_dict=None,
                         eval_export_path=None):
  """Evaluates a single checkpoint and returns its metrics.

  Restores variables (via `restore_fn` or the latest checkpoint in
  `checkpoint_dirs[0]`), runs `num_batches` evaluation batches, feeds every
  result to each evaluator, and aggregates per-batch losses.

  Args:
    tensor_dict: Dictionary of evaluation tensors to run per batch.
    evaluators: Iterable of DetectionEvaluator instances fed with each batch.
    batch_processor: Optional callable (tensor_dict, sess, batch, counters,
      losses_dict) -> (result_dict, result_losses_dict); when None, the
      tensors are run directly with sess.run.
    checkpoint_dirs: Directories to search for the checkpoint (only the first
      entry is used when `restore_fn` is not given).
    variables_to_restore: Variable mapping for tf.train.Saver (ignored when
      `restore_fn` is given).
    restore_fn: Optional callable taking the session and restoring variables.
    num_batches: Number of evaluation batches to run.
    master: Target of the tf.Session.
    save_graph: Whether to dump the eval graph as 'eval.pbtxt'.
    save_graph_dir: Directory for the dumped graph; required if `save_graph`.
    losses_dict: Optional dictionary of loss tensors run alongside
      `tensor_dict`.
    eval_export_path: Optional path; COCO evaluators dump their detections to
      this json file when set.

  Returns:
    A (global_step, metrics) tuple where metrics maps metric names (including
    'Losses/<name>' averages) to values.

  Raises:
    ValueError: if `save_graph` is set without `save_graph_dir`, if no
      checkpoint source is available, or if evaluators produce colliding
      metric names.
  """
  if save_graph and not save_graph_dir:
    raise ValueError('`save_graph_dir` must be defined.')
  sess = tf.Session(master, graph=tf.get_default_graph())
  sess.run(tf.global_variables_initializer())
  sess.run(tf.local_variables_initializer())
  sess.run(tf.tables_initializer())
  if restore_fn:
    restore_fn(sess)
  else:
    if not checkpoint_dirs:
      raise ValueError('`checkpoint_dirs` must have at least one entry.')
    checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
    saver = tf.train.Saver(variables_to_restore)
    saver.restore(sess, checkpoint_file)
  if save_graph:
    tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
  counters = {'skipped': 0, 'success': 0}
  aggregate_result_losses_dict = collections.defaultdict(list)
  with tf.contrib.slim.queues.QueueRunners(sess):
    try:
      for batch in range(int(num_batches)):
        if (batch + 1) % 100 == 0:
          tf.logging.info('Running eval ops batch %d/%d', batch + 1,
                          num_batches)
        if not batch_processor:
          try:
            if not losses_dict:
              losses_dict = {}
            result_dict, result_losses_dict = sess.run([tensor_dict,
                                                        losses_dict])
            counters['success'] += 1
          except tf.errors.InvalidArgumentError:
            # Deliberate best-effort: skip malformed inputs, keep counting.
            tf.logging.info('Skipping image')
            counters['skipped'] += 1
            result_dict = {}
        else:
          result_dict, result_losses_dict = batch_processor(
              tensor_dict, sess, batch, counters, losses_dict=losses_dict)
        if not result_dict:
          continue
        for key, value in iter(result_losses_dict.items()):
          aggregate_result_losses_dict[key].append(value)
        for evaluator in evaluators:
          # Prefer the image's own key as its id; fall back to batch index.
          if (isinstance(result_dict, dict) and
              fields.InputDataFields.key in result_dict and
              result_dict[fields.InputDataFields.key]):
            image_id = result_dict[fields.InputDataFields.key]
          else:
            image_id = batch
          evaluator.add_single_ground_truth_image_info(
              image_id=image_id, groundtruth_dict=result_dict)
          evaluator.add_single_detected_image_info(
              image_id=image_id, detections_dict=result_dict)
      tf.logging.info('Running eval batches done.')
    except tf.errors.OutOfRangeError:
      tf.logging.info('Done evaluating -- epoch limit reached')
    finally:
      tf.logging.info('# success: %d', counters['success'])
      tf.logging.info('# skipped: %d', counters['skipped'])
  all_evaluator_metrics = {}
  # Simplified from the redundant `eval_export_path and eval_export_path is
  # not None` double check.
  if eval_export_path:
    for evaluator in evaluators:
      if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
          isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
        tf.logging.info('Started dumping to json file.')
        evaluator.dump_detections_to_json_file(
            json_output_path=eval_export_path)
        tf.logging.info('Finished dumping to json file.')
  for evaluator in evaluators:
    metrics = evaluator.evaluate()
    evaluator.clear()
    if any(key in all_evaluator_metrics for key in metrics):
      raise ValueError('Metric names between evaluators must not collide.')
    all_evaluator_metrics.update(metrics)
  global_step = tf.train.global_step(sess, tf.train.get_global_step())
  for key, value in iter(aggregate_result_losses_dict.items()):
    all_evaluator_metrics['Losses/' + key] = np.mean(value)
  sess.close()
  return (global_step, all_evaluator_metrics)
def repeated_checkpoint_run(tensor_dict,
                            summary_dir,
                            evaluators,
                            batch_processor=None,
                            checkpoint_dirs=None,
                            variables_to_restore=None,
                            restore_fn=None,
                            num_batches=1,
                            eval_interval_secs=120,
                            max_number_of_evaluations=None,
                            master='',
                            save_graph=False,
                            save_graph_dir='',
                            losses_dict=None,
                            eval_export_path=None):
  """Polls for new checkpoints and evaluates each one as it appears.

  Loops forever (or until `max_number_of_evaluations` evaluations have run),
  evaluating the latest checkpoint in `checkpoint_dirs[0]` with
  `_run_checkpoint_once` and writing the resulting metrics as tf summaries
  to `summary_dir`. Checkpoints already evaluated are skipped.

  Args:
    tensor_dict: Dictionary of evaluation tensors, passed through to
      `_run_checkpoint_once`.
    summary_dir: Directory where metric summaries are written.
    evaluators: Iterable of DetectionEvaluator instances.
    batch_processor: Optional per-batch callable (see `_run_checkpoint_once`).
    checkpoint_dirs: Directories to poll for checkpoints; must be non-empty.
    variables_to_restore: Variable mapping for tf.train.Saver.
    restore_fn: Optional callable restoring variables into the session.
    num_batches: Number of evaluation batches per checkpoint.
    eval_interval_secs: Minimum seconds between polling iterations.
    max_number_of_evaluations: Stop after this many evaluations; None means
      run indefinitely.
    master: Target of the tf.Session.
    save_graph: Whether to dump the eval graph.
    save_graph_dir: Directory for the dumped graph.
    losses_dict: Optional dictionary of loss tensors.
    eval_export_path: Optional json dump path for COCO evaluators.

  Returns:
    The metrics dictionary from the most recent evaluation.

  Raises:
    ValueError: if `max_number_of_evaluations` is non-positive or
      `checkpoint_dirs` is empty.
  """
  if max_number_of_evaluations and max_number_of_evaluations <= 0:
    raise ValueError(
        '`number_of_steps` must be either None or a positive number.')
  if not checkpoint_dirs:
    raise ValueError('`checkpoint_dirs` must have at least one entry.')
  last_evaluated_model_path = None
  number_of_evaluations = 0
  while True:
    start = time.time()
    tf.logging.info('Starting evaluation at ' + time.strftime(
        '%Y-%m-%d-%H:%M:%S', time.gmtime()))
    model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
    if not model_path:
      tf.logging.info('No model found in %s. Will try again in %d seconds',
                      checkpoint_dirs[0], eval_interval_secs)
    elif model_path == last_evaluated_model_path:
      tf.logging.info('Found already evaluated checkpoint. Will try again in '
                      '%d seconds', eval_interval_secs)
    else:
      last_evaluated_model_path = model_path
      global_step, metrics = _run_checkpoint_once(
          tensor_dict,
          evaluators,
          batch_processor,
          checkpoint_dirs,
          variables_to_restore,
          restore_fn,
          num_batches,
          master,
          save_graph,
          save_graph_dir,
          losses_dict=losses_dict,
          eval_export_path=eval_export_path)
      write_metrics(metrics, global_step, summary_dir)
      # Only completed evaluations count towards the stop condition.
      number_of_evaluations += 1
      if (max_number_of_evaluations and
          number_of_evaluations >= max_number_of_evaluations):
        tf.logging.info('Finished evaluation!')
        break
    # Sleep away whatever is left of the polling interval.
    time_to_next_eval = start + eval_interval_secs - time.time()
    if time_to_next_eval > 0:
      time.sleep(time_to_next_eval)
  return metrics
def _scale_box_to_absolute(args):
  """Converts normalized boxes to absolute pixel coordinates.

  Args:
    args: A tuple of (boxes, image_shape) tensors.

  Returns:
    A tensor of boxes in absolute coordinates.
  """
  normalized_boxes, image_shape = args
  height, width = image_shape[0], image_shape[1]
  absolute_box_list = box_list_ops.to_absolute_coordinates(
      box_list.BoxList(normalized_boxes), height, width)
  return absolute_box_list.get()
def _resize_detection_masks(args):
  """Reframes per-box masks into binarized full-image masks.

  Args:
    args: A tuple of (detection_boxes, detection_masks, image_shape).

  Returns:
    A uint8 tensor of full-image masks, thresholded at 0.5.
  """
  boxes, masks, image_shape = args
  height, width = image_shape[0], image_shape[1]
  image_masks = ops.reframe_box_masks_to_image_masks(
      masks, boxes, height, width)
  binarized = tf.greater(image_masks, 0.5)
  return tf.cast(binarized, tf.uint8)
def _resize_groundtruth_masks(args):
  """Resizes groundtruth instance masks to the given spatial shape.

  Args:
    args: A tuple of (mask, image_shape).

  Returns:
    A uint8 tensor of nearest-neighbor-resized masks.
  """
  masks, target_shape = args
  # Resize ops expect a channel axis; add one, resize, then drop it again.
  expanded = tf.expand_dims(masks, 3)
  resized = tf.image.resize_images(
      expanded,
      target_shape,
      method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
      align_corners=True)
  squeezed = tf.squeeze(resized, 3)
  return tf.cast(squeezed, tf.uint8)
def _scale_keypoint_to_absolute(args):
  """Converts normalized keypoints to absolute pixel coordinates.

  Args:
    args: A tuple of (keypoints, image_shape).

  Returns:
    A tensor of keypoints scaled by image height and width.
  """
  normalized_keypoints, image_shape = args
  height, width = image_shape[0], image_shape[1]
  return keypoint_ops.scale(normalized_keypoints, height, width)
def result_dict_for_single_example(image,
                                   key,
                                   detections,
                                   groundtruth=None,
                                   class_agnostic=False,
                                   scale_to_absolute=False):
  """Merges all detection and groundtruth information for a single example.

  Wraps `result_dict_for_batched_example` by adding a batch dimension to all
  inputs and squeezing it out of the result. Evaluation tools require
  1-indexed classes, and so this function performs the offset (via the
  batched helper). If `class_agnostic` is True, all output classes have
  label 1.

  Args:
    image: A single 4D uint8 image tensor of shape [1, H, W, C].
    key: A single string tensor identifying the image.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of unbatched groundtruth items.
    class_agnostic: Whether the detections are class-agnostic (binary).
    scale_to_absolute: Whether boxes and keypoints should be scaled to
      absolute coordinates.

  Returns:
    A dictionary of merged detection/groundtruth fields with the batch
    dimension removed (except for 'original_image').
  """
  # Bug fix: max_gt_boxes was previously only assigned inside the
  # `if groundtruth:` branch, so calling with the documented default
  # groundtruth=None raised a NameError at the call below.
  max_gt_boxes = None
  if groundtruth:
    max_gt_boxes = tf.shape(
        groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
    for gt_key in groundtruth:
      # Expand groundtruth entries along the batch dimension.
      groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
  for detection_key in detections:
    detections[detection_key] = tf.expand_dims(
        detections[detection_key][0], axis=0)
  batched_output_dict = result_dict_for_batched_example(
      image,
      tf.expand_dims(key, 0),
      detections,
      groundtruth,
      class_agnostic,
      scale_to_absolute,
      max_gt_boxes=max_gt_boxes)
  # These keys are either kept batched ('original_image') or dropped from the
  # single-example result entirely.
  exclude_keys = [
      fields.InputDataFields.original_image,
      fields.DetectionResultFields.num_detections,
      fields.InputDataFields.num_groundtruth_boxes
  ]
  output_dict = {
      fields.InputDataFields.original_image:
          batched_output_dict[fields.InputDataFields.original_image]
  }
  for key in batched_output_dict:
    # Remove the batch dimension.
    if key not in exclude_keys:
      output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
  return output_dict
def result_dict_for_batched_example(images,
                                    keys,
                                    detections,
                                    groundtruth=None,
                                    class_agnostic=False,
                                    scale_to_absolute=False,
                                    original_image_spatial_shapes=None,
                                    true_image_shapes=None,
                                    max_gt_boxes=None):
  """Merges all detection and groundtruth information for a batch of examples.

  Evaluation tools require 1-indexed classes, so a label id offset of 1 is
  applied to detection classes. If `class_agnostic` is True, all output
  classes have label 1.

  Args:
    images: A 4D uint8 image tensor of shape [batch_size, H, W, C].
    keys: A [batch_size] string tensor with image identifiers.
    detections: A dictionary of detections, returned from
      DetectionModel.postprocess().
    groundtruth: (Optional) Dictionary of batched groundtruth items
      ('groundtruth_boxes', 'groundtruth_classes', optionally area/is_crowd/
      difficult/group_of/instance_masks fields).
    class_agnostic: Whether the detections are class-agnostic (binary).
    scale_to_absolute: Whether boxes and keypoints should be scaled to
      absolute coordinates.
    original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
      used to resize the image. When set to None, the image size is retained.
    true_image_shapes: A 2D int32 tensor of shape [batch_size, 3] containing
      the size of the unpadded original_image.
    max_gt_boxes: [batch_size] tensor representing the maximum number of
      groundtruth boxes to pad.

  Returns:
    A dictionary with detection fields, input fields, and (when `groundtruth`
    is provided) groundtruth fields plus 'num_groundtruth_boxes'.

  Raises:
    ValueError: if `original_image_spatial_shapes` is not [batch_size, 2],
      if `true_image_shapes` is not [batch_size, 3], or if `groundtruth` is
      given without a usable `max_gt_boxes`.
  """
  label_id_offset = 1  # Evaluation tools expect 1-indexed classes.
  input_data_fields = fields.InputDataFields
  if original_image_spatial_shapes is None:
    original_image_spatial_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:3], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Bug fix: the original combined these checks with `and`, so the error
    # only fired when *both* conditions held — a rank-2 tensor with a wrong
    # second dimension (or a wrong-rank tensor) could slip through silently.
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
      raise ValueError(
          '`original_image_spatial_shape` should be a 2D tensor of shape '
          '[batch_size, 2].')
  if true_image_shapes is None:
    true_image_shapes = tf.tile(
        tf.expand_dims(tf.shape(images)[1:4], axis=0),
        multiples=[tf.shape(images)[0], 1])
  else:
    # Same `and` -> `or` fix as above.
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
      raise ValueError('`true_image_shapes` should be a 2D tensor of '
                       'shape [batch_size, 3].')
  output_dict = {
      input_data_fields.original_image:
          images,
      input_data_fields.key:
          keys,
      input_data_fields.original_image_spatial_shape: (
          original_image_spatial_shapes),
      input_data_fields.true_image_shape:
          true_image_shapes
  }
  detection_fields = fields.DetectionResultFields
  detection_boxes = detections[detection_fields.detection_boxes]
  detection_scores = detections[detection_fields.detection_scores]
  num_detections = tf.to_int32(detections[detection_fields.num_detections])
  if class_agnostic:
    detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
  else:
    detection_classes = (
        tf.to_int64(detections[detection_fields.detection_classes]) +
        label_id_offset)
  if scale_to_absolute:
    output_dict[detection_fields.detection_boxes] = (
        shape_utils.static_or_dynamic_map_fn(
            _scale_box_to_absolute,
            elems=[detection_boxes, original_image_spatial_shapes],
            dtype=tf.float32))
  else:
    output_dict[detection_fields.detection_boxes] = detection_boxes
  output_dict[detection_fields.detection_classes] = detection_classes
  output_dict[detection_fields.detection_scores] = detection_scores
  output_dict[detection_fields.num_detections] = num_detections
  if detection_fields.detection_masks in detections:
    detection_masks = detections[detection_fields.detection_masks]
    # Reframe box-level masks to full-image masks; ideally done in the
    # model's postprocess function.
    output_dict[detection_fields.detection_masks] = (
        shape_utils.static_or_dynamic_map_fn(
            _resize_detection_masks,
            elems=[detection_boxes, detection_masks,
                   original_image_spatial_shapes],
            dtype=tf.uint8))
  if detection_fields.detection_keypoints in detections:
    detection_keypoints = detections[detection_fields.detection_keypoints]
    output_dict[detection_fields.detection_keypoints] = detection_keypoints
    if scale_to_absolute:
      output_dict[detection_fields.detection_keypoints] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_keypoint_to_absolute,
              elems=[detection_keypoints, original_image_spatial_shapes],
              dtype=tf.float32))
  if groundtruth:
    if max_gt_boxes is None:
      if input_data_fields.num_groundtruth_boxes in groundtruth:
        max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
      else:
        raise ValueError(
            'max_gt_boxes must be provided when processing batched examples.')
    if input_data_fields.groundtruth_instance_masks in groundtruth:
      masks = groundtruth[input_data_fields.groundtruth_instance_masks]
      groundtruth[input_data_fields.groundtruth_instance_masks] = (
          shape_utils.static_or_dynamic_map_fn(
              _resize_groundtruth_masks,
              elems=[masks, original_image_spatial_shapes],
              dtype=tf.uint8))
    output_dict.update(groundtruth)
    if scale_to_absolute:
      groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
      output_dict[input_data_fields.groundtruth_boxes] = (
          shape_utils.static_or_dynamic_map_fn(
              _scale_box_to_absolute,
              elems=[groundtruth_boxes, original_image_spatial_shapes],
              dtype=tf.float32))
    # For class-agnostic models, groundtruth classes all become 1.
    if class_agnostic:
      groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
      groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
      output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
    output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
  return output_dict
def get_evaluators(eval_config, categories, evaluator_options=None):
  """Instantiates the DetectionEvaluators requested by eval_config.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each with an integer 'id' and string 'name'.
    evaluator_options: Optional dictionary of metric names (see
      EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization keyword
      arguments.

  Returns:
    A list of DetectionEvaluator instances; defaults to the
    EVAL_DEFAULT_METRIC evaluator when eval_config.metrics_set is empty.

  Raises:
    ValueError: if a metric name is not in EVAL_METRICS_CLASS_DICT.
  """
  evaluator_options = evaluator_options or {}
  eval_metric_fn_keys = eval_config.metrics_set
  if not eval_metric_fn_keys:
    eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
  evaluators_list = []
  for eval_metric_fn_key in eval_metric_fn_keys:
    if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
      raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
    # dict.get replaces the original membership-test conditional expression.
    kwargs_dict = evaluator_options.get(eval_metric_fn_key, {})
    evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
        categories,
        **kwargs_dict))
  return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
                                       categories,
                                       eval_dict):
  """Returns eval metric ops usable in a `tf.estimator.EstimatorSpec`.

  Args:
    eval_config: An `eval_pb2.EvalConfig`.
    categories: A list of dicts, each with an integer 'id' and string 'name'.
    eval_dict: An evaluation dictionary, returned from
      result_dict_for_single_example().

  Returns:
    A dictionary mapping metric names to (value_op, update_op) tuples.
  """
  options = evaluator_options_from_eval_config(eval_config)
  metric_ops = {}
  for evaluator in get_evaluators(eval_config, categories, options):
    metric_ops.update(evaluator.get_estimator_eval_metric_ops(eval_dict))
  return metric_ops
def evaluator_options_from_eval_config(eval_config):
  """Extract per-evaluator constructor kwargs from an eval config.

  Only the two COCO evaluators take extra options
  (``include_metrics_per_category``); other keys get no entry.
  """
  coco_keys = ('coco_detection_metrics', 'coco_mask_metrics')
  return {
      key: {
          'include_metrics_per_category': (
              eval_config.include_metrics_per_category)
      }
      for key in eval_config.metrics_set
      if key in coco_keys
  }
| true | true |
f714557129b17004b2cb69261547461b88d0c20a | 227 | py | Python | rank_reset.py | AmanMulani/python_web_crawling | a36115db6548b98c2c66868a14ce752449f4f7d1 | [
"MIT"
] | null | null | null | rank_reset.py | AmanMulani/python_web_crawling | a36115db6548b98c2c66868a14ce752449f4f7d1 | [
"MIT"
] | null | null | null | rank_reset.py | AmanMulani/python_web_crawling | a36115db6548b98c2c66868a14ce752449f4f7d1 | [
"MIT"
] | null | null | null | import sqlite3
import sqlite3

# Reset the PageRank iteration state: every page starts the next run with
# new_rank = 1.0 and old_rank = 0.0.
conn = sqlite3.connect('spider.sqlite')
try:
    cur = conn.cursor()
    cur.execute('''
UPDATE Pages SET new_rank = 1.0, old_rank = 0.0
''')
    conn.commit()
    cur.close()
finally:
    # Fix: the original script closed only the cursor and leaked the
    # database connection.
    conn.close()
print('The rank of all pages has been set to 1.0')
# Open the crawl database (presumably produced by a companion spider
# script -- TODO confirm) and reset the PageRank iteration state.
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()
# Every page starts the next ranking run with new_rank 1.0, old_rank 0.0.
cur.execute('''
UPDATE Pages SET new_rank = 1.0, old_rank = 0.0
''')
conn.commit()
cur.close()
print('The rank of all pages has been set to 1.0')
f71455c4fdf2d638a601f379ab38dd4ba96daa46 | 1,474 | py | Python | PythonClient/cv_mode.py | jelaredulla/thesis | dc348652cc0bd0a35e5d7506144d641510c2483b | [
"MIT"
] | null | null | null | PythonClient/cv_mode.py | jelaredulla/thesis | dc348652cc0bd0a35e5d7506144d641510c2483b | [
"MIT"
] | null | null | null | PythonClient/cv_mode.py | jelaredulla/thesis | dc348652cc0bd0a35e5d7506144d641510c2483b | [
"MIT"
] | null | null | null | # In settings.json first activate computer vision mode:
# https://github.com/Microsoft/AirSim/blob/master/docs/image_apis.md#computer-vision-mode
from AirSimClient import *
import pprint
pp = pprint.PrettyPrinter(indent=4)
client = CarClient()
client.confirmConnection()
for x in range(3): # do few times
z = x * -20 - 5 # some random number
client.simSetPose(Pose(Vector3r(z, z, z), AirSimClientBase.toQuaternion(x / 3.0, 0, x / 3.0)), True)
responses = client.simGetImages([
ImageRequest(0, AirSimImageType.DepthVis),
ImageRequest(1, AirSimImageType.DepthPerspective, True),
ImageRequest(0, AirSimImageType.Segmentation),
ImageRequest(0, AirSimImageType.Scene),
ImageRequest(0, AirSimImageType.DisparityNormalized),
ImageRequest(0, AirSimImageType.SurfaceNormals)])
for i, response in enumerate(responses):
if response.pixels_as_float:
print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
AirSimClientBase.write_pfm(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.pfm'), AirSimClientBase.getPfmArray(response))
else:
print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
AirSimClientBase.write_file(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.png'), response.image_data_uint8)
pose = client.simGetPose()
pp.pprint(pose)
time.sleep(3)
| 40.944444 | 147 | 0.687246 |
mport *
import pprint
pp = pprint.PrettyPrinter(indent=4)
client = CarClient()
client.confirmConnection()
for x in range(3):
z = x * -20 - 5
client.simSetPose(Pose(Vector3r(z, z, z), AirSimClientBase.toQuaternion(x / 3.0, 0, x / 3.0)), True)
responses = client.simGetImages([
ImageRequest(0, AirSimImageType.DepthVis),
ImageRequest(1, AirSimImageType.DepthPerspective, True),
ImageRequest(0, AirSimImageType.Segmentation),
ImageRequest(0, AirSimImageType.Scene),
ImageRequest(0, AirSimImageType.DisparityNormalized),
ImageRequest(0, AirSimImageType.SurfaceNormals)])
for i, response in enumerate(responses):
if response.pixels_as_float:
print("Type %d, size %d" % (response.image_type, len(response.image_data_float)))
AirSimClientBase.write_pfm(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.pfm'), AirSimClientBase.getPfmArray(response))
else:
print("Type %d, size %d" % (response.image_type, len(response.image_data_uint8)))
AirSimClientBase.write_file(os.path.normpath('/temp/cv_mode_' + str(x) + "_" + str(i) + '.png'), response.image_data_uint8)
pose = client.simGetPose()
pp.pprint(pose)
time.sleep(3)
| true | true |
f71455e0d19a2d1ec2ea85826d0070d6ac81fa73 | 5,361 | py | Python | corehq/messaging/tasks.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/messaging/tasks.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | corehq/messaging/tasks.py | dannyroberts/commcare-hq | 4b0b8ecbe851e46307d3a0e635d6d5d6e31c3598 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.sms import tasks as sms_tasks
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.form_processor.utils import should_use_sql_backend
from corehq.messaging.scheduling.tasks import delete_schedule_instances_for_cases
from corehq.messaging.scheduling.util import utcnow
from corehq.messaging.util import MessagingRuleProgressHelper, use_phone_entries
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from corehq.util.celery_utils import no_result_task
from corehq.util.datadog.utils import case_load_counter
from dimagi.utils.couch import CriticalSection
from django.conf import settings
from django.db.models import Q
from django.db import transaction
def get_sync_key(case_id):
    """Name of the critical section that serializes syncs for one case."""
    key_template = 'sync-case-for-messaging-%s'
    return key_template % case_id
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging(self, domain, case_id):
    """Task: bring all messaging state for one case up to date.

    Retries on any error (5-minute delay, up to 12 attempts, per the
    decorator above).
    """
    if REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
        # A reminders migration is in progress for this domain; requeue
        # this sync to run a minute later instead of touching state now.
        sync_case_for_messaging.apply_async([domain, case_id], countdown=60)
        return
    try:
        # Serialize with any concurrent messaging sync for the same case.
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging(domain, case_id)
    except Exception as e:
        self.retry(exc=e)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging_rule(self, domain, case_id, rule_id):
    """Task: run one scheduling rule against one case, retrying on error."""
    try:
        # Same per-case lock as sync_case_for_messaging so rule runs and
        # full syncs never interleave for a single case.
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging_rule(domain, case_id, rule_id)
    except Exception as e:
        self.retry(exc=e)
def _sync_case_for_messaging(domain, case_id):
    """Sync phone entries, schedule instances, and scheduling rules for a case."""
    try:
        case = CaseAccessors(domain).get_case(case_id)
        sms_tasks.clear_case_caches(case)
    except CaseNotFound:
        case = None
    case_load_counter("messaging_sync", domain)()
    if case is None or case.is_deleted:
        # Case no longer exists: tear down any messaging state tied to it.
        sms_tasks.delete_phone_numbers_for_owners([case_id])
        delete_schedule_instances_for_cases(domain, [case_id])
        return
    if use_phone_entries():
        sms_tasks._sync_case_phone_number(case)
    # Re-run every cached scheduling rule that applies to this case type.
    rules = AutomaticUpdateRule.by_domain_cached(case.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
    for rule in rules_by_case_type.get(case.type, []):
        rule.run_rule(case, utcnow())
def _get_cached_rule(domain, rule_id):
    """Return the cached scheduling rule with pk ``rule_id``.

    Returns None unless the cache holds exactly one matching rule.
    """
    candidates = [
        rule
        for rule in AutomaticUpdateRule.by_domain_cached(
            domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
        if rule.pk == rule_id
    ]
    if len(candidates) == 1:
        return candidates[0]
    return None
def _sync_case_for_messaging_rule(domain, case_id, rule_id):
    """Run a single scheduling rule against a single case and record progress."""
    case_load_counter("messaging_rule_sync", domain)()
    case = CaseAccessors(domain).get_case(case_id)
    rule = _get_cached_rule(domain, rule_id)
    if rule:
        rule.run_rule(case, utcnow())
    # Progress is counted even when the rule is no longer in the cache, so
    # the rule-run progress bar can still reach completion.
    MessagingRuleProgressHelper(rule_id).increment_current_case_count()
def initiate_messaging_rule_run(domain, rule_id):
    """Lock a rule for editing and queue a full run once the transaction commits."""
    MessagingRuleProgressHelper(rule_id).set_initial_progress()
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=True)
    # Queue only after commit so the worker observes the locked rule state.
    transaction.on_commit(lambda: run_messaging_rule.delay(domain, rule_id))
def get_case_ids_for_messaging_rule(domain, case_type):
    """Yield/return all non-deleted case ids of ``case_type`` in ``domain``,
    choosing the accessor that matches the domain's backend."""
    if should_use_sql_backend(domain):
        return run_query_across_partitioned_databases(
            CommCareCaseSQL,
            Q(domain=domain, type=case_type, deleted=False),
            values=['case_id'],
        )
    return CaseAccessors(domain).get_case_ids_in_domain(case_type)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE)
def set_rule_complete(rule_id):
    """Task: unlock the rule and mark its messaging run as finished."""
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=False)
    MessagingRuleProgressHelper(rule_id).set_rule_complete()
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_RULE_QUEUE, acks_late=True)
def run_messaging_rule(domain, rule_id):
    """Task: fan out one sync_case_for_messaging_rule task per matching case."""
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    total_count = 0
    progress_helper = MessagingRuleProgressHelper(rule_id)
    for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
        sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
        total_count += 1
        if total_count % 1000 == 0:
            # Publish an interim total every 1000 cases so progress UIs
            # have something to show while fan-out is still running.
            progress_helper.set_total_case_count(total_count)
    progress_helper.set_total_case_count(total_count)
    # By putting this task last in the queue, the rule should be marked
    # complete at about the time that the last tasks are finishing up.
    # This beats saving the task results in the database and using a
    # celery chord which would be more taxing on system resources.
    set_rule_complete.delay(rule_id)
| 39.419118 | 102 | 0.76814 | from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.data_interfaces.models import AutomaticUpdateRule
from corehq.apps.sms import tasks as sms_tasks
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.form_processor.utils import should_use_sql_backend
from corehq.messaging.scheduling.tasks import delete_schedule_instances_for_cases
from corehq.messaging.scheduling.util import utcnow
from corehq.messaging.util import MessagingRuleProgressHelper, use_phone_entries
from corehq.sql_db.util import run_query_across_partitioned_databases
from corehq.toggles import REMINDERS_MIGRATION_IN_PROGRESS
from corehq.util.celery_utils import no_result_task
from corehq.util.datadog.utils import case_load_counter
from dimagi.utils.couch import CriticalSection
from django.conf import settings
from django.db.models import Q
from django.db import transaction
def get_sync_key(case_id):
    """Name of the critical section that serializes syncs for one case."""
    return 'sync-case-for-messaging-%s' % case_id
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging(self, domain, case_id):
    """Task: bring all messaging state for one case up to date."""
    if REMINDERS_MIGRATION_IN_PROGRESS.enabled(domain):
        # Migration running: requeue this sync for a minute later.
        sync_case_for_messaging.apply_async([domain, case_id], countdown=60)
        return
    try:
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging(domain, case_id)
    except Exception as e:
        self.retry(exc=e)
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE, acks_late=True,
                default_retry_delay=5 * 60, max_retries=12, bind=True)
def sync_case_for_messaging_rule(self, domain, case_id, rule_id):
    """Task: run one scheduling rule against one case, retrying on error."""
    try:
        with CriticalSection([get_sync_key(case_id)], timeout=5 * 60):
            _sync_case_for_messaging_rule(domain, case_id, rule_id)
    except Exception as e:
        self.retry(exc=e)
def _sync_case_for_messaging(domain, case_id):
    """Sync phone entries, schedule instances, and scheduling rules for a case."""
    try:
        case = CaseAccessors(domain).get_case(case_id)
        sms_tasks.clear_case_caches(case)
    except CaseNotFound:
        case = None
    case_load_counter("messaging_sync", domain)()
    if case is None or case.is_deleted:
        # Case no longer exists: tear down its messaging state.
        sms_tasks.delete_phone_numbers_for_owners([case_id])
        delete_schedule_instances_for_cases(domain, [case_id])
        return
    if use_phone_entries():
        sms_tasks._sync_case_phone_number(case)
    rules = AutomaticUpdateRule.by_domain_cached(case.domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rules_by_case_type = AutomaticUpdateRule.organize_rules_by_case_type(rules)
    for rule in rules_by_case_type.get(case.type, []):
        rule.run_rule(case, utcnow())
def _get_cached_rule(domain, rule_id):
    """Return the cached scheduling rule with pk ``rule_id``, or None unless
    exactly one match is cached."""
    rules = AutomaticUpdateRule.by_domain_cached(domain, AutomaticUpdateRule.WORKFLOW_SCHEDULING)
    rules = [rule for rule in rules if rule.pk == rule_id]
    if len(rules) != 1:
        return None
    return rules[0]
def _sync_case_for_messaging_rule(domain, case_id, rule_id):
    """Run a single scheduling rule against a single case and record progress."""
    case_load_counter("messaging_rule_sync", domain)()
    case = CaseAccessors(domain).get_case(case_id)
    rule = _get_cached_rule(domain, rule_id)
    if rule:
        rule.run_rule(case, utcnow())
    # Counted even when the rule is gone so progress can reach completion.
    MessagingRuleProgressHelper(rule_id).increment_current_case_count()
def initiate_messaging_rule_run(domain, rule_id):
    """Lock a rule for editing and queue a full run once the transaction commits."""
    MessagingRuleProgressHelper(rule_id).set_initial_progress()
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=True)
    transaction.on_commit(lambda: run_messaging_rule.delay(domain, rule_id))
def get_case_ids_for_messaging_rule(domain, case_type):
    """Return all non-deleted case ids of ``case_type`` in ``domain``,
    using the accessor that matches the domain's backend."""
    if not should_use_sql_backend(domain):
        return CaseAccessors(domain).get_case_ids_in_domain(case_type)
    else:
        return run_query_across_partitioned_databases(
            CommCareCaseSQL,
            Q(domain=domain, type=case_type, deleted=False),
            values=['case_id']
        )
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_CASE_UPDATE_QUEUE)
def set_rule_complete(rule_id):
    """Task: unlock the rule and mark its messaging run as finished."""
    AutomaticUpdateRule.objects.filter(pk=rule_id).update(locked_for_editing=False)
    MessagingRuleProgressHelper(rule_id).set_rule_complete()
@no_result_task(serializer='pickle', queue=settings.CELERY_REMINDER_RULE_QUEUE, acks_late=True)
def run_messaging_rule(domain, rule_id):
    """Task: fan out one sync_case_for_messaging_rule task per matching case."""
    rule = _get_cached_rule(domain, rule_id)
    if not rule:
        return
    total_count = 0
    progress_helper = MessagingRuleProgressHelper(rule_id)
    for case_id in get_case_ids_for_messaging_rule(domain, rule.case_type):
        sync_case_for_messaging_rule.delay(domain, case_id, rule_id)
        total_count += 1
        if total_count % 1000 == 0:
            # Interim totals so progress UIs update during fan-out.
            progress_helper.set_total_case_count(total_count)
    progress_helper.set_total_case_count(total_count)
    # Queued last so completion lands roughly when the per-case tasks finish.
    set_rule_complete.delay(rule_id)
f71456a563d2f1c851ebfeff59b72638c5277020 | 17,730 | py | Python | imageio_ffmpeg/_io.py | One-sixth/imageio-ffmpeg | 888dace44a2160395cd88c577d542fe820086aa0 | [
"BSD-2-Clause"
] | null | null | null | imageio_ffmpeg/_io.py | One-sixth/imageio-ffmpeg | 888dace44a2160395cd88c577d542fe820086aa0 | [
"BSD-2-Clause"
] | null | null | null | imageio_ffmpeg/_io.py | One-sixth/imageio-ffmpeg | 888dace44a2160395cd88c577d542fe820086aa0 | [
"BSD-2-Clause"
] | null | null | null | import sys
import time
import signal
import subprocess
from ._utils import get_ffmpeg_exe, logger
from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs
ISWIN = sys.platform.startswith("win")
exe = None
def _get_exe():
    """Return the ffmpeg executable path, resolving and caching it lazily."""
    global exe
    if exe is None:
        exe = get_ffmpeg_exe()
    return exe
def count_frames_and_secs(path):
    """
    Get the number of frames and number of seconds for the given video
    file. Note that this operation can be quite slow for large files.
    Disclaimer: I've seen this produce different results from actually reading
    the frames with older versions of ffmpeg (2.x). Therefore I cannot say
    with 100% certainty that the returned values are always exact.
    """
    # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg
    assert isinstance(path, str), "Video path must be a string"
    # Stream-copy the video to the null muxer; ffmpeg's progress output then
    # reports the frame count and duration without re-encoding.
    cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=ISWIN)
    except subprocess.CalledProcessError as err:
        out = err.output.decode(errors="ignore")
        raise RuntimeError("FFMEG call failed with {}:\n{}".format(err.returncode, out))
    # Note that other than with the subprocess calls below, ffmpeg wont hang here.
    # Worst case Python will stop/crash and ffmpeg will continue running until done.
    nframes = nsecs = None
    # Scan from the end: the last "frame=..." progress line holds the totals.
    for line in reversed(out.splitlines()):
        if line.startswith(b"frame="):
            line = line.decode(errors="ignore")
            i = line.find("frame=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nframes = int(s)
            i = line.find("time=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nsecs = cvsecs(*s.split(":"))
            return nframes, nsecs
    raise RuntimeError("Could not get number of frames")  # pragma: no cover
def read_frames(path, pix_fmt="rgb24", bpp=3, input_params=None, output_params=None):
    """
    Create a generator to iterate over the frames in a video file.

    It first yields a small metadata dictionary that contains:

    * ffmpeg_version: the ffmpeg version is use (as a string).
    * codec: a hint about the codec used to encode the video, e.g. "h264"
    * source_size: the width and height of the encoded video frames
    * size: the width and height of the frames that will be produced
    * fps: the frames per second. Can be zero if it could not be detected.
    * duration: duration in seconds. Can be zero if it could not be detected.

    After that, it yields frames until the end of the video is reached. Each
    frame is a bytes object.

    This function makes no assumptions about the number of frames in
    the data. For one because this is hard to predict exactly, but also
    because it may depend on the provided output_params. If you want
    to know the number of frames in a video file, use count_frames_and_secs().

    Parameters:
        path (str): the file to read from.
        pix_fmt (str): the pixel format of the frames to be read.
            The default is "rgb24" (frames are uint8 RGB images).
        bpp (int): The number of bytes per pixel in the output frames.
            This depends on the given pix_fmt. Default is 3 (RGB).
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
    """

    # ----- Input args
    assert isinstance(path, str), "Video path must be a string"
    # Note: Dont check whether it exists. The source could be e.g. a camera.
    pix_fmt = pix_fmt or "rgb24"
    bpp = bpp or 3
    input_params = input_params or []
    output_params = output_params or []
    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
    assert isinstance(bpp, int), "bpp must be an int"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # ----- Prepare
    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]

    cmd = [_get_exe()]
    cmd += input_params + ["-i", path]
    cmd += pre_output_params + output_params + ["-"]
    # Fix: the command is passed as a LIST, not joined into a string.
    # Joining broke execution on non-Windows (shell=False expects a list)
    # and broke any path containing spaces; write_frames below already
    # passes the list.
    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=ISWIN,
    )

    log_catcher = LogCatcher(p.stderr)

    try:
        # ----- Load meta data
        # Wait (bounded) for the log catcher to collect the header output.
        etime = time.time() + 10.0
        while (not log_catcher.header) and time.time() < etime:
            time.sleep(0.01)
        # Check whether we have the information
        if not log_catcher.header:
            err2 = log_catcher.get_text(0.2)
            fmt = "Could not load meta information\n=== stderr ===\n{}"
            raise IOError(fmt.format(err2))
        elif "No such file or directory" in log_catcher.header:
            raise IOError("{} not found! Wrong path?".format(path))

        meta = parse_ffmpeg_header(log_catcher.header)
        yield meta

        # ----- Read frames
        w, h = meta["size"]
        framesize = w * h * bpp
        framenr = 0

        while True:
            framenr += 1
            try:
                bb = bytes()
                while len(bb) < framesize:
                    extra_bytes = p.stdout.read(framesize - len(bb))
                    if not extra_bytes:
                        if len(bb) == 0:
                            return  # normal end of stream
                        else:
                            raise RuntimeError(
                                "End of file reached before full frame could be read."
                            )
                    bb += extra_bytes
                yield bb
            except Exception as err:
                err1 = str(err)
                err2 = log_catcher.get_text(0.4)
                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
                raise RuntimeError(fmt.format(framenr, err1, err2))

    finally:
        # Generators are automatically closed when they get deleted,
        # so this code is almost guaranteed to run.
        if p.poll() is None:
            # Ask ffmpeg to quit. (Sending SIGINT was considered, but tests
            # suggested that sending "q" works better.)
            try:
                p.communicate(b"q")
            except Exception as err:  # pragma: no cover
                logger.warning("Error while attempting stop ffmpeg: " + str(err))
            # Wait for it to stop
            etime = time.time() + 1.5
            while time.time() < etime and p.poll() is None:
                time.sleep(0.01)
            # Grr, we have to kill it
            if p.poll() is None:  # pragma: no cover
                logger.warning("We had to kill ffmpeg to stop it.")
                p.kill()
def write_frames(
    path,
    size,
    pix_fmt_in="rgb24",
    pix_fmt_out="yuv420p",
    fps=16,
    quality=5,
    bitrate=None,
    codec=None,
    macro_block_size=16,
    ffmpeg_log_level="warning",
    ffmpeg_timeout=20.0,
    input_params=None,
    output_params=None,
):
    """
    Create a generator to write frames (bytes objects) into a video file.
    The frames are written by using the generator's `send()` method. Frames
    can be anything that can be written to a file. Typically these are
    bytes objects, but c-contiguous Numpy arrays also work.
    Example:
        gen = write_frames(path, size)
        gen.send(None)  # seed the generator
        for frame in frames:
            gen.send(frame)
        gen.close()  # don't forget this
    Parameters:
        path (str): the file to write to.
        size (tuple): the width and height of the frames.
        pix_fmt_in (str): the pixel format of incoming frames.
            E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24".
        pix_fmt_out (str): the pixel format to store frames. Default yuv420p".
        fps (float): The frames per second. Default 16.
        quality (float): A measure for quality from 1 (worst) to 10 (best);
            values outside this range are rejected (and a falsy value falls
            back to the default). Default 5. Ignored if bitrate is given.
        bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good.
        codec (str): The codec. Default "libx264" (or "msmpeg4" for .wmv).
        macro_block_size (int): You probably want to align the size of frames
            to this value to avoid image resizing. Default 16. Can be set
            to 1 to avoid block alignment, though this is not recommended.
        ffmpeg_log_level (str): The ffmpeg logging level. Default "warning".
        ffmpeg_timeout (float): Timeout in seconds to wait for ffmpeg process
            to finish. Value of 0 will wait forever. The time that ffmpeg needs
            depends on CPU speed, compression, and frame size. Default 20.0.
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
    """
    # ----- Input args
    assert isinstance(path, str), "Video path must be a string"
    # The pix_fmt_out yuv420p is the best for the output to work in
    # QuickTime and most other players. These players only support
    # the YUV planar color space with 4:2:0 chroma subsampling for
    # H.264 video. Otherwise, depending on the source, ffmpeg may
    # output to a pixel format that may be incompatible with these
    # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers
    pix_fmt_in = pix_fmt_in or "rgb24"
    pix_fmt_out = pix_fmt_out or "yuv420p"
    fps = fps or 16
    quality = quality or 5  # note: a quality of 0 is coerced to the default 5
    # bitrate, codec, macro_block_size can all be None or ...
    macro_block_size = macro_block_size or 16
    ffmpeg_log_level = ffmpeg_log_level or "warning"
    input_params = input_params or []
    output_params = output_params or []
    floatish = float, int
    if isinstance(size, (tuple, list)):
        assert len(size) == 2, "size must be a 2-tuple"
        assert isinstance(size[0], int) and isinstance(
            size[1], int
        ), "size must be ints"
        sizestr = "{:d}x{:d}".format(*size)
    # elif isinstance(size, str):
    #     assert "x" in size, "size as string must have format NxM"
    #     sizestr = size
    else:
        assert False, "size must be str or tuple"
    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
    assert isinstance(fps, floatish), "fps must be float"
    assert isinstance(quality, floatish), "quality must be float"
    assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
    assert isinstance(macro_block_size, int), "macro_block_size must be int"
    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"
    # ----- Prepare
    # Get parameters
    default_codec = "libx264"
    if path.lower().endswith(".wmv"):
        # This is a safer default codec on windows to get videos that
        # will play in powerpoint and other apps. H264 is not always
        # available on windows.
        default_codec = "msmpeg4"
    codec = codec or default_codec
    # Get command
    cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr]
    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
    cmd += ["-i", "-"]
    cmd += ["-an", "-vcodec", codec, "-pix_fmt", pix_fmt_out]
    # Add fixed bitrate or variable bitrate compression flags
    if bitrate is not None:
        cmd += ["-b:v", str(bitrate)]
    elif quality is not None:  # If None, then we don't add anything
        quality = 1 - quality / 10.0
        if codec == "libx264":
            # crf ranges 0 to 51, 51 being worst.
            quality = int(quality * 51)
            cmd += ["-crf", str(quality)]  # for h264
        else:  # Many codecs accept q:v
            # q:v range can vary, 1-31, 31 being worst
            # But q:v does not always have the same range.
            # May need a way to find range for any codec.
            quality = int(quality * 30) + 1
            cmd += ["-qscale:v", str(quality)]  # for others
    # Note, for most codecs, the image dimensions must be divisible by
    # 16 the default for the macro_block_size is 16. Check if image is
    # divisible, if not have ffmpeg upsize to nearest size and warn
    # user they should correct input image if this is not desired.
    if macro_block_size > 1:
        if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0:
            out_w = size[0]
            out_h = size[1]
            if size[0] % macro_block_size > 0:
                out_w += macro_block_size - (size[0] % macro_block_size)
            if size[1] % macro_block_size > 0:
                out_h += macro_block_size - (size[1] % macro_block_size)
            cmd += ["-vf", "scale={}:{}".format(out_w, out_h)]
            logger.warning(
                "IMAGEIO FFMPEG_WRITER WARNING: input image is not"
                " divisible by macro_block_size={}, resizing from {} "
                "to {} to ensure video compatibility with most codecs "
                "and players. To prevent resizing, make your input "
                "image divisible by the macro_block_size or set the "
                "macro_block_size to 1 (risking incompatibility).".format(
                    macro_block_size, size[:2], (out_w, out_h)
                )
            )
    # Rather than redirect stderr to a pipe, just set minimal
    # output from ffmpeg by default. That way if there are warnings
    # the user will see them.
    cmd += ["-v", ffmpeg_log_level]
    cmd += output_params
    cmd.append(path)
    cmd_str = " ".join(cmd)
    if any(
        [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")]
    ):
        logger.info("RUNNING FFMPEG COMMAND: " + cmd_str)
    # Launch process
    p = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, shell=ISWIN
    )
    # For Windows, set `shell=True` in sp.Popen to prevent popup
    # of a command line window in frozen applications.
    # Note that directing stderr to a pipe on windows will cause ffmpeg
    # to hang if the buffer is not periodically cleared using
    # StreamCatcher or other means.
    # Setting bufsize to 0 or a small value does not seem to have much effect
    # (at least on Windows). I suspect that ffmpeg buffers multiple frames
    # (before encoding in a batch).
    # ----- Write frames
    try:
        # Just keep going until the generator.close() is called (raises GeneratorExit).
        # This could also happen when the generator is deleted somehow.
        nframes = 0
        while True:
            # Get frame
            bb = (yield)
            # Actually, we accept anything that can be written to file.
            # This e.g. allows writing numpy arrays without having to make a copy ...
            # Write
            try:
                p.stdin.write(bb)
            except Exception as err:
                # Show the command and stderr from pipe
                msg = (
                    "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR "
                    "OUTPUT:\n".format(err, cmd_str)
                )
                raise IOError(msg)
            nframes += 1
    except GeneratorExit:
        if nframes == 0:
            logger.warning("No frames have been written; the written video is invalid.")
    finally:
        if p.poll() is None:
            # Ask ffmpeg to quit - and wait for it to finish writing the file.
            # Depending on the frame size and encoding this can take a few
            # seconds (sometimes 10-20). Since a user may get bored and hit
            # Ctrl-C, we wrap this in a try-except.
            waited = False
            try:
                try:
                    p.stdin.close()
                except Exception:  # pragma: no cover
                    pass
                etime = time.time() + ffmpeg_timeout
                # ffmpeg_timeout == 0 means wait forever (see docstring).
                while (not ffmpeg_timeout or time.time() < etime) and p.poll() is None:
                    time.sleep(0.01)
                waited = True
            finally:
                # Grr, we have to kill it
                if p.poll() is None:  # pragma: no cover
                    more = " Consider increasing ffmpeg_timeout." if waited else ""
                    logger.warning("We had to kill ffmpeg to stop it." + more)
                    p.kill()
| 39.4 | 89 | 0.598759 | import sys
import time
import signal
import subprocess
from ._utils import get_ffmpeg_exe, logger
from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs
ISWIN = sys.platform.startswith("win")
exe = None
def _get_exe():
    """Return the ffmpeg executable path, resolving and caching it lazily."""
    global exe
    if exe is None:
        exe = get_ffmpeg_exe()
    return exe
def count_frames_and_secs(path):
    """Return (nframes, nsecs) for the given video file by stream-copying it
    to ffmpeg's null muxer and parsing the final progress line."""
    assert isinstance(path, str), "Video path must be a string"
    cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=ISWIN)
    except subprocess.CalledProcessError as err:
        out = err.output.decode(errors="ignore")
        raise RuntimeError("FFMEG call failed with {}:\n{}".format(err.returncode, out))
    nframes = nsecs = None
    # Scan from the end: the last "frame=..." progress line holds the totals.
    for line in reversed(out.splitlines()):
        if line.startswith(b"frame="):
            line = line.decode(errors="ignore")
            i = line.find("frame=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nframes = int(s)
            i = line.find("time=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nsecs = cvsecs(*s.split(":"))
            return nframes, nsecs
    raise RuntimeError("Could not get number of frames")
def read_frames(path, pix_fmt="rgb24", bpp=3, input_params=None, output_params=None):
    """Generator over the frames of a video file.

    First yields a metadata dict parsed from ffmpeg's header output, then
    yields raw frames as bytes objects of width*height*bpp bytes each.
    """
    assert isinstance(path, str), "Video path must be a string"
    pix_fmt = pix_fmt or "rgb24"
    bpp = bpp or 3
    input_params = input_params or []
    output_params = output_params or []
    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
    assert isinstance(bpp, int), "bpp must be an int"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"
    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]
    cmd = [_get_exe()]
    cmd += input_params + ["-i", path]
    cmd += pre_output_params + output_params + ["-"]
    # Fix: pass the command as a LIST. Joining it into a string broke
    # non-Windows execution (shell=False expects a list) and any path
    # containing spaces; write_frames already passes the list.
    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=ISWIN,
    )
    log_catcher = LogCatcher(p.stderr)
    try:
        # Wait (bounded) for the header text to be collected from stderr.
        etime = time.time() + 10.0
        while (not log_catcher.header) and time.time() < etime:
            time.sleep(0.01)
        if not log_catcher.header:
            err2 = log_catcher.get_text(0.2)
            fmt = "Could not load meta information\n=== stderr ===\n{}"
            raise IOError(fmt.format(err2))
        elif "No such file or directory" in log_catcher.header:
            raise IOError("{} not found! Wrong path?".format(path))
        meta = parse_ffmpeg_header(log_catcher.header)
        yield meta
        w, h = meta["size"]
        framesize = w * h * bpp
        framenr = 0
        while True:
            framenr += 1
            try:
                bb = bytes()
                while len(bb) < framesize:
                    extra_bytes = p.stdout.read(framesize - len(bb))
                    if not extra_bytes:
                        if len(bb) == 0:
                            return  # clean end of stream
                        else:
                            raise RuntimeError(
                                "End of file reached before full frame could be read."
                            )
                    bb += extra_bytes
                yield bb
            except Exception as err:
                err1 = str(err)
                err2 = log_catcher.get_text(0.4)
                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
                raise RuntimeError(fmt.format(framenr, err1, err2))
    finally:
        # Runs when the generator is closed or garbage-collected.
        if p.poll() is None:
            # Ask ffmpeg to quit gracefully by sending "q" on stdin.
            try:
                p.communicate(b"q")
            except Exception as err:  # pragma: no cover
                logger.warning("Error while attempting stop ffmpeg: " + str(err))
            etime = time.time() + 1.5
            while time.time() < etime and p.poll() is None:
                time.sleep(0.01)
            if p.poll() is None:  # pragma: no cover
                logger.warning("We had to kill ffmpeg to stop it.")
                p.kill()
def write_frames(
    path,
    size,
    pix_fmt_in="rgb24",
    pix_fmt_out="yuv420p",
    fps=16,
    quality=5,
    bitrate=None,
    codec=None,
    macro_block_size=16,
    ffmpeg_log_level="warning",
    ffmpeg_timeout=20.0,
    input_params=None,
    output_params=None,
):
    """Generator-coroutine that pipes raw video frames to ffmpeg for encoding.

    Usage: create the generator, advance it once so ffmpeg starts, then
    ``send()`` each frame as a bytes-like object of raw pixel data in the
    ``pix_fmt_in`` format, and finally ``close()`` the generator so ffmpeg
    can finish writing ``path``.

    Parameters
    ----------
    path : str
        Output video file name.
    size : tuple/list of two ints, or str "WxH"
        Width and height of the incoming frames.
    pix_fmt_in : str
        Pixel format of the raw input frames (default "rgb24").
    pix_fmt_out : str
        Pixel format of the encoded video (default "yuv420p").
    fps : float
        Frames per second (default 16).
    quality : float
        Quality on a 1-10 scale, 10 best (default 5); ignored when
        ``bitrate`` is given.
    bitrate : str or None
        Explicit target bitrate (e.g. "400k"); overrides ``quality``.
    codec : str or None
        Video codec; defaults to libx264 (msmpeg4 for .wmv files).
    macro_block_size : int
        Output dimensions are padded up to a multiple of this value
        (default 16); set to 1 to disable resizing.
    ffmpeg_log_level : str
        Value for ffmpeg's ``-v`` option (default "warning").
    ffmpeg_timeout : float
        Seconds to wait for ffmpeg to finish when the generator is closed;
        0 waits indefinitely (default 20).
    input_params, output_params : list of str or None
        Extra ffmpeg arguments placed before / after the input spec.

    Raises
    ------
    IOError
        If a frame cannot be written to the ffmpeg pipe.
    """
    assert isinstance(path, str), "Video path must be a string"
    # Callers may pass None explicitly to mean "use the default".
    # (Fixed: this previously read `fmt_in or "rgb24"` -- a bare expression
    # on an undefined name, so pix_fmt_in=None raised NameError and the
    # default was never applied.)
    pix_fmt_in = pix_fmt_in or "rgb24"
    pix_fmt_out = pix_fmt_out or "yuv420p"
    fps = fps or 16
    quality = quality or 5
    macro_block_size = macro_block_size or 16
    ffmpeg_log_level = ffmpeg_log_level or "warning"
    input_params = input_params or []
    output_params = output_params or []

    floatish = float, int
    # Accept "WxH" strings, as promised by the error message below, by
    # normalizing them to the tuple form used by the rest of the function.
    if isinstance(size, str):
        parts = size.split("x")
        assert len(parts) == 2 and parts[0].isdigit() and parts[1].isdigit(), (
            "size as a string must have the form 'WxH', e.g. '640x480'"
        )
        size = (int(parts[0]), int(parts[1]))
    if isinstance(size, (tuple, list)):
        assert len(size) == 2, "size must be a 2-tuple"
        assert isinstance(size[0], int) and isinstance(
            size[1], int
        ), "size must be ints"
        sizestr = "{:d}x{:d}".format(*size)
    else:
        assert False, "size must be str or tuple"
    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
    assert isinstance(fps, floatish), "fps must be float"
    assert isinstance(quality, floatish), "quality must be float"
    assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
    assert isinstance(macro_block_size, int), "macro_block_size must be int"
    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # Pick a default codec from the container type.
    default_codec = "libx264"
    if path.lower().endswith(".wmv"):
        default_codec = "msmpeg4"
    codec = codec or default_codec

    # Build the ffmpeg command line: raw frames arrive on stdin ("-i -").
    cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr]
    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
    cmd += ["-i", "-"]
    cmd += ["-an", "-vcodec", codec, "-pix_fmt", pix_fmt_out]

    # An explicit bitrate wins over the 1-10 quality scale.
    if bitrate is not None:
        cmd += ["-b:v", str(bitrate)]
    elif quality is not None:
        quality = 1 - quality / 10.0
        if codec == "libx264":
            # crf ranges 0 to 51, 51 being worst.
            quality = int(quality * 51)
            cmd += ["-crf", str(quality)]  # for h264
        else:  # Many codecs accept q:v
            # q:v range can vary, 1-31, 31 being worst
            # But q:v does not always have the same range.
            # May need a way to find range for any codec.
            quality = int(quality * 30) + 1
            cmd += ["-qscale:v", str(quality)]  # for others

    # Note, for most codecs, the image dimensions must be divisible by
    # 16 the default for the macro_block_size is 16. Check if image is
    # divisible, if not have ffmpeg upsize to nearest size and warn
    # user they should correct input image if this is not desired.
    if macro_block_size > 1:
        if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0:
            out_w = size[0]
            out_h = size[1]
            if size[0] % macro_block_size > 0:
                out_w += macro_block_size - (size[0] % macro_block_size)
            if size[1] % macro_block_size > 0:
                out_h += macro_block_size - (size[1] % macro_block_size)
            cmd += ["-vf", "scale={}:{}".format(out_w, out_h)]
            logger.warning(
                "IMAGEIO FFMPEG_WRITER WARNING: input image is not"
                " divisible by macro_block_size={}, resizing from {} "
                "to {} to ensure video compatibility with most codecs "
                "and players. To prevent resizing, make your input "
                "image divisible by the macro_block_size or set the "
                "macro_block_size to 1 (risking incompatibility).".format(
                    macro_block_size, size[:2], (out_w, out_h)
                )
            )

    # Rather than redirect stderr to a pipe, just set minimal
    # output from ffmpeg by default. That way if there are warnings
    # the user will see them.
    cmd += ["-v", ffmpeg_log_level]
    cmd += output_params
    cmd.append(path)
    cmd_str = " ".join(cmd)
    if any(
        [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")]
    ):
        logger.info("RUNNING FFMPEG COMMAND: " + cmd_str)

    # Launch process.
    # For Windows, set `shell=True` in sp.Popen to prevent popup
    # of a command line window in frozen applications.
    # Note that directing stderr to a pipe on windows will cause ffmpeg
    # to hang if the buffer is not periodically cleared using
    # StreamCatcher or other means.
    p = subprocess.Popen(
        cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, shell=ISWIN
    )

    # ----- Write frames
    try:
        # Just keep going until the generator.close() is called (raises
        # GeneratorExit). This could also happen when the generator is
        # deleted somehow.
        nframes = 0
        while True:
            # Get frame; anything writable to a file (e.g. a numpy array's
            # buffer) is accepted, not just bytes, to avoid extra copies.
            bb = (yield)
            try:
                p.stdin.write(bb)
            except Exception as err:
                # Show the command and stderr from pipe
                msg = (
                    "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR "
                    "OUTPUT:\n".format(err, cmd_str)
                )
                raise IOError(msg)
            nframes += 1
    except GeneratorExit:
        if nframes == 0:
            logger.warning("No frames have been written; the written video is invalid.")
    finally:
        if p.poll() is None:
            # Ask ffmpeg to quit - and wait for it to finish writing the file.
            # Depending on the frame size and encoding this can take a few
            # seconds (sometimes 10-20). Since a user may get bored and hit
            # Ctrl-C, we wrap this in a try-except.
            waited = False
            try:
                try:
                    p.stdin.close()
                except Exception:  # pragma: no cover
                    pass
                etime = time.time() + ffmpeg_timeout
                while (not ffmpeg_timeout or time.time() < etime) and p.poll() is None:
                    time.sleep(0.01)
                waited = True
            finally:
                # Grr, we have to kill it
                if p.poll() is None:  # pragma: no cover
                    more = " Consider increasing ffmpeg_timeout." if waited else ""
                    logger.warning("We had to kill ffmpeg to stop it." + more)
                    p.kill()
| true | true |
f714592c49e276e7f9b6598977e5a6108553973c | 1,014 | py | Python | django_admin_demo/urls.py | noahzaozao/django_admin_demo | 631010bb8cd14c8ccf48b46f154d78c2e7b5887a | [
"Apache-2.0"
] | null | null | null | django_admin_demo/urls.py | noahzaozao/django_admin_demo | 631010bb8cd14c8ccf48b46f154d78c2e7b5887a | [
"Apache-2.0"
] | null | null | null | django_admin_demo/urls.py | noahzaozao/django_admin_demo | 631010bb8cd14c8ccf48b46f154d78c2e7b5887a | [
"Apache-2.0"
] | null | null | null | """django_admin_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from web.views import APIUserSearchView
# URL routing table: the Django admin plus the user-search API endpoint;
# uploaded media files are served straight from MEDIA_ROOT (a development
# -style setup -- production would serve media via the web server).
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # NOTE(review): this pattern has no '^' anchor, so it matches
    # "api/user/search" appearing anywhere in the request path -- confirm
    # whether it should be r'^api/user/search'.
    url(r'api/user/search', APIUserSearchView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.214286 | 79 | 0.731755 | from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from web.views import APIUserSearchView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'api/user/search', APIUserSearchView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f7145968a1bf58bef88ec9f77fbf6c48708480e2 | 1,067 | py | Python | wtools/plotting.py | DrAuxin/WestpaTools | 4e236e0a3d65504d1937260316a4a5c6f39aa610 | [
"BSD-3-Clause"
] | 1 | 2020-05-18T15:58:17.000Z | 2020-05-18T15:58:17.000Z | wtools/plotting.py | DrAuxin/WestpaTools | 4e236e0a3d65504d1937260316a4a5c6f39aa610 | [
"BSD-3-Clause"
] | null | null | null | wtools/plotting.py | DrAuxin/WestpaTools | 4e236e0a3d65504d1937260316a4a5c6f39aa610 | [
"BSD-3-Clause"
] | null | null | null | import h5py
import numpy
import matplotlib.pyplot as plt
def plotflux(h5file, state=1):
    """Plot the evolution of the mean flux into a target state.

    Reads the ``target_flux_evolution`` dataset from a ``direct.h5`` file
    and plots the expected flux per weighted-ensemble iteration in a new
    matplotlib window.

    Parameters
    ----------
    h5file : dict-like
        The user's HDF5 file loaded with ``loadh5``.
    state : int
        One-based index of the target state whose entering flux is plotted.

    Returns
    -------
    None
        The flux-evolution plot is shown in a separate window.

    Examples
    --------
    >>> h5file = loadh5("west.h5")
    >>> plotflux(h5file, 1)
    """
    flux_per_iteration = h5file['target_flux_evolution']['expected', :, state - 1]
    iteration_numbers = numpy.arange(1, len(flux_per_iteration) + 1, 1)

    figure, axes = plt.subplots()
    axes.plot(iteration_numbers, flux_per_iteration, linewidth=3)
    axes.set_xlabel('WE Iteration', fontsize=24)
    axes.set_ylabel('Mean Flux', fontsize=24)
    # Scientific notation keeps small flux values readable on the y axis.
    axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    axes.tick_params(labelsize=22)
    figure.tight_layout()
    plt.show()
| 26.675 | 85 | 0.615745 | import h5py
import numpy
import matplotlib.pyplot as plt
def plotflux(h5file, state=1):
fluxes = h5file['target_flux_evolution']['expected',:,state-1]
iterations = numpy.arange(1,len(fluxes)+1,1)
fig, ax = plt.subplots()
ax.plot(iterations,fluxes, linewidth=3)
ax.set_xlabel('WE Iteration', fontsize=24)
ax.set_ylabel('Mean Flux', fontsize=24)
ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax.tick_params(labelsize=22)
fig.tight_layout()
plt.show()
| true | true |
f7145aa2447443687be6df3402e6c85c14e2707b | 15,668 | py | Python | experiments/custom_agents_opt.py | anonips/-MDP-Playground | 74431f98c210830a93a1bc83fcdcb95bf1644696 | [
"Apache-2.0"
] | 2 | 2019-09-18T14:43:40.000Z | 2021-02-23T18:46:50.000Z | experiments/custom_agents_opt.py | anonips/-MDP-Playground | 74431f98c210830a93a1bc83fcdcb95bf1644696 | [
"Apache-2.0"
] | null | null | null | experiments/custom_agents_opt.py | anonips/-MDP-Playground | 74431f98c210830a93a1bc83fcdcb95bf1644696 | [
"Apache-2.0"
] | 1 | 2020-02-14T13:59:15.000Z | 2020-02-14T13:59:15.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
# yapf: disable
# __sphinx_doc_begin__
class RandomAgent(Trainer):
    """Trainer that samples uniformly random actions and never learns.

    Serves as a sanity-check baseline: each training iteration simply runs
    ``rollouts_per_iteration`` full episodes with random actions and reports
    the mean episode return.
    """

    _name = "RandomAgent"
    _default_config = with_common_config({
        "rollouts_per_iteration": 10,
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])

    @override(Trainer)
    def _train(self):
        episode_returns = []
        total_steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            self.env.reset()
            episode_return = 0.0
            terminal = False
            while not terminal:
                # Sample a fresh random action every step; the observation
                # is irrelevant for this policy.
                random_action = self.env.action_space.sample()
                _, step_reward, terminal, _ = self.env.step(random_action)
                episode_return += step_reward
                total_steps += 1
            episode_returns.append(episode_return)
        return {
            "episode_reward_mean": np.mean(episode_returns),
            "timesteps_this_iter": total_steps,
        }
class VIAgent(Trainer):
    """Tabular value-iteration Trainer for discrete RLToy environments.

    Sweeps Bellman-optimality backups over the whole state space until the
    tracked value change drops below ``tolerance``, then evaluates the
    resulting greedy policy with a few fixed-length rollouts.

    #TODO Make it Generalized Policy Iteration.
    """
    _name = "VIAgent"
    _default_config = with_common_config({
        "tolerance": 0.01,  # convergence threshold on the tracked value change
        "discount_factor": 0.5,  # gamma used in the Bellman backups
        "rollouts_per_iteration": 10,
        "episode_length": 200,  # evaluation rollouts are fixed-length
    })

    @override(Trainer)
    def _init(self, config, env_creator):
        self.env = env_creator(config["env_config"])
        # Tabular value function and greedy policy, indexed by state id.
        self.V = np.zeros(self.env.observation_space.n)
        self.policy = np.zeros(self.env.observation_space.n, dtype=int)
        self.policy[:] = -1 #IMP: -1 marks "no action chosen yet" -- it cannot collide with a valid action index

    @override(Trainer)
    def _train(self):
        max_diff = np.inf  # NOTE(review): recomputed from scratch on every _train() call; could be kept as state
        state_space_size = self.env.observation_space.n
        gamma = self.config["discount_factor"]
        total_iterations = 0
        while max_diff > self.config["tolerance"]:
            total_iterations += 1
            # In-place (Gauss-Seidel style) sweep: later states in this sweep
            # already see the updated values of earlier states.
            for s in range(state_space_size):
                self.V_old = self.V.copy()
                # NOTE(review): V_old is copied inside the per-state loop, so
                # max_diff below only measures the change made by the *last*
                # state's backup of the sweep, not the whole sweep -- confirm
                # this convergence test is intended.
                # env.R(s, a) / env.P(s, a) are assumed to be the reward and
                # (deterministic) successor-state callables of RLToyEnv.
                self.policy[s] = np.argmax([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])
                self.V[s] = np.max([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)]) # R is an arbitrary callable, hence the explicit comprehension over actions
            max_diff = np.max(np.absolute(self.V_old - self.V))
        print("Total iterations:", total_iterations)
        # Evaluate the greedy policy with fixed-length rollouts.
        rewards = []
        steps = 0
        for _ in range(self.config["rollouts_per_iteration"]):
            obs = self.env.reset()
            done = False
            reward = 0.0
            for _ in range(self.config["episode_length"]):
                # NOTE(review): `done` is assigned but never checked, so each
                # rollout always runs the full episode_length, even past a
                # terminal state -- confirm this is intended.
                action = self.policy[obs]
                obs, r, done, info = self.env.step(action)
                reward += r
                steps += 1
            rewards.append(reward)
        return {
            "episode_reward_mean": np.mean(rewards),
            "timesteps_this_iter": steps,
        }
import ray
from ray import tune
from ray.rllib.utils.seed import seed as rllib_seed
import rl_toy
from rl_toy.envs import RLToyEnv
from ray.tune.registry import register_env
# Make the toy MDP available to Tune/RLlib under the name "RLToy-v0".
register_env("RLToy-v0", lambda config: RLToyEnv(config))

from ray.rllib.models.preprocessors import OneHotPreprocessor
from ray.rllib.models import ModelCatalog
# One-hot encode the discrete observations before they reach the network.
ModelCatalog.register_custom_preprocessor("ohe", OneHotPreprocessor)


#rllib_seed(0, 0, 0) ####IMP Doesn't work due to multi-process I think; so use config["seed"]
ray.init()

# Environment grid: each dimension is kept at a single setting for this
# optimizer-focused sweep (larger example grids are noted in the comments).
state_space_sizes = [8]  # e.g. [8, 10, 12, 14] or [2**i for i in range(1, 6)]
action_space_sizes = [8]  # e.g. [2, 4, 8, 16]
delays = [0]  # e.g. [0] + [2**i for i in range(4)]
sequence_lengths = [1]  # e.g. [1, 2, 3]
reward_densities = [0.25]  # e.g. np.linspace(0.0, 1.0, num=5)
terminal_state_densities = [0.25]  # e.g. np.linspace(0.1, 1.0, num=5)
algorithms = ["DQN"]

# Candidate agent hyperparameter grids. Only learning_rates and
# adam_epsilons are actually swept in the loop below; the remaining lists
# are kept for reference / future sweeps, with everything else held fixed
# (learning_starts, target_network_update_freq, double_dqn, fcnet_hiddens,
# fcnet_activation, use_lstm, sample/train batch sizes, exploration
# schedule, buffer_size).
num_layerss = [1, 2, 3, 4]
layer_widths = [128, 256, 512]
fcnet_activations = ["tanh", "relu", "sigmoid"]
learning_startss = [500, 1000, 2000, 4000, 8000]
target_network_update_freqs = [8, 80, 800]
double_dqn = [False, True]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
adam_epsilons = [1e-3, 1e-4, 1e-5, 1e-6]  # e.g. [1e-1, 1e-4, 1e-7, 1e-10]

print('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density,'
'terminal_state_density ')
print(algorithms, state_space_sizes, action_space_sizes, delays, sequence_lengths, reward_densities, terminal_state_densities)

import time
start = time.time()

print(algorithms, state_space_sizes, action_space_sizes, delays,
    sequence_lengths, reward_densities, terminal_state_densities)
def on_train_result(info):
    """Ray Tune ``on_train_result`` callback: append one row of stats to CSV.

    Pulls the experiment configuration and the latest training metrics out
    of the ``info`` payload that RLlib passes to the callback and appends
    them to ``./rl_stats_temp_opt.csv``. A header comment line is written
    before every data row (kept as-is for compatibility with the existing
    file format and its downstream parser).

    Parameters
    ----------
    info : dict
        Callback payload with keys ``"trainer"`` (the Trainer instance,
        whose ``_name`` identifies the algorithm) and ``"result"`` (the
        training-iteration result dict, which also carries the full config
        under ``info["result"]["config"]``). ``info["result"]["callback_ok"]``
        is set to True before returning, as RLlib expects.
    """
    algorithm = info["trainer"]._name
    env_config = info["result"]["config"]["env_config"]
    state_space_size = env_config["state_space_size"]
    action_space_size = env_config["action_space_size"]
    delay = env_config["delay"]
    sequence_length = env_config["sequence_length"]
    reward_density = env_config["reward_density"]
    terminal_state_density = env_config["terminal_state_density"]

    fcnet_hiddens = info["result"]["config"]["model"]["fcnet_hiddens"]
    num_layers = len(fcnet_hiddens)
    layer_width = fcnet_hiddens[0]  # assumes all hidden layers share one width -- TODO confirm
    lr = info["result"]["config"]["lr"]
    adam_epsilon = info["result"]["config"]["adam_epsilon"]

    timesteps_total = info["result"]["timesteps_total"]  # result also carries episodes_total / training_iteration
    episode_reward_mean = info["result"]["episode_reward_mean"]  # also available: ..._max and ..._min
    episode_len_mean = info["result"]["episode_len_mean"]

    # Context manager guarantees the file is flushed and closed even if a
    # write fails (the original opened/closed by hand). Output path is
    # hardcoded; the strings below are kept byte-identical so the log
    # format does not change.
    with open('./rl_stats_temp_opt.csv', 'a') as fout:
        fout.write('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density, '
                   'terminal_state_density, num_layers, layer_width, lr, adam_epsilon,\n' + str(algorithm) + ' ' + str(state_space_size) +
                   ' ' + str(action_space_size) + ' ' + str(delay) + ' ' + str(sequence_length)
                   + ' ' + str(reward_density) + ' ' + str(terminal_state_density) + ' ')
        # Written every training iteration: keeps the file current at the
        # cost of a little extra I/O per iteration.
        fout.write(str(num_layers) + ' ' + str(layer_width) + ' ' + str(lr) + ' ' + str(adam_epsilon) + ' ' + str(timesteps_total) + ' ' + str(episode_reward_mean) +
                   ' ' + str(episode_len_mean) + '\n')

    info["result"]["callback_ok"] = True
# tune.run(
# RandomAgent,
# stop={
# "timesteps_total": 20000,
# },
# config={
# "rollouts_per_iteration": 10,
# "env": "RLToy-v0",
# "env_config": {
# 'state_space_type': 'discrete',
# 'action_space_type': 'discrete',
# 'state_space_size': 16,
# 'action_space_size': 16,
# 'generate_random_mdp': True,
# 'delay': 6,
# 'sequence_length': 1,
# 'reward_density': 0.25,
# 'terminal_state_density': 0.25
# },
# },
# )
# tune.run(
# VIAgent,
# stop={
# "timesteps_total": 20000,
# },
# config={
# "tolerance": 0.01,
# "discount_factor": 0.99,
# "rollouts_per_iteration": 10,
# "env": "RLToy-v0",
# "env_config": {
# 'state_space_type': 'discrete',
# 'action_space_type': 'discrete',
# 'state_space_size': 10,
# 'action_space_size': 10,
# 'generate_random_mdp': True,
# 'delay': 0,
# 'sequence_length': 1,
# 'reward_density': 0.25,
# 'terminal_state_density': 0.25
# },
# },
# )
# Exhaustive grid search: one blocking tune.run() per combination. With the
# grids defined above, only lr and adam_epsilon actually vary (the
# environment grids are singletons), so this runs 5 x 4 = 20 experiments.
for algorithm in algorithms: #TODO each one has different config_spaces
    for state_space_size in state_space_sizes:
        for action_space_size in action_space_sizes:
            for delay in delays:
                for sequence_length in sequence_lengths:
                    for reward_density in reward_densities:
                        for terminal_state_density in terminal_state_densities:
                            for lr in learning_rates:
                                for adam_epsilon in adam_epsilons:
                                    tune.run(
                                        algorithm,
                                        stop={
                                            "timesteps_total": 20000,
                                        },
                                        config={
                                            # The two swept optimizer hyperparameters:
                                            "adam_epsilon": adam_epsilon,
                                            "lr": lr,  # could also be tune.grid_search([1e-2, 1e-4, 1e-6])
                                            "beta_annealing_fraction": 1.0,
                                            "buffer_size": 1000000,
                                            "double_q": False,
                                            "dueling": False,
                                            "env": "RLToy-v0",
                                            "env_config": {
                                                'seed': 0, #seed
                                                'state_space_type': 'discrete',
                                                'action_space_type': 'discrete',
                                                'state_space_size': state_space_size,
                                                'action_space_size': action_space_size,
                                                'generate_random_mdp': True,
                                                'delay': delay,
                                                'sequence_length': sequence_length,
                                                'reward_density': reward_density,
                                                'terminal_state_density': terminal_state_density,
                                                'repeats_in_sequences': False,
                                                'reward_unit': 1.0,
                                                'make_denser': False,
                                                'completely_connected': True
                                            },
                                            "model": {
                                                "fcnet_hiddens": [256, 256],
                                                # "ohe" was registered with ModelCatalog above.
                                                "custom_preprocessor": "ohe",
                                                "custom_options": {},  # extra options to pass to your preprocessor
                                                "fcnet_activation": "tanh",
                                                "use_lstm": False,
                                                "max_seq_len": 20,
                                                "lstm_cell_size": 256,
                                                "lstm_use_prev_action_reward": False,
                                            },
                                            "exploration_final_eps": 0.01,
                                            "exploration_fraction": 0.1,
                                            "final_prioritized_replay_beta": 1.0,
                                            "hiddens": None,
                                            "learning_starts": 1000,
                                            "n_step": 1,
                                            "noisy": False,
                                            "num_atoms": 1,
                                            "prioritized_replay": False,
                                            "prioritized_replay_alpha": 0.5,
                                            "sample_batch_size": 4,
                                            "schedule_max_timesteps": 20000,
                                            "target_network_update_freq": 800,
                                            "timesteps_per_iteration": 100,
                                            "train_batch_size": 32,
                                            "callbacks": {
                                                # Only the on_train_result hook is used, to append
                                                # per-iteration stats to the CSV log (see above).
                                                "on_train_result": tune.function(on_train_result),
                                            },
                                        },
                                        # return_trials=True  # and capture: trials = tune.run(...)
                                    )

end = time.time()
print("No. of seconds to run:", end - start)
| 44.511364 | 254 | 0.524509 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.utils.annotations import override
class RandomAgent(Trainer):
_name = "RandomAgent"
_default_config = with_common_config({
"rollouts_per_iteration": 10,
})
@override(Trainer)
def _init(self, config, env_creator):
self.env = env_creator(config["env_config"])
@override(Trainer)
def _train(self):
rewards = []
steps = 0
for _ in range(self.config["rollouts_per_iteration"]):
obs = self.env.reset()
done = False
reward = 0.0
while not done:
action = self.env.action_space.sample()
obs, r, done, info = self.env.step(action)
reward += r
steps += 1
rewards.append(reward)
return {
"episode_reward_mean": np.mean(rewards),
"timesteps_this_iter": steps,
}
class VIAgent(Trainer):
_name = "VIAgent"
_default_config = with_common_config({
"tolerance": 0.01,
"discount_factor": 0.5,
"rollouts_per_iteration": 10,
"episode_length": 200,
})
@override(Trainer)
def _init(self, config, env_creator):
self.env = env_creator(config["env_config"])
self.V = np.zeros(self.env.observation_space.n)
self.policy = np.zeros(self.env.observation_space.n, dtype=int)
self.policy[:] = -1 _diff = np.inf
state_space_size = self.env.observation_space.n
gamma = self.config["discount_factor"]
total_iterations = 0
while max_diff > self.config["tolerance"]:
total_iterations += 1
for s in range(state_space_size):
# print("self.V[:]", s, max_diff, self.V, [self.env.R(s, a) for a in range(self.env.action_space.n)], self.policy[s])
self.V_old = self.V.copy() # Is this asynchronous? V_old should be held constant for all states in the for loop?
# print([self.env.R(s, a) for a in range(self.env.action_space.n)], [gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)], [self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])
self.policy[s] = np.argmax([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)])
self.V[s] = np.max([self.env.R(s, a) + gamma * self.V[self.env.P(s, a)] for a in range(self.env.action_space.n)]) # We want R to be a callable function, so I guess we have to keep a for loop here??
# print("self.V, self.V_old, self.policy[s]", self.V, self.V_old, self.policy[s], self.env.P(s, self.policy[s]))
max_diff = np.max(np.absolute(self.V_old - self.V))
# import time
# time.sleep(2)
# for s in range(state_space_size):
# print("FINAL self.V[:]", s, max_diff, self.V[:], [self.env.R(s, a) for a in range(self.env.action_space.n)])
print("Total iterations:", total_iterations)
rewards = []
steps = 0
for _ in range(self.config["rollouts_per_iteration"]):
obs = self.env.reset()
done = False
reward = 0.0
for _ in range(self.config["episode_length"]):
action = self.policy[obs]
obs, r, done, info = self.env.step(action)
reward += r
steps += 1
rewards.append(reward)
return {
"episode_reward_mean": np.mean(rewards),
"timesteps_this_iter": steps,
}
import ray
from ray import tune
from ray.rllib.utils.seed import seed as rllib_seed
import rl_toy
from rl_toy.envs import RLToyEnv
from ray.tune.registry import register_env
register_env("RLToy-v0", lambda config: RLToyEnv(config))
from ray.rllib.models.preprocessors import OneHotPreprocessor
from ray.rllib.models import ModelCatalog
ModelCatalog.register_custom_preprocessor("ohe", OneHotPreprocessor)
#rllib_seed(0, 0, 0) ####IMP Doesn't work due to multi-process I think; so use config["seed"]
ray.init()
anh", "relu", "sigmoid"]
learning_startss = [500, 1000, 2000, 4000, 8000]
target_network_update_freqs = [8, 80, 800]
double_dqn = [False, True]
learning_rates = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
adam_epsilons = [1e-3, 1e-4, 1e-5, 1e-6]
print('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density,'
'terminal_state_density ')
print(algorithms, state_space_sizes, action_space_sizes, delays, sequence_lengths, reward_densities, terminal_state_densities)
time
start = time.time()
print(algorithms, state_space_sizes, action_space_sizes, delays,
sequence_lengths, reward_densities, terminal_state_densities)
def on_train_result(info):
algorithm = info["trainer"]._name
state_space_size = info["result"]["config"]["env_config"]["state_space_size"]
action_space_size = info["result"]["config"]["env_config"]["action_space_size"]
delay = info["result"]["config"]["env_config"]["delay"]
sequence_length = info["result"]["config"]["env_config"]["sequence_length"]
reward_density = info["result"]["config"]["env_config"]["reward_density"]
terminal_state_density = info["result"]["config"]["env_config"]["terminal_state_density"]
fcnet_hiddens = info["result"]["config"]["model"]["fcnet_hiddens"]
num_layers = len(fcnet_hiddens)
layer_width = fcnet_hiddens[0]
lr = info["result"]["config"]["lr"]
adam_epsilon = info["result"]["config"]["adam_epsilon"]
timesteps_total = info["result"]["timesteps_total"]
episode_reward_mean = info["result"]["episode_reward_mean"]
episode_len_mean = info["result"]["episode_len_mean"]
fout = open('./rl_stats_temp_opt.csv', 'a')
fout.write('# Algorithm, state_space_size, action_space_size, delay, sequence_length, reward_density, '
'terminal_state_density, num_layers, layer_width, lr, adam_epsilon,\n' + str(algorithm) + ' ' + str(state_space_size) +
' ' + str(action_space_size) + ' ' + str(delay) + ' ' + str(sequence_length)
+ ' ' + str(reward_density) + ' ' + str(terminal_state_density) + ' ')
fout.write(str(num_layers) + ' ' + str(layer_width) + ' ' + str(lr) + ' ' + str(adam_epsilon) + ' ' + str(timesteps_total) + ' ' + str(episode_reward_mean) +
' ' + str(episode_len_mean) + '\n')
fout.close()
info["result"]["callback_ok"] = True
for algorithm in algorithms:
for state_space_size in state_space_sizes:
for action_space_size in action_space_sizes:
for delay in delays:
for sequence_length in sequence_lengths:
for reward_density in reward_densities:
for terminal_state_density in terminal_state_densities:
for lr in learning_rates:
for adam_epsilon in adam_epsilons:
tune.run(
algorithm,
stop={
"timesteps_total": 20000,
},
config={
"adam_epsilon": adam_epsilon,
"lr": lr,
"beta_annealing_fraction": 1.0,
"buffer_size": 1000000,
"double_q": False,
"dueling": False,
"env": "RLToy-v0",
"env_config": {
'seed': 0,
'state_space_type': 'discrete',
'action_space_type': 'discrete',
'state_space_size': state_space_size,
'action_space_size': action_space_size,
'generate_random_mdp': True,
'delay': delay,
'sequence_length': sequence_length,
'reward_density': reward_density,
'terminal_state_density': terminal_state_density,
'repeats_in_sequences': False,
'reward_unit': 1.0,
'make_denser': False,
'completely_connected': True
},
"model": {
"fcnet_hiddens": [256, 256],
"custom_preprocessor": "ohe",
"custom_options": {},
"fcnet_activation": "tanh",
"use_lstm": False,
"max_seq_len": 20,
"lstm_cell_size": 256,
"lstm_use_prev_action_reward": False,
},
"exploration_final_eps": 0.01,
"exploration_fraction": 0.1,
"final_prioritized_replay_beta": 1.0,
"hiddens": None,
"learning_starts": 1000,
"n_step": 1,
"noisy": False,
"num_atoms": 1,
"prioritized_replay": False,
"prioritized_replay_alpha": 0.5,
"sample_batch_size": 4,
"schedule_max_timesteps": 20000,
"target_network_update_freq": 800,
"timesteps_per_iteration": 100,
"train_batch_size": 32,
"callbacks": {
"on_train_result": tune.function(on_train_result),
},
},
)
end = time.time()
print("No. of seconds to run:", end - start)
| true | true |
f7145c199e0e4cfca77fa9ac99b9dea5fb703b95 | 1,756 | py | Python | alipay/aop/api/domain/AnswerModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AnswerModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AnswerModel.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AnswerModel(object):
    """Answer submitted for a questionnaire item (Alipay OpenAPI model).

    Carries the item being answered (``item_id``), the option chosen for
    it (``option_id``), and an optional free-form payload (``extra``).
    """

    def __init__(self):
        self._extra = None
        self._item_id = None
        self._option_id = None

    @property
    def extra(self):
        return self._extra

    @extra.setter
    def extra(self, value):
        self._extra = value

    @property
    def item_id(self):
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def option_id(self):
        return self._option_id

    @option_id.setter
    def option_id(self, value):
        self._option_id = value

    def to_alipay_dict(self):
        """Serialize the populated (truthy) fields to a plain dict."""
        params = dict()
        for field in ('extra', 'item_id', 'option_id'):
            value = getattr(self, field)
            if not value:
                # Unset (or otherwise falsy) fields are omitted, matching
                # the behaviour of the generated Alipay SDK models.
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AnswerModel from a response dict; None for empty input."""
        if not d:
            return None
        o = AnswerModel()
        for field in ('extra', 'item_id', 'option_id'):
            if field in d:
                setattr(o, field, d[field])
        return o
| 24.732394 | 69 | 0.555809 |
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class AnswerModel(object):
    """Model for a single answer payload exchanged with the alipay API.

    Holds three optional wire fields -- ``extra``, ``item_id`` and
    ``option_id`` -- and converts to/from plain dicts.
    """

    def __init__(self):
        # All fields start unset; they are populated via the property setters.
        self._extra = None
        self._item_id = None
        self._option_id = None

    @property
    def extra(self):
        # Free-form payload serialized under the 'extra' key.
        return self._extra

    @extra.setter
    def extra(self, value):
        self._extra = value

    @property
    def item_id(self):
        # Value serialized under the 'item_id' key.
        return self._item_id

    @item_id.setter
    def item_id(self, value):
        self._item_id = value

    @property
    def option_id(self):
        # Value serialized under the 'option_id' key.
        return self._option_id

    @option_id.setter
    def option_id(self, value):
        self._option_id = value

    def to_alipay_dict(self):
        """Render this model as a plain dict for the alipay API.

        Falsy attributes are omitted; nested models that expose a
        ``to_alipay_dict`` method are serialized recursively.
        """
        params = dict()
        for key in ('extra', 'item_id', 'option_id'):
            value = getattr(self, key)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[key] = value.to_alipay_dict()
            else:
                params[key] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an AnswerModel from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AnswerModel()
        for key in ('extra', 'item_id', 'option_id'):
            if key in d:
                setattr(o, key, d[key])
        return o
| true | true |
f7145c94fe95283ff3e26d0aa9e1a5bdf965d2fc | 52,866 | py | Python | sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/hybridnetwork/v20210501/outputs.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'CustomProfileResponse',
'DataDiskResponse',
'ImageReferenceResponse',
'LinuxConfigurationResponse',
'NetworkFunctionRoleConfigurationResponse',
'NetworkFunctionTemplateResponse',
'NetworkFunctionUserConfigurationResponse',
'NetworkFunctionUserConfigurationResponseOsProfile',
'NetworkInterfaceIPConfigurationResponse',
'NetworkInterfaceResponse',
'OsDiskResponse',
'OsProfileResponse',
'SshConfigurationResponse',
'SshPublicKeyResponse',
'StorageProfileResponse',
'SubResourceResponse',
'SystemDataResponse',
]
@pulumi.output_type
class CustomProfileResponse(dict):
    """
    Custom settings applied to the virtual machine.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {"metadataConfigurationPath": "metadata_configuration_path"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CustomProfileResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 metadata_configuration_path: Optional[str] = None):
        """
        Custom settings applied to the virtual machine.
        :param str metadata_configuration_path: Path for metadata configuration.
        """
        if metadata_configuration_path is None:
            return
        pulumi.set(__self__, "metadata_configuration_path", metadata_configuration_path)

    @property
    @pulumi.getter(name="metadataConfigurationPath")
    def metadata_configuration_path(self) -> Optional[str]:
        """
        Path for metadata configuration.
        """
        return pulumi.get(self, "metadata_configuration_path")
@pulumi.output_type
class DataDiskResponse(dict):
    """
    Describes a data disk used by the virtual machine. For background on disks,
    see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "createOption": "create_option",
            "diskSizeGB": "disk_size_gb",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DataDiskResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 create_option: Optional[str] = None,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None):
        """
        Describes a data disk used by the virtual machine.
        :param str create_option: Specifies how the virtual machine should be created.
        :param int disk_size_gb: Size of an empty disk in gigabytes; overrides the disk size from a virtual machine image.
        :param str name: The name of data disk.
        """
        # Only store fields that were provided.
        for _prop, _value in (("create_option", create_option),
                              ("disk_size_gb", disk_size_gb),
                              ("name", name)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[str]:
        """
        Specifies how the virtual machine should be created.
        """
        return pulumi.get(self, "create_option")

    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        """
        Size of an empty disk in gigabytes; overrides the disk size from a virtual machine image.
        """
        return pulumi.get(self, "disk_size_gb")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of data disk.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class ImageReferenceResponse(dict):
    """
    The image reference properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {"exactVersion": "exact_version"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ImageReferenceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 exact_version: Optional[str] = None,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The image reference properties.
        :param str exact_version: The exact version (in decimal numbers) of the image used to create the virtual machine.
        :param str offer: The offer of the image used to create the virtual machine.
        :param str publisher: The image publisher.
        :param str sku: The image SKU.
        :param str version: Version of the image used to create the virtual machine, as Major.Minor.Build or 'latest'. 'latest' picks the newest version available at deploy time; the VM is not auto-updated afterwards.
        """
        # Only store fields that were provided.
        for _prop, _value in (("exact_version", exact_version),
                              ("offer", offer),
                              ("publisher", publisher),
                              ("sku", sku),
                              ("version", version)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="exactVersion")
    def exact_version(self) -> Optional[str]:
        """
        The exact version (in decimal numbers) of the image used to create the virtual machine.
        """
        return pulumi.get(self, "exact_version")

    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        """
        The offer of the image used to create the virtual machine.
        """
        return pulumi.get(self, "offer")

    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        The image publisher.
        """
        return pulumi.get(self, "publisher")

    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        """
        The image SKU.
        """
        return pulumi.get(self, "sku")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        Version of the image used to create the virtual machine, as Major.Minor.Build or 'latest'. 'latest' picks the newest version available at deploy time; the VM is not auto-updated afterwards.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class LinuxConfigurationResponse(dict):
    """
    Linux operating system settings on the virtual machine.
    """
    def __init__(__self__, *,
                 ssh: Optional['outputs.SshConfigurationResponse'] = None):
        """
        Linux operating system settings on the virtual machine.
        :param 'SshConfigurationResponse' ssh: The ssh key configuration for a Linux OS.
        """
        if ssh is None:
            return
        pulumi.set(__self__, "ssh", ssh)

    @property
    @pulumi.getter
    def ssh(self) -> Optional['outputs.SshConfigurationResponse']:
        """
        The ssh key configuration for a Linux OS.
        """
        return pulumi.get(self, "ssh")
@pulumi.output_type
class NetworkFunctionRoleConfigurationResponse(dict):
    """
    Network function role configuration.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "customProfile": "custom_profile",
            "networkInterfaces": "network_interfaces",
            "osProfile": "os_profile",
            "roleName": "role_name",
            "roleType": "role_type",
            "storageProfile": "storage_profile",
            "userDataParameters": "user_data_parameters",
            "userDataTemplate": "user_data_template",
            "virtualMachineSize": "virtual_machine_size",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionRoleConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 custom_profile: Optional['outputs.CustomProfileResponse'] = None,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.OsProfileResponse'] = None,
                 role_name: Optional[str] = None,
                 role_type: Optional[str] = None,
                 storage_profile: Optional['outputs.StorageProfileResponse'] = None,
                 user_data_parameters: Optional[Any] = None,
                 user_data_template: Optional[Any] = None,
                 virtual_machine_size: Optional[str] = None):
        """
        Network function role configuration.
        :param 'CustomProfileResponse' custom_profile: Custom settings for the virtual machine.
        :param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interface configurations.
        :param 'OsProfileResponse' os_profile: Operating system settings for the role instance; may be updated during deployment of the network function.
        :param str role_name: The name of the network function role.
        :param str role_type: Role type.
        :param 'StorageProfileResponse' storage_profile: Storage settings for the virtual machine disks.
        :param Any user_data_parameters: User parameters for customers; their format has to match the provided user data template.
        :param Any user_data_template: JSON-schema template describing the format and data type of the user data parameters.
        :param str virtual_machine_size: The size of the virtual machine.
        """
        # Only store fields that were provided.
        for _prop, _value in (
                ("custom_profile", custom_profile),
                ("network_interfaces", network_interfaces),
                ("os_profile", os_profile),
                ("role_name", role_name),
                ("role_type", role_type),
                ("storage_profile", storage_profile),
                ("user_data_parameters", user_data_parameters),
                ("user_data_template", user_data_template),
                ("virtual_machine_size", virtual_machine_size)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="customProfile")
    def custom_profile(self) -> Optional['outputs.CustomProfileResponse']:
        """
        Custom settings for the virtual machine.
        """
        return pulumi.get(self, "custom_profile")

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        """
        The network interface configurations.
        """
        return pulumi.get(self, "network_interfaces")

    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OsProfileResponse']:
        """
        Operating system settings for the role instance; may be updated during deployment of the network function.
        """
        return pulumi.get(self, "os_profile")

    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        """
        The name of the network function role.
        """
        return pulumi.get(self, "role_name")

    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> Optional[str]:
        """
        Role type.
        """
        return pulumi.get(self, "role_type")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """
        Storage settings for the virtual machine disks.
        """
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        """
        User parameters for customers; their format has to match the provided user data template.
        """
        return pulumi.get(self, "user_data_parameters")

    @property
    @pulumi.getter(name="userDataTemplate")
    def user_data_template(self) -> Optional[Any]:
        """
        JSON-schema template describing the format and data type of the user data parameters.
        """
        return pulumi.get(self, "user_data_template")

    @property
    @pulumi.getter(name="virtualMachineSize")
    def virtual_machine_size(self) -> Optional[str]:
        """
        The size of the virtual machine.
        """
        return pulumi.get(self, "virtual_machine_size")
@pulumi.output_type
class NetworkFunctionTemplateResponse(dict):
    """
    The network function template.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {"networkFunctionRoleConfigurations": "network_function_role_configurations"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionTemplateResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 network_function_role_configurations: Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']] = None):
        """
        The network function template.
        :param Sequence['NetworkFunctionRoleConfigurationResponse'] network_function_role_configurations: An array of network function role definitions.
        """
        if network_function_role_configurations is None:
            return
        pulumi.set(__self__, "network_function_role_configurations", network_function_role_configurations)

    @property
    @pulumi.getter(name="networkFunctionRoleConfigurations")
    def network_function_role_configurations(self) -> Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']]:
        """
        An array of network function role definitions.
        """
        return pulumi.get(self, "network_function_role_configurations")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponse(dict):
    """
    The network function user configuration.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "networkInterfaces": "network_interfaces",
            "osProfile": "os_profile",
            "roleName": "role_name",
            "userDataParameters": "user_data_parameters",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile'] = None,
                 role_name: Optional[str] = None,
                 user_data_parameters: Optional[Any] = None):
        """
        The network function user configuration.
        :param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interface configuration.
        :param 'NetworkFunctionUserConfigurationResponseOsProfile' os_profile: Operating system settings for the role instance.
        :param str role_name: The name of the network function role.
        :param Any user_data_parameters: The user data parameters from the customer.
        """
        # Only store fields that were provided.
        for _prop, _value in (("network_interfaces", network_interfaces),
                              ("os_profile", os_profile),
                              ("role_name", role_name),
                              ("user_data_parameters", user_data_parameters)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        """
        The network interface configuration.
        """
        return pulumi.get(self, "network_interfaces")

    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile']:
        """
        Operating system settings for the role instance.
        """
        return pulumi.get(self, "os_profile")

    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        """
        The name of the network function role.
        """
        return pulumi.get(self, "role_name")

    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        """
        The user data parameters from the customer.
        """
        return pulumi.get(self, "user_data_parameters")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponseOsProfile(dict):
    """
    Operating system settings for the role instance.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {"customData": "custom_data"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponseOsProfile. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 custom_data: Optional[str] = None):
        """
        Operating system settings for the role instance.
        :param str custom_data: Base-64 encoded custom data, decoded and saved as a file on the virtual machine (max decoded size 65535 bytes). **Note: do not pass any secrets or passwords in customData.** Cannot be updated after the VM is created. See [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) and, for cloud-init on Linux, [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        if custom_data is None:
            return
        pulumi.set(__self__, "custom_data", custom_data)

    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        """
        Base-64 encoded custom data, decoded and saved as a file on the virtual machine (max decoded size 65535 bytes). **Note: do not pass any secrets or passwords in customData.** Cannot be updated after the VM is created. See [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) and, for cloud-init on Linux, [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "custom_data")
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
    """
    Network interface IP configuration properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "dnsServers": "dns_servers",
            "ipAddress": "ip_address",
            "ipAllocationMethod": "ip_allocation_method",
            "ipVersion": "ip_version",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceIPConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 dns_servers: Optional[Sequence[str]] = None,
                 gateway: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 ip_allocation_method: Optional[str] = None,
                 ip_version: Optional[str] = None,
                 subnet: Optional[str] = None):
        """
        Network interface IP configuration properties.
        :param Sequence[str] dns_servers: The list of DNS servers IP addresses.
        :param str gateway: The value of the gateway.
        :param str ip_address: The value of the IP address.
        :param str ip_allocation_method: IP address allocation method.
        :param str ip_version: IP address version.
        :param str subnet: The value of the subnet.
        """
        # Only store fields that were provided.
        for _prop, _value in (("dns_servers", dns_servers),
                              ("gateway", gateway),
                              ("ip_address", ip_address),
                              ("ip_allocation_method", ip_allocation_method),
                              ("ip_version", ip_version),
                              ("subnet", subnet)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[Sequence[str]]:
        """
        The list of DNS servers IP addresses.
        """
        return pulumi.get(self, "dns_servers")

    @property
    @pulumi.getter
    def gateway(self) -> Optional[str]:
        """
        The value of the gateway.
        """
        return pulumi.get(self, "gateway")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """
        The value of the IP address.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="ipAllocationMethod")
    def ip_allocation_method(self) -> Optional[str]:
        """
        IP address allocation method.
        """
        return pulumi.get(self, "ip_allocation_method")

    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> Optional[str]:
        """
        IP address version.
        """
        return pulumi.get(self, "ip_version")

    @property
    @pulumi.getter
    def subnet(self) -> Optional[str]:
        """
        The value of the subnet.
        """
        return pulumi.get(self, "subnet")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
    """
    Network interface properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "ipConfigurations": "ip_configurations",
            "macAddress": "mac_address",
            "networkInterfaceName": "network_interface_name",
            "vmSwitchType": "vm_switch_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
                 mac_address: Optional[str] = None,
                 network_interface_name: Optional[str] = None,
                 vm_switch_type: Optional[str] = None):
        """
        Network interface properties.
        :param Sequence['NetworkInterfaceIPConfigurationResponse'] ip_configurations: A list of IP configurations of the network interface.
        :param str mac_address: The MAC address of the network interface.
        :param str network_interface_name: The name of the network interface.
        :param str vm_switch_type: The type of the VM switch.
        """
        # Only store fields that were provided.
        for _prop, _value in (("ip_configurations", ip_configurations),
                              ("mac_address", mac_address),
                              ("network_interface_name", network_interface_name),
                              ("vm_switch_type", vm_switch_type)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        """
        A list of IP configurations of the network interface.
        """
        return pulumi.get(self, "ip_configurations")

    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[str]:
        """
        The MAC address of the network interface.
        """
        return pulumi.get(self, "mac_address")

    @property
    @pulumi.getter(name="networkInterfaceName")
    def network_interface_name(self) -> Optional[str]:
        """
        The name of the network interface.
        """
        return pulumi.get(self, "network_interface_name")

    @property
    @pulumi.getter(name="vmSwitchType")
    def vm_switch_type(self) -> Optional[str]:
        """
        The type of the VM switch.
        """
        return pulumi.get(self, "vm_switch_type")
@pulumi.output_type
class OsDiskResponse(dict):
    """
    Describes the operating system disk used by the virtual machine. For background on disks,
    see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    """
    @staticmethod
    def __key_warning(key: str):
        # Wire keys are camelCase; point callers at the snake_case property.
        suggest = {
            "diskSizeGB": "disk_size_gb",
            "osType": "os_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsDiskResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None,
                 os_type: Optional[str] = None):
        """
        Describes the operating system disk used by the virtual machine.
        :param int disk_size_gb: Size of the os disk in gigabytes: the fully expanded size needed for the VHD image on the ASE. Must be greater than the size of the VHD provided in vhdUri.
        :param str name: The VHD name.
        :param str os_type: The OS type.
        """
        # Only store fields that were provided.
        for _prop, _value in (("disk_size_gb", disk_size_gb),
                              ("name", name),
                              ("os_type", os_type)):
            if _value is not None:
                pulumi.set(__self__, _prop, _value)

    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        """
        Size of the os disk in gigabytes: the fully expanded size needed for the VHD image on the ASE. Must be greater than the size of the VHD provided in vhdUri.
        """
        return pulumi.get(self, "disk_size_gb")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The VHD name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """
        The OS type.
        """
        return pulumi.get(self, "os_type")
@pulumi.output_type
class OsProfileResponse(dict):
    """
    Specifies the operating system settings for the role instance.
    """
    @staticmethod
    def __key_warning(key: str):
        # Translate a camelCase wire key into its snake_case property name and
        # warn callers who use dict-style access instead of the property getter.
        suggest = None
        if key == "adminUsername":
            suggest = "admin_username"
        elif key == "customData":
            suggest = "custom_data"
        elif key == "customDataRequired":
            suggest = "custom_data_required"
        elif key == "linuxConfiguration":
            suggest = "linux_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 admin_username: Optional[str] = None,
                 custom_data: Optional[str] = None,
                 custom_data_required: Optional[bool] = None,
                 linux_configuration: Optional['outputs.LinuxConfigurationResponse'] = None):
        """
        Specifies the operating system settings for the role instance.
        :param str admin_username: Specifies the name of the administrator account. <br><br> **Windows-only restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1 character <br><br> **Max-length (Linux):** 64 characters <br><br> **Max-length (Windows):** 20 characters <br><br><li> For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li> For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        :param str custom_data: Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
        :param bool custom_data_required: Indicates if custom data is required to deploy this role.
        :param 'LinuxConfigurationResponse' linux_configuration: Specifies the Linux operating system settings on the virtual machine. <br><br>For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) <br><br> For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        if admin_username is not None:
            pulumi.set(__self__, "admin_username", admin_username)
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
        if custom_data_required is None:
            # Default: custom data is required unless explicitly disabled.
            custom_data_required = True
        # The value can no longer be None here, so set it unconditionally.
        # (The original code guarded this with a redundant, always-true
        # `is not None` check after the defaulting above.)
        pulumi.set(__self__, "custom_data_required", custom_data_required)
        if linux_configuration is not None:
            pulumi.set(__self__, "linux_configuration", linux_configuration)
    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> Optional[str]:
        """
        Specifies the name of the administrator account. <br><br> **Windows-only restriction:** Cannot end in "." <br><br> **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". <br><br> **Minimum-length (Linux):** 1 character <br><br> **Max-length (Linux):** 64 characters <br><br> **Max-length (Windows):** 20 characters <br><br><li> For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)<br><li> For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "admin_username")
    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        """
        Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the virtual machine. The maximum length of the binary array is 65535 bytes. <br><br> **Note: Do not pass any secrets or passwords in customData property** <br><br> This property cannot be updated after the VM is created. <br><br> customData is passed to the VM to be saved as a file. For more information see [Custom Data on Azure VMs](https://azure.microsoft.com/en-us/blog/custom-data-and-cloud-init-on-windows-azure/) <br><br> For using cloud-init for your Linux VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
        """
        return pulumi.get(self, "custom_data")
    @property
    @pulumi.getter(name="customDataRequired")
    def custom_data_required(self) -> Optional[bool]:
        """
        Indicates if custom data is required to deploy this role.
        """
        return pulumi.get(self, "custom_data_required")
    @property
    @pulumi.getter(name="linuxConfiguration")
    def linux_configuration(self) -> Optional['outputs.LinuxConfigurationResponse']:
        """
        Specifies the Linux operating system settings on the virtual machine. <br><br>For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) <br><br> For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "linux_configuration")
@pulumi.output_type
class SshConfigurationResponse(dict):
    """
    SSH configuration for Linux based VMs running on Azure
    """
    @staticmethod
    def __key_warning(key: str):
        # Resolve the camelCase wire key to its snake_case property name, if any.
        suggest = {"publicKeys": "public_keys"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 public_keys: Optional[Sequence['outputs.SshPublicKeyResponse']] = None):
        """
        SSH configuration for Linux based VMs running on Azure
        :param Sequence['SshPublicKeyResponse'] public_keys: The list of SSH public keys used to authenticate with linux based VMs.
        """
        # Nothing to store when the key list was not provided.
        if public_keys is None:
            return
        pulumi.set(__self__, "public_keys", public_keys)
    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> Optional[Sequence['outputs.SshPublicKeyResponse']]:
        """
        The list of SSH public keys used to authenticate with linux based VMs.
        """
        return pulumi.get(self, "public_keys")
@pulumi.output_type
class SshPublicKeyResponse(dict):
    """
    Contains information about SSH certificate public key and the path on the Linux VM where the public key is placed.
    """
    @staticmethod
    def __key_warning(key: str):
        # Resolve the camelCase wire key to its snake_case property name, if any.
        suggest = {"keyData": "key_data"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshPublicKeyResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 key_data: Optional[str] = None,
                 path: Optional[str] = None):
        """
        Contains information about SSH certificate public key and the path on the Linux VM where the public key is placed.
        :param str key_data: SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. <br><br> For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        :param str path: Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys
        """
        # Store only the fields that were actually provided.
        for prop, value in (("key_data", key_data), ("path", path)):
            if value is not None:
                pulumi.set(__self__, prop, value)
    @property
    @pulumi.getter(name="keyData")
    def key_data(self) -> Optional[str]:
        """
        SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format. <br><br> For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
        """
        return pulumi.get(self, "key_data")
    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        """
        Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys
        """
        return pulumi.get(self, "path")
@pulumi.output_type
class StorageProfileResponse(dict):
    """
    Specifies the storage settings for the virtual machine disks.
    """
    @staticmethod
    def __key_warning(key: str):
        # Resolve the camelCase wire key to its snake_case property name, if any.
        suggest = {
            "dataDisks": "data_disks",
            "imageReference": "image_reference",
            "osDisk": "os_disk",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StorageProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 data_disks: Optional[Sequence['outputs.DataDiskResponse']] = None,
                 image_reference: Optional['outputs.ImageReferenceResponse'] = None,
                 os_disk: Optional['outputs.OsDiskResponse'] = None):
        """
        Specifies the storage settings for the virtual machine disks.
        :param Sequence['DataDiskResponse'] data_disks: Specifies the parameters that are used to add a data disk to a virtual machine.
        :param 'ImageReferenceResponse' image_reference: The image reference properties.
        :param 'OsDiskResponse' os_disk: Specifies information about the operating system disk used by the virtual machine.
        """
        # Store only the fields that were actually provided.
        for prop, value in (("data_disks", data_disks),
                            ("image_reference", image_reference),
                            ("os_disk", os_disk)):
            if value is not None:
                pulumi.set(__self__, prop, value)
    @property
    @pulumi.getter(name="dataDisks")
    def data_disks(self) -> Optional[Sequence['outputs.DataDiskResponse']]:
        """
        Specifies the parameters that are used to add a data disk to a virtual machine.
        """
        return pulumi.get(self, "data_disks")
    @property
    @pulumi.getter(name="imageReference")
    def image_reference(self) -> Optional['outputs.ImageReferenceResponse']:
        """
        The image reference properties.
        """
        return pulumi.get(self, "image_reference")
    @property
    @pulumi.getter(name="osDisk")
    def os_disk(self) -> Optional['outputs.OsDiskResponse']:
        """
        Specifies information about the operating system disk used by the virtual machine.
        """
        return pulumi.get(self, "os_disk")
@pulumi.output_type
class SubResourceResponse(dict):
    """
    Reference to another sub resource.
    """
    def __init__(__self__, *,
                 id: Optional[str] = None):
        """
        Reference to another sub resource.
        :param str id: Resource ID.
        """
        # Nothing to store when no resource ID was provided.
        if id is None:
            return
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """
        Resource ID.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class SystemDataResponse(dict):
    """
    Metadata pertaining to creation and last modification of the resource.
    """
    @staticmethod
    def __key_warning(key: str):
        # Resolve the camelCase wire key to its snake_case property name, if any.
        suggest = {
            "createdAt": "created_at",
            "createdBy": "created_by",
            "createdByType": "created_by_type",
            "lastModifiedAt": "last_modified_at",
            "lastModifiedBy": "last_modified_by",
            "lastModifiedByType": "last_modified_by_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 last_modified_by: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        """
        Metadata pertaining to creation and last modification of the resource.
        :param str created_at: The timestamp of resource creation (UTC).
        :param str created_by: The identity that created the resource.
        :param str created_by_type: The type of identity that created the resource.
        :param str last_modified_at: The timestamp of resource last modification (UTC)
        :param str last_modified_by: The identity that last modified the resource.
        :param str last_modified_by_type: The type of identity that last modified the resource.
        """
        # Store only the fields that were actually provided.
        provided = (
            ("created_at", created_at),
            ("created_by", created_by),
            ("created_by_type", created_by_type),
            ("last_modified_at", last_modified_at),
            ("last_modified_by", last_modified_by),
            ("last_modified_by_type", last_modified_by_type),
        )
        for prop, value in provided:
            if value is not None:
                pulumi.set(__self__, prop, value)
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """
        The timestamp of resource creation (UTC).
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """
        The identity that created the resource.
        """
        return pulumi.get(self, "created_by")
    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """
        The type of identity that created the resource.
        """
        return pulumi.get(self, "created_by_type")
    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """
        The timestamp of resource last modification (UTC)
        """
        return pulumi.get(self, "last_modified_at")
    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """
        The identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by")
    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """
        The type of identity that last modified the resource.
        """
        return pulumi.get(self, "last_modified_by_type")
| 44.239331 | 1,145 | 0.659668 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
# Public API of this module: the generated Pulumi output ("Response") types.
__all__ = [
    'CustomProfileResponse',
    'DataDiskResponse',
    'ImageReferenceResponse',
    'LinuxConfigurationResponse',
    'NetworkFunctionRoleConfigurationResponse',
    'NetworkFunctionTemplateResponse',
    'NetworkFunctionUserConfigurationResponse',
    'NetworkFunctionUserConfigurationResponseOsProfile',
    'NetworkInterfaceIPConfigurationResponse',
    'NetworkInterfaceResponse',
    'OsDiskResponse',
    'OsProfileResponse',
    'SshConfigurationResponse',
    'SshPublicKeyResponse',
    'StorageProfileResponse',
    'SubResourceResponse',
    'SystemDataResponse',
]
@pulumi.output_type
class CustomProfileResponse(dict):
    """
    Output type holding the custom-profile settings of a role; it carries only
    the path to the role's metadata configuration.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "metadataConfigurationPath":
            suggest = "metadata_configuration_path"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in CustomProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        CustomProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 metadata_configuration_path: Optional[str] = None):
        """
        :param str metadata_configuration_path: Path to the role's metadata configuration.
        """
        if metadata_configuration_path is not None:
            pulumi.set(__self__, "metadata_configuration_path", metadata_configuration_path)
    @property
    @pulumi.getter(name="metadataConfigurationPath")
    def metadata_configuration_path(self) -> Optional[str]:
        """
        Path to the role's metadata configuration.
        """
        return pulumi.get(self, "metadata_configuration_path")
@pulumi.output_type
class DataDiskResponse(dict):
    """
    Specifies the parameters that are used to add a data disk to a virtual machine.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "createOption":
            suggest = "create_option"
        elif key == "diskSizeGB":
            suggest = "disk_size_gb"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in DataDiskResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        DataDiskResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 create_option: Optional[str] = None,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None):
        """
        :param str create_option: The create option of the data disk.
        :param int disk_size_gb: Size of the data disk in gigabytes.
        :param str name: The name of the data disk.
        """
        if create_option is not None:
            pulumi.set(__self__, "create_option", create_option)
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if name is not None:
            pulumi.set(__self__, "name", name)
    @property
    @pulumi.getter(name="createOption")
    def create_option(self) -> Optional[str]:
        """
        The create option of the data disk.
        """
        return pulumi.get(self, "create_option")
    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        """
        Size of the data disk in gigabytes.
        """
        return pulumi.get(self, "disk_size_gb")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        The name of the data disk.
        """
        return pulumi.get(self, "name")
@pulumi.output_type
class ImageReferenceResponse(dict):
    """
    The image reference properties.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "exactVersion":
            suggest = "exact_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ImageReferenceResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        ImageReferenceResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 exact_version: Optional[str] = None,
                 offer: Optional[str] = None,
                 publisher: Optional[str] = None,
                 sku: Optional[str] = None,
                 version: Optional[str] = None):
        """
        :param str exact_version: The exact version of the image.
        :param str offer: The image offer.
        :param str publisher: The image publisher.
        :param str sku: The image SKU.
        :param str version: The image version.
        """
        if exact_version is not None:
            pulumi.set(__self__, "exact_version", exact_version)
        if offer is not None:
            pulumi.set(__self__, "offer", offer)
        if publisher is not None:
            pulumi.set(__self__, "publisher", publisher)
        if sku is not None:
            pulumi.set(__self__, "sku", sku)
        if version is not None:
            pulumi.set(__self__, "version", version)
    @property
    @pulumi.getter(name="exactVersion")
    def exact_version(self) -> Optional[str]:
        """
        The exact version of the image.
        """
        return pulumi.get(self, "exact_version")
    @property
    @pulumi.getter
    def offer(self) -> Optional[str]:
        """
        The image offer.
        """
        return pulumi.get(self, "offer")
    @property
    @pulumi.getter
    def publisher(self) -> Optional[str]:
        """
        The image publisher.
        """
        return pulumi.get(self, "publisher")
    @property
    @pulumi.getter
    def sku(self) -> Optional[str]:
        """
        The image SKU.
        """
        return pulumi.get(self, "sku")
    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The image version.
        """
        return pulumi.get(self, "version")
@pulumi.output_type
class LinuxConfigurationResponse(dict):
    """
    Specifies the Linux operating system settings on the virtual machine.
    """
    def __init__(__self__, *,
                 ssh: Optional['outputs.SshConfigurationResponse'] = None):
        """
        :param 'SshConfigurationResponse' ssh: The SSH configuration for the Linux VM.
        """
        if ssh is not None:
            pulumi.set(__self__, "ssh", ssh)
    @property
    @pulumi.getter
    def ssh(self) -> Optional['outputs.SshConfigurationResponse']:
        """
        The SSH configuration for the Linux VM.
        """
        return pulumi.get(self, "ssh")
@pulumi.output_type
class NetworkFunctionRoleConfigurationResponse(dict):
    """
    The configuration of a single network function role: its profiles,
    network interfaces, user data and virtual machine size.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "customProfile":
            suggest = "custom_profile"
        elif key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "osProfile":
            suggest = "os_profile"
        elif key == "roleName":
            suggest = "role_name"
        elif key == "roleType":
            suggest = "role_type"
        elif key == "storageProfile":
            suggest = "storage_profile"
        elif key == "userDataParameters":
            suggest = "user_data_parameters"
        elif key == "userDataTemplate":
            suggest = "user_data_template"
        elif key == "virtualMachineSize":
            suggest = "virtual_machine_size"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionRoleConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionRoleConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 custom_profile: Optional['outputs.CustomProfileResponse'] = None,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.OsProfileResponse'] = None,
                 role_name: Optional[str] = None,
                 role_type: Optional[str] = None,
                 storage_profile: Optional['outputs.StorageProfileResponse'] = None,
                 user_data_parameters: Optional[Any] = None,
                 user_data_template: Optional[Any] = None,
                 virtual_machine_size: Optional[str] = None):
        """
        :param 'CustomProfileResponse' custom_profile: The custom profile of the role.
        :param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interfaces of the role.
        :param 'OsProfileResponse' os_profile: Specifies the operating system settings for the role instance.
        :param str role_name: The name of the role.
        :param str role_type: The type of the role.
        :param 'StorageProfileResponse' storage_profile: Specifies the storage settings for the virtual machine disks.
        :param Any user_data_parameters: The user data parameters, as an opaque value.
        :param Any user_data_template: The user data template, as an opaque value.
        :param str virtual_machine_size: The size of the virtual machine backing the role.
        """
        if custom_profile is not None:
            pulumi.set(__self__, "custom_profile", custom_profile)
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if role_type is not None:
            pulumi.set(__self__, "role_type", role_type)
        if storage_profile is not None:
            pulumi.set(__self__, "storage_profile", storage_profile)
        if user_data_parameters is not None:
            pulumi.set(__self__, "user_data_parameters", user_data_parameters)
        if user_data_template is not None:
            pulumi.set(__self__, "user_data_template", user_data_template)
        if virtual_machine_size is not None:
            pulumi.set(__self__, "virtual_machine_size", virtual_machine_size)
    @property
    @pulumi.getter(name="customProfile")
    def custom_profile(self) -> Optional['outputs.CustomProfileResponse']:
        """
        The custom profile of the role.
        """
        return pulumi.get(self, "custom_profile")
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        """
        The network interfaces of the role.
        """
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OsProfileResponse']:
        """
        Specifies the operating system settings for the role instance.
        """
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        """
        The name of the role.
        """
        return pulumi.get(self, "role_name")
    @property
    @pulumi.getter(name="roleType")
    def role_type(self) -> Optional[str]:
        """
        The type of the role.
        """
        return pulumi.get(self, "role_type")
    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """
        Specifies the storage settings for the virtual machine disks.
        """
        return pulumi.get(self, "storage_profile")
    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        """
        The user data parameters, as an opaque value.
        """
        return pulumi.get(self, "user_data_parameters")
    @property
    @pulumi.getter(name="userDataTemplate")
    def user_data_template(self) -> Optional[Any]:
        """
        The user data template, as an opaque value.
        """
        return pulumi.get(self, "user_data_template")
    @property
    @pulumi.getter(name="virtualMachineSize")
    def virtual_machine_size(self) -> Optional[str]:
        """
        The size of the virtual machine backing the role.
        """
        return pulumi.get(self, "virtual_machine_size")
@pulumi.output_type
class NetworkFunctionTemplateResponse(dict):
    """
    The network function template: a collection of role configurations.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "networkFunctionRoleConfigurations":
            suggest = "network_function_role_configurations"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionTemplateResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionTemplateResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 network_function_role_configurations: Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']] = None):
        """
        :param Sequence['NetworkFunctionRoleConfigurationResponse'] network_function_role_configurations: The role configurations that make up the template.
        """
        if network_function_role_configurations is not None:
            pulumi.set(__self__, "network_function_role_configurations", network_function_role_configurations)
    @property
    @pulumi.getter(name="networkFunctionRoleConfigurations")
    def network_function_role_configurations(self) -> Optional[Sequence['outputs.NetworkFunctionRoleConfigurationResponse']]:
        """
        The role configurations that make up the template.
        """
        return pulumi.get(self, "network_function_role_configurations")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponse(dict):
    """
    The user-supplied configuration for a single network function role.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "networkInterfaces":
            suggest = "network_interfaces"
        elif key == "osProfile":
            suggest = "os_profile"
        elif key == "roleName":
            suggest = "role_name"
        elif key == "userDataParameters":
            suggest = "user_data_parameters"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 network_interfaces: Optional[Sequence['outputs.NetworkInterfaceResponse']] = None,
                 os_profile: Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile'] = None,
                 role_name: Optional[str] = None,
                 user_data_parameters: Optional[Any] = None):
        """
        :param Sequence['NetworkInterfaceResponse'] network_interfaces: The network interfaces of the role.
        :param 'NetworkFunctionUserConfigurationResponseOsProfile' os_profile: The user-configurable OS profile of the role.
        :param str role_name: The name of the role.
        :param Any user_data_parameters: The user data parameters, as an opaque value.
        """
        if network_interfaces is not None:
            pulumi.set(__self__, "network_interfaces", network_interfaces)
        if os_profile is not None:
            pulumi.set(__self__, "os_profile", os_profile)
        if role_name is not None:
            pulumi.set(__self__, "role_name", role_name)
        if user_data_parameters is not None:
            pulumi.set(__self__, "user_data_parameters", user_data_parameters)
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> Optional[Sequence['outputs.NetworkInterfaceResponse']]:
        """
        The network interfaces of the role.
        """
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.NetworkFunctionUserConfigurationResponseOsProfile']:
        """
        The user-configurable OS profile of the role.
        """
        return pulumi.get(self, "os_profile")
    @property
    @pulumi.getter(name="roleName")
    def role_name(self) -> Optional[str]:
        """
        The name of the role.
        """
        return pulumi.get(self, "role_name")
    @property
    @pulumi.getter(name="userDataParameters")
    def user_data_parameters(self) -> Optional[Any]:
        """
        The user data parameters, as an opaque value.
        """
        return pulumi.get(self, "user_data_parameters")
@pulumi.output_type
class NetworkFunctionUserConfigurationResponseOsProfile(dict):
    """
    The user-configurable OS profile of a role; holds only the custom data.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "customData":
            suggest = "custom_data"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkFunctionUserConfigurationResponseOsProfile. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkFunctionUserConfigurationResponseOsProfile.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 custom_data: Optional[str] = None):
        """
        :param str custom_data: Specifies a base-64 encoded string of custom data.
        """
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        """
        Specifies a base-64 encoded string of custom data.
        """
        return pulumi.get(self, "custom_data")
@pulumi.output_type
class NetworkInterfaceIPConfigurationResponse(dict):
    """
    An IP configuration of a network interface.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter. "gateway" and "subnet" need no
        # translation, so they are not listed here.
        suggest = None
        if key == "dnsServers":
            suggest = "dns_servers"
        elif key == "ipAddress":
            suggest = "ip_address"
        elif key == "ipAllocationMethod":
            suggest = "ip_allocation_method"
        elif key == "ipVersion":
            suggest = "ip_version"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceIPConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceIPConfigurationResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 dns_servers: Optional[Sequence[str]] = None,
                 gateway: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 ip_allocation_method: Optional[str] = None,
                 ip_version: Optional[str] = None,
                 subnet: Optional[str] = None):
        """
        :param Sequence[str] dns_servers: The list of DNS server addresses.
        :param str gateway: The gateway address.
        :param str ip_address: The IP address.
        :param str ip_allocation_method: The IP address allocation method.
        :param str ip_version: The IP address version.
        :param str subnet: The subnet.
        """
        if dns_servers is not None:
            pulumi.set(__self__, "dns_servers", dns_servers)
        if gateway is not None:
            pulumi.set(__self__, "gateway", gateway)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if ip_allocation_method is not None:
            pulumi.set(__self__, "ip_allocation_method", ip_allocation_method)
        if ip_version is not None:
            pulumi.set(__self__, "ip_version", ip_version)
        if subnet is not None:
            pulumi.set(__self__, "subnet", subnet)
    @property
    @pulumi.getter(name="dnsServers")
    def dns_servers(self) -> Optional[Sequence[str]]:
        """
        The list of DNS server addresses.
        """
        return pulumi.get(self, "dns_servers")
    @property
    @pulumi.getter
    def gateway(self) -> Optional[str]:
        """
        The gateway address.
        """
        return pulumi.get(self, "gateway")
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """
        The IP address.
        """
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter(name="ipAllocationMethod")
    def ip_allocation_method(self) -> Optional[str]:
        """
        The IP address allocation method.
        """
        return pulumi.get(self, "ip_allocation_method")
    @property
    @pulumi.getter(name="ipVersion")
    def ip_version(self) -> Optional[str]:
        """
        The IP address version.
        """
        return pulumi.get(self, "ip_version")
    @property
    @pulumi.getter
    def subnet(self) -> Optional[str]:
        """
        The subnet.
        """
        return pulumi.get(self, "subnet")
@pulumi.output_type
class NetworkInterfaceResponse(dict):
    """
    A network interface of a role, with its IP configurations.
    """
    @staticmethod
    def __key_warning(key: str):
        # Warn when a camelCase wire key is used for dict-style access instead
        # of the snake_case property getter.
        suggest = None
        if key == "ipConfigurations":
            suggest = "ip_configurations"
        elif key == "macAddress":
            suggest = "mac_address"
        elif key == "networkInterfaceName":
            suggest = "network_interface_name"
        elif key == "vmSwitchType":
            suggest = "vm_switch_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in NetworkInterfaceResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        NetworkInterfaceResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 ip_configurations: Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']] = None,
                 mac_address: Optional[str] = None,
                 network_interface_name: Optional[str] = None,
                 vm_switch_type: Optional[str] = None):
        """
        :param Sequence['NetworkInterfaceIPConfigurationResponse'] ip_configurations: The IP configurations of the network interface.
        :param str mac_address: The MAC address of the network interface.
        :param str network_interface_name: The name of the network interface.
        :param str vm_switch_type: The type of the VM switch.
        """
        if ip_configurations is not None:
            pulumi.set(__self__, "ip_configurations", ip_configurations)
        if mac_address is not None:
            pulumi.set(__self__, "mac_address", mac_address)
        if network_interface_name is not None:
            pulumi.set(__self__, "network_interface_name", network_interface_name)
        if vm_switch_type is not None:
            pulumi.set(__self__, "vm_switch_type", vm_switch_type)
    @property
    @pulumi.getter(name="ipConfigurations")
    def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
        """
        The IP configurations of the network interface.
        """
        return pulumi.get(self, "ip_configurations")
    @property
    @pulumi.getter(name="macAddress")
    def mac_address(self) -> Optional[str]:
        """
        The MAC address of the network interface.
        """
        return pulumi.get(self, "mac_address")
    @property
    @pulumi.getter(name="networkInterfaceName")
    def network_interface_name(self) -> Optional[str]:
        """
        The name of the network interface.
        """
        return pulumi.get(self, "network_interface_name")
    @property
    @pulumi.getter(name="vmSwitchType")
    def vm_switch_type(self) -> Optional[str]:
        """
        The type of the VM switch.
        """
        return pulumi.get(self, "vm_switch_type")
@pulumi.output_type
class OsDiskResponse(dict):
    """
    Output object for an OS disk (size, name, OS type); dict subclass with
    camelCase-key warnings redirecting to snake_case property getters.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = None
        if key == "diskSizeGB":
            suggest = "disk_size_gb"
        elif key == "osType":
            suggest = "os_type"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsDiskResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        OsDiskResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 disk_size_gb: Optional[int] = None,
                 name: Optional[str] = None,
                 os_type: Optional[str] = None):
        # Only values actually supplied are stored.
        if disk_size_gb is not None:
            pulumi.set(__self__, "disk_size_gb", disk_size_gb)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if os_type is not None:
            pulumi.set(__self__, "os_type", os_type)
    @property
    @pulumi.getter(name="diskSizeGB")
    def disk_size_gb(self) -> Optional[int]:
        """Stored disk size in GB, or ``None`` if unset."""
        return pulumi.get(self, "disk_size_gb")
    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Stored disk name, or ``None`` if unset."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """Stored OS type, or ``None`` if unset."""
        return pulumi.get(self, "os_type")
@pulumi.output_type
class OsProfileResponse(dict):
    """
    Output object for an OS profile (admin user, custom data, Linux
    configuration); dict subclass with camelCase-key warnings.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = None
        if key == "adminUsername":
            suggest = "admin_username"
        elif key == "customData":
            suggest = "custom_data"
        elif key == "customDataRequired":
            suggest = "custom_data_required"
        elif key == "linuxConfiguration":
            suggest = "linux_configuration"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in OsProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        OsProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 admin_username: Optional[str] = None,
                 custom_data: Optional[str] = None,
                 custom_data_required: Optional[bool] = None,
                 linux_configuration: Optional['outputs.LinuxConfigurationResponse'] = None):
        if admin_username is not None:
            pulumi.set(__self__, "admin_username", admin_username)
        if custom_data is not None:
            pulumi.set(__self__, "custom_data", custom_data)
        if custom_data_required is None:
            # Unlike the other fields, this one has an explicit default: it
            # is always stored, defaulting to True when not provided.
            custom_data_required = True
        if custom_data_required is not None:
            pulumi.set(__self__, "custom_data_required", custom_data_required)
        if linux_configuration is not None:
            pulumi.set(__self__, "linux_configuration", linux_configuration)
    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> Optional[str]:
        """Stored admin user name, or ``None`` if unset."""
        return pulumi.get(self, "admin_username")
    @property
    @pulumi.getter(name="customData")
    def custom_data(self) -> Optional[str]:
        """Stored custom data payload, or ``None`` if unset."""
        return pulumi.get(self, "custom_data")
    @property
    @pulumi.getter(name="customDataRequired")
    def custom_data_required(self) -> Optional[bool]:
        """Whether custom data is required (defaults to True in __init__)."""
        return pulumi.get(self, "custom_data_required")
    @property
    @pulumi.getter(name="linuxConfiguration")
    def linux_configuration(self) -> Optional['outputs.LinuxConfigurationResponse']:
        """Stored Linux configuration, or ``None`` if unset."""
        return pulumi.get(self, "linux_configuration")
@pulumi.output_type
class SshConfigurationResponse(dict):
    """
    Output object for an SSH configuration: an optional sequence of public
    keys. Dict-style access with the camelCase wire key logs a warning
    pointing at the snake_case property getter.
    """

    @staticmethod
    def __key_warning(key: str):
        # Only one camelCase wire key exists on this type.
        suggest = "public_keys" if key == "publicKeys" else None
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshConfigurationResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SshConfigurationResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 public_keys: Optional[Sequence['outputs.SshPublicKeyResponse']] = None):
        # Store the value only when it was actually provided.
        if public_keys is None:
            return
        pulumi.set(__self__, "public_keys", public_keys)

    @property
    @pulumi.getter(name="publicKeys")
    def public_keys(self) -> Optional[Sequence['outputs.SshPublicKeyResponse']]:
        """Stored SSH public keys, or ``None`` if unset."""
        return pulumi.get(self, "public_keys")
@pulumi.output_type
class SshPublicKeyResponse(dict):
    """
    Output object for an SSH public key: optional key material and
    destination path. Dict-style access with the camelCase wire key logs a
    warning pointing at the snake_case property getter.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"keyData": "key_data"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SshPublicKeyResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SshPublicKeyResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 key_data: Optional[str] = None,
                 path: Optional[str] = None):
        # Store only the values that were actually provided.
        for name, value in (("key_data", key_data), ("path", path)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="keyData")
    def key_data(self) -> Optional[str]:
        """Stored public key material, or ``None`` if unset."""
        return pulumi.get(self, "key_data")

    @property
    @pulumi.getter
    def path(self) -> Optional[str]:
        """Stored destination path, or ``None`` if unset."""
        return pulumi.get(self, "path")
@pulumi.output_type
class StorageProfileResponse(dict):
    """
    Output object for a storage profile (data disks, image reference, OS
    disk); dict subclass with camelCase-key warnings.
    """
    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = None
        if key == "dataDisks":
            suggest = "data_disks"
        elif key == "imageReference":
            suggest = "image_reference"
        elif key == "osDisk":
            suggest = "os_disk"
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StorageProfileResponse. Access the value via the '{suggest}' property getter instead.")
    def __getitem__(self, key: str) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().__getitem__(key)
    def get(self, key: str, default = None) -> Any:
        StorageProfileResponse.__key_warning(key)
        return super().get(key, default)
    def __init__(__self__, *,
                 data_disks: Optional[Sequence['outputs.DataDiskResponse']] = None,
                 image_reference: Optional['outputs.ImageReferenceResponse'] = None,
                 os_disk: Optional['outputs.OsDiskResponse'] = None):
        # Only values actually supplied are stored.
        if data_disks is not None:
            pulumi.set(__self__, "data_disks", data_disks)
        if image_reference is not None:
            pulumi.set(__self__, "image_reference", image_reference)
        if os_disk is not None:
            pulumi.set(__self__, "os_disk", os_disk)
    @property
    @pulumi.getter(name="dataDisks")
    def data_disks(self) -> Optional[Sequence['outputs.DataDiskResponse']]:
        """Stored data disks, or ``None`` if unset."""
        return pulumi.get(self, "data_disks")
    @property
    @pulumi.getter(name="imageReference")
    def image_reference(self) -> Optional['outputs.ImageReferenceResponse']:
        """Stored image reference, or ``None`` if unset."""
        return pulumi.get(self, "image_reference")
    @property
    @pulumi.getter(name="osDisk")
    def os_disk(self) -> Optional['outputs.OsDiskResponse']:
        """Stored OS disk, or ``None`` if unset."""
        return pulumi.get(self, "os_disk")
@pulumi.output_type
class SubResourceResponse(dict):
    """Minimal resource reference: carries only an optional ``id``."""
    def __init__(__self__, *,
                 id: Optional[str] = None):
        # Only stored when a value was actually provided.
        if id is not None:
            pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """Stored resource identifier, or ``None`` if unset."""
        return pulumi.get(self, "id")
@pulumi.output_type
class SystemDataResponse(dict):
    """
    Output object for creation/last-modification metadata of a resource.
    Dict-style access with a camelCase wire key logs a warning pointing at
    the matching snake_case property getter.
    """

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "createdAt": "created_at",
            "createdBy": "created_by",
            "createdByType": "created_by_type",
            "lastModifiedAt": "last_modified_at",
            "lastModifiedBy": "last_modified_by",
            "lastModifiedByType": "last_modified_by_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SystemDataResponse.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 created_at: Optional[str] = None,
                 created_by: Optional[str] = None,
                 created_by_type: Optional[str] = None,
                 last_modified_at: Optional[str] = None,
                 last_modified_by: Optional[str] = None,
                 last_modified_by_type: Optional[str] = None):
        # Store only the values that were actually provided, in order.
        for name, value in (
                ("created_at", created_at),
                ("created_by", created_by),
                ("created_by_type", created_by_type),
                ("last_modified_at", last_modified_at),
                ("last_modified_by", last_modified_by),
                ("last_modified_by_type", last_modified_by_type)):
            if value is not None:
                pulumi.set(__self__, name, value)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> Optional[str]:
        """Creation timestamp, or ``None`` if unset."""
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[str]:
        """Creator identity, or ``None`` if unset."""
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="createdByType")
    def created_by_type(self) -> Optional[str]:
        """Creator identity type, or ``None`` if unset."""
        return pulumi.get(self, "created_by_type")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> Optional[str]:
        """Last-modification timestamp, or ``None`` if unset."""
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> Optional[str]:
        """Last modifier identity, or ``None`` if unset."""
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter(name="lastModifiedByType")
    def last_modified_by_type(self) -> Optional[str]:
        """Last modifier identity type, or ``None`` if unset."""
        return pulumi.get(self, "last_modified_by_type")
| true | true |
f7145cc12aed42e52b811d6e792bdbbe823aba63 | 9,103 | py | Python | rodnet/models/backbones/hgwi.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | rodnet/models/backbones/hgwi.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | rodnet/models/backbones/hgwi.py | zhengzangw/RODNet | eca5f2bd1f3051c2b823d279532ddafa71b009c1 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class RadarStackedHourglass(nn.Module):
    """
    Stacked-hourglass head over radar data.

    The stem (conv1a-c) lifts a 2-channel 5-D input to 160 channels; then
    ``stacked_num`` hourglass stages (RODEncode/RODDecode pairs) each emit
    an ``n_class``-channel confidence map through a sigmoid. Between stages
    the confidence map is projected back to 160 channels and added
    residually to the features.

    n_class
        number of output confidence-map channels per stage.
    stacked_num
        number of chained hourglass stages.
    """
    def __init__(self, n_class, stacked_num=1):
        super(RadarStackedHourglass, self).__init__()
        self.stacked_num = stacked_num
        # Stem: 2 -> 32 -> 64 -> 160 channels; temporal kernel 9, spatial 5x5,
        # stride 1 with "same" padding so dimensions are preserved.
        self.conv1a = nn.Conv3d(
            in_channels=2,
            out_channels=32,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv1b = nn.Conv3d(
            in_channels=32,
            out_channels=64,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv1c = nn.Conv3d(
            in_channels=64,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        # Each stage: [encoder, decoder, head conv (160 -> n_class),
        # re-projection conv (n_class -> 160) used between stages].
        self.hourglass = []
        for i in range(stacked_num):
            self.hourglass.append(
                nn.ModuleList(
                    [
                        RODEncode(),
                        RODDecode(),
                        nn.Conv3d(
                            in_channels=160,
                            out_channels=n_class,
                            kernel_size=(9, 5, 5),
                            stride=(1, 1, 1),
                            padding=(4, 2, 2),
                        ),
                        nn.Conv3d(
                            in_channels=n_class,
                            out_channels=160,
                            kernel_size=(9, 5, 5),
                            stride=(1, 1, 1),
                            padding=(4, 2, 2),
                        ),
                    ]
                )
            )
        self.hourglass = nn.ModuleList(self.hourglass)
        self.relu = nn.ReLU()
        self.bn1a = nn.BatchNorm3d(num_features=32)
        self.bn1b = nn.BatchNorm3d(num_features=64)
        self.bn1c = nn.BatchNorm3d(num_features=160)
        self.sigmoid = nn.Sigmoid()
    def forward(self, x):
        # Stem: conv -> BN -> ReLU three times.
        x = self.relu(self.bn1a(self.conv1a(x)))
        x = self.relu(self.bn1b(self.conv1b(x)))
        x = self.relu(self.bn1c(self.conv1c(x)))
        out = []
        for i in range(self.stacked_num):
            # Encoder returns downsampled features plus three skip tensors.
            x, x1, x2, x3 = self.hourglass[i][0](x)
            x = self.hourglass[i][1](x, x1, x2, x3)
            confmap = self.hourglass[i][2](x)
            # One sigmoid confidence map is collected per stage.
            out.append(self.sigmoid(confmap))
            if i < self.stacked_num - 1:
                # Feed the intermediate prediction back into the features.
                confmap_ = self.hourglass[i][3](confmap)
                x = x + confmap_
        return out
class InceptionLayerConcat(nn.Module):
    """
    Inception-style block: three parallel Conv3d branches over the same
    input, concatenated along the channel axis (32 + 64 + 64 = 160 output
    channels). Temporal kernel depths differ per branch (5, then 5->9,
    then 5->13) while the 2-D spatial kernel size is the caller's choice.

    kernal_size
        2-D spatial kernel size (the temporal sizes are fixed as above);
        spelling kept as-is for interface compatibility.
    in_channel
        number of input channels.
    stride
        stride applied by each branch's final convolution.
    """
    def __init__(self, kernal_size, in_channel, stride):
        super(InceptionLayerConcat, self).__init__()
        # "Same" padding for the spatial dimensions.
        paddingX = kernal_size[0] // 2
        paddingY = kernal_size[1] // 2
        self.branch1 = nn.Conv3d(
            in_channels=in_channel,
            out_channels=32,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(2, paddingX, paddingY),
        )
        self.branch2a = nn.Conv3d(
            in_channels=in_channel,
            out_channels=64,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=(1, 1, 1),
            padding=(2, paddingX, paddingY),
        )
        self.branch2b = nn.Conv3d(
            in_channels=64,
            out_channels=64,
            kernel_size=(9, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(4, paddingX, paddingY),
        )
        self.branch3a = nn.Conv3d(
            in_channels=in_channel,
            out_channels=64,
            kernel_size=(5, kernal_size[0], kernal_size[1]),
            stride=(1, 1, 1),
            padding=(2, paddingX, paddingY),
        )
        self.branch3b = nn.Conv3d(
            in_channels=64,
            out_channels=64,
            kernel_size=(13, kernal_size[0], kernal_size[1]),
            stride=stride,
            padding=(6, paddingX, paddingY),
        )
    def forward(self, x):
        branch1 = self.branch1(x)
        branch2 = self.branch2a(x)
        branch2 = self.branch2b(branch2)
        branch3 = self.branch3a(x)
        branch3 = self.branch3b(branch3)
        # Concatenate along the channel dimension (dim=1).
        return torch.cat((branch1, branch2, branch3), 1)
class RODEncode(nn.Module):
    """
    Hourglass encoder: three inception stages, each halving the two spatial
    dimensions (stride (1, 2, 2)) while keeping 160 channels (32+64+64
    concat). A parallel "skip" inception at each stage produces the
    tensors (x1, x2, x3) consumed by RODDecode.
    """
    def __init__(self):
        super(RODEncode, self).__init__()
        self.inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        # Skip branches mirror the main path at each resolution.
        self.skip_inception1 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception2 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.skip_inception3 = InceptionLayerConcat(
            kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
        )
        self.bn1 = nn.BatchNorm3d(num_features=160)
        self.bn2 = nn.BatchNorm3d(num_features=160)
        self.bn3 = nn.BatchNorm3d(num_features=160)
        self.skip_bn1 = nn.BatchNorm3d(num_features=160)
        self.skip_bn2 = nn.BatchNorm3d(num_features=160)
        self.skip_bn3 = nn.BatchNorm3d(num_features=160)
        self.relu = nn.ReLU()
    def forward(self, x):
        # Stage 1: skip tensor taken at the input resolution of the stage.
        x1 = self.relu(self.skip_bn1(self.skip_inception1(x)))
        x = self.relu(
            self.bn1(self.inception1(x))
        )  # spatial dims halve; channels stay 160
        x2 = self.relu(self.skip_bn2(self.skip_inception2(x)))
        x = self.relu(
            self.bn2(self.inception2(x))
        )  # spatial dims halve again
        x3 = self.relu(self.skip_bn3(self.skip_inception3(x)))
        x = self.relu(
            self.bn3(self.inception3(x))
        )  # third halving; x is now 1/8 of the input spatial size
        return x, x1, x2, x3
class RODDecode(nn.Module):
    """
    Hourglass decoder: three ConvTranspose3d stages, each doubling the two
    spatial dimensions (stride (1, 2, 2)) at a constant 160 channels. The
    encoder skips are added *before* each upsampling, deepest first
    (x3, then x2, then x1); each upsampling is followed by a refining
    Conv3d. Activation is PReLU throughout.
    """
    def __init__(self):
        super(RODDecode, self).__init__()
        self.convt1 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        self.convt2 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        self.convt3 = nn.ConvTranspose3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(3, 6, 6),
            stride=(1, 2, 2),
            padding=(1, 2, 2),
        )
        # Refinement convs keep resolution ("same" padding, stride 1).
        self.conv1 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv2 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.conv3 = nn.Conv3d(
            in_channels=160,
            out_channels=160,
            kernel_size=(9, 5, 5),
            stride=(1, 1, 1),
            padding=(4, 2, 2),
        )
        self.prelu = nn.PReLU()
        self.sigmoid = nn.Sigmoid()  # NOTE: defined but unused in forward()
    def forward(self, x, x1, x2, x3):
        # Deepest skip first: x3 matches the encoder's last (smallest) scale.
        x = self.prelu(
            self.convt1(x + x3)
        )  # spatial dims double
        x = self.prelu(self.conv1(x))
        x = self.prelu(
            self.convt2(x + x2)
        )  # spatial dims double again
        x = self.prelu(self.conv2(x))
        x = self.prelu(self.convt3(x + x1))  # back to the encoder input size
        x = self.prelu(self.conv3(x))
        return x
| 34.612167 | 101 | 0.494782 | import torch
import torch.nn as nn
class RadarStackedHourglass(nn.Module):
def __init__(self, n_class, stacked_num=1):
super(RadarStackedHourglass, self).__init__()
self.stacked_num = stacked_num
self.conv1a = nn.Conv3d(
in_channels=2,
out_channels=32,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1b = nn.Conv3d(
in_channels=32,
out_channels=64,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv1c = nn.Conv3d(
in_channels=64,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.hourglass = []
for i in range(stacked_num):
self.hourglass.append(
nn.ModuleList(
[
RODEncode(),
RODDecode(),
nn.Conv3d(
in_channels=160,
out_channels=n_class,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
),
nn.Conv3d(
in_channels=n_class,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
),
]
)
)
self.hourglass = nn.ModuleList(self.hourglass)
self.relu = nn.ReLU()
self.bn1a = nn.BatchNorm3d(num_features=32)
self.bn1b = nn.BatchNorm3d(num_features=64)
self.bn1c = nn.BatchNorm3d(num_features=160)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.bn1a(self.conv1a(x)))
x = self.relu(self.bn1b(self.conv1b(x)))
x = self.relu(self.bn1c(self.conv1c(x)))
out = []
for i in range(self.stacked_num):
x, x1, x2, x3 = self.hourglass[i][0](x)
x = self.hourglass[i][1](x, x1, x2, x3)
confmap = self.hourglass[i][2](x)
out.append(self.sigmoid(confmap))
if i < self.stacked_num - 1:
confmap_ = self.hourglass[i][3](confmap)
x = x + confmap_
return out
class InceptionLayerConcat(nn.Module):
def __init__(self, kernal_size, in_channel, stride):
super(InceptionLayerConcat, self).__init__()
paddingX = kernal_size[0] // 2
paddingY = kernal_size[1] // 2
self.branch1 = nn.Conv3d(
in_channels=in_channel,
out_channels=32,
kernel_size=(5, kernal_size[0], kernal_size[1]),
stride=stride,
padding=(2, paddingX, paddingY),
)
self.branch2a = nn.Conv3d(
in_channels=in_channel,
out_channels=64,
kernel_size=(5, kernal_size[0], kernal_size[1]),
stride=(1, 1, 1),
padding=(2, paddingX, paddingY),
)
self.branch2b = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(9, kernal_size[0], kernal_size[1]),
stride=stride,
padding=(4, paddingX, paddingY),
)
self.branch3a = nn.Conv3d(
in_channels=in_channel,
out_channels=64,
kernel_size=(5, kernal_size[0], kernal_size[1]),
stride=(1, 1, 1),
padding=(2, paddingX, paddingY),
)
self.branch3b = nn.Conv3d(
in_channels=64,
out_channels=64,
kernel_size=(13, kernal_size[0], kernal_size[1]),
stride=stride,
padding=(6, paddingX, paddingY),
)
def forward(self, x):
branch1 = self.branch1(x)
branch2 = self.branch2a(x)
branch2 = self.branch2b(branch2)
branch3 = self.branch3a(x)
branch3 = self.branch3b(branch3)
return torch.cat((branch1, branch2, branch3), 1)
class RODEncode(nn.Module):
def __init__(self):
super(RODEncode, self).__init__()
self.inception1 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.inception2 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.inception3 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.skip_inception1 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.skip_inception2 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.skip_inception3 = InceptionLayerConcat(
kernal_size=(5, 5), in_channel=160, stride=(1, 2, 2)
)
self.bn1 = nn.BatchNorm3d(num_features=160)
self.bn2 = nn.BatchNorm3d(num_features=160)
self.bn3 = nn.BatchNorm3d(num_features=160)
self.skip_bn1 = nn.BatchNorm3d(num_features=160)
self.skip_bn2 = nn.BatchNorm3d(num_features=160)
self.skip_bn3 = nn.BatchNorm3d(num_features=160)
self.relu = nn.ReLU()
def forward(self, x):
x1 = self.relu(self.skip_bn1(self.skip_inception1(x)))
x = self.relu(
self.bn1(self.inception1(x))
)
x2 = self.relu(self.skip_bn2(self.skip_inception2(x)))
x = self.relu(
self.bn2(self.inception2(x))
)
x3 = self.relu(self.skip_bn3(self.skip_inception3(x)))
x = self.relu(
self.bn3(self.inception3(x))
)
return x, x1, x2, x3
class RODDecode(nn.Module):
def __init__(self):
super(RODDecode, self).__init__()
self.convt1 = nn.ConvTranspose3d(
in_channels=160,
out_channels=160,
kernel_size=(3, 6, 6),
stride=(1, 2, 2),
padding=(1, 2, 2),
)
self.convt2 = nn.ConvTranspose3d(
in_channels=160,
out_channels=160,
kernel_size=(3, 6, 6),
stride=(1, 2, 2),
padding=(1, 2, 2),
)
self.convt3 = nn.ConvTranspose3d(
in_channels=160,
out_channels=160,
kernel_size=(3, 6, 6),
stride=(1, 2, 2),
padding=(1, 2, 2),
)
self.conv1 = nn.Conv3d(
in_channels=160,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv2 = nn.Conv3d(
in_channels=160,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.conv3 = nn.Conv3d(
in_channels=160,
out_channels=160,
kernel_size=(9, 5, 5),
stride=(1, 1, 1),
padding=(4, 2, 2),
)
self.prelu = nn.PReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x, x1, x2, x3):
x = self.prelu(
self.convt1(x + x3)
)
x = self.prelu(self.conv1(x))
x = self.prelu(
self.convt2(x + x2)
)
x = self.prelu(self.conv2(x))
x = self.prelu(self.convt3(x + x1))
x = self.prelu(self.conv3(x))
return x
| true | true |
f7145d06775df411d8b6bbed45d9cb10c999cfeb | 203,536 | py | Python | salt/modules/file.py | sacren/salt | 887336c6deaaad6f9ad4948b69472bd043962d56 | [
"Apache-2.0"
] | null | null | null | salt/modules/file.py | sacren/salt | 887336c6deaaad6f9ad4948b69472bd043962d56 | [
"Apache-2.0"
] | null | null | null | salt/modules/file.py | sacren/salt | 887336c6deaaad6f9ad4948b69472bd043962d56 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Manage information about regular files, directories,
and special files on the minion, set/read user,
group, mode, and data
'''
# TODO: We should add the capability to do u+r type operations here
# some time in the future
from __future__ import absolute_import, print_function
# Import python libs
import datetime
import difflib
import errno
import fileinput
import fnmatch
import itertools
import logging
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import glob
import hashlib
import mmap
from collections import Iterable, Mapping
from functools import reduce # pylint: disable=redefined-builtin
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import grp
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
# Salt loader aliases: publish ``makedirs_`` under the public name
# ``makedirs`` (the trailing underscore avoids clashing with os.makedirs).
__func_alias__ = {
    'makedirs_': 'makedirs'
}
def __virtual__():
    '''
    Only work on POSIX-like systems
    '''
    # On Windows the win_file module provides this interface instead.
    if not salt.utils.platform.is_windows():
        return True
    return (
        False,
        'The file execution module cannot be loaded: only available on '
        'non-Windows systems - use win_file instead.'
    )
def __clean_tmp(sfn):
    '''
    Remove a temporary file created for template rendering, unless it lives
    under one of the configured file_roots (any saltenv).
    '''
    tmp_prefix = os.path.join(tempfile.gettempdir(),
                              salt.utils.files.TEMPFILE_PREFIX)
    if not sfn.startswith(tmp_prefix):
        # Not one of our temp files -- leave it alone.
        return
    all_roots = itertools.chain.from_iterable(
        six.itervalues(__opts__['file_roots']))
    in_roots = any(sfn.startswith(root) for root in all_roots)
    # Only remove files that actually exist and are not in a file root.
    if os.path.exists(sfn) and not in_roots:
        os.remove(sfn)
def _error(ret, err_msg):
'''
Common function for setting error information for return dicts
'''
ret['result'] = False
ret['comment'] = err_msg
return ret
def _binary_replace(old, new):
    '''
    Describe, without diffing, how a replacement crosses the text/binary
    boundary. Returns an empty string when neither file is binary. Only
    call this AFTER establishing that the files differ.
    '''
    old_isbin = not __utils__['files.is_text'](old)
    new_isbin = not __utils__['files.is_text'](new)
    if old_isbin and new_isbin:
        return u'Replace binary file'
    if old_isbin:
        return u'Replace binary file with text file'
    if new_isbin:
        return u'Replace text file with binary file'
    return u''
def _get_bkroot():
    '''
    Return the file-backup directory inside the minion cache.
    '''
    cachedir = __salt__['config.get']('cachedir')
    return os.path.join(cachedir, 'file_backup')
def _splitlines_preserving_trailing_newline(str):
'''
Returns a list of the lines in the string, breaking at line boundaries and
preserving a trailing newline (if present).
Essentially, this works like ``str.striplines(False)`` but preserves an
empty line at the end. This is equivalent to the following code:
.. code-block:: python
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
'''
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
return lines
def gid_to_group(gid):
    '''
    Convert the group id to the group name on this system

    gid
        gid to convert to a group name

    CLI Example:

    .. code-block:: bash

        salt '*' file.gid_to_group 0
    '''
    try:
        numeric_gid = int(gid)
    except ValueError:
        # Not numeric -- maybe it is already a group name; resolve it.
        numeric_gid = group_to_gid(gid)
    if numeric_gid == '':
        # Unresolvable; no point asking grp.
        return ''
    try:
        return grp.getgrgid(numeric_gid).gr_name
    except (KeyError, NameError):
        # Unknown gid (or no grp module): fall back to the gid itself.
        return numeric_gid
def group_to_gid(group):
    '''
    Convert the group to the gid on this system

    group
        group to convert to its gid

    CLI Example:

    .. code-block:: bash

        salt '*' file.group_to_gid root
    '''
    if group is None:
        return ''
    # Numeric input is already a gid; pass it straight through.
    if isinstance(group, int):
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        # Unknown group name.
        return ''
def get_gid(path, follow_symlinks=True):
    '''
    Return the id of the group that owns a given file

    path
        file or directory of which to get the gid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_gid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    # -1 mirrors the os.chown() convention for "no gid".
    return stats(expanded, follow_symlinks=follow_symlinks).get('gid', -1)
def get_group(path, follow_symlinks=True):
    '''
    Return the group that owns a given file

    path
        file or directory of which to get the group

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_group /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    return stats(expanded, follow_symlinks=follow_symlinks).get('group', False)
def uid_to_user(uid):
    '''
    Convert a uid to a user name

    uid
        uid to convert to a username

    CLI Example:

    .. code-block:: bash

        salt '*' file.uid_to_user 0
    '''
    try:
        passwd_entry = pwd.getpwuid(uid)
    except (KeyError, NameError):
        # No passwd entry (or no pwd module): fall back to the raw uid.
        return uid
    return passwd_entry.pw_name
def user_to_uid(user):
    '''
    Convert user name to a uid

    user
        user name to convert to its uid

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid root
    '''
    if user is None:
        # Default to the user this process is running as.
        user = salt.utils.user.get_user()
    # Numeric input is already a uid.
    if isinstance(user, int):
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        # Unknown user name.
        return ''
def get_uid(path, follow_symlinks=True):
    '''
    Return the id of the user that owns a given file

    path
        file or directory of which to get the uid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_uid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    # -1 mirrors the os.chown() convention for "no uid".
    return stats(expanded, follow_symlinks=follow_symlinks).get('uid', -1)
def get_user(path, follow_symlinks=True):
    '''
    Return the user that owns a given file

    path
        file or directory of which to get the user

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_user /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    return stats(expanded, follow_symlinks=follow_symlinks).get('user', False)
def get_mode(path, follow_symlinks=True):
    '''
    Return the mode of a file

    path
        file or directory of which to get the mode

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_mode /etc/passwd

    .. versionchanged:: 2014.1.0
        ``follow_symlinks`` option added
    '''
    expanded = os.path.expanduser(path)
    return stats(expanded, follow_symlinks=follow_symlinks).get('mode', '')
def set_mode(path, mode):
    '''
    Set the mode of a file

    path
        file or directory of which to set the mode

    mode
        mode to set the path to

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_mode /etc/passwd 0644
    '''
    target = os.path.expanduser(path)
    # Strip any octal prefix (0, 0o, 0O); an all-prefix input means mode 0.
    normalized = str(mode).lstrip('0Oo') or '0'
    if not os.path.exists(target):
        raise CommandExecutionError('{0}: File not found'.format(target))
    try:
        os.chmod(target, int(normalized, 8))
    except Exception:
        return 'Invalid Mode ' + normalized
    return get_mode(target)
def lchown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group without following
    symlinks.

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.lchown /etc/passwd root root
    '''
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    # user_to_uid/group_to_gid return '' for unknown names; -1 tells
    # os.lchown() to leave that id unchanged.
    if uid == '':
        if user:
            err += 'User does not exist\n'
        else:
            uid = -1
    if gid == '':
        if group:
            err += 'Group does not exist\n'
        else:
            gid = -1
    if err:
        # Consistent with chown(): report lookup failures instead of letting
        # os.lchown() crash on a non-numeric id (the original computed
        # ``err`` but never used it).
        return err
    return os.lchown(path, uid, gid)
def chown(path, user, group):
    '''
    Chown a file, pass the file the desired user and group

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    Returns the result of ``os.chown`` (None) on success, or an error
    string describing any failed user/group lookup or a missing file.

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    '''
    path = os.path.expanduser(path)
    # user_to_uid/group_to_gid return '' for unknown names; -1 tells
    # os.chown() to leave that id unchanged.
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ''
    if uid == '':
        if user:
            err += 'User does not exist\n'
        else:
            uid = -1
    if gid == '':
        if group:
            err += 'Group does not exist\n'
        else:
            gid = -1
    if not os.path.exists(path):
        try:
            # Broken symlinks will return false, but still need to be chowned
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        err += 'File not found'
    if err:
        return err
    return os.chown(path, uid, gid)
def chgrp(path, group):
    '''
    Change the group of a file

    path
        path to the file or directory

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chgrp /etc/passwd root
    '''
    path = os.path.expanduser(path)
    # Keep the current owner; delegate the actual work to chown().
    current_user = get_user(path)
    return chown(path, current_user, group)
def _cmp_attrs(path, attrs):
    '''
    .. versionadded: Oxygen

    Compare attributes of a given file to given attributes.
    Returns a pair (list) where first item are attributes to
    add and second item are to be removed.

    path
        path to file to compare attributes with.

    attrs
        string of attributes to compare against a given file
    '''
    diff = [None, None]
    # Attributes currently set on the file ('' when lsattr reported none)
    lattrs = lsattr(path).get(path, '')
    # Fix: the loop variable was named ``chr``, shadowing the builtin chr()
    to_remove = [attr for attr in lattrs if attr not in attrs]
    if len(to_remove) > 0:
        diff[1] = ''.join(to_remove)
    to_add = [attr for attr in attrs if attr not in lattrs]
    if len(to_add) > 0:
        diff[0] = ''.join(to_add)
    return diff
def lsattr(path):
    '''
    .. versionadded: Oxygen

    Obtain the modifiable attributes of the given file. If path
    is to a directory, an empty list is returned.

    path
        path to file to obtain attributes of. File/directory must exist.

    CLI Example:

    .. code-block:: bash

        salt '*' file.lsattr foo1.txt
    '''
    if not os.path.exists(path):
        raise SaltInvocationError("File or directory does not exist.")
    output = __salt__['cmd.run'](['lsattr', path], python_shell=False)
    attributes = {}
    for row in output.splitlines():
        # lsattr prefixes its own error/usage output with 'lsattr'; skip it
        if row.startswith('lsattr'):
            continue
        flag_field, file_name = row.split(None, 1)
        attributes[file_name] = re.findall(r"[acdijstuADST]", flag_field)
    return attributes
def chattr(*args, **kwargs):
    '''
    .. versionadded: Oxygen

    Change the attributes of files

    *args
        list of files to modify attributes of

    **kwargs - the following are valid <key,value> pairs:

    operator
        add|remove
        determines whether attributes should be added or removed from files

    attributes
        acdijstuADST
        string of characters representing attributes to add/remove from files

    version
        a version number to assign to the files

    flags
        [RVf]
        flags to assign to chattr (recurse, verbose, suppress most errors)

    CLI Example:

    .. code-block:: bash

        salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai
        salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
    '''
    # Quote any target filename that is not already quoted
    targets = []
    for fname in args:
        if salt.utils.stringutils.is_quoted(fname):
            targets.append(fname)
        else:
            targets.append('"{0}"'.format(fname))
    operator = kwargs.pop('operator', None)
    attributes = kwargs.pop('attributes', None)
    flags = kwargs.pop('flags', None)
    version = kwargs.pop('version', None)
    # 'not in' also rejects None, so no separate None check is needed
    if operator not in ('add', 'remove'):
        raise SaltInvocationError(
            "Need an operator: 'add' or 'remove' to modify attributes.")
    if attributes is None:
        raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
    attrs = ('+' if operator == 'add' else '-') + '{0}'.format(attributes)
    flgs = '-{0}'.format(flags) if flags is not None else ''
    vrsn = '-v {0}'.format(version) if version is not None else ''
    cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(targets))
    result = __salt__['cmd.run'](cmd, python_shell=False)
    # chattr is silent on success; any output indicates a failure
    if bool(result):
        raise CommandExecutionError(
            "chattr failed to run, possibly due to bad parameters.")
    return True
def get_sum(path, form='sha256'):
    '''
    Return the checksum for the given file. The following checksum algorithms
    are supported:

    * md5
    * sha1
    * sha224
    * sha256 **(default)**
    * sha384
    * sha512

    path
        path to the file or directory

    form
        desired sum format

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_sum /etc/passwd sha512
    '''
    full_path = os.path.expanduser(path)
    if not os.path.isfile(full_path):
        # Historical behavior: a string rather than an exception
        return 'File not found'
    return salt.utils.hashutils.get_hash(full_path, form, 4096)
def get_hash(path, form='sha256', chunk_size=65536):
    '''
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
          ``get_sum`` cannot really be trusted since it is vulnerable to
          collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``

    path
        path to the file or directory

    form
        desired sum format

    chunk_size
        amount to sum at once

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_hash /etc/shadow
    '''
    expanded = os.path.expanduser(path)
    return salt.utils.hashutils.get_hash(expanded, form, chunk_size)
def get_source_sum(file_name='',
                   source='',
                   source_hash=None,
                   source_hash_name=None,
                   saltenv='base'):
    '''
    .. versionadded:: 2016.11.0
    Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to
    obtain the hash and hash type from the parameters specified below.
    file_name
        Optional file name being managed, for matching with
        :py:func:`file.extract_hash <salt.modules.file.extract_hash>`.
    source
        Source file, as used in :py:mod:`file <salt.states.file>` and other
        states. If ``source_hash`` refers to a file containing hashes, then
        this filename will be used to match a filename in that file. If the
        ``source_hash`` is a hash expression, then this argument will be
        ignored.
    source_hash
        Hash file/expression, as used in :py:mod:`file <salt.states.file>` and
        other states. If this value refers to a remote URL or absolute path to
        a local file, it will be cached and :py:func:`file.extract_hash
        <salt.modules.file.extract_hash>` will be used to obtain a hash from
        it.
    source_hash_name
        Specific file name to look for when ``source_hash`` refers to a remote
        file, used to disambiguate ambiguous matches.
    saltenv : base
        Salt fileserver environment from which to retrieve the source_hash. This
        value will only be used when ``source_hash`` refers to a file on the
        Salt fileserver (i.e. one beginning with ``salt://``).
    CLI Example:
    .. code-block:: bash
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
    '''
    def _invalid_source_hash_format():
        '''
        DRY helper for reporting invalid source_hash input
        '''
        raise CommandExecutionError(
            'Source hash {0} format is invalid. The supported formats are: '
            '1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
            '3) either a path to a local file containing hashes, or a URI of '
            'a remote hash file. Supported protocols for remote hash files '
            'are: {1}. The hash may also not be of a valid length, the '
            'following are supported hash types and lengths: {2}.'.format(
                source_hash,
                ', '.join(salt.utils.files.VALID_PROTOS),
                ', '.join(
                    ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                     for x in sorted(HASHES_REVMAP)]
                ),
            )
        )
    # hash_fn stays None when source_hash is a literal hash expression
    hash_fn = None
    # An absolute path is taken to be a local file containing hashes
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = _urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # Remote hash file: cache it locally so it can be parsed
                hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
                if not hash_fn:
                    raise CommandExecutionError(
                        'Source hash file {0} not found'.format(source_hash)
                    )
            else:
                if proto != '':
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # source_hash was not a string (e.g. None or an int)
            _invalid_source_hash_format()
    if hash_fn is not None:
        # Hash file path (local or cached): parse a matching hash out of it
        ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret['hash_type'], ret['hsum'] = \
                [x.strip() for x in source_hash.split('=', 1)]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret['hsum'] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret['hash_type'] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()
        if ret['hash_type'] not in HASHES:
            raise CommandExecutionError(
                'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
                'Either remove the hash type and simply use \'{2}\' as the '
                'source_hash, or change the hash type to a supported type.'
                .format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
            )
        else:
            # Hash type is valid; sanity-check the digest length against it
            hsum_len = len(ret['hsum'])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret['hash_type']]:
                raise CommandExecutionError(
                    'Invalid length ({0}) for hash type \'{1}\'. Either '
                    'remove the hash type and simply use \'{2}\' as the '
                    'source_hash, or change the hash type to \'{3}\''.format(
                        hsum_len,
                        ret['hash_type'],
                        ret['hsum'],
                        HASHES_REVMAP[hsum_len],
                    )
                )
    return ret
def check_hash(path, file_hash):
    '''
    Check if a file matches the given hash string

    Returns ``True`` if the hash matches, otherwise ``False``.

    path
        Path to a file local to the minion.

    hash
        The hash to check against the file specified in the ``path`` argument.

        .. versionchanged:: 2016.11.4

        For this and newer versions the hash can be specified without an
        accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
        but for earlier releases it is necessary to also specify the hash type
        in the format ``<hash_type>=<hash_value>`` (e.g.
        ``md5=e138491e9d5b97023cea823fe17bac22``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
        salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
    '''
    path = os.path.expanduser(path)
    if not isinstance(file_hash, six.string_types):
        raise SaltInvocationError('hash must be a string')
    hash_type = None
    hash_value = None
    for sep in (':', '='):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    if hash_value is None:
        # Bare hash with no explicit type: infer the type from its length
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                'Hash {0} (length: {1}) could not be matched to a supported '
                'hash type. The supported hash types and lengths are: '
                '{2}'.format(
                    file_hash,
                    hash_len,
                    ', '.join(
                        ['{0} ({1})'.format(HASHES_REVMAP[x], x)
                         for x in sorted(HASHES_REVMAP)]
                    ),
                )
            )
    return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
    '''
    Approximate the Unix ``find(1)`` command and return a list of paths that
    meet the specified criteria.
    The options include match criteria:
    .. code-block:: text
        name    = path-glob                 # case sensitive
        iname   = path-glob                 # case insensitive
        regex   = path-regex                # case sensitive
        iregex  = path-regex                # case insensitive
        type    = file-types                # match any listed type
        user    = users                     # match any listed user
        group   = groups                    # match any listed group
        size    = [+-]number[size-unit]     # default unit = byte
        mtime   = interval                  # modified since date
        grep    = regex                     # search file contents
    and/or actions:
    .. code-block:: text
        delete [= file-types]               # default type = 'f'
        exec    = command [arg ...]         # where {} is replaced by pathname
        print  [= print-opts]
    and/or depth criteria:
    .. code-block:: text
        maxdepth = maximum depth to transverse in path
        mindepth = minimum depth to transverse before checking files or directories
    The default action is ``print=path``
    ``path-glob``:
    .. code-block:: text
        *                = match zero or more chars
        ?                = match any char
        [abc]            = match a, b, or c
        [!abc] or [^abc] = match anything except a, b, and c
        [x-y]            = match chars x through y
        [!x-y] or [^x-y] = match anything except chars x through y
        {a,b,c}          = match a or b or c
    ``path-regex``: a Python Regex (regular expression) pattern to match pathnames
    ``file-types``: a string of one or more of the following:
    .. code-block:: text
        a: all file types
        b: block device
        c: character device
        d: directory
        p: FIFO (named pipe)
        f: plain file
        l: symlink
        s: socket
    ``users``: a space and/or comma separated list of user names and/or uids
    ``groups``: a space and/or comma separated list of group names and/or gids
    ``size-unit``:
    .. code-block:: text
        b: bytes
        k: kilobytes
        m: megabytes
        g: gigabytes
        t: terabytes
    interval:
    .. code-block:: text
        [<num>w] [<num>d] [<num>h] [<num>m] [<num>s]
        where:
            w: week
            d: day
            h: hour
            m: minute
            s: second
    print-opts: a comma and/or space separated list of one or more of the
    following:
    .. code-block:: text
        group: group name
        md5:   MD5 digest of file contents
        mode:  file permissions (as integer)
        mtime: last modification time (as time_t)
        name:  file basename
        path:  file absolute path
        size:  file size in bytes
        type:  file type
        user:  user name
    CLI Examples:
    .. code-block:: bash
        salt '*' file.find / type=f name=\\*.bak size=+10m
        salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
        salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
    '''
    # A bare 'delete'/'print' positional arg becomes its default option value
    if 'delete' in args:
        kwargs['delete'] = 'f'
    elif 'print' in args:
        kwargs['print'] = 'path'
    # Finder validates the criteria; invalid options are reported as a string
    # rather than raised, matching this module's CLI-oriented conventions
    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return 'error: {0}'.format(ex)
    # Expand any globs in ``path`` and flatten the per-path result lists
    ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
    ret.sort()
    return ret
def _sed_esc(string, escape_all=False):
'''
Escape single quotes and forward slashes
'''
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(path,
        before,
        after,
        limit='',
        backup='.bak',
        options='-r -e',
        flags='g',
        escape_all=False,
        negate_match=False):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited

    before
        A pattern to find in order to replace with ``after``

    after
        Text that will replace ``before``

    limit : ``''``
        An initial pattern to search for before searching for ``before``

    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    options : ``-r -e``
        Options to pass to sed

    flags : ``g``
        Flags to modify the sed search; e.g., ``i`` for case-insensitive
        pattern matching

    negate_match : False
        Negate the search command (``!``)

        .. versionadded:: 0.17.0

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    '''
    # Largely inspired by Fabric's contrib.files.sed()
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    # sed works on strings, so coerce the patterns before escaping them
    before = _sed_esc(str(before), escape_all)
    after = _sed_esc(str(after), escape_all)
    limit = _sed_esc(limit, escape_all)
    if sys.platform == 'darwin':
        # BSD sed spells extended-regex mode -E instead of GNU's -r
        options = options.replace('-r', '-E')
    cmd = ['sed']
    if backup:
        cmd.append('-i{0}'.format(backup))
    else:
        cmd.append('-i')
    cmd.extend(salt.utils.args.shlex_split(options))
    limit_expr = '/{0}/ '.format(limit) if limit else ''
    negation = '!' if negate_match else ''
    cmd.append(r'{0}{1}s/{2}/{3}/{4}'.format(
        limit_expr, negation, before, after, flags))
    cmd.append(path)
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def sed_contains(path,
                 text,
                 limit='',
                 flags='g'):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the file at ``path`` contains ``text``. Utilizes sed to
    perform the search (line-wise search).

    Note: the ``p`` flag will be added to any flags you pass in.

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    # Largely inspired by Fabric's contrib.files.contains()
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    pattern = _sed_esc(str(text), False)
    limit = _sed_esc(str(limit), False)
    options = '-n -r -e'
    if sys.platform == 'darwin':
        # BSD sed uses -E for extended regex mode
        options = options.replace('-r', '-E')
    cmd = ['sed']
    cmd.extend(salt.utils.args.shlex_split(options))
    limit_expr = '/{0}/ '.format(limit) if limit else ''
    cmd.append(
        r'{0}s/{1}/$/{2}'.format(limit_expr, pattern, 'p{0}'.format(flags))
    )
    cmd.append(path)
    # With -n, sed only emits output for matching lines (the 'p' flag), so
    # any output at all means the text was found
    result = __salt__['cmd.run'](cmd, python_shell=False)
    return bool(result)
def psed(path,
         before,
         after,
         limit='',
         backup='.bak',
         flags='gMS',
         escape_all=False,
         multi=False):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file (pure Python version)

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited

    before
        A pattern to find in order to replace with ``after``

    after
        Text that will replace ``before``

    limit : ``''``
        An initial pattern to search for before searching for ``before``

    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    flags : ``gMS``
        Flags to modify the search. Valid values are:
          - ``g``: Replace all occurrences of the pattern, not just the first.
          - ``I``: Ignore case.
          - ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S``
            dependent on the locale.
          - ``M``: Treat multiple lines as a single line.
          - ``S``: Make `.` match all characters, including newlines.
          - ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``,
            ``\\s`` and ``\\S`` dependent on Unicode.
          - ``X``: Verbose (whitespace is ignored).

    multi: ``False``
        If True, treat the entire file as a single line

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    '''
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    # Mandate that before and after are strings
    path = os.path.expanduser(path)
    multi = bool(multi)
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    # The pattern to replace with does not need to be escaped!!!
    #after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    # Take a backup copy, then rewrite the original from it
    shutil.copy2(path, '{0}{1}'.format(path, backup))
    with salt.utils.files.fopen(path, 'w') as ofile:
        with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
            if multi is True:
                # Bug fix: this previously iterated ``ifile.readline()``,
                # which walks the *characters* of the first line only,
                # corrupting the output file. Iterate the file object so
                # each line is processed.
                # NOTE(review): the docstring says multi=True treats the
                # file as a single line, which disagrees with this per-line
                # branch; the historical branch layout is preserved here --
                # confirm the intended semantics.
                for line in ifile:
                    ofile.write(_psed(line, before, after, limit, flags))
            else:
                ofile.write(_psed(ifile.read(), before, after, limit, flags))
# Map single-character flag letters (as accepted by file.psed's ``flags``
# argument) to the corresponding ``re`` module flag constants.
RE_FLAG_TABLE = {'I': re.I,
                 'L': re.L,
                 'M': re.M,
                 'S': re.S,
                 'U': re.U,
                 'X': re.X}
def _psed(text,
before,
after,
limit,
flags):
'''
Does the actual work for file.psed, so that single lines can be passed in
'''
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Uncomment specified commented lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be
        uncommented. This regex should not include the comment character. A
        leading ``^`` character will be stripped for convenience (for easily
        switching between comment() and uncomment()).

    char : ``#``
        The character to remove in order to uncomment a line

    backup : ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called
        will overwrite this backup

    CLI Example:

    .. code-block:: bash

        salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
    '''
    # Thin wrapper: comment_line() handles both directions via ``cmnt``
    return comment_line(path=path, regex=regex, char=char, cmnt=False,
                        backup=backup)
def comment(path,
            regex,
            char='#',
            backup='.bak'):
    '''
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Comment out specified lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parenthesis and will move any
        preceding/trailing ``^`` or ``$`` characters outside the parenthesis
        (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)

    char : ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out

    backup : ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.comment /etc/modules pcspkr
    '''
    # Thin wrapper: comment_line() handles both directions via ``cmnt``
    return comment_line(path=path, regex=regex, char=char, cmnt=True,
                        backup=backup)
def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    r'''
    Comment or Uncomment a line in a text file.
    :param path: string
        The full path to the text file.
    :param regex: string
        A regex expression that begins with ``^`` that will find the line you wish
        to comment. Can be as simple as ``^color =``
    :param char: string
        The character used to comment a line in the type of file you're referencing.
        Default is ``#``
    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is True.
    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.
    :return: boolean
        Returns True if successful, False if not
    CLI Example:
    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a backup
    file named ``modules.bak``
    .. code-block:: bash
        salt '*' file.comment_line '/etc/modules' '^pcspkr'
    CLI Example:
    The following example will uncomment the ``log_level`` setting in ``minion``
    config file if it is set to either ``warning``, ``info``, or ``debug`` using
    the ``#`` character and create a backup file named ``minion.bk``
    .. code-block:: bash
        salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        # Commenting: wrap the pattern so the comment char can be prepended,
        # keeping any user-supplied ^/$ anchors outside the group
        regex = '{0}({1}){2}'.format(
            '^' if regex.startswith('^') else '',
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    else:
        # Uncommenting: match the comment char plus optional whitespace
        # preceding the user's pattern
        regex = r'^{0}\s*({1}){2}'.format(
            char,
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))
    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    # Make sure it is a text file
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))
    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Dictionaries for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if six.PY3:
                    line = line.decode(__salt_system_encoding__)
                if re.match(regex, line):
                    # Load lines into dictionaries, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        # NOTE(review): lstrip(char) strips *all* leading
                        # occurrences of ``char``, so '##foo' becomes 'foo'
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False
    if not salt.utils.platform.is_windows():
        # Record ownership/mode so they can be restored after rewriting
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    try:
        # Open the file in write mode
        with salt.utils.files.fopen(path,
                                    mode='wb',
                                    buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.files.fopen(temp_file,
                                            mode='rb',
                                            buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        if six.PY3:
                            line = line.decode(__salt_system_encoding__)
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    wline = '{0}{1}'.format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                # Write the existing line (no change)
                                wline = line
                            if six.PY3:
                                wline = wline.encode(__salt_system_encoding__)
                            w_file.write(wline)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)
    if not salt.utils.platform.is_windows():
        # Restore the pre-edit ownership and permissions
        check_perms(path, None, pre_user, pre_group, pre_mode)
    # Return a diff using the two dictionaries
    return ''.join(difflib.unified_diff(orig_file, new_file))
def _get_flags(flags):
    '''
    Return an integer appropriate for use as a flag for the re module from a
    list of human-readable strings

    .. code-block:: python

        >>> _get_flags(['MULTILINE', 'IGNORECASE'])
        10
        >>> _get_flags('MULTILINE')
        8
        >>> _get_flags(2)
        2
    '''
    # A single flag name is treated as a one-element list
    if isinstance(flags, six.string_types):
        flags = [flags]
    if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
        resolved = []
        for name in flags:
            value = getattr(re, str(name).upper())
            if not isinstance(value, six.integer_types):
                raise SaltInvocationError(
                    'Invalid re flag given: {0}'.format(name)
                )
            resolved.append(value)
        # reduce (rather than a summing loop) is kept so the behavior for an
        # empty iterable is unchanged
        return reduce(operator.__or__, resolved)
    elif isinstance(flags, six.integer_types):
        return flags
    else:
        raise SaltInvocationError(
            'Invalid re flags: "{0}", must be given either as a single flag '
            'string, a list of strings, or as an integer'.format(flags)
        )
def _add_flags(flags, new_flags):
    '''
    Combine ``flags`` and ``new_flags``
    '''
    # Both inputs may be strings, lists, or ints; normalize then OR together
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path,
                  preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` to the temp file.
    Return the path to the temp file.

    path
        The full path to the file whose contents will be moved/copied to a temp file.
        Whether it's moved or copied depends on the value of ``preserve_inode``.

    preserve_inode
        Preserve the inode of the file, so that any hard links continue to share the
        inode with the original filename. This works by *copying* the file, reading
        from the copy, and writing to the file at the original inode. If ``False``, the
        file will be *moved* rather than copied, and a new file will be written to a
        new inode, but using the original filename. Hard links will then share an inode
        with the backup, instead (if using ``backup`` to create a backup copy).
        Default is ``True``.
    '''
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    if preserve_inode:
        # Copying leaves the original inode (and its hard links) untouched;
        # the caller reads from the copy and writes back to the original.
        try:
            shutil.copy2(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to copy file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        # Moving means subsequent writes land on a brand new inode.
        try:
            shutil.move(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    return temp_file
def _starts_till(src, probe, strip_comments=True):
'''
Returns True if src and probe at least matches at the beginning till some point.
'''
def _strip_comments(txt):
'''
Strip possible comments.
Usually comments are one or two symbols at the beginning of the line, separated with space
'''
buff = txt.split(" ", 1)
return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt
def _to_words(txt):
'''
Split by words
'''
return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt
no_match = -1
equal = 0
if not src or not probe:
return no_match
if src == probe:
return equal
src = _to_words(strip_comments and _strip_comments(src) or src)
probe = _to_words(strip_comments and _strip_comments(probe) or probe)
a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
b_buff = ' '.join(b_buff)
for idx in range(len(a_buff)):
prb = ' '.join(a_buff[:-(idx + 1)])
if prb and b_buff.startswith(prb):
return idx
return no_match
def _regex_to_static(src, regex):
'''
Expand regular expression to static match.
'''
if not src or not regex:
return None
try:
src = re.search(regex, src, re.M)
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group() or regex
def _assert_occurrence(src, probe, target, amount=1):
'''
Raise an exception, if there are different amount of specified occurrences in src.
'''
occ = src.count(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
msg = 'less than'
elif not occ:
msg = 'no'
else:
msg = None
if msg:
raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))
return occ
def _get_line_indent(src, line, indent):
'''
Indent the line with the source line.
'''
if not indent:
return line
idt = []
for c in src:
if c not in ['\t', ' ']:
break
idt.append(c)
return ''.join(idt) + line.strip()
def line(path, content=None, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    .. versionadded:: 2015.8.0

    Edit a line in the configuration file. The ``path`` and ``content``
    arguments are required, as well as passing in one of the ``mode``
    options.

    path
        Filesystem path to the file to be edited.

    content
        Content of the line. Allowed to be empty if mode=delete.

    match
        Match the target line for an action by
        a fragment of a string or regular expression.

        If neither ``before`` nor ``after`` are provided, and ``match``
        is also ``None``, match becomes the ``content`` value.

    mode
        Defines how to edit a line. One of the following options is
        required:

        - ensure
            If line does not exist, it will be added. This is based on the
            ``content`` argument.
        - replace
            If line already exists, it will be replaced.
        - delete
            Delete the line, once found.
        - insert
            Insert a line.

        .. note::

            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence
            over the other two options.

    location
        Defines where to place content in the line. Note this option is only
        used when ``mode=insert`` is specified. If a location is passed in, it
        takes precedence over both the ``before`` and ``after`` kwargs. Valid
        locations are:

        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.

    before
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.

    after
        Regular expression or an exact case-sensitive fragment of the string.
        This option is only used when either the ``ensure`` or ``insert`` mode
        is defined.

    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``

        .. note::

            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to generate the diff.

    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that is
        tried to be edited does not exist and nothing really happened.

    indent
        Keep indentation with the previous line. This option is not considered when
        the ``delete`` mode is specified.

    CLI Example:

    .. code-block:: bash

        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'

    .. note::

        If an equal sign (``=``) appears in an argument to a Salt command, it is
        interpreted as a keyword argument in the format of ``key=val``. That
        processing can be bypassed in order to pass an equal sign through to the
        remote shell command by manually specifying the kwarg:

        .. code-block:: bash

            salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
    '''
    # Normalize the target path; non-files either raise or (with quiet=True)
    # report "no change" via False.
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False  # No changes had happened
    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))
    # We've set the content to be empty in the function params but we want to make sure
    # it gets passed when needed. Feature #37092
    empty_content_modes = ['delete']
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError('Content can only be empty if mode is "{0}"'.format(', '.join(empty_content_modes)))
    del empty_content_modes
    # Before/after has privilege. If nothing defined, match is used by content.
    if before is None and after is None and not match:
        match = content
    # Read the whole file into memory; every mode below works on this string.
    with salt.utils.files.fopen(path, mode='r') as fp_:
        body = fp_.read()
    # Hash the original body so changes can be detected cheaply at the end.
    body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    # Expand regex anchors into the literal text they match in the current
    # body (the raw value is kept when nothing matches).
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)
    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        # Drop every line containing the match fragment.
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        # Swap matching lines for the new content, preserving indentation.
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                 if (file_line.find(match) > -1 and not file_line == content) else file_line)
                                for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')
        if not location:
            if before and after:
                # Both boundaries must occur exactly once; the content goes
                # immediately before the "before" line inside the range.
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                in_range = False
                for line in lines:
                    if line.find(after) > -1:
                        in_range = True
                    elif line.find(before) > -1 and in_range:
                        out.append(_get_line_indent(line, content, indent))
                    out.append(line)
                body = os.linesep.join(out)
            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        # Skip insertion when the previous line already starts
                        # like the content.
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):  # Job for replace instead
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)
            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx, _line in enumerate(lines):
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    # No duplicates or append, if "after" is the last line
                    if (_line.find(after) > -1 and
                            (lines[((idx + 1) < len(lines)) and idx + 1 or idx].strip() != cnd or
                             idx + 1 == len(lines))):
                        out.append(cnd)
                body = os.linesep.join(out)
        else:
            # Explicit location takes precedence over before/after.
            if location == 'start':
                body = os.linesep.join((content, body))
            elif location == 'end':
                body = os.linesep.join((body, _get_line_indent(body[-1], content, indent) if body else content))
    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()
        if before and after:
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')
            # Only touch the file when the content is not already present.
            is_there = bool(body.count(content))
            if not is_there:
                out = []
                body = body.split(os.linesep)
                for idx, line in enumerate(body):
                    out.append(line)
                    if line.find(content) > -1:
                        is_there = True
                    if not is_there:
                        if idx < (len(body) - 1) and line.find(after) > -1 and body[idx + 1].find(before) > -1:
                            out.append(content)
                        elif line.find(after) > -1:
                            raise CommandExecutionError('Found more than one line between '
                                                        'boundaries "before" and "after".')
                body = os.linesep.join(out)
        elif before and not after:
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[idx], content, indent))
                    # Replace, rather than duplicate, a previous line that
                    # already starts like the content.
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)
        elif not before and after:
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])
                if body[idx].find(after) > -1:
                    # Swallow the next line when it already starts like the
                    # content, so the insert acts as a replace.
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))
            body = os.linesep.join(out)
        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")
    # Compare content hashes to decide whether anything actually changed.
    changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))
    changes_diff = None
    if changed:
        if show_changes:
            # Re-read the on-disk original to build the unified diff.
            with salt.utils.files.fopen(path, 'r') as fp_:
                path_content = _splitlines_preserving_trailing_newline(
                    fp_.read())
            changes_diff = ''.join(difflib.unified_diff(
                path_content, _splitlines_preserving_trailing_newline(body)))
        if __opts__['test'] is False:
            # Atomic write so readers never observe a partially-written file.
            fh_ = None
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            backslash_literal=False,
            ):
    '''
    .. versionadded:: 0.17.0

    Replace occurrences of a pattern in a file. If ``show_changes`` is
    ``True``, then a diff of what changed will be returned, otherwise a
    ``True`` will be returned when changes are made, and ``False`` when
    no changes are made.

    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.

    path
        Filesystem path to the file to be edited. If a symlink is specified, it
        will be resolved to its target.

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text

    count : 0
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.

    flags (list or int)
        A list of flags defined in the :ref:`re module documentation
        <contents-of-module-re>`. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults to
        8 (which supports 'MULTILINE').

    bufsize (int or str)
        How much of the file to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.

    append_if_not_found : False
        .. versionadded:: 2014.7.0

        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.

    prepend_if_not_found : False
        .. versionadded:: 2014.7.0

        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.

    not_found_content
        .. versionadded:: 2014.7.0

        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.

    backup : .bak
        The file extension to use for a backup of the file before editing. Set
        to ``False`` to skip making a backup.

    dry_run : False
        If set to ``True``, no changes will be made to the file, the function
        will just return the changes that would have been made (or a
        ``True``/``False`` value if ``show_changes`` is set to ``False``).

    search_only : False
        If set to true, this no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.

    show_changes : True
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.

        .. note::
            Using this option will store two copies of the file in memory (the
            original version and the edited version) in order to generate the
            diff. This may not normally be a concern, but could impact
            performance if used with large files.

    ignore_if_missing : False
        .. versionadded:: 2015.8.0

        If set to ``True``, this function will simply return ``False``
        if the file doesn't exist. Otherwise, an error will be thrown.

    preserve_inode : True
        .. versionadded:: 2015.8.0

        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying* the
        file, reading from the copy, and writing to the file at the original
        inode. If ``False``, the file will be *moved* rather than copied, and a
        new file will be written to a new inode, but using the original
        filename. Hard links will then share an inode with the backup, instead
        (if using ``backup`` to create a backup copy).

    backslash_literal : False
        .. versionadded:: 2016.11.7

        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' file.replace /path/to/file pattern='=' repl=':'
        salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'

    CLI Examples:

    .. code-block:: bash

        salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
        salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
    '''
    symlink = False
    if is_link(path):
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)

    path = os.path.realpath(os.path.expanduser(path))

    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError('File not found: {0}'.format(path))

    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )

    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            'search_only cannot be used with append/prepend_if_not_found'
        )

    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )

    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == 'file':
        bufsize = filesize

    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used for show_changes and change detection
    new_file = []  # used for show_changes and change detection
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))

    # Avoid TypeErrors by forcing repl to be bytearray related to mmap
    # Replacement text may contains integer: 123 for example
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)

    found = False
    temp_file = None
    # The literal line to look for (and possibly add) when
    # append/prepend_if_not_found is in play.
    content = salt.utils.stringutils.to_str(not_found_content) \
        if not_found_content and (prepend_if_not_found or append_if_not_found) \
        else salt.utils.stringutils.to_str(repl)

    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(),
                                   0,
                                   access=mmap.ACCESS_READ)
            except (ValueError, mmap.error):
                # size of file in /proc is 0, but contains data
                r_data = salt.utils.stringutils.to_bytes("".join(r_file))
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
            else:
                # NOTE(review): repl is bytes here; str arguments to
                # bytes.replace() would fail on Python 3 — confirm target
                # interpreter before relying on backslash_literal.
                result, nrepl = re.subn(cpattern,
                                        repl.replace('\\', '\\\\') if backslash_literal else repl,
                                        r_data,
                                        count)
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))),
                                 r_data,
                                 flags=flags_num):
                        # Content was found, so set found.
                        found = True
                # Keep both versions in memory for diffing/change detection.
                orig_file = r_data.read(filesize).splitlines(True) \
                    if isinstance(r_data, mmap.mmap) \
                    else r_data.splitlines(True)
                new_file = result.splitlines(True)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()

    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path,
                                      preserve_inode=preserve_inode)
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))

        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.files.fopen(path,
                                        mode='w',
                                        buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.files.fopen(temp_file,
                                                mode='r',
                                                buffering=bufsize) as r_file:
                        r_data = mmap.mmap(r_file.fileno(),
                                           0,
                                           access=mmap.ACCESS_READ)
                        result, nrepl = re.subn(cpattern,
                                                repl.replace('\\', '\\\\') if backslash_literal else repl,
                                                r_data,
                                                count)
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
                except (OSError, IOError) as exc:
                    raise CommandExecutionError("Exception: {0}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))

    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path,
                                          preserve_inode=preserve_inode)
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()

    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
        if symlink:
            symlink_backup = '{0}{1}'.format(given_path, backup)
            target_backup = '{0}{1}'.format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behaviour of the 'backup' option
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            # Fix: was a bare `except:` that referenced `exc` without binding
            # it, raising NameError instead of the intended error message.
            except Exception as exc:
                raise CommandExecutionError(
                    "Unable to create backup symlink '{0}'. "
                    "Target was '{1}'. "
                    "Exception: {2}".format(symlink_backup, target_backup,
                                            exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{0}'. "
                "Exception: {1}".format(temp_file, exc)
            )

    if not dry_run and not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    def get_changes():
        # Build the unified diff lazily; both sides were captured above.
        orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
        new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
        return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))

    if show_changes:
        return get_changes()

    # We may have found a regex line match but don't need to change the line
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not get_changes():
        has_changes = False

    return has_changes
def blockreplace(path,
                 marker_start='#-- start managed zone --',
                 marker_end='#-- end managed zone --',
                 content='',
                 append_if_not_found=False,
                 prepend_if_not_found=False,
                 backup='.bak',
                 dry_run=False,
                 show_changes=True,
                 append_newline=False,
                 ):
    '''
    .. versionadded:: 2014.1.0

    Replace content of a text block in a file, delimited by line markers

    A block of content delimited by comments can help you manage several lines
    entries without worrying about old entries removal.

    .. note::

        This function will store two copies of the file in-memory (the original
        version and the edited version) in order to detect changes and only
        edit the targeted file if necessary.

    path
        Filesystem path to the file to be edited

    marker_start
        The line content identifying a line as the start of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output

    marker_end
        The line content identifying a line as the end of the content block.
        Note that the whole line containing this marker will be considered, so
        whitespace or extra content before or after the marker is included in
        final output

    content
        The content to be used between the two lines identified by marker_start
        and marker_stop.

    append_if_not_found : False
        If markers are not found and set to ``True`` then, the markers and
        content will be appended to the file.

    prepend_if_not_found : False
        If markers are not found and set to ``True`` then, the markers and
        content will be prepended to the file.

    backup
        The file extension to use for a backup of the file if any edit is made.
        Set to ``False`` to skip making a backup.

    dry_run
        Don't make any edits to the file.

    show_changes
        Output a unified diff of the old file and the new file. If ``False``,
        return a boolean if any changes were made.

    append_newline:
        Append a newline to the content block. For more information see:
        https://github.com/saltstack/salt/issues/33686

        .. versionadded:: 2016.3.4

    CLI Example:

    .. code-block:: bash

        salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
        '#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        raise SaltInvocationError('File not found: {0}'.format(path))

    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )

    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )

    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []
    new_file = []
    in_block = False
    old_content = ''
    done = False
    # we do not use in_place editing to avoid file attrs modifications when
    # no changes are required and to avoid any file access on a partially
    # written file.
    # we could also use salt.utils.filebuffer.BufferedReader
    try:
        # NOTE(review): the ``bufsize`` argument was removed from
        # fileinput.input() in Python 3.8 — confirm the target interpreter.
        fi_file = fileinput.input(path,
                                  inplace=False, backup=False,
                                  bufsize=1, mode='rb')
        for line in fi_file:
            line = salt.utils.stringutils.to_str(line)
            result = line
            if marker_start in line:
                # managed block start found, start recording
                in_block = True
            else:
                if in_block:
                    if marker_end in line:
                        # end of block detected
                        in_block = False

                        # Handle situations where there may be multiple types
                        # of line endings in the same file. Separate the content
                        # into lines. Account for Windows-style line endings
                        # using os.linesep, then by linux-style line endings
                        # using '\n'
                        split_content = []
                        for linesep_line in content.split(os.linesep):
                            for content_line in linesep_line.split('\n'):
                                split_content.append(content_line)

                        # Trim any trailing new lines to avoid unwanted
                        # additional new lines
                        # Fix: guard against an entirely-empty content value,
                        # which previously raised IndexError here.
                        while split_content and not split_content[-1]:
                            split_content.pop()

                        # push new block content in file
                        for content_line in split_content:
                            new_file.append(content_line + os.linesep)

                        done = True
                    else:
                        # remove old content, but keep a trace
                        old_content += line
                        result = None
            # else: we are not in the marked block, keep saving things

            orig_file.append(line)
            if result is not None:
                new_file.append(result)
    # end for. If we are here without block management we maybe have some problems,
    # or we need to initialise the marked block
    finally:
        fi_file.close()

    if in_block:
        # unterminated block => bad, always fail
        raise CommandExecutionError(
            'Unterminated marked block. End of file reached before marker_end.'
        )

    if not done:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
            new_file.insert(0, marker_end + os.linesep)
            if append_newline is True:
                new_file.insert(0, content + os.linesep)
            else:
                new_file.insert(0, content)
            new_file.insert(0, marker_start + os.linesep)
            done = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(os.linesep):
                    new_file[-1] += os.linesep
            # add the markers and content at the end of file
            new_file.append(marker_start + os.linesep)
            if append_newline is True:
                new_file.append(content + os.linesep)
            else:
                new_file.append(content)
            new_file.append(marker_end + os.linesep)
            done = True
        else:
            raise CommandExecutionError(
                'Cannot edit marked block. Markers were not found in file.'
            )

    if done:
        diff = ''.join(difflib.unified_diff(orig_file, new_file))
        # Fix: was ``diff is not ''`` — an identity comparison against a
        # string literal, which is implementation-dependent and emits a
        # SyntaxWarning on modern CPython. Equality is what is meant.
        has_changes = diff != ''
        if has_changes and not dry_run:
            # changes detected
            # backup file attrs
            perms = {}
            perms['user'] = get_user(path)
            perms['group'] = get_group(path)
            perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))

            # backup old content
            if backup is not False:
                backup_path = '{0}{1}'.format(path, backup)
                shutil.copy2(path, backup_path)
                # copy2 does not preserve ownership
                check_perms(backup_path,
                            None,
                            perms['user'],
                            perms['group'],
                            perms['mode'])

            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()

            # this may have overwritten file attrs
            check_perms(path,
                        None,
                        perms['user'],
                        perms['group'],
                        perms['mode'])

        if show_changes:
            return diff

    return has_changes
def search(path,
           pattern,
           flags=8,
           bufsize=1,
           ignore_if_missing=False,
           multiline=False
           ):
    '''
    .. versionadded:: 0.17.0

    Search for occurrences of a pattern in a file

    Except for multiline, params are identical to
    :py:func:`~salt.modules.file.replace`.

    multiline
        If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to
        'file'.

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.search /etc/crontab 'mymaintenance.sh'
    '''
    if multiline:
        bufsize = 'file'
        flags = _add_flags(flags, 'MULTILINE')

    # This function wraps file.replace on purpose in order to enforce
    # consistent usage, compatible regex's, expected behavior, *and* bugs. :)
    # Any enhancements or fixes to one should affect the other.
    return replace(path,
                   pattern,
                   '',
                   flags=flags,
                   bufsize=bufsize,
                   dry_run=True,
                   search_only=True,
                   show_changes=False,
                   ignore_if_missing=ignore_if_missing)
def patch(originalfile, patchfile, options='', dry_run=False):
    '''
    .. versionadded:: 0.10.4

    Apply a patch to a file or directory.

    Equivalent to:

    .. code-block:: bash

        patch <options> -i <patchfile> <originalfile>

    Or, when a directory is patched:

    .. code-block:: bash

        patch <options> -i <patchfile> -d <originalfile> -p0

    originalfile
        The full path to the file or directory to be patched
    patchfile
        A patch file to apply to ``originalfile``
    options
        Options to pass to patch.

    CLI Example:

    .. code-block:: bash

        salt '*' file.patch /opt/file.txt /tmp/file.txt.patch
    '''
    patch_bin = salt.utils.path.which('patch')
    if not patch_bin:
        raise CommandExecutionError(
            'patch executable not found. Is the distribution\'s patch '
            'package installed?'
        )

    cmd = [patch_bin]
    cmd.extend(salt.utils.args.shlex_split(options))
    if dry_run:
        # BSD patch spells --dry-run as -C
        cmd.append('-C' if __grains__['kernel'] in ('FreeBSD', 'OpenBSD') else '--dry-run')

    # --forward prevents interactive prompts when the patch fails to apply;
    # the exit code will still be greater than 0 in that case.
    if '-N' not in cmd and '--forward' not in cmd:
        cmd.append('--forward')

    # by default, patch writes rejected patch files to <filename>.rej;
    # suppress that unless the caller already chose a reject file.
    rejectfile_given = any(
        opt == '-r' or opt.startswith('-r ') or opt.startswith('--reject-file')
        for opt in cmd
    )
    if not rejectfile_given:
        cmd.append('--reject-file=-')

    cmd.extend(['-i', patchfile])

    if os.path.isdir(originalfile):
        cmd.extend(['-d', originalfile])
        # directories need a strip level; default to 0 if none was passed
        strip_given = any(
            opt.startswith('-p') or opt.startswith('--strip=')
            for opt in cmd
        )
        if not strip_given:
            cmd.append('--strip=0')
    else:
        cmd.append(originalfile)

    return __salt__['cmd.run_all'](cmd, python_shell=False)
def contains(path, text):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the file at ``path`` contains ``text``

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    full_path = os.path.expanduser(path)

    if not os.path.exists(full_path):
        return False

    needle = str(text).strip()
    try:
        # Stream the file in chunks so large files are not fully loaded.
        with salt.utils.filebuffer.BufferedReader(full_path) as reader:
            for chunk in reader:
                if needle in chunk:
                    return True
    except (IOError, OSError):
        # Unreadable file is reported the same as "not found".
        pass
    return False
def contains_regex(path, regex, lchar=''):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the given regular expression matches on any line in the text
    of a given file.

    If the lchar argument (leading char) is specified, it
    will strip `lchar` from the left side of each line before trying to match

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_regex /etc/crontab
    '''
    full_path = os.path.expanduser(path)

    if not os.path.exists(full_path):
        return False

    try:
        with salt.utils.files.fopen(full_path, 'r') as target:
            for current_line in target:
                if lchar:
                    current_line = current_line.lstrip(lchar)
                if re.search(regex, current_line):
                    return True
    except (IOError, OSError):
        # Unreadable file is reported the same as "no match".
        pass
    return False
def contains_glob(path, glob_expr):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the given glob matches a string in the named file

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_glob /etc/foobar '*cheese*'
    '''
    full_path = os.path.expanduser(path)

    if not os.path.exists(full_path):
        return False

    try:
        # Stream the file in chunks and glob-match each chunk.
        with salt.utils.filebuffer.BufferedReader(full_path) as reader:
            for chunk in reader:
                if fnmatch.fnmatch(chunk, glob_expr):
                    return True
    except (IOError, OSError):
        # Unreadable file is reported the same as "no match".
        pass
    return False
def append(path, *args, **kwargs):
    '''
    .. versionadded:: 0.9.5

    Append text to the end of a file

    path
        path to file

    `*args`
        strings to append to file

    CLI Example:

    .. code-block:: bash

        salt '*' file.append /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

    .. code-block:: bash

        salt '*' file.append /etc/motd args='cheese=spam'

        salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)

    # Largely inspired by Fabric's contrib.files.append()

    # An explicit args= kwarg (string or list) overrides positional *args;
    # this is how the CLI passes strings containing '=' (see docstring).
    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    # Make sure we have a newline at the end of the file. Do this in binary
    # mode so SEEK_END with nonzero offset will work.
    with salt.utils.files.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            # Seek to the last len(linesep) bytes so we can inspect them.
            ofile.seek(-len(linesep), os.SEEK_END)
        except IOError as exc:
            if exc.errno in (errno.EINVAL, errno.ESPIPE):
                # Empty file, simply append lines at the beginning of the file
                pass
            else:
                raise
        else:
            # File is non-empty: append a trailing newline only if missing,
            # so the first appended line never glues onto existing content.
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)

    # Append lines in text mode
    with salt.utils.files.fopen(path, 'a') as ofile:
        for new_line in args:
            ofile.write('{0}{1}'.format(new_line, os.linesep))

    return 'Wrote {0} lines to "{1}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Prepend text to the beginning of a file

    path
        path to file

    `*args`
        strings to prepend to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.prepend /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

    .. code-block:: bash

        salt '*' file.prepend /etc/motd args='cheese=spam'

        salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)
    # An explicit args= kwarg (string or list) overrides positional *args
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) else [kwargs['args']]
    # Read the current contents; a missing file prepends into an empty file
    try:
        with salt.utils.files.fopen(path) as fhr:
            existing = fhr.readlines()
    except IOError:
        existing = []
    fresh = ['{0}\n'.format(item) for item in args]
    # Rewrite the file with the new lines first, original content after
    with salt.utils.files.fopen(path, "w") as fhw:
        fhw.write(''.join(fresh + existing))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)
def write(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Write text to a file, overwriting any existing contents.

    path
        path to file

    `*args`
        strings to write to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.write /etc/motd \\
                "With all thine offerings thou shalt offer salt."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

    .. code-block:: bash

        salt '*' file.write /etc/motd args='cheese=spam'

        salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"
    '''
    path = os.path.expanduser(path)
    # An explicit args= kwarg (string or list) overrides positional *args
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) else [kwargs['args']]
    lines = ['{0}\n'.format(item) for item in args]
    # Truncate and rewrite the whole file in one pass
    with salt.utils.files.fopen(path, "w") as fhw:
        fhw.write(''.join(lines))
    return 'Wrote {0} lines to "{1}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    '''
    .. versionadded:: 0.9.5

    Just like the ``touch`` command, create a file if it doesn't exist or
    simply update the atime and mtime if it already does.

    atime:
        Access time in Unix epoch time
    mtime:
        Last modification in Unix epoch time

    Returns ``True`` if the file exists after the operation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.touch /var/log/emptyfile
    '''
    name = os.path.expanduser(name)

    # Coerce through str() before .isdigit(): the original called
    # atime.isdigit() directly, which raised AttributeError when another
    # execution module passed an int instead of the CLI's string.
    if atime and str(atime).isdigit():
        atime = int(atime)
    if mtime and str(mtime).isdigit():
        mtime = int(mtime)
    try:
        # Create the file if it does not exist yet
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, 'a') as fhw:
                fhw.write('')

        # os.utime needs either None (use current time) or a full
        # (atime, mtime) pair; fill in the missing half with "now".
        if not atime and not mtime:
            times = None
        elif not mtime and atime:
            times = (atime, time.time())
        elif not atime and mtime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)

    except TypeError:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)

    return os.path.exists(name)
def seek_read(path, size, offset):
    '''
    .. versionadded:: 2014.1.0

    Seek to a position on a file and read it

    path
        path to file

    seek
        amount to read at once

    offset
        offset to start into the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.seek_read /path/to/file 4096 0
    '''
    target = os.path.expanduser(path)
    # Use raw file descriptors so the read is unbuffered and exact
    fd_ = os.open(target, os.O_RDONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        return os.read(fd_, int(size))
    finally:
        os.close(fd_)
def seek_write(path, data, offset):
    '''
    .. versionadded:: 2014.1.0

    Seek to a position on a file and write to it

    path
        path to file

    data
        data to write to file

    offset
        position in file to start writing

    CLI Example:

    .. code-block:: bash

        salt '*' file.seek_write /path/to/file 'some data' 4096
    '''
    target = os.path.expanduser(path)
    fd_ = os.open(target, os.O_WRONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        written = os.write(fd_, data)
        # Flush kernel buffers so the write is durable before returning
        os.fsync(fd_)
    finally:
        os.close(fd_)
    return written
def truncate(path, length):
    '''
    .. versionadded:: 2014.1.0

    Seek to a position on a file and delete everything after that point

    path
        path to file

    length
        offset into file to truncate

    CLI Example:

    .. code-block:: bash

        salt '*' file.truncate /path/to/file 512
    '''
    target = os.path.expanduser(path)
    # Open read/write in binary mode; truncate() cuts at the byte offset
    with salt.utils.files.fopen(target, 'rb+') as fhw:
        fhw.truncate(int(length))
def link(src, path):
    '''
    .. versionadded:: 2014.1.0

    Create a hard link to a file

    Returns ``True`` on success; raises ``CommandExecutionError`` on failure
    and ``SaltInvocationError`` when ``src`` is not an absolute path.

    CLI Example:

    .. code-block:: bash

        salt '*' file.link /path/to/file /path/to/link
    '''
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.link(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    # NOTE: the former trailing `return False` was unreachable (every path
    # above returns or raises) and has been removed.
def is_link(path):
    '''
    Check if the path is a symbolic link

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_link /path/to/link
    '''
    # Wrapper kept for API consistency: os.path.islink historically did not
    # support Windows symlinks, so callers go through this single entry point
    # on every platform.
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    '''
    Create a symbolic link (symlink, soft link) to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.symlink /path/to/file /path/to/link
    '''
    path = os.path.expanduser(path)
    # Idempotence: if a link already exists and points at src, do nothing.
    # os.readlink raises OSError when path is absent or not a symlink.
    try:
        current_target = os.readlink(path)
        if os.path.normpath(current_target) == os.path.normpath(src):
            log.debug('link already in correct state: %s -> %s', path, src)
            return True
    except OSError:
        pass
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.symlink(src, path)
        return True
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return False
def rename(src, dst):
    '''
    Rename a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.rename /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    try:
        os.rename(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )
    return True
def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy a file or directory from source to dst

    In order to copy a directory, the recurse flag is required, and
    will by default overwrite files in the destination with the same path,
    and retain all other existing files. (similar to cp -r on unix)

    remove_existing will remove all files in the target directory,
    and then copy files from the source.

    .. note::
        The copy function accepts paths that are local to the Salt minion.
        This function does not support salt://, http://, or the other
        additional file paths that are supported by :mod:`states.file.managed
        <salt.states.file.managed>` and :mod:`states.file.recurse
        <salt.states.file.recurse>`.

    CLI Example:

    .. code-block:: bash

        salt '*' file.copy /path/to/src /path/to/dst
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True

    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))

    # Record the source's ownership/mode up front so it can be re-applied to
    # the destination afterwards. Skipped on Windows, where POSIX uid/gid and
    # mode bits do not apply.
    if not salt.utils.platform.is_windows():
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))

    try:
        # Directory copy path: taken when dst is an existing directory OR the
        # source itself is a directory; requires the recurse flag.
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                # Mirror semantics: wipe dst entirely, then copy the tree
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                # Merge semantics: overwrite colliding paths, keep the rest
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )

    # Restore the ownership/permissions captured from the source
    if not salt.utils.platform.is_windows():
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    '''
    .. versionadded:: 2014.1.0

    Returns the lstat attributes for the given file or dir. Does not support
    symbolic links.

    CLI Example:

    .. code-block:: bash

        salt '*' file.lstat /path/to/file
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to file must be absolute.')
    wanted = ('st_atime', 'st_ctime', 'st_gid', 'st_mode',
              'st_mtime', 'st_nlink', 'st_size', 'st_uid')
    try:
        result = os.lstat(path)
    except Exception:
        # Nonexistent/unreadable path: report an empty attribute dict
        return {}
    return {key: getattr(result, key) for key in wanted}
def access(path, mode):
    '''
    .. versionadded:: 2014.1.0

    Test whether the Salt process has the specified access to the file. One of
    the following modes must be specified:

    .. code-block::text

        f: Test the existence of the path
        r: Test the readability of the path
        w: Test the writability of the path
        x: Test whether the path can be executed

    CLI Example:

    .. code-block:: bash

        salt '*' file.access /path/to/file f
        salt '*' file.access /path/to/file x
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    modes = {'f': os.F_OK,
             'r': os.R_OK,
             'w': os.W_OK,
             'x': os.X_OK}
    try:
        # Letter form: translate to the os.*_OK constant
        resolved = modes[mode]
    except KeyError:
        # Raw os.*_OK constant passed directly
        if mode in six.itervalues(modes):
            return os.access(path, mode)
        raise SaltInvocationError('Invalid mode specified.')
    return os.access(path, resolved)
def read(path, binary=False):
    '''
    .. versionadded:: 2017.7.0

    Return the content of the file.

    CLI Example:

    .. code-block:: bash

        salt '*' file.read /path/to/file
    '''
    # 'rb' returns bytes, 'r' returns text; only an exact True selects binary
    open_mode = 'rb' if binary is True else 'r'
    with salt.utils.files.fopen(path, open_mode) as handle:
        return handle.read()
def readlink(path, canonicalize=False):
    '''
    .. versionadded:: 2014.1.0

    Return the path that a symlink points to

    If canonicalize is set to True, then it return the final target

    CLI Example:

    .. code-block:: bash

        salt '*' file.readlink /path/to/link
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    if not os.path.islink(path):
        raise SaltInvocationError('A valid link was not specified.')
    # realpath resolves chains of links to the final target
    return os.path.realpath(path) if canonicalize else os.readlink(path)
def readdir(path):
    '''
    .. versionadded:: 2014.1.0

    Return a list containing the contents of a directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.readdir /path/to/dir/
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Dir path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    # Mimic raw readdir(3) output, which includes the '.' and '..' entries
    return ['.', '..'] + os.listdir(path)
def statvfs(path):
    '''
    .. versionadded:: 2014.1.0

    Perform a statvfs call against the filesystem that the file resides on

    Returns a dict of the ``f_*`` fields; raises ``CommandExecutionError``
    when the call fails.

    CLI Example:

    .. code-block:: bash

        salt '*' file.statvfs /path/to/file
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        stv = os.statvfs(path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
    # NOTE: the former trailing `return False` was unreachable (the try
    # either returns or raises) and has been removed.
    return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
        'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
        'f_frsize', 'f_namemax'))
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict containing the stats for a given file

    CLI Example:

    .. code-block:: bash

        salt '*' file.stats /etc/passwd
    '''
    path = os.path.expanduser(path)
    ret = {}
    if not os.path.exists(path):
        # Broken symlinks will return False for os.path.exists(), but still
        # have a uid and gid
        try:
            pstat = os.lstat(path)
        except OSError:
            # Not a broken symlink, just a nonexistent path
            return ret
    else:
        pstat = os.stat(path) if follow_symlinks else os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    # Classify the entry; default to 'file', later matches win (the stat
    # type predicates are mutually exclusive in practice)
    ret['type'] = 'file'
    for predicate, label in ((stat.S_ISDIR, 'dir'),
                             (stat.S_ISCHR, 'char'),
                             (stat.S_ISBLK, 'block'),
                             (stat.S_ISREG, 'file'),
                             (stat.S_ISLNK, 'link'),
                             (stat.S_ISFIFO, 'pipe'),
                             (stat.S_ISSOCK, 'socket')):
        if predicate(pstat.st_mode):
            ret['type'] = label
    ret['target'] = os.path.realpath(path)
    return ret
def rmdir(path):
    '''
    .. versionadded:: 2014.1.0

    Remove the specified directory. Fails if a directory is not empty.

    CLI Example:

    .. code-block:: bash

        salt '*' file.rmdir /tmp/foo/
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    try:
        os.rmdir(path)
    except OSError as exc:
        # Report the OS error text (e.g. "Directory not empty") to the caller
        return exc.strerror
    return True
def remove(path):
    '''
    Remove the named file. If a directory is supplied, it will be recursively
    deleted.

    CLI Example:

    .. code-block:: bash

        salt '*' file.remove /tmp/foo
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))
    try:
        # Files and symlinks (including links to directories) are unlinked;
        # real directories are removed recursively
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return True
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            'Could not remove \'{0}\': {1}'.format(path, exc)
        )
    # Nothing existed at the path
    return False
def directory_exists(path):
    '''
    Tests to see if path is a valid directory. Returns True/False.

    CLI Example:

    .. code-block:: bash

        salt '*' file.directory_exists /etc
    '''
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    '''
    Tests to see if path is a valid file. Returns True/False.

    CLI Example:

    .. code-block:: bash

        salt '*' file.file_exists /etc/passwd
    '''
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    '''
    Tests to see if path after expansion is a valid path (file or directory).
    Expansion allows usage of ? * and character ranges []. Tilde expansion
    is not supported. Returns True/False.

    .. versionadded:: Hellium

    CLI Example:

    .. code-block:: bash

        salt '*' file.path_exists_glob /etc/pam*/pass*
    '''
    # bool() replaces the redundant `True if ... else False` conditional
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    '''
    Reset the SELinux context on a given path

    CLI Example:

    .. code-block:: bash

         salt '*' file.restorecon /home/user/.ssh/authorized_keys
    '''
    # -F forces the full context reset; -R adds recursion into directories
    flags = '-FR' if recursive else '-F'
    cmd = ['restorecon', flags, path]
    # restorecon exits 0 on success; invert the retcode into a boolean
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_selinux_context(path):
    '''
    Get an SELinux context from a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_selinux_context /etc/hosts
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    # An SELinux context is four colon-separated fields: user:role:type:range
    match = re.search(r'\w+:\w+:\w+:\w+', out)
    if match is None:
        return 'No selinux context information is available for {0}'.format(path)
    return match.group(0)
def set_selinux_context(path,
                        user=None,
                        role=None,
                        type=None,    # pylint: disable=W0622
                        range=None):  # pylint: disable=W0622
    '''
    Set a specific SELinux label on a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_selinux_context path <user> <role> <type> <range>
        salt '*' file.set_selinux_context /etc/yum.repos.d/epel.repo system_u object_r system_conf_t s0
    '''
    # Nothing to change if no context component was provided
    if not any((user, role, type, range)):
        return False

    cmd = ['chcon']
    # Map each provided component onto its chcon flag, in fixed order
    for flag, value in (('-u', user), ('-r', role), ('-t', type), ('-l', range)):
        if value:
            cmd.extend([flag, value])
    cmd.append(path)

    if __salt__['cmd.retcode'](cmd, python_shell=False):
        # chcon failed
        return False
    # On success, report the context now active on the path
    return get_selinux_context(path)
def source_list(source, source_hash, saltenv):
    '''
    Check the source list and return the source to use

    CLI Example:

    .. code-block:: bash

        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    '''
    # Memoize per (source, source_hash, saltenv) for the life of this run
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]

    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        # Pre-load file/dir listings for any extra saltenvs referenced via
        # ?saltenv= query strings in the individual source items
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))

            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]

        ret = None
        # First item that can be confirmed to exist wins
        for single in source:
            if isinstance(single, dict):
                # check the proto, if it is http or ftp then download the file
                # to check, if it is salt then check the master list
                # if it is a local file, check if the file exists
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                # A per-item hash value overrides the function-level one
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                # Fix this for Windows
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = _urlparse('file://' + single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # NOTE(review): this membership test uses (path, saltenv)
                    # while the string branch below uses (path, senv) — looks
                    # like ?saltenv= overrides are ignored here; confirm intent.
                    if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    # Remote sources are accepted without probing; the
                    # download itself validates them later
                    ret = (single_src, single_hash)
                    break
                elif proto == 'file' and (
                        os.path.exists(urlparsed_single_src.netloc) or
                        os.path.exists(urlparsed_single_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_single_src.netloc,
                            urlparsed_single_src.path))):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = _urlparse('file://' + single)
                proto = urlparsed_src.scheme
                if proto == 'file' and (
                        os.path.exists(urlparsed_src.netloc) or
                        os.path.exists(urlparsed_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_src.netloc,
                            urlparsed_src.path))):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single, source_hash)
                    break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        # A single (non-list) source is passed through untouched
        ret = (source, source_hash)

    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Return the contents after applying the templating engine

    contents
        template string

    template
        template format

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    CLI Example:

    .. code-block:: bash

        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.' \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    '''
    if template in salt.utils.templates.TEMPLATE_REGISTRY:
        # context values take precedence over defaults for the same keys
        context_dict = defaults if defaults else {}
        if context:
            context_dict.update(context)
        # Apply templating: render from string to string (no temp files)
        contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
            contents,
            from_str=True,
            to_str=True,
            context=context_dict,
            saltenv=saltenv,
            grains=__opts__['grains'],
            pillar=__pillar__,
            salt=__salt__,
            opts=__opts__)['data']
        # Normalize the renderer's output type per Python major version
        if six.PY2:
            contents = contents.encode('utf-8')
        elif six.PY3 and isinstance(contents, bytes):
            # bytes -> str
            contents = contents.decode('utf-8')
    else:
        # Unknown engine: return a state-style failure dict instead of text.
        # NOTE(review): callers must therefore handle both str and dict
        # return types from this function.
        ret = {}
        ret['result'] = False
        ret['comment'] = ('Specified template format {0} is not supported'
                          ).format(template)
        return ret
    return contents
def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Return the managed file data for file.managed

    name
        location where the file lives on the server

    template
        template format

    source
        managed source file

    source_hash
        hash of the source file

    source_hash_name
        When ``source_hash`` refers to a remote file, this specifies the
        filename to look for in that file.

        .. versionadded:: 2016.3.5

    user
        Owner of file

    group
        Group owner of file

    mode
        Permissions of file

    attrs
        Attributes of file

        .. versionadded:: Oxygen

    context
        Variables to add to the template context

    defaults
        Default values of for context_dict

    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    Returns a 3-tuple ``(sfn, source_sum, comment)`` where ``sfn`` is the
    cached/rendered file path (or '' / False on failure), ``source_sum`` is
    a dict with ``hsum``/``hash_type``, and ``comment`` is an error string
    ('' on success).

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}

    def _get_local_file_source_sum(path):
        '''
        DRY helper for getting the source_sum value from a locally cached
        path.
        '''
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}

    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
                urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)

        # A single-letter scheme is really a Windows drive letter; rebuild
        # the path and treat it as a local file
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'

        if parsed_scheme == 'salt':
            # Hash comes from the fileserver
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            # Local file:// source, no hash given: hash it ourselves
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            # Bare absolute path source, no hash given: hash it ourselves
            source_sum = _get_local_file_source_sum(source)
        else:
            # Remote (http/ftp/...) source: a hash is mandatory unless the
            # caller explicitly opted out with skip_verify
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg

    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                # Cached copy is stale relative to the expected hash
                cache_refetch = True
            else:
                sfn = cached_dest

        # If we didn't have the template or remote file, or the file has been
        # updated and the cache has to be refreshed, download the file.
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum.get('hsum'))
            except Exception as exc:
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)

        # If cache failed, sfn will be False, so do a truth check on sfn first
        # as invoking os.path.exists() on a bool raises a TypeError.
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )

        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                # context values take precedence over defaults
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)

            if data['result']:
                # Rendering succeeded: hash the rendered temp file, since
                # that (not the raw source) is what will be written out
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data['data']

    return sfn, source_sum, ''
def extract_hash(hash_fn,
hash_type='sha256',
file_name='',
source='',
source_hash_name=None):
'''
.. versionchanged:: 2016.3.5
Prior to this version, only the ``file_name`` argument was considered
for filename matches in the hash file. This would be problematic for
cases in which the user was relying on a remote checksum file that they
do not control, and they wished to use a different name for that file
on the minion from the filename on the remote server (and in the
checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
:py:func:`file.managed <salt.states.file.managed>` state now also
passes this function the source URI as well as the ``source_hash_name``
(if specified). In cases where ``source_hash_name`` is specified, it
takes precedence over both the ``file_name`` and ``source``. When it is
not specified, ``file_name`` takes precedence over ``source``. This
allows for better capability for matching hashes.
.. versionchanged:: 2016.11.0
File name and source URI matches are no longer disregarded when
``source_hash_name`` is specified. They will be used as fallback
matches if there is no match to the ``source_hash_name`` value.
This routine is called from the :mod:`file.managed
<salt.states.file.managed>` state to pull a hash from a remote file.
Regular expressions are used line by line on the ``source_hash`` file, to
find a potential candidate of the indicated hash type. This avoids many
problems of arbitrary file layout rules. It specifically permits pulling
hash codes from debian ``*.dsc`` files.
If no exact match of a hash and filename are found, then the first hash
found (if any) will be returned. If no hashes at all are found, then
``None`` will be returned.
For example:
.. code-block:: yaml
openerp_7.0-latest-1.tar.gz:
file.managed:
- name: /tmp/openerp_7.0-20121227-075624-1_all.deb
- source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
- source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc
CLI Example:
.. code-block:: bash
salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
'''
hash_len = HASHES.get(hash_type)
if hash_len is None:
if hash_type:
log.warning(
'file.extract_hash: Unsupported hash_type \'%s\', falling '
'back to matching any supported hash_type', hash_type
)
hash_type = ''
hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
else:
hash_len_expr = str(hash_len)
filename_separators = string.whitespace + r'\/'
if source_hash_name:
if not isinstance(source_hash_name, six.string_types):
source_hash_name = str(source_hash_name)
source_hash_name_idx = (len(source_hash_name) + 1) * -1
log.debug(
'file.extract_hash: Extracting %s hash for file matching '
'source_hash_name \'%s\'',
'any supported' if not hash_type else hash_type,
source_hash_name
)
if file_name:
if not isinstance(file_name, six.string_types):
file_name = str(file_name)
file_name_basename = os.path.basename(file_name)
file_name_idx = (len(file_name_basename) + 1) * -1
if source:
if not isinstance(source, six.string_types):
source = str(source)
urlparsed_source = _urlparse(source)
source_basename = os.path.basename(
urlparsed_source.path or urlparsed_source.netloc
)
source_idx = (len(source_basename) + 1) * -1
basename_searches = [x for x in (file_name, source) if x]
if basename_searches:
log.debug(
'file.extract_hash: %s %s hash for file matching%s: %s',
'If no source_hash_name match found, will extract'
if source_hash_name
else 'Extracting',
'any supported' if not hash_type else hash_type,
'' if len(basename_searches) == 1 else ' either of the following',
', '.join(basename_searches)
)
partial = None
found = {}
with salt.utils.files.fopen(hash_fn, 'r') as fp_:
for line in fp_:
line = line.strip()
hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
hash_match = re.search(hash_re, line)
matched = None
if hash_match:
matched_hsum = hash_match.group(1)
if matched_hsum is not None:
matched_type = HASHES_REVMAP.get(len(matched_hsum))
if matched_type is None:
# There was a match, but it's not of the correct length
# to match one of the supported hash types.
matched = None
else:
matched = {'hsum': matched_hsum,
'hash_type': matched_type}
if matched is None:
log.debug(
'file.extract_hash: In line \'%s\', no %shash found',
line,
'' if not hash_type else hash_type + ' '
)
continue
if partial is None:
partial = matched
def _add_to_matches(found, line, match_type, value, matched):
log.debug(
'file.extract_hash: Line \'%s\' matches %s \'%s\'',
line, match_type, value
)
found.setdefault(match_type, []).append(matched)
hash_matched = False
if source_hash_name:
if line.endswith(source_hash_name):
# Checking the character before where the basename
# should start for either whitespace or a path
# separator. We can't just rsplit on spaces/whitespace,
# because the filename may contain spaces.
try:
if line[source_hash_name_idx] in string.whitespace:
_add_to_matches(found, line, 'source_hash_name',
source_hash_name, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(source_hash_name) + r'\s+',
line):
_add_to_matches(found, line, 'source_hash_name',
source_hash_name, matched)
hash_matched = True
if file_name:
if line.endswith(file_name_basename):
# Checking the character before where the basename
# should start for either whitespace or a path
# separator. We can't just rsplit on spaces/whitespace,
# because the filename may contain spaces.
try:
if line[file_name_idx] in filename_separators:
_add_to_matches(found, line, 'file_name',
file_name, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(file_name) + r'\s+', line):
_add_to_matches(found, line, 'file_name',
file_name, matched)
hash_matched = True
if source:
if line.endswith(source_basename):
# Same as above, we can't just do an rsplit here.
try:
if line[source_idx] in filename_separators:
_add_to_matches(found, line, 'source',
source, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(source) + r'\s+', line):
_add_to_matches(found, line, 'source', source, matched)
hash_matched = True
if not hash_matched:
log.debug(
'file.extract_hash: Line \'%s\' contains %s hash '
'\'%s\', but line did not meet the search criteria',
line, matched['hash_type'], matched['hsum']
)
for found_type, found_str in (('source_hash_name', source_hash_name),
('file_name', file_name),
('source', source)):
if found_type in found:
if len(found[found_type]) > 1:
log.debug(
'file.extract_hash: Multiple %s matches for %s: %s',
found_type,
found_str,
', '.join(
['{0} ({1})'.format(x['hsum'], x['hash_type'])
for x in found[found_type]]
)
)
ret = found[found_type][0]
log.debug(
'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
ret['hash_type'], ret['hsum'], found_str
)
return ret
if partial:
log.debug(
'file.extract_hash: Returning the partially identified %s hash '
'\'%s\'', partial['hash_type'], partial['hsum']
)
return partial
log.debug('file.extract_hash: No matches, returning None')
return None
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
    '''
    Check the permissions on files, modify attributes and chown if needed. File
    attributes are only verified if lsattr(1) is installed.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_perms /etc/sudoers '{}' root root 400 ai

    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added

    name
        Path to the file or directory to check
    ret
        State-style return dict to update in place (``None`` builds a fresh one)
    user / group / mode
        Desired ownership and permissions; ``None``/falsy values are skipped
    attrs
        Desired file attributes (chattr-style); only enforced when lsattr(1)
        is available and the target is not a directory
    follow_symlinks
        When ``False``, symlinks are never chmod'ed and are chowned with lchown

    Returns a ``(ret, perms)`` tuple, where ``perms`` records the pre-existing
    ('l'-prefixed) and changed ('c'-prefixed) ownership values.

    Raises :py:class:`CommandExecutionError` when ``name`` does not exist.
    '''
    name = os.path.expanduser(name)
    lsattr_cmd = salt.utils.path.which('lsattr')

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        # Preserve any caller-supplied comment; it is re-inserted at the end.
        orig_comment = ret['comment']
        ret['comment'] = []

    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        # NOTE: The file.directory state checks the content of the error
        # message in this exception. Any changes made to the message for this
        # exception will reflect the file.directory state as well, and will
        # likely require changes there.
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.files.normalize_mode(cur['mode'])
    is_dir = os.path.isdir(name)

    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # List attributes on file
        perms['lattrs'] = ''.join(lsattr(name).get('name', ''))
        # Remove attributes on file so changes can be enforced.
        if perms['lattrs']:
            chattr(name, operator='remove', attributes=perms['lattrs'])

    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    # Re-read the mode to confirm the change actually stuck
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode

    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        # On Windows, names may differ while mapping to the same SID/uid,
        # so compare uids rather than name strings.
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.platform.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
            not salt.utils.platform.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group

    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            # chown requires both a user and a group; fall back to the
            # current values for whichever was not requested.
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False

    # Verify the ownership changes took effect (or record them for test mode)
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.platform.is_windows() and
                user != get_user(name, follow_symlinks=follow_symlinks) and
                user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        # Fixed: these conditions previously compared ``user != ''`` (a
        # copy/paste slip from the user block above); the group verification
        # must compare ``group``.
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                group != '') or (
            not salt.utils.platform.is_windows() and
                group != get_group(name, follow_symlinks=follow_symlinks) and
                group != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and group != '':
            ret['changes']['group'] = group

    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None

    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # Replace attributes on file if it had been removed
        if perms['lattrs']:
            chattr(name, operator='add', attributes=perms['lattrs'])

    # Modify attributes of file if needed
    if attrs is not None and not is_dir:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                if __opts__['test'] is True:
                    ret['changes']['attrs'] = attrs
                else:
                    if diff_attrs[0] is not None:
                        chattr(name, operator="add", attributes=diff_attrs[0])
                    if diff_attrs[1] is not None:
                        chattr(name, operator="remove", attributes=diff_attrs[1])
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if cmp_attrs[0] is not None or cmp_attrs[1] is not None:
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change attributes to {0}'.format(attrs)
                        )
                    else:
                        ret['changes']['attrs'] = attrs

    return ret, perms
def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Check to see what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base

    Returns a two-element tuple: ``(True, msg)`` when the file is already in
    the desired state, ``(None, msg)`` when changes are pending, and
    ``(False, msg)`` when the source could not be gathered.
    '''
    # When the source is a list, resolve it down to the single file that exists
    source, source_hash = source_list(source,  # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Pull (and render, if templated) the source file from the master
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments

    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)

    # Ownership/mode differences are not reported for files living under the
    # temp dir; get_managed() still applies them on the real destination.
    if name.startswith(tempfile.gettempdir()):
        for key in ('user', 'group', 'mode'):
            changes.pop(key, None)

    __clean_tmp(sfn)

    if not changes:
        return True, 'The file {0} is in the correct state'.format(name)

    log.info(changes)
    comments = ['The following values are set to be changed:\n']
    comments.extend('{0}: {1}\n'.format(key, val)
                    for key, val in six.iteritems(changes))
    return None, ''.join(comments)
def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return a dictionary of what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    '''
    # When the source is a list, resolve it down to the single file that exists
    source, source_hash = source_list(source,  # pylint: disable=W0633
                                      source_hash,
                                      saltenv)

    sfn = ''
    source_sum = None

    if contents is None:
        # Pull (and render, if templated) the source file from the master
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments

        if sfn and source and keep_mode:
            # Mirror the source file's mode, which is only possible for
            # fileserver-backed or local sources.
            scheme = _urlparse(source).scheme
            if scheme in ('salt', 'file') or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)

    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    __clean_tmp(sfn)
    return changes
def check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents=None):
    '''
    Check for the changes in the file metadata.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_file_meta /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' base

    .. note::

        Supported hash types include sha512, sha384, sha256, sha224, sha1, and
        md5.

    name
        Path to file destination

    sfn
        Template-processed source file contents

    source
        URL to file source

    source_sum
        File checksum information as a dictionary

        .. code-block:: yaml

            {hash_type: md5, hsum: <md5sum>}

    user
        Destination file user owner

    group
        Destination file group owner

    mode
        Destination file permissions mode

    attrs
        Destination file attributes

        .. versionadded:: Oxygen

    saltenv
        Salt environment used to resolve source files

    contents
        File contents

    Returns a dict of pending changes keyed by what differs (``newfile``,
    ``diff``, ``sum``, ``user``, ``group``, ``mode``, ``attrs``); empty when
    the file is already in the desired state.
    '''
    lsattr_cmd = salt.utils.path.which('lsattr')
    changes = {}
    if not source_sum:
        source_sum = {}

    # follow_symlinks=False so that a symlink target is inspected as a link
    lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
    if not lstats:
        changes['newfile'] = name
        return changes

    if 'hsum' in source_sum:
        if source_sum['hsum'] != lstats['sum']:
            if not sfn and source:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum['hsum'])
            if sfn:
                try:
                    changes['diff'] = get_diff(
                        sfn, name, template=True, show_filenames=False)
                except CommandExecutionError as exc:
                    changes['diff'] = exc.strerror
            else:
                # Cannot produce a diff without a cached copy of the source
                changes['sum'] = 'Checksum differs'

    if contents is not None:
        # Write a tempfile with the static contents
        tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                       text=True)
        if salt.utils.platform.is_windows():
            contents = os.linesep.join(
                _splitlines_preserving_trailing_newline(contents))
        with salt.utils.files.fopen(tmp, 'w') as tmp_:
            tmp_.write(salt.utils.stringutils.to_str(contents))
        # Compare the static contents with the named file
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            log.error('Failed to diff files: {0}'.format(exc))
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__['config.option']('obfuscate_templates'):
                changes['diff'] = '<Obfuscated Template>'
            else:
                changes['diff'] = differences

    if not salt.utils.platform.is_windows():
        # Check owner
        if (user is not None
                and user != lstats['user']
                and user != lstats['uid']):
            changes['user'] = user

        # Check group
        if (group is not None
                and group != lstats['group']
                and group != lstats['gid']):
            changes['group'] = group

        # Normalize the file mode
        smode = salt.utils.files.normalize_mode(lstats['mode'])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes['mode'] = mode

        if lsattr_cmd and attrs is not None:
            # Fixed an operator-precedence bug: the previous condition
            # `attrs is not None and X or Y` parsed as
            # `(attrs is not None and X) or Y`, so `attrs: None` could be
            # reported as a change even when no attributes were requested.
            # Attributes are now only compared when they were specified.
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                changes['attrs'] = attrs

    return changes
def get_diff(file1,
             file2,
             saltenv='base',
             show_filenames=True,
             show_changes=True,
             template=False,
             source_hash_file1=None,
             source_hash_file2=None):
    '''
    Return unified diff of two files

    file1
        The first file to feed into the diff utility

        .. versionchanged:: Oxygen
            Can now be either a local or remote file. In earlier releases,
            this had to be a file local to the minion.

    file2
        The second file to feed into the diff utility

        .. versionchanged:: Oxygen
            Can now be either a local or remote file. In earlier releases, this
            had to be a file on the salt fileserver (i.e.
            ``salt://somefile.txt``)

    show_filenames : True
        Set to ``False`` to hide the filenames in the top two lines of the
        diff.

    show_changes : True
        If set to ``False``, and there are differences, then instead of a diff
        a simple message stating that show_changes is set to ``False`` will be
        returned.

    template : False
        Set to ``True`` if two templates are being compared. This is not useful
        except for within states, with the ``obfuscate_templates`` option set
        to ``True``.

        .. versionadded:: Oxygen

    source_hash_file1
        If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: Oxygen

    source_hash_file2
        If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: Oxygen

    CLI Examples:

    .. code-block:: bash

        salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
        salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt

    Raises :py:class:`CommandExecutionError` when either file cannot be
    cached or read.
    '''
    files = (file1, file2)
    source_hashes = (source_hash_file1, source_hash_file2)
    paths = []
    errors = []

    # Cache both inputs locally; collect all failures before bailing out so
    # that a single error report covers both files.
    for filename, source_hash in zip(files, source_hashes):
        try:
            # Local file paths will just return the same path back when passed
            # to cp.cache_file.
            cached_path = __salt__['cp.cache_file'](filename,
                                                    saltenv,
                                                    source_hash=source_hash)
            if cached_path is False:
                errors.append(
                    u'File {0} not found'.format(
                        salt.utils.stringutils.to_unicode(filename)
                    )
                )
                continue
            paths.append(cached_path)
        except MinionError as exc:
            errors.append(salt.utils.stringutils.to_unicode(str(exc)))
            continue

    if errors:
        raise CommandExecutionError(
            'Failed to cache one or more files',
            info=errors
        )

    args = []
    # Idiom fix: the previous loop used enumerate() but never used the index.
    for filename in files:
        try:
            with salt.utils.files.fopen(filename, 'r') as fp_:
                args.append(fp_.readlines())
        except (IOError, OSError) as exc:
            raise CommandExecutionError(
                'Failed to read {0}: {1}'.format(
                    salt.utils.stringutils.to_str(filename),
                    exc.strerror
                )
            )

    if args[0] != args[1]:
        if template and __salt__['config.option']('obfuscate_templates'):
            ret = u'<Obfuscated Template>'
        elif not show_changes:
            ret = u'<show_changes=False>'
        else:
            bdiff = _binary_replace(*files)
            if bdiff:
                ret = bdiff
            else:
                if show_filenames:
                    # unified_diff accepts the two filenames as its 3rd and
                    # 4th positional arguments
                    args.extend(
                        [salt.utils.stringutils.to_str(x) for x in files]
                    )
                ret = salt.utils.locales.sdecode(
                    ''.join(difflib.unified_diff(*args))  # pylint: disable=no-value-for-parameter
                )
        return ret

    return u''
def manage_file(name,
                sfn,
                ret,
                source,
                source_sum,
                user,
                group,
                mode,
                attrs,
                saltenv,
                backup,
                makedirs=False,
                template=None,  # pylint: disable=W0613
                show_changes=True,
                contents=None,
                dir_mode=None,
                follow_symlinks=True,
                skip_verify=False,
                keep_mode=False,
                encoding=None,
                encoding_errors='strict',
                **kwargs):
    '''
    Checks the destination against what was retrieved with get_managed and
    makes the appropriate modifications (if necessary).

    name
        location to place the file

    sfn
        location of cached file on the minion

        This is the path to the file stored on the minion. This file is placed
        on the minion using cp.cache_file.  If the hash sum of that file
        matches the source_sum, we do not transfer the file to the minion
        again.

        This file is then grabbed and if it has template set, it renders the
        file to be placed into the correct place on the system using
        salt.files.utils.copyfile()

    ret
        The initial state return data structure. Pass in ``None`` to use the
        default structure.

    source
        file reference on the master

    source_hash
        sum hash for source

    user
        user owner

    group
        group owner

    backup
        backup_mode

    attrs
        attributes to be set on file: '' means remove all of them

        .. versionadded:: Oxygen

    makedirs
        make directories if they do not exist

    template
        format of templating

    show_changes
        Include diff in state return

    contents:
        contents to be placed in the file

    dir_mode
        mode for directories created with makedirs

    skip_verify : False
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    keep_mode : False
        If ``True``, and the ``source`` is a file from the Salt fileserver (or
        a local file on the minion), the mode of the destination file will be
        set to the mode of the source file.

        .. note:: keep_mode does not work with salt-ssh.

            As a consequence of how the files are transferred to the minion, and
            the inability to connect back to the master with salt-ssh, salt is
            unable to stat the file as it exists on the fileserver and thus
            cannot mirror the mode on the salt-ssh minion

    encoding : None
        If None, str() will be applied to contents.
        If not None, specified encoding will be used.
        See https://docs.python.org/3/library/codecs.html#standard-encodings
        for the list of available encodings.

        .. versionadded:: 2017.7.0

    encoding_errors : 'strict'
        Default is ```'strict'```.
        See https://docs.python.org/2/library/codecs.html#codec-base-classes
        for the error handling schemes.

        .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''

    .. versionchanged:: 2014.7.0
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': '',
               'result': True}

    # Ensure that user-provided hash string is lowercase
    if source_sum and ('hsum' in source_sum):
        source_sum['hsum'] = source_sum['hsum'].lower()

    if source and not sfn:
        # File is not present, cache it
        sfn = __salt__['cp.cache_file'](source, saltenv)
        if not sfn:
            return _error(
                ret, 'Source file \'{0}\' not found'.format(source))
        htype = source_sum.get('hash_type', __opts__['hash_type'])
        # Recalculate source sum now that file has been cached
        source_sum = {
            'hash_type': htype,
            'hsum': get_hash(sfn, form=htype)
        }

        # keep_mode: mirror the source file's mode; only possible for
        # fileserver-backed (salt://) or local sources.
        if keep_mode:
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    # NOTE(review): the stat above was attempted on ``source``
                    # but this message logs ``sfn`` — confirm which path is
                    # intended to be reported.
                    log.warning('Unable to stat %s: %s', sfn, exc)

    # Check changes if the target file exists
    if os.path.isfile(name) or os.path.islink(name):
        if os.path.islink(name) and follow_symlinks:
            real_name = os.path.realpath(name)
        else:
            real_name = name

        # Only test the checksums on files with managed contents
        if source and not (not follow_symlinks and os.path.islink(real_name)):
            name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
        else:
            name_sum = None

        # Check if file needs to be replaced
        # NOTE(review): the default for 'hsum' here is __opts__['hash_type']
        # (a hash *name*, e.g. 'sha256'), which can never equal a real
        # checksum — presumably intended as a never-matching sentinel.
        if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server or local
            # source, and we are not skipping checksum verification, then
            # verify that it matches the specified checksum.
            if not skip_verify \
                    and _urlparse(source).scheme not in ('salt', ''):
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3}). If the \'source_hash\' value '
                        'refers to a remote file with multiple possible '
                        'matches, then it may be necessary to set '
                        '\'source_hash_name\'.'.format(
                            source_sum['hash_type'],
                            source,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            # Print a diff equivalent to diff -u old new
            if __salt__['config.option']('obfuscate_templates'):
                ret['changes']['diff'] = '<Obfuscated Template>'
            elif not show_changes:
                ret['changes']['diff'] = '<show_changes=False>'
            else:
                try:
                    ret['changes']['diff'] = get_diff(
                        real_name, sfn, show_filenames=False)
                except CommandExecutionError as exc:
                    ret['changes']['diff'] = exc.strerror

            # Pre requisites are met, and the file needs to be replaced, do it
            try:
                salt.utils.files.copyfile(sfn,
                                          real_name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except IOError as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))

        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                           text=True)
            if salt.utils.platform.is_windows():
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents))
            with salt.utils.files.fopen(tmp, 'w') as tmp_:
                if encoding:
                    log.debug('File will be encoded with {0}'.format(encoding))
                    tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
                else:
                    tmp_.write(salt.utils.stringutils.to_str(contents))

            # Diff the temp file against the real target to decide whether
            # the contents actually changed
            try:
                differences = get_diff(
                    real_name, tmp, show_filenames=False,
                    show_changes=show_changes, template=True)
            except CommandExecutionError as exc:
                ret.setdefault('warnings', []).append(
                    'Failed to detect changes to file: {0}'.format(exc.strerror)
                )
                differences = ''

            if differences:
                ret['changes']['diff'] = differences

                # Pre requisites are met, the file needs to be replaced, do it
                try:
                    salt.utils.files.copyfile(tmp,
                                              real_name,
                                              __salt__['config.backup_mode'](backup),
                                              __opts__['cachedir'])
                except IOError as io_error:
                    __clean_tmp(tmp)
                    return _error(
                        ret, 'Failed to commit change: {0}'.format(io_error))
            __clean_tmp(tmp)

        # Check for changing symlink to regular file here
        if os.path.islink(name) and not follow_symlinks:
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server source verify
            # that it matches the intended sum value
            if not skip_verify and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            try:
                salt.utils.files.copyfile(sfn,
                                          name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except IOError as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))

            ret['changes']['diff'] = \
                'Replace symbolic link with regular file'

        # Ownership/permissions: Windows uses the win_file variant of
        # check_perms (owner/ACL based); elsewhere the POSIX variant is used.
        if salt.utils.platform.is_windows():
            ret = check_perms(name,
                              ret,
                              kwargs.get('win_owner'),
                              kwargs.get('win_perms'),
                              kwargs.get('win_deny_perms'),
                              None,
                              kwargs.get('win_inheritance'))
        else:
            ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)

        if ret['changes']:
            ret['comment'] = u'File {0} updated'.format(
                salt.utils.locales.sdecode(name)
            )

        elif not ret['changes'] and ret['result']:
            ret['comment'] = u'File {0} is in the correct state'.format(
                salt.utils.locales.sdecode(name)
            )
        if sfn:
            __clean_tmp(sfn)
        return ret
    else:  # target file does not exist
        contain_dir = os.path.dirname(name)

        def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
            # Create missing parent directories, deriving dir_mode from the
            # file mode when it was not given explicitly.
            # check for existence of windows drive letter
            if salt.utils.platform.is_windows():
                drive, _ = os.path.splitdrive(name)
                if drive and not os.path.exists(drive):
                    __clean_tmp(sfn)
                    return _error(ret,
                                  '{0} drive not present'.format(drive))
            if dir_mode is None and mode is not None:
                # Add execute bit to each nonzero digit in the mode, if
                # dir_mode was not specified. Otherwise, any
                # directories created with makedirs_() below can't be
                # listed via a shell.
                mode_list = [x for x in str(mode)][-3:]
                for idx in range(len(mode_list)):
                    if mode_list[idx] != '0':
                        mode_list[idx] = str(int(mode_list[idx]) | 1)
                dir_mode = ''.join(mode_list)
            if salt.utils.platform.is_windows():
                # This function resides in win_file.py and will be available
                # on Windows. The local function will be overridden
                # pylint: disable=E1121
                makedirs_(name,
                          kwargs.get('win_owner'),
                          kwargs.get('win_perms'),
                          kwargs.get('win_deny_perms'),
                          kwargs.get('win_inheritance'))
                # pylint: enable=E1121
            else:
                makedirs_(name, user=user, group=group, mode=dir_mode)

        if source:
            # It is a new file, set the diff accordingly
            ret['changes']['diff'] = 'New file'
            # Apply the new file
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server source verify
            # that it matches the intended sum value
            if not skip_verify \
                    and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')
        else:  # source != True
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')

            # Create the file, user rw-only if mode will be set to prevent
            # a small security race problem before the permissions are set
            if mode:
                current_umask = os.umask(0o77)

            # Create a new file when test is False and source is None
            if contents is None:
                if not __opts__['test']:
                    if touch(name):
                        ret['changes']['new'] = 'file {0} created'.format(name)
                        ret['comment'] = 'Empty file'
                    else:
                        return _error(
                            ret, 'Empty file {0} not created'.format(name)
                        )
            else:
                if not __opts__['test']:
                    if touch(name):
                        ret['changes']['diff'] = 'New file'
                    else:
                        return _error(
                            ret, 'File {0} not created'.format(name)
                        )

            if mode:
                # Restore the umask saved above
                os.umask(current_umask)

        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                           text=True)
            if salt.utils.platform.is_windows():
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents))
            with salt.utils.files.fopen(tmp, 'w') as tmp_:
                if encoding:
                    log.debug('File will be encoded with {0}'.format(encoding))
                    tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
                else:
                    tmp_.write(salt.utils.stringutils.to_str(contents))

            # Copy into place
            salt.utils.files.copyfile(tmp,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(tmp)
        # Now copy the file contents if there is a source file
        elif sfn:
            salt.utils.files.copyfile(sfn,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(sfn)

        # This is a new file, if no mode specified, use the umask to figure
        # out what mode to use for the new file.
        if mode is None and not salt.utils.platform.is_windows():
            # Get current umask
            mask = os.umask(0)
            os.umask(mask)
            # Calculate the mode value that results from the umask
            mode = oct((0o777 ^ mask) & 0o666)

        if salt.utils.platform.is_windows():
            ret = check_perms(name,
                              ret,
                              kwargs.get('win_owner'),
                              kwargs.get('win_perms'),
                              kwargs.get('win_deny_perms'),
                              None,
                              kwargs.get('win_inheritance'))
        else:
            ret, _ = check_perms(name, ret, user, group, mode, attrs)

        if not ret['comment']:
            ret['comment'] = 'File ' + name + ' updated'

        if __opts__['test']:
            ret['comment'] = 'File ' + name + ' not updated'
        elif not ret['changes'] and ret['result']:
            ret['comment'] = 'File ' + name + ' is in the correct state'
        if sfn:
            __clean_tmp(sfn)

        return ret
def mkdir(dir_path,
          user=None,
          group=None,
          mode=None):
    '''
    Ensure that a directory is available.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mkdir /opt/jetty/context
    '''
    target = os.path.normpath(os.path.expanduser(dir_path))
    if not os.path.isdir(target):
        # If a caller such as managed() is invoked with makedirs=True, make
        # sure that any created dirs are created with the same user and group
        # to follow the principal of least surprise method.
        makedirs_perms(target, user, group, mode)
    return True
def makedirs_(path,
              user=None,
              group=None,
              mode=None):
    '''
    Ensure that the directory containing this path is available.

    .. note::

        The path must end with a trailing slash otherwise the directory/directories
        will be created up to the parent directory. For example if path is
        ``/opt/code``, then it would be treated as ``/opt/`` but if the path
        ends with a trailing slash like ``/opt/code/``, then it would be
        treated as ``/opt/code/``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs /opt/code/
    '''
    path = os.path.expanduser(path)

    if mode:
        mode = salt.utils.files.normalize_mode(mode)

    # Walk up the tree until an existing ancestor is found
    dirname = os.path.normpath(os.path.dirname(path))

    if os.path.isdir(dirname):
        # There's nothing for us to do
        msg = 'Directory \'{0}\' already exists'.format(dirname)
        log.debug(msg)
        return msg

    if os.path.exists(dirname):
        msg = 'The path \'{0}\' already exists and is not a directory'.format(
            dirname
        )
        log.debug(msg)
        return msg

    directories_to_create = []
    while not os.path.isdir(dirname):
        directories_to_create.append(dirname)
        parent = os.path.dirname(dirname)
        if parent == dirname:
            # os.path.dirname() reached a fixed point (e.g. a relative path
            # with no existing ancestor) — bail out instead of looping forever
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(parent)
            )
        dirname = parent

    # create parent directories from the topmost to the most deeply nested one
    for directory_to_create in reversed(directories_to_create):
        # all directories have the user, group and mode set!!
        log.debug('Creating directory: %s', directory_to_create)
        mkdir(directory_to_create, user=user, group=group, mode=mode)
def makedirs_perms(name,
                   user=None,
                   group=None,
                   mode='0755'):
    '''
    Taken and modified from os.makedirs to set user, group and mode for each
    directory created.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    '''
    name = os.path.expanduser(name)

    head, tail = os.path.split(name)
    if not tail:
        # Trailing slash: split again so ``tail`` is the last real component
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        try:
            # Recurse so every missing ancestor is created with the same
            # ownership and mode.
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:
            # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
def get_devmm(name):
    '''
    Get major/minor info from a device

    CLI Example:

    .. code-block:: bash

       salt '*' file.get_devmm /dev/chr
    '''
    name = os.path.expanduser(name)
    # Non-device paths report zeroed major/minor numbers
    if not (is_chrdev(name) or is_blkdev(name)):
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    '''
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_chrdev /dev/chr
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # If the character device does not exist in the first place
        return False
    return stat.S_ISCHR(mode)
def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a character device.

    name
        Absolute path to the device node to create
    major / minor
        Device major/minor numbers passed to :py:func:`os.makedev`
    user / group / mode
        Ownership and permissions applied after creation via check_perms

    Returns a state-style dict with ``name``, ``changes``, ``comment`` and
    ``result`` keys.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_chrdev /dev/chr 180 31
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    # Lazy %-args (matches the logging style used elsewhere in this module)
    # so the message is only rendered when DEBUG logging is enabled.
    log.debug('Creating character device name:%s major:%s minor:%s mode:%s',
              name, major, minor, mode)
    try:
        if __opts__['test']:
            # Test mode: report what would change, do not touch the system
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # os.mknod returns None on success
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created character device
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    '''
    Check if a file exists and is a block device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_blkdev /dev/blk
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # If the block device does not exist in the first place
        return False
    return stat.S_ISBLK(mode)
def mknod_blkdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a block device.

    name
        Absolute path to the device node to create
    major / minor
        Device major/minor numbers passed to :py:func:`os.makedev`
    user / group / mode
        Ownership and permissions applied after creation via check_perms

    Returns a state-style dict with ``name``, ``changes``, ``comment`` and
    ``result`` keys.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_blkdev /dev/blk 8 999
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    # Lazy %-args (matches the logging style used elsewhere in this module)
    # so the message is only rendered when DEBUG logging is enabled.
    log.debug('Creating block device name:%s major:%s minor:%s mode:%s',
              name, major, minor, mode)
    try:
        if __opts__['test']:
            # Test mode: report what would change, do not touch the system
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # os.mknod returns None on success
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created block device
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_fifo(name):
    '''
    Check if a file exists and is a FIFO.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)
    try:
        mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # If the fifo does not exist in the first place
        return False
    return stat.S_ISFIFO(mode)
def mknod_fifo(name,
               user=None,
               group=None,
               mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a FIFO pipe.

    name
        Absolute path to the FIFO to create
    user / group / mode
        Ownership and permissions applied after creation via check_perms

    Returns a state-style dict with ``name``, ``changes``, ``comment`` and
    ``result`` keys.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    # Lazy %-args (matches the logging style used elsewhere in this module)
    # so the message is only rendered when DEBUG logging is enabled.
    log.debug('Creating FIFO name: %s', name)
    try:
        if __opts__['test']:
            # Test mode: report what would change, do not touch the system
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            # os.mkfifo returns None on success
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # be happy it is already there
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created fifo
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def mknod(name,
          ntype,
          major=0,
          minor=0,
          user=None,
          group=None,
          mode='0600'):
    '''
    .. versionadded:: 0.17.0

    Create a block device, character device, or fifo pipe.
    Identical to the gnu mknod.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.mknod /dev/chr c 180 31
        salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
    '''
    # Make sure the parent directory hierarchy exists first
    makedirs_(name, user, group)
    # Dispatch on the node type, mirroring mknod(1)'s c/b/p arguments
    if ntype == 'c':
        return mknod_chrdev(name, major, minor, user, group, mode)
    if ntype == 'b':
        return mknod_blkdev(name, major, minor, user, group, mode)
    if ntype == 'p':
        return mknod_fifo(name, user, group, mode)
    raise SaltInvocationError(
        'Node type unavailable: \'{0}\'. Available node types are '
        'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
    )
def list_backups(path, limit=None):
    '''
    .. versionadded:: 0.17.0
    Lists the previous versions of a file backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.
    path
        The path on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups
    CLI Example:
    .. code-block:: bash
        salt '*' file.list_backups /foo/bar/baz.txt
    '''
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit is None (the default): return all backups
        pass
    except ValueError:
        log.error('file.list_backups: \'limit\' value must be numeric')
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(':', '_')
    else:
        # POSIX: strip the leading '/' so the path nests under the cache root
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup file in minion cache
    bkdir = os.path.join(bkroot, src_dir)
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    for fname in [x for x in os.listdir(bkdir)
                  if os.path.isfile(os.path.join(bkdir, x))]:
        # Backup filenames embed their creation time; the time separator
        # differs per platform because ':' is illegal on Windows
        if salt.utils.platform.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
        else:
            strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Move on to the next one.
            continue
        if salt.utils.platform.is_windows():
            str_format = '%a %b %d %Y %H-%M-%S.%f'
        else:
            str_format = '%a %b %d %Y %H:%M:%S.%f'
        files.setdefault(timestamp, {})['Backup Time'] = \
            timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]['Size'] = os.stat(location).st_size
        files[timestamp]['Location'] = location
    # Re-key newest-first: key 0 is the most recent backup; ``limit`` (when
    # numeric) truncates the result to the N most recent entries
    return dict(list(zip(
        list(range(len(files))),
        [files[x] for x in sorted(files, reverse=True)[:limit]]
    )))
# Expose the singular spelling as a CLI-visible alias of list_backups
list_backup = salt.utils.functools.alias_function(list_backups, 'list_backup')
def list_backups_dir(path, limit=None):
    '''
    Lists the previous versions of a directory backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.
    path
        The directory on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups
    CLI Example:
    .. code-block:: bash
        salt '*' file.list_backups_dir /foo/bar/baz/
    '''
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit is None (the default): return all backups
        pass
    except ValueError:
        log.error('file.list_backups_dir: \'limit\' value must be numeric')
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    # NOTE(review): unlike list_backups, no Windows ':' handling here --
    # presumably this path is only exercised on POSIX minions; confirm
    bkdir = os.path.join(bkroot, parent_dir[1:])
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    # Map each backed-up name (the text before the first '_') to its backup
    # count; the input is sorted so groupby sees equal keys adjacently
    f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
    ff = os.listdir(bkdir)
    for i, n in six.iteritems(f):
        ssfile = {}
        for x in sorted(ff):
            # NOTE: intentionally shadows the outer ``basename`` variable
            basename = x.split('_')[0]
            if i == basename:
                strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Folder didn't match the strp format string, so it's not a backup
                    # for this folder. Move on to the next one.
                    continue
                ssfile.setdefault(timestamp, {})['Backup Time'] = \
                    timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
                location = os.path.join(bkdir, x)
                ssfile[timestamp]['Size'] = os.stat(location).st_size
                ssfile[timestamp]['Location'] = location
        # Number this name's backups newest-first (index 0 = most recent),
        # applying ``limit`` if set
        sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0
    Restore a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.
    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to restore, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`
    CLI Example:
    .. code-block:: bash
        salt '*' file.restore_backup /foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)
    # Note: This only supports minion backups, so this function will need to be
    # modified if/when master backups are implemented.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # The length comparison rejects ids like '0x1' or ' 1' whose int()
        # value would print differently than the raw input
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        # backup_id was not parseable as an integer
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret
    # Back up the current file before overwriting it with the restore
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup['Location'], path)
    except IOError as exc:
        ret['comment'] = \
            'Unable to restore {0} to {1}: ' \
            '{2}'.format(backup['Location'], path, exc)
        return ret
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully restored {0} to ' \
                         '{1}'.format(backup['Location'], path)
    # Try to set proper ownership
    if not salt.utils.platform.is_windows():
        try:
            fstat = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)
    return ret
def delete_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Delete a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to delete, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # Reject ids like '0x1' whose int() value prints differently than
        # the raw input
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        backup = list_backups(path)[int(backup_id)]
    except ValueError:
        # backup_id was not parseable as an integer
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret
    try:
        os.remove(backup['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(backup['Location'],
                                                            exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])
    return ret
# Expose ``remove_backup`` as a CLI-visible alias of delete_backup
remove_backup = salt.utils.functools.alias_function(delete_backup, 'remove_backup')
def grep(path,
         pattern,
         *opts):
    '''
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    '''
    path = os.path.expanduser(path)

    # Each opt must be a single flag; shlex-split to detect (and reject)
    # multiple flags packed into one string.
    split_opts = []
    for opt in opts:
        try:
            tokens = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string arg (e.g. an int flag value): stringify and retry
            tokens = salt.utils.args.shlex_split(str(opt))
        if len(tokens) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        split_opts.extend(tokens)

    cmd = ['grep'] + split_opts + [pattern, path]
    try:
        return __salt__['cmd.run_all'](cmd, python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
def open_files(by_pid=False):
    '''
    Return a list of all physical open files on the system.
    CLI Examples:
    .. code-block:: bash
        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    '''
    # First we collect valid PIDs
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass
    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            # Process likely exited between the listdir calls; skip it
            continue
        # Collect the names of all of the file descriptors
        fd_ = []
        #try:
        #    fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
        #except:
        #    pass
        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))
        for tid in tids:
            try:
                # Each thread's executable path also counts as an open file
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue
            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))
        # De-duplicate descriptors shared between the process and its threads
        fd_ = sorted(set(fd_))
        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue
            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))
            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))
    # by_pid=True inverts the mapping: {pid: [files]} instead of {file: [pids]}
    if by_pid:
        return pids
    return files
def pardir():
    '''
    Return the OS-specific symbol for the relative parent directory.

    .. versionadded:: 2014.7.0

    This can be useful when constructing Salt Formulas.

    .. code-block:: jinja

        {% set pardir = salt['file.pardir']() %}
        {% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.pardir
    '''
    # Delegate to the stdlib so the correct symbol is used per platform
    return os.path.pardir
def normpath(path):
    '''
    Return ``path`` with redundant separators and up-level references
    collapsed (e.g. ``a/b/c/..`` becomes ``a/b``).

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.normpath 'a/b/c/..'
    '''
    normalized = os.path.normpath(path)
    return normalized
def basename(path):
    '''
    Return the final component of a pathname.

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- set filename = salt['file.basename'](source_file) %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.basename 'test/test.config'
    '''
    # os.path.basename is defined as the tail of os.path.split
    _, tail = os.path.split(path)
    return tail
def dirname(path):
    '''
    Return the directory component of a pathname.

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.dirname 'test/path/filename.config'
    '''
    # os.path.dirname is defined as the head of os.path.split
    head, _ = os.path.split(path)
    return head
def join(*args):
    '''
    Return a normalized file system path for the underlying OS.

    .. versionadded:: 2014.7.0

    This can be useful at the CLI but is frequently useful when scripting
    combining path variables:

    .. code-block:: jinja

        {% set www_root = '/var' %}
        {% set app_dir = 'myapp' %}

        myapp_config:
          file:
            - managed
            - name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }}

    CLI Example:

    .. code-block:: bash

        salt '*' file.join '/' 'usr' 'local' 'bin'
    '''
    joined = os.path.join(*args)
    return joined
def move(src, dst):
    '''
    Move a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.move /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    # Both endpoints must be absolute so the operation is unambiguous
    if not os.path.isabs(src):
        raise SaltInvocationError('Source path must be absolute.')
    if not os.path.isabs(dst):
        raise SaltInvocationError('Destination path must be absolute.')

    try:
        shutil.move(src, dst)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )

    return {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }
def diskusage(path):
    '''
    Recursively calculate disk usage of path and return it
    in bytes

    path
        File or directory to measure. A plain file returns its own size; a
        directory returns the summed size of all regular files beneath it,
        counting hardlinked files only once. A nonexistent path yields 0.

    CLI Example:

    .. code-block:: bash

        salt '*' file.diskusage /path/to/check
    '''
    total_size = 0
    seen = set()
    if os.path.isfile(path):
        # A plain file: its own size is the answer
        return os.stat(path).st_size

    for dirpath, dirnames, filenames in os.walk(path):
        for fname in filenames:
            fpath = os.path.join(dirpath, fname)
            try:
                stat_structure = os.stat(fpath)
            except OSError:
                # File vanished mid-walk or is unreadable; skip it
                continue
            # Count hardlinked files only once. Key on (device, inode) --
            # inode numbers are only unique within a single filesystem, so
            # st_ino alone could wrongly merge files from different devices.
            key = (stat_structure.st_dev, stat_structure.st_ino)
            if key in seen:
                continue
            seen.add(key)
            total_size += stat_structure.st_size
    # Return the accumulator directly. The previous code assigned ``ret``
    # inside the walk loop, raising UnboundLocalError for a nonexistent
    # path (os.walk yields nothing); now that case correctly returns 0.
    return total_size
| 31.932225 | 178 | 0.555337 |
from __future__ import absolute_import, print_function
import datetime
import difflib
import errno
import fileinput
import fnmatch
import itertools
import logging
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import glob
import hashlib
import mmap
from collections import Iterable, Mapping
from functools import reduce
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
try:
import grp
import pwd
except ImportError:
pass
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
log = logging.getLogger(__name__)
__func_alias__ = {
'makedirs_': 'makedirs'
}
def __virtual__():
if salt.utils.platform.is_windows():
return (
False,
'The file execution module cannot be loaded: only available on '
'non-Windows systems - use win_file instead.'
)
return True
def __clean_tmp(sfn):
if sfn.startswith(os.path.join(tempfile.gettempdir(),
salt.utils.files.TEMPFILE_PREFIX)):
all_roots = itertools.chain.from_iterable(
six.itervalues(__opts__['file_roots']))
in_roots = any(sfn.startswith(root) for root in all_roots)
# Only clean up files that exist
if os.path.exists(sfn) and not in_roots:
os.remove(sfn)
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _binary_replace(old, new):
old_isbin = not __utils__['files.is_text'](old)
new_isbin = not __utils__['files.is_text'](new)
if any((old_isbin, new_isbin)):
if all((old_isbin, new_isbin)):
return u'Replace binary file'
elif old_isbin:
return u'Replace binary file with text file'
elif new_isbin:
return u'Replace text file with binary file'
return u''
def _get_bkroot():
# Get the cachedir from the minion config
return os.path.join(__salt__['config.get']('cachedir'), 'file_backup')
def _splitlines_preserving_trailing_newline(str):
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
return lines
def gid_to_group(gid):
try:
gid = int(gid)
except ValueError:
# This is not an integer, maybe it's already the group name?
gid = group_to_gid(gid)
if gid == '':
return ''
try:
return grp.getgrgid(gid).gr_name
except (KeyError, NameError):
# If group is not present, fall back to the gid.
return gid
def group_to_gid(group):
if group is None:
return ''
try:
if isinstance(group, int):
return group
return grp.getgrnam(group).gr_gid
except KeyError:
return ''
def get_gid(path, follow_symlinks=True):
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)
def get_group(path, follow_symlinks=True):
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('group', False)
def uid_to_user(uid):
try:
return pwd.getpwuid(uid).pw_name
except (KeyError, NameError):
# If user is not present, fall back to the uid.
return uid
def user_to_uid(user):
if user is None:
user = salt.utils.user.get_user()
try:
if isinstance(user, int):
return user
return pwd.getpwnam(user).pw_uid
except KeyError:
return ''
def get_uid(path, follow_symlinks=True):
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('uid', -1)
def get_user(path, follow_symlinks=True):
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('user', False)
def get_mode(path, follow_symlinks=True):
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')
def set_mode(path, mode):
path = os.path.expanduser(path)
mode = str(mode).lstrip('0Oo')
if not mode:
mode = '0'
if not os.path.exists(path):
raise CommandExecutionError('{0}: File not found'.format(path))
try:
os.chmod(path, int(mode, 8))
except Exception:
return 'Invalid Mode ' + mode
return get_mode(path)
def lchown(path, user, group):
path = os.path.expanduser(path)
uid = user_to_uid(user)
gid = group_to_gid(group)
err = ''
if uid == '':
if user:
err += 'User does not exist\n'
else:
uid = -1
if gid == '':
if group:
err += 'Group does not exist\n'
else:
gid = -1
return os.lchown(path, uid, gid)
def chown(path, user, group):
path = os.path.expanduser(path)
uid = user_to_uid(user)
gid = group_to_gid(group)
err = ''
if uid == '':
if user:
err += 'User does not exist\n'
else:
uid = -1
if gid == '':
if group:
err += 'Group does not exist\n'
else:
gid = -1
if not os.path.exists(path):
try:
# Broken symlinks will return false, but still need to be chowned
return os.lchown(path, uid, gid)
except OSError:
pass
err += 'File not found'
if err:
return err
return os.chown(path, uid, gid)
def chgrp(path, group):
path = os.path.expanduser(path)
user = get_user(path)
return chown(path, user, group)
def _cmp_attrs(path, attrs):
diff = [None, None]
lattrs = lsattr(path).get(path, '')
old = [chr for chr in lattrs if chr not in attrs]
if len(old) > 0:
diff[1] = ''.join(old)
new = [chr for chr in attrs if chr not in lattrs]
if len(new) > 0:
diff[0] = ''.join(new)
return diff
def lsattr(path):
if not os.path.exists(path):
raise SaltInvocationError("File or directory does not exist.")
cmd = ['lsattr', path]
result = __salt__['cmd.run'](cmd, python_shell=False)
results = {}
for line in result.splitlines():
if not line.startswith('lsattr'):
vals = line.split(None, 1)
results[vals[1]] = re.findall(r"[acdijstuADST]", vals[0])
return results
def chattr(*args, **kwargs):
args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg)
for arg in args]
operator = kwargs.pop('operator', None)
attributes = kwargs.pop('attributes', None)
flags = kwargs.pop('flags', None)
version = kwargs.pop('version', None)
if (operator is None) or (operator not in ['add', 'remove']):
raise SaltInvocationError(
"Need an operator: 'add' or 'remove' to modify attributes.")
if attributes is None:
raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
if operator == "add":
attrs = '+{0}'.format(attributes)
elif operator == "remove":
attrs = '-{0}'.format(attributes)
flgs = ''
if flags is not None:
flgs = '-{0}'.format(flags)
vrsn = ''
if version is not None:
vrsn = '-v {0}'.format(version)
cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(args))
result = __salt__['cmd.run'](cmd, python_shell=False)
if bool(result):
raise CommandExecutionError(
"chattr failed to run, possibly due to bad parameters.")
return True
def get_sum(path, form='sha256'):
path = os.path.expanduser(path)
if not os.path.isfile(path):
return 'File not found'
return salt.utils.hashutils.get_hash(path, form, 4096)
def get_hash(path, form='sha256', chunk_size=65536):
return salt.utils.hashutils.get_hash(os.path.expanduser(path), form, chunk_size)
def get_source_sum(file_name='',
source='',
source_hash=None,
source_hash_name=None,
saltenv='base'):
def _invalid_source_hash_format():
raise CommandExecutionError(
'Source hash {0} format is invalid. The supported formats are: '
'1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
'3) either a path to a local file containing hashes, or a URI of '
'a remote hash file. Supported protocols for remote hash files '
'are: {1}. The hash may also not be of a valid length, the '
'following are supported hash types and lengths: {2}.'.format(
source_hash,
', '.join(salt.utils.files.VALID_PROTOS),
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
hash_fn = None
if os.path.isabs(source_hash):
hash_fn = source_hash
else:
try:
proto = _urlparse(source_hash).scheme
if proto in salt.utils.files.VALID_PROTOS:
hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
if not hash_fn:
raise CommandExecutionError(
'Source hash file {0} not found'.format(source_hash)
)
else:
if proto != '':
# Some unsupported protocol (e.g. foo://) is being used.
# We'll get into this else block if a hash expression
_invalid_source_hash_format()
except (AttributeError, TypeError):
_invalid_source_hash_format()
if hash_fn is not None:
ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
if ret is None:
_invalid_source_hash_format()
return ret
else:
ret = {}
try:
ret['hash_type'], ret['hsum'] = \
[x.strip() for x in source_hash.split('=', 1)]
except AttributeError:
_invalid_source_hash_format()
except ValueError:
if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
_invalid_source_hash_format()
ret['hsum'] = source_hash
source_hash_len = len(source_hash)
if source_hash_len in HASHES_REVMAP:
ret['hash_type'] = HASHES_REVMAP[source_hash_len]
else:
_invalid_source_hash_format()
if ret['hash_type'] not in HASHES:
raise CommandExecutionError(
'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
'Either remove the hash type and simply use \'{2}\' as the '
'source_hash, or change the hash type to a supported type.'
.format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
)
else:
hsum_len = len(ret['hsum'])
if hsum_len not in HASHES_REVMAP:
_invalid_source_hash_format()
elif hsum_len != HASHES[ret['hash_type']]:
raise CommandExecutionError(
'Invalid length ({0}) for hash type \'{1}\'. Either '
'remove the hash type and simply use \'{2}\' as the '
'source_hash, or change the hash type to \'{3}\''.format(
hsum_len,
ret['hash_type'],
ret['hsum'],
HASHES_REVMAP[hsum_len],
)
)
return ret
def check_hash(path, file_hash):
path = os.path.expanduser(path)
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')
for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
if 'delete' in args:
kwargs['delete'] = 'f'
elif 'print' in args:
kwargs['print'] = 'path'
try:
finder = salt.utils.find.Finder(kwargs)
except ValueError as ex:
return 'error: {0}'.format(ex)
ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
ret.sort()
return ret
def _sed_esc(string, escape_all=False):
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(path,
before,
after,
limit='',
backup='.bak',
options='-r -e',
flags='g',
escape_all=False,
negate_match=False):
# XXX:dc: Do we really want to always force escaping?
#
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
# Mandate that before and after are strings
before = str(before)
after = str(after)
before = _sed_esc(before, escape_all)
after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
cmd = ['sed']
cmd.append('-i{0}'.format(backup) if backup else '-i')
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append(
r'{limit}{negate_match}s/{before}/{after}/{flags}'.format(
limit='/{0}/ '.format(limit) if limit else '',
negate_match='!' if negate_match else '',
before=before,
after=after,
flags=flags
)
)
cmd.append(path)
return __salt__['cmd.run_all'](cmd, python_shell=False)
def sed_contains(path,
text,
limit='',
flags='g'):
# Largely inspired by Fabric's contrib.files.contains()
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
before = _sed_esc(str(text), False)
limit = _sed_esc(str(limit), False)
options = '-n -r -e'
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
cmd = ['sed']
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append(
r'{limit}s/{before}/$/{flags}'.format(
limit='/{0}/ '.format(limit) if limit else '',
before=before,
flags='p{0}'.format(flags)
)
)
cmd.append(path)
result = __salt__['cmd.run'](cmd, python_shell=False)
return bool(result)
def psed(path,
         before,
         after,
         limit='',
         backup='.bak',
         flags='gMS',
         escape_all=False,
         multi=False):
    '''
    Make a simple edit to a file (pure-Python analogue of ``sed``).

    path
        The full path to the file to be edited.
    before
        A Python regex to search for.
    after
        Replacement text (deliberately NOT escaped so backreferences work).
    limit
        Optional regex; replacement only applies after its first match.
    backup : ``.bak``
        Extension for the backup copy made before editing.
    flags : ``gMS``
        Letters from ``RE_FLAG_TABLE``, plus ``g`` meaning replace-all.
    escape_all : False
        Escape ALL regex special characters in ``before``.
    multi : False
        Process the file line by line instead of as a single string.
    '''
    # XXX:dc: Do we really want to always force escaping?
    #
    # Mandate that before and after are strings
    path = os.path.expanduser(path)
    multi = bool(multi)
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    # The pattern to replace with does not need to be escaped!!!
    #after = _sed_esc(after, escape_all)
    limit = _sed_esc(limit, escape_all)
    shutil.copy2(path, '{0}{1}'.format(path, backup))
    with salt.utils.files.fopen(path, 'w') as ofile:
        with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
            if multi is True:
                # BUGFIX: iterate the file object itself. Iterating
                # ifile.readline() walked the *characters* of the first
                # line only, mangling the file in multi mode.
                for line in ifile:
                    ofile.write(_psed(line, before, after, limit, flags))
            else:
                ofile.write(_psed(ifile.read(), before, after, limit, flags))
# Map the single-letter flag names accepted by psed()/_psed() to the
# corresponding re module flag constants.
RE_FLAG_TABLE = {'I': re.I,
                 'L': re.L,
                 'M': re.M,
                 'S': re.S,
                 'U': re.U,
                 'X': re.X}
def _psed(text,
before,
after,
limit,
flags):
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path,
              regex,
              char='#',
              backup='.bak'):
    '''
    Uncomment specified commented lines in a file.

    path
        The full path to the file to be edited.
    regex
        Regular expression matching the lines to uncomment (excluding the
        comment character itself).
    char : ``#``
        The comment character to strip.  (Restored: the default was
        truncated to an unterminated string literal.)
    backup : ``.bak``
        Extension of the backup copy made before the edit.

    Returns the diff produced by :func:`comment_line`, or False.
    '''
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=False,
                        backup=backup)
def comment(path,
            regex,
            char='#',
            backup='.bak'):
    '''
    Comment out specified lines in a file.

    path
        The full path to the file to be edited.
    regex
        Regular expression matching the lines to comment out.
    char : ``#``
        The character to prepend as the comment marker.  (Restored: the
        default was truncated to an unterminated string literal.)
    backup : ``.bak``
        Extension of the backup copy made before the edit.

    Returns the diff produced by :func:`comment_line`, or False.
    '''
    return comment_line(path=path,
                        regex=regex,
                        char=char,
                        cmnt=True,
                        backup=backup)
def comment_line(path,
                 regex,
                 char='#',
                 cmnt=True,
                 backup='.bak'):
    '''
    Comment or uncomment lines in a text file.

    path
        The full path to the file to be edited.
    regex
        Regular expression matching the lines to (un)comment; should not
        include the comment character.  A leading ``^`` / trailing ``$``
        are honoured around the rebuilt expression.
    char : ``#``
        The comment character.  (Restored: the default was truncated to an
        unterminated string literal.)
    cmnt : True
        Comment the lines when True, uncomment when False.
    backup : ``.bak``
        Extension for the backup copy; False/empty removes the temp copy
        instead of keeping it.

    Returns a unified diff of the changes, or False when no line matched.
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        regex = '{0}({1}){2}'.format(
            '^' if regex.startswith('^') else '',
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    else:
        regex = r'^{0}\s*({1}){2}'.format(
            char,
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))
    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    # Make sure it is a text file
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))
    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Dictionaries for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if six.PY3:
                    line = line.decode(__salt_system_encoding__)
                if re.match(regex, line):
                    # Load lines into dictionaries, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    try:
        # Open the file in write mode
        with salt.utils.files.fopen(path,
                                    mode='wb',
                                    buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.files.fopen(temp_file,
                                            mode='rb',
                                            buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        if six.PY3:
                            line = line.decode(__salt_system_encoding__)
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    wline = '{0}{1}'.format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                # Write the existing line (no change)
                                wline = line
                            if six.PY3:
                                wline = wline.encode(__salt_system_encoding__)
                            w_file.write(wline)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))
    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)
    if not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)
    # Return a diff using the two dictionaries
    return ''.join(difflib.unified_diff(orig_file, new_file))
def _get_flags(flags):
    '''
    Normalize ``flags`` (a flag-name string, an iterable of flag names, or
    an integer bitmask) into a single integer of ``re`` flags.

    Raises SaltInvocationError on an unknown flag name or invalid type.
    '''
    # A single flag name is treated as a one-element list
    if isinstance(flags, six.string_types):
        flags = [flags]

    if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
        resolved = []
        for name in flags:
            flag_value = getattr(re, str(name).upper())
            # Guard against grabbing something from re that is not a flag
            if not isinstance(flag_value, six.integer_types):
                raise SaltInvocationError(
                    'Invalid re flag given: {0}'.format(name)
                )
            resolved.append(flag_value)
        return reduce(operator.__or__, resolved)

    if isinstance(flags, six.integer_types):
        # Already a bitmask; pass straight through
        return flags

    raise SaltInvocationError(
        'Invalid re flags: "{0}", must be given either as a single flag '
        'string, a list of strings, or as an integer'.format(flags)
    )
def _add_flags(flags, new_flags):
    '''
    Combine two flag specifications (names, lists, or bitmasks) into one
    integer bitmask of ``re`` flags.
    '''
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path,
                  preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` into it.

    path
        The full path to the file whose contents will be moved/copied.
    preserve_inode : True
        When True the original file is *copied* to the temp file, so the
        original inode (and any hardlinks to it) is preserved; when False
        the original file is *moved*, giving the path a new inode on the
        next write.

    Returns the path of the temp file.  Raises CommandExecutionError on
    any filesystem error.
    '''
    temp_file = None
    # Create the temp file
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    # use `copy` to preserve the inode of the
    # original file, and thus preserve hardlinks
    # to the inode. otherwise, use `move` to
    # preserve prior behavior, which results in
    # writing the file to a new inode.
    if preserve_inode:
        try:
            shutil.copy2(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to copy file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        try:
            shutil.move(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    return temp_file
def _starts_till(src, probe, strip_comments=True):
def _strip_comments(txt):
buff = txt.split(" ", 1)
return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt
def _to_words(txt):
return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt
no_match = -1
equal = 0
if not src or not probe:
return no_match
if src == probe:
return equal
src = _to_words(strip_comments and _strip_comments(src) or src)
probe = _to_words(strip_comments and _strip_comments(probe) or probe)
a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
b_buff = ' '.join(b_buff)
for idx in range(len(a_buff)):
prb = ' '.join(a_buff[:-(idx + 1)])
if prb and b_buff.startswith(prb):
return idx
return no_match
def _regex_to_static(src, regex):
if not src or not regex:
return None
try:
src = re.search(regex, src, re.M)
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group() or regex
def _assert_occurrence(src, probe, target, amount=1):
occ = src.count(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
msg = 'less than'
elif not occ:
msg = 'no'
else:
msg = None
if msg:
raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))
return occ
def _get_line_indent(src, line, indent):
if not indent:
return line
idt = []
for c in src:
if c not in ['\t', ' ']:
break
idt.append(c)
return ''.join(idt) + line.strip()
def line(path, content=None, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    Edit a line in a file.

    path
        Filesystem path to the file to be edited.
    content
        Content of the line.  Allowed to be empty only when mode='delete'.
    match
        Match the target line for replacement; defaults to ``content``
        when neither ``before`` nor ``after`` is given.
    mode
        One of ``insert``, ``ensure``, ``delete`` or ``replace``.
    location
        ``start`` or ``end`` of the file (insert mode only; used when no
        before/after anchors are given).
    before / after
        Regexes (or literal text) anchoring where the content goes.
    show_changes : True
        Return a unified diff instead of a boolean changed flag.
    backup : False
        Create a timestamped backup copy before changing the file.
    quiet : False
        Return False instead of raising when the file does not exist.
    indent : True
        Preserve the indentation of the anchor/replaced line.
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
        return False  # No changes had happened
    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))
    # We've set the content to be empty in the function params but we want to
    # make sure the content/mode combination is sane before proceeding.
    # BUGFIX: this list was misspelled 'mpty_content_modes', causing a
    # NameError on the very next line for every call.
    empty_content_modes = ['delete']
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError('Content can only be empty if mode is "{0}"'.format(', '.join(empty_content_modes)))
    del empty_content_modes
    if before is None and after is None and not match:
        match = content
    with salt.utils.files.fopen(path, mode='r') as fp_:
        body = fp_.read()
    # Hash the original body so we can detect whether anything changed
    body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    # Resolve the regex anchors into the literal text they match
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)
    if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
        log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
        body = ''
    elif mode == 'delete':
        body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
    elif mode == 'replace':
        body = os.linesep.join([(_get_line_indent(file_line, content, indent)
                                 if (file_line.find(match) > -1 and not file_line == content) else file_line)
                                for file_line in body.split(os.linesep)])
    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')
        if not location:
            if before and after:
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                in_range = False
                for line in lines:
                    if line.find(after) > -1:
                        in_range = True
                    elif line.find(before) > -1 and in_range:
                        out.append(_get_line_indent(line, content, indent))
                    out.append(line)
                body = os.linesep.join(out)
            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        # Only insert if the previous line is not already it
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)
            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx, _line in enumerate(lines):
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    # Insert unless the following line already matches
                    if (_line.find(after) > -1 and
                            (lines[((idx + 1) < len(lines)) and idx + 1 or idx].strip() != cnd or
                             idx + 1 == len(lines))):
                        out.append(cnd)
                body = os.linesep.join(out)
        else:
            if location == 'start':
                body = os.linesep.join((content, body))
            elif location == 'end':
                body = os.linesep.join((body, _get_line_indent(body[-1], content, indent) if body else content))
    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()
        if before and after:
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')
            is_there = bool(body.count(content))
            if not is_there:
                out = []
                body = body.split(os.linesep)
                for idx, line in enumerate(body):
                    out.append(line)
                    if line.find(content) > -1:
                        is_there = True
                    if not is_there:
                        if idx < (len(body) - 1) and line.find(after) > -1 and body[idx + 1].find(before) > -1:
                            out.append(content)
                        elif line.find(after) > -1:
                            raise CommandExecutionError('Found more than one line between '
                                                        'boundaries "before" and "after".')
                body = os.linesep.join(out)
        elif before and not after:
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[idx], content, indent))
                    # Replace a stale variant of the content on the prev line
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)
        elif not before and after:
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])
                if body[idx].find(after) > -1:
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    # Replace a stale variant of the content on the next line
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))
            body = os.linesep.join(out)
        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")
    changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))
    changes_diff = None
    if changed:
        if show_changes:
            with salt.utils.files.fopen(path, 'r') as fp_:
                path_content = _splitlines_preserving_trailing_newline(
                    fp_.read())
            changes_diff = ''.join(difflib.unified_diff(
                path_content, _splitlines_preserving_trailing_newline(body)))
        if __opts__['test'] is False:
            fh_ = None
            try:
                # Write atomically so readers never see a partial file
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            backslash_literal=False,
            ):
    '''
    Replace occurrences of a pattern in a file.

    path
        Filesystem path to the file to be edited.  A symlink is resolved
        and the backup is symlinked alongside the original link.
    pattern
        A regular expression, treated as a Python regex.
    repl
        The replacement text.
    count : 0
        Maximum number of replacements (0 = all).
    flags : 8
        re flags as names, list of names, or bitmask (8 == re.MULTILINE).
    bufsize : 1
        File buffer size, or ``'file'`` to buffer the whole file.
    append_if_not_found / prepend_if_not_found : False
        Add ``not_found_content`` (or ``repl``) when the pattern is absent.
    not_found_content
        Content to use for the append/prepend-if-not-found case.
    backup : ``.bak``
        Extension for the backup copy; False disables the backup.
    dry_run : False
        Compute changes without writing.
    search_only : False
        Only return True/False for a match; no modification.
    show_changes : True
        Return a unified diff rather than a boolean.
    ignore_if_missing : False
        Return False instead of raising when the file does not exist.
    preserve_inode : True
        Keep the original inode (preserves hardlinks) when backing up.
    backslash_literal : False
        Treat backslashes in ``repl`` literally instead of as escapes.
    '''
    symlink = False
    if is_link(path):
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError('File not found: {0}'.format(path))
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            'search_only cannot be used with append/prepend_if_not_found'
        )
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == 'file':
        bufsize = filesize
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used for show_changes and change detection
    new_file = []  # used for show_changes and change detection
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Avoid TypeErrors by forcing repl to be bytearray related to mmap
    # Replacement text may contains integer: 123 for example
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
    found = False
    temp_file = None
    content = salt.utils.stringutils.to_str(not_found_content) if not_found_content and \
                                       (prepend_if_not_found or
                                        append_if_not_found) \
                                     else salt.utils.stringutils.to_str(repl)
    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path,
                                    mode='rb',
                                    buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(),
                                   0,
                                   access=mmap.ACCESS_READ)
            except (ValueError, mmap.error):
                # size of file in /proc is 0, but contains data
                r_data = salt.utils.stringutils.to_bytes("".join(r_file))
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
                else:
                    return False
            else:
                # BUGFIX: repl is bytes at this point, so the literal
                # backslash doubling must use bytes arguments (str args
                # raise TypeError under Python 3).
                result, nrepl = re.subn(cpattern,
                                        repl.replace(b'\\', b'\\\\') if backslash_literal else repl,
                                        r_data,
                                        count)
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))),
                                 r_data,
                                 flags=flags_num):
                        # Content was found, so set found.
                        found = True
                orig_file = r_data.read(filesize).splitlines(True) \
                    if isinstance(r_data, mmap.mmap) \
                    else r_data.splitlines(True)
                new_file = result.splitlines(True)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()
    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path,
                                      preserve_inode=preserve_inode)
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.files.fopen(path,
                                        mode='w',
                                        buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.files.fopen(temp_file,
                                                mode='r',
                                                buffering=bufsize) as r_file:
                        r_data = mmap.mmap(r_file.fileno(),
                                           0,
                                           access=mmap.ACCESS_READ)
                        # BUGFIX: bytes arguments here as well (see above)
                        result, nrepl = re.subn(cpattern,
                                                repl.replace(b'\\', b'\\\\') if backslash_literal else repl,
                                                r_data,
                                                count)
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
                except (OSError, IOError) as exc:
                    raise CommandExecutionError("Exception: {0}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))
    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(salt.utils.stringutils.to_bytes(os.linesep)):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(not_found_content + salt.utils.stringutils.to_bytes(os.linesep))
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path,
                                          preserve_inode=preserve_inode)
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
        if symlink:
            symlink_backup = '{0}{1}'.format(given_path, backup)
            target_backup = '{0}{1}'.format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behaviour of the 'backup' option
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            # BUGFIX: this was a bare 'except:' that referenced an
            # undefined name 'exc' (guaranteed NameError) and swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception as exc:
                raise CommandExecutionError(
                    "Unable create backup symlink '{0}'. "
                    "Target was '{1}'. "
                    "Exception: {2}".format(symlink_backup, target_backup,
                                            exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{0}'. "
                "Exception: {1}".format(temp_file, exc)
            )
    if not dry_run and not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    def get_changes():
        orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
        new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
        return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))

    if show_changes:
        return get_changes()
    # The diff may be empty even when nrepl > 0
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not get_changes():
        has_changes = False
    return has_changes
def blockreplace(path,
                 marker_start='#-- start managed zone --',
                 marker_end='#-- end managed zone --',
                 content='',
                 append_if_not_found=False,
                 prepend_if_not_found=False,
                 backup='.bak',
                 dry_run=False,
                 show_changes=True,
                 append_newline=False,
                 ):
    '''
    Replace the content of a text block in a file, delimited by marker lines.

    path
        Filesystem path to the file to be edited.
    marker_start / marker_end
        Line content identifying the start/end of the managed block.
        (Restored: both defaults were truncated to unterminated string
        literals; these are the upstream Salt defaults.)
    content
        The content to place between the two marker lines.
    append_if_not_found / prepend_if_not_found : False
        When the markers are absent, add the whole block at the end /
        beginning of the file instead of failing.
    backup : ``.bak``
        Extension for the backup copy; False disables the backup.
    dry_run : False
        Compute the diff without modifying the file.
    show_changes : True
        Return a unified diff rather than a boolean changed flag.
    append_newline : False
        Append a newline to the content when creating a missing block.
    '''
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise SaltInvocationError('File not found: {0}'.format(path))
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )
    if not __utils__['files.is_text'](path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []
    new_file = []
    in_block = False
    old_content = ''
    done = False
    # we do not use in_place editing to avoid file attrs modifications when
    # no changes are required and to avoid any file access on a partially
    # written file.
    # we could also use salt.utils.filebuffer.BufferedReader
    try:
        fi_file = fileinput.input(path,
                                  inplace=False, backup=False,
                                  bufsize=1, mode='rb')
        for line in fi_file:
            line = salt.utils.stringutils.to_str(line)
            result = line
            if marker_start in line:
                # managed block start found, start recording
                in_block = True
            else:
                if in_block:
                    if marker_end in line:
                        # end of block detected
                        in_block = False
                        # Handle situations where there may be multiple types
                        # of line endings in the same file. Separate the content
                        # into lines. Account for Windows-style line endings
                        # using os.linesep, then by linux-style line endings
                        # using '\n'
                        split_content = []
                        for linesep_line in content.split(os.linesep):
                            for content_line in linesep_line.split('\n'):
                                split_content.append(content_line)
                        # Trim any trailing new lines to avoid unwanted
                        # additional new lines
                        while not split_content[-1]:
                            split_content.pop()
                        # push new block content in file
                        for content_line in split_content:
                            new_file.append(content_line + os.linesep)
                        done = True
                    else:
                        # remove old content, but keep a trace
                        old_content += line
                        result = None
            # else: we are not in the marked block, keep saving things
            orig_file.append(line)
            if result is not None:
                new_file.append(result)
        # end for. If we are here without block management we maybe have some problems,
        # or we need to initialise the marked block
    finally:
        fi_file.close()
    if in_block:
        # unterminated block => bad, always fail
        raise CommandExecutionError(
            'Unterminated marked block. End of file reached before marker_end.'
        )
    if not done:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
            new_file.insert(0, marker_end + os.linesep)
            if append_newline is True:
                new_file.insert(0, content + os.linesep)
            else:
                new_file.insert(0, content)
            new_file.insert(0, marker_start + os.linesep)
            done = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(os.linesep):
                    new_file[-1] += os.linesep
            # add the markers and content at the end of file
            new_file.append(marker_start + os.linesep)
            if append_newline is True:
                new_file.append(content + os.linesep)
            else:
                new_file.append(content)
            new_file.append(marker_end + os.linesep)
            done = True
        else:
            raise CommandExecutionError(
                'Cannot edit marked block. Markers were not found in file.'
            )
    if done:
        diff = ''.join(difflib.unified_diff(orig_file, new_file))
        # BUGFIX: this compared string *identity* ("diff is not ''"), which
        # is undefined for non-interned strings; compare values instead.
        has_changes = diff != ''
        if has_changes and not dry_run:
            # changes detected
            # backup file attrs
            perms = {}
            perms['user'] = get_user(path)
            perms['group'] = get_group(path)
            perms['mode'] = salt.utils.files.normalize_mode(get_mode(path))
            # backup old content
            if backup is not False:
                backup_path = '{0}{1}'.format(path, backup)
                shutil.copy2(path, backup_path)
                # copy2 does not preserve ownership
                check_perms(backup_path,
                            None,
                            perms['user'],
                            perms['group'],
                            perms['mode'])
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'wb')
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
            # this may have overwritten file attrs
            check_perms(path,
                        None,
                        perms['user'],
                        perms['group'],
                        perms['mode'])
    if show_changes:
        return diff
    return has_changes
def search(path,
           pattern,
           flags=8,
           bufsize=1,
           ignore_if_missing=False,
           multiline=False
           ):
    '''
    Return True if the given pattern matches in the file at ``path``.

    path
        Filesystem path to the file to be searched.
    pattern
        A Python regular expression.
    flags : 8
        re flags (8 == re.MULTILINE) as names, list of names, or bitmask.
    bufsize : 1
        File buffer size, or ``'file'`` to buffer the whole file.
    ignore_if_missing : False
        Return False instead of raising when the file does not exist.
    multiline : False
        Force MULTILINE matching and whole-file buffering.
    '''
    if multiline:
        flags = _add_flags(flags, 'MULTILINE')
        bufsize = 'file'
    # This function wraps file.replace on purpose in order to enforce
    # consistent usage, compatible regex's, expected behavior, *and* bugs. :)
    return replace(path,
                   pattern,
                   '',
                   flags=flags,
                   bufsize=bufsize,
                   dry_run=True,
                   search_only=True,
                   show_changes=False,
                   ignore_if_missing=ignore_if_missing)
def patch(originalfile, patchfile, options='', dry_run=False):
    '''
    Apply a patch to a file or directory using the system ``patch`` utility.

    originalfile
        The file or directory to apply the patch to.
    patchfile
        The patch file to apply.
    options
        Extra options for the ``patch`` command, as a shell-style string.
    dry_run : False
        Check whether the patch applies cleanly without modifying anything.

    Returns the ``cmd.run_all`` result dict.
    '''
    patchpath = salt.utils.path.which('patch')
    if not patchpath:
        raise CommandExecutionError(
            'patch executable not found. Is the distribution\'s patch '
            'package installed?'
        )

    cmd = [patchpath]
    cmd.extend(salt.utils.args.shlex_split(options))
    if dry_run:
        # BSD patch spells dry-run as -C
        if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'):
            cmd.append('-C')
        else:
            cmd.append('--dry-run')

    # this argument prevents interactive prompts when the patch fails to apply.
    # the exit code will still be greater than 0 if that is the case.
    if '-N' not in cmd and '--forward' not in cmd:
        cmd.append('--forward')

    # by default, patch will write rejected patch files to <filename>.rej.
    # this option prevents that, unless the caller configured rejects.
    has_rejectfile_option = any(
        opt == '-r' or opt.startswith('-r ') or opt.startswith('--reject-file')
        for opt in cmd)
    if not has_rejectfile_option:
        cmd.append('--reject-file=-')

    cmd.extend(['-i', patchfile])

    if os.path.isdir(originalfile):
        cmd.extend(['-d', originalfile])
        # Default to stripping nothing from paths inside the patch file
        has_strip_option = any(
            opt.startswith('-p') or opt.startswith('--strip=')
            for opt in cmd)
        if not has_strip_option:
            cmd.append('--strip=0')
    else:
        cmd.append(originalfile)

    return __salt__['cmd.run_all'](cmd, python_shell=False)
def contains(path, text):
    '''
    Return True if the file at ``path`` contains ``text`` (whitespace on
    both ends of ``text`` is stripped before searching).
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    needle = str(text).strip()

    try:
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            return any(needle in chunk for chunk in breader)
    except (IOError, OSError):
        # Unreadable file counts as "not found"
        return False
def contains_regex(path, regex, lchar=''):
    '''
    Return True if any line of the file at ``path`` matches ``regex``.
    When ``lchar`` is given, it is stripped from the left of each line
    before matching.
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        with salt.utils.files.fopen(path, 'r') as handle:
            for raw_line in handle:
                candidate = raw_line.lstrip(lchar) if lchar else raw_line
                if re.search(regex, candidate):
                    return True
            return False
    except (IOError, OSError):
        # Unreadable file counts as "not found"
        return False
def contains_glob(path, glob_expr):
    '''
    Return True if any chunk of the file at ``path`` matches the given
    shell-style glob expression.
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            return any(fnmatch.fnmatch(chunk, glob_expr) for chunk in breader)
    except (IOError, OSError):
        # Unreadable file counts as "not found"
        return False
def append(path, *args, **kwargs):
    '''
    Append one or more lines to the end of a file, ensuring the file ends
    with a line separator first.

    path
        The file to modify.
    args
        The lines to append.  May also be passed as ``args=[...]`` (or a
        single string) via kwargs, to allow lines that would otherwise be
        parsed as YAML structures.

    Returns a summary string of how many lines were written.
    '''
    path = os.path.expanduser(path)
    # Largely inspired by Fabric's contrib.files.append()
    # Support args passed as a kwarg (list or single value)
    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]
    with salt.utils.files.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            # Peek at the last len(linesep) bytes of the file
            ofile.seek(-len(linesep), os.SEEK_END)
        except IOError as exc:
            if exc.errno in (errno.EINVAL, errno.ESPIPE):
                # Empty file, simply append lines at the beginning of the file
                pass
            else:
                raise
        else:
            # Add a trailing separator if the file lacks one, so the first
            # appended line starts on a fresh line
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)
    with salt.utils.files.fopen(path, 'a') as ofile:
        for new_line in args:
            ofile.write('{0}{1}'.format(new_line, os.linesep))
    return 'Wrote {0} lines to "{1}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    '''
    Prepend one or more lines to the beginning of a file, creating the
    file if it does not exist.

    path
        The file to modify.
    args
        The lines to prepend; may also be passed as ``args=[...]`` (or a
        single value) via kwargs.

    Returns a summary string of how many lines were prepended.
    '''
    path = os.path.expanduser(path)

    # Support args passed as a kwarg (list or single value)
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) else [kwargs['args']]

    try:
        with salt.utils.files.fopen(path) as fhr:
            body = fhr.readlines()
    except IOError:
        # Missing file: start from empty content
        body = []

    new_lines = ['{0}\n'.format(item) for item in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(new_lines + body))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)
def write(path, *args, **kwargs):
    '''
    Write text to a file, overwriting any previous content.

    path
        The file to write to.
    args
        The lines to write; may also be passed as ``args=[...]`` (or a
        single value) via kwargs.

    Returns a summary string of how many lines were written.
    '''
    path = os.path.expanduser(path)

    # Support args passed as a kwarg (list or single value)
    if 'args' in kwargs:
        args = kwargs['args'] if isinstance(kwargs['args'], list) else [kwargs['args']]

    lines = ['{0}\n'.format(item) for item in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(''.join(lines))
    return 'Wrote {0} lines to "{1}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    '''
    Just like the ``touch`` command: update the access and modification
    times of a file, creating it if it does not exist.

    name
        Path to the file.
    atime
        Access time in Unix epoch seconds (digit strings are accepted).
    mtime
        Modification time in Unix epoch seconds.

    Returns True if the file exists afterwards.  Raises
    SaltInvocationError when atime/mtime are not integers, and
    CommandExecutionError on filesystem errors.
    '''
    name = os.path.expanduser(name)
    # Accept numeric strings (e.g. from the CLI) as epoch seconds
    if atime and atime.isdigit():
        atime = int(atime)
    if mtime and mtime.isdigit():
        mtime = int(mtime)
    try:
        # Create the file if missing
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, 'a') as fhw:
                fhw.write('')
        # os.utime takes (atime, mtime); fill the missing one with "now"
        if not atime and not mtime:
            times = None
        elif not mtime and atime:
            times = (atime, time.time())
        elif not atime and mtime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)
    except TypeError:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
    return os.path.exists(name)
def seek_read(path, size, offset):
    '''
    Seek to ``offset`` (from the start of the file) and read ``size``
    bytes from the file at ``path``; returns the bytes read.
    '''
    path = os.path.expanduser(path)
    fd = os.open(path, os.O_RDONLY)
    try:
        os.lseek(fd, int(offset), 0)
        return os.read(fd, int(size))
    finally:
        # Always release the descriptor, even if the read fails
        os.close(fd)
def seek_write(path, data, offset):
    '''
    Seek to ``offset`` (from the start of the file) and write ``data``
    to the file at ``path``; fsyncs before closing.  Returns the number
    of bytes written.
    '''
    path = os.path.expanduser(path)
    fd = os.open(path, os.O_WRONLY)
    try:
        os.lseek(fd, int(offset), 0)
        written = os.write(fd, data)
        # Flush to disk before releasing the descriptor
        os.fsync(fd)
    finally:
        os.close(fd)
    return written
def truncate(path, length):
    '''
    Truncate the file at ``path`` to exactly ``length`` bytes.
    '''
    target = os.path.expanduser(path)
    with salt.utils.files.fopen(target, 'rb+') as handle:
        handle.truncate(int(length))
def link(src, path):
    '''
    Create a hard link from ``src`` (which must be an absolute path,
    ``~`` expanded) to ``path``.  Returns True on success; raises
    CommandExecutionError on failure.
    '''
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.link(src, path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return True
def is_link(path):
    '''
    Return True if ``path`` (``~`` expanded) is a symbolic link.
    '''
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    '''
    Create a symbolic link at ``path`` (absolute, ``~`` expanded)
    pointing to ``src``.  Returns True if the link already points to
    ``src`` or was created; raises CommandExecutionError on failure.
    '''
    path = os.path.expanduser(path)

    # Short-circuit when the link already points at the right target
    try:
        if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
            log.debug('link already in correct state: %s -> %s', path, src)
            return True
    except OSError:
        # path is missing or not a symlink; fall through and create it
        pass

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.symlink(src, path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return True
def rename(src, dst):
    '''
    Rename ``src`` to ``dst`` (both ``~`` expanded; ``src`` must be
    absolute).  Returns True on success; raises CommandExecutionError
    on failure.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.rename(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )
    return True
def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy a file or directory from ``src`` to ``dst``, preserving the
    destination's previous ownership and mode on non-Windows systems.

    src
        Source path (must be absolute, ``~`` expanded) and must exist.
    dst
        Destination path.
    recurse : False
        Required to copy into/over a directory (recursive copy).
    remove_existing : False
        With recurse: delete the destination tree first so the copy is an
        exact mirror rather than a merge.

    Returns True on success; raises CommandExecutionError on failure.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))
    if not salt.utils.platform.is_windows():
        # Remember ownership/mode so the copy does not change them
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))
    try:
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                # Mirror semantics: clear the destination first
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                # Merge semantics: copy into the existing tree
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )
    if not salt.utils.platform.is_windows():
        # Restore the recorded ownership/mode on the destination
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    '''
    Return lstat attributes for the given path (without following
    symlinks) as a dict; returns an empty dict when the path cannot
    be stat'ed.  ``path`` must be absolute (``~`` expanded).
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('Path to file must be absolute.')

    try:
        st = os.lstat(path)
    except Exception:
        # Missing/unreadable path: report no attributes
        return {}

    keys = ('st_atime', 'st_ctime', 'st_gid', 'st_mode',
            'st_mtime', 'st_nlink', 'st_size', 'st_uid')
    return {key: getattr(st, key) for key in keys}
def access(path, mode):
    '''
    Test whether the minion process can access *path*.

    mode
        Either a single-letter string (``f``=exists, ``r``=read,
        ``w``=write, ``x``=execute) or the corresponding ``os`` constant.

    Raises:
        SaltInvocationError: non-absolute path or unrecognized mode.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    mode_map = {'f': os.F_OK,
                'r': os.R_OK,
                'w': os.W_OK,
                'x': os.X_OK}
    if mode in mode_map:
        return os.access(path, mode_map[mode])
    if mode in six.itervalues(mode_map):
        # Caller passed a raw os.*_OK constant directly.
        return os.access(path, mode)
    raise SaltInvocationError('Invalid mode specified.')
def read(path, binary=False):
    '''
    Return the contents of the file at *path*.

    binary
        When True, open in binary mode and return bytes instead of text.
    '''
    open_mode = 'rb' if binary is True else 'r'
    with salt.utils.files.fopen(path, open_mode) as handle:
        return handle.read()
def readlink(path, canonicalize=False):
    '''
    Return the target of the symbolic link at *path*.

    canonicalize
        When True, fully resolve the link chain (``os.path.realpath``)
        instead of returning the immediate target.

    Raises:
        SaltInvocationError: non-absolute path, or not a symlink.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Path to link must be absolute.')
    if not os.path.islink(path):
        raise SaltInvocationError('A valid link was not specified.')
    return os.path.realpath(path) if canonicalize else os.readlink(path)
def readdir(path):
    '''
    Return the entries of directory *path*, prefixed with the ``.`` and
    ``..`` pseudo-entries (mirroring a raw directory listing).

    Raises:
        SaltInvocationError: non-absolute path, or not a directory.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('Dir path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    return ['.', '..'] + os.listdir(path)
def statvfs(path):
    '''
    Return filesystem statistics (``os.statvfs``) for the filesystem
    containing *path*, as a dict of the ``f_*`` fields.

    Raises:
        SaltInvocationError: if *path* is not absolute.
        CommandExecutionError: if the statvfs call fails.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        stv = os.statvfs(path)
    except (OSError, IOError):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
    # NOTE: the original trailing ``return False`` was unreachable (the
    # except clause always raises) and has been removed.
    return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
        'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
        'f_frsize', 'f_namemax'))
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict of stat data for *path*: inode, uid/gid, resolved
    user/group names, atime/mtime/ctime, size, octal mode string, file
    type, optional checksum, and the canonical target path.

    hash_type
        When set, also compute the file's checksum with this algorithm
        (stored under ``sum``; only for regular files).
    follow_symlinks
        When True, stat the link target; when False, stat the link itself.

    Returns an empty dict when the path does not exist.
    '''
    path = os.path.expanduser(path)
    ret = {}
    if not os.path.exists(path):
        # os.path.exists() is False for a *broken* symlink, but lstat can
        # still report on the link itself; a genuinely missing path makes
        # lstat raise OSError and we return the empty dict.
        try:
            pstat = os.lstat(path)
        except OSError:
            return ret
    else:
        if follow_symlinks:
            pstat = os.stat(path)
        else:
            pstat = os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    # Octal permission bits only (no file-type bits), as a string.
    ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    # Classify the file type. These checks are not elif-chained in the
    # original; the S_IS* predicates are mutually exclusive, so at most
    # one overrides the default 'file'.
    ret['type'] = 'file'
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
def rmdir(path):
    '''
    Remove the empty directory at *path*.

    Returns ``True`` on success, or the OS error message string when the
    removal fails (e.g. the directory is not empty).

    Raises:
        SaltInvocationError: non-absolute path, or not a directory.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')
    try:
        os.rmdir(path)
    except OSError as exc:
        # Preserve historical behavior: report the error text, don't raise.
        return exc.strerror
    return True
def remove(path):
    '''
    Remove the file, symlink, or directory tree at *path*.

    Returns ``True`` when something was removed, ``False`` when the path
    did not match any removable type (e.g. it does not exist).

    Raises:
        SaltInvocationError: if *path* is not absolute.
        CommandExecutionError: if the removal itself fails.
    '''
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))
    try:
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return True
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            'Could not remove \'{0}\': {1}'.format(path, exc)
        )
    return False
def directory_exists(path):
    '''Return True if *path* (after ``~`` expansion) is an existing directory.'''
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    '''Return True if *path* (after ``~`` expansion) is an existing regular file.'''
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    '''
    Return True if any filesystem path matches the glob pattern *path*
    (``~`` is expanded first).
    '''
    # bool(...) replaces the redundant ``True if ... else False`` idiom.
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    '''
    Reset the SELinux security context of *path* to its default using
    restorecon(8). Returns True when the command exits cleanly.

    recursive
        When True, apply recursively (``-R``).
    '''
    flag = '-FR' if recursive else '-F'
    cmd = ['restorecon', flag, path]
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_selinux_context(path):
    '''
    Return the SELinux context of *path* as ``user:role:type:level``, or
    an explanatory message when no context information is available.
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
    match = re.search(r'\w+:\w+:\w+:\w+', out)
    if match is not None:
        return match.group(0)
    return (
        'No selinux context information is available for {0}'.format(path)
    )
def set_selinux_context(path,
                        user=None,
                        role=None,
                        type=None,
                        range=None):
    '''
    Change one or more SELinux context components of *path* via chcon(1).

    Returns the new context string on success, ``False`` when nothing was
    requested or the chcon command failed.
    '''
    # Nothing to change: bail out early.
    if not any((user, role, type, range)):
        return False
    cmd = ['chcon']
    for flag, value in (('-u', user),
                        ('-r', role),
                        ('-t', type),
                        ('-l', range)):
        if value:
            cmd.extend([flag, value])
    cmd.append(path)
    if __salt__['cmd.retcode'](cmd, python_shell=False):
        # Non-zero exit code: report failure.
        return False
    return get_selinux_context(path)
def source_list(source, source_hash, saltenv):
    '''
    Check the source list and return the first source/hash pair that is
    available, caching the result in ``__context__``.

    source
        A single source (string/dict) or a list of candidate sources.
    source_hash
        Default hash to pair with a source that carries none of its own.
    saltenv
        Fileserver environment to search when a source specifies none.

    Raises:
        CommandExecutionError: when none of the listed sources exist.
    '''
    contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]
    if isinstance(source, list):
        # Pre-fetch the master's file/dir listings for every environment
        # referenced by the candidate sources.
        mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))
            path, senv = salt.utils.url.parse(single)
            if senv:
                mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
                mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
        ret = None
        for single in source:
            if isinstance(single, dict):
                # {source: hash} mapping; anything else is malformed.
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = _urlparse(single_src)
                if salt.utils.platform.is_windows():
                    # On Windows, urlparse mistakes a drive letter for the
                    # URL scheme when there is no protocol indicator
                    # (file://); force a file:// scheme in that case.
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = _urlparse('file://' + single_src)
                proto = urlparsed_single_src.scheme
                if proto == 'salt':
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # BUGFIX: check against the environment parsed from the
                    # salt:// URL (senv), not the function-level saltenv, to
                    # match the string branch below.
                    if (path, senv) in mfiles or (path, senv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single_src, single_hash)
                    break
                elif proto == 'file' and (
                        os.path.exists(urlparsed_single_src.netloc) or
                        os.path.exists(urlparsed_single_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_single_src.netloc,
                            urlparsed_single_src.path))):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, six.string_types):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = _urlparse(single)
                if salt.utils.platform.is_windows():
                    # Same drive-letter-as-scheme workaround as above.
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = _urlparse('file://' + single)
                proto = urlparsed_src.scheme
                if proto == 'file' and (
                        os.path.exists(urlparsed_src.netloc) or
                        os.path.exists(urlparsed_src.path) or
                        os.path.exists(os.path.join(
                            urlparsed_src.netloc,
                            urlparsed_src.path))):
                    ret = (single, source_hash)
                    break
                elif proto.startswith('http') or proto == 'ftp':
                    ret = (single, source_hash)
                    break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the candidates could be located anywhere.
            raise CommandExecutionError(
                'none of the specified sources were found'
            )
    else:
        # A single (non-list) source is passed through untouched.
        ret = (source, source_hash)
    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(
        contents,
        template,
        context,
        defaults,
        saltenv):
    '''
    Render *contents* (a string) through the named templating engine and
    return the rendered string.

    template
        Name of a registered engine (e.g. ``jinja``); unsupported names
        produce an error dict instead of a string.
    context / defaults
        Template variables; *context* entries override *defaults*.
    saltenv
        Fileserver environment exposed to the template.
    '''
    if template in salt.utils.templates.TEMPLATE_REGISTRY:
        context_dict = defaults if defaults else {}
        if context:
            context_dict.update(context)
        # Render string -> string, exposing the standard salt dunders.
        contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
            contents,
            from_str=True,
            to_str=True,
            context=context_dict,
            saltenv=saltenv,
            grains=__opts__['grains'],
            pillar=__pillar__,
            salt=__salt__,
            opts=__opts__)['data']
        # Normalize the result to the native str type of the interpreter.
        if six.PY2:
            contents = contents.encode('utf-8')
        elif six.PY3 and isinstance(contents, bytes):
            contents = contents.decode('utf-8')
    else:
        # Unknown engine: mirror the state-style error dict convention.
        ret = {}
        ret['result'] = False
        ret['comment'] = ('Specified template format {0} is not supported'
                          ).format(template)
        return ret
    return contents
def get_managed(
        name,
        template,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        saltenv,
        context,
        defaults,
        skip_verify=False,
        **kwargs):
    '''
    Prepare the source file and checksum data needed to manage ``name``.

    Returns a 3-tuple ``(sfn, source_sum, comment)`` where ``sfn`` is the
    path of the locally cached (and possibly template-rendered) source
    file, ``source_sum`` is a dict with ``hsum``/``hash_type`` keys, and
    ``comment`` is an error string ('' on success).

    skip_verify
        When True, do not require or verify an upstream source hash.
    '''
    sfn = ''
    source_sum = {}
    # Helper: checksum a file that lives on the minion's own filesystem.
    def _get_local_file_source_sum(path):
        return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
    if source:
        urlparsed_source = _urlparse(source)
        parsed_scheme = urlparsed_source.scheme
        parsed_path = os.path.join(
                urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
        # A single-letter scheme is really a Windows drive letter that
        # urlparse misinterpreted; rebuild the path and treat as file://.
        if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
            parsed_path = ':'.join([parsed_scheme, parsed_path])
            parsed_scheme = 'file'
        if parsed_scheme == 'salt':
            # salt:// sources: the master supplies the hash.
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and parsed_scheme == 'file':
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            source_sum = _get_local_file_source_sum(source)
        else:
            # Remote (http/ftp/...) source: a hash is mandatory unless the
            # caller explicitly opted out with skip_verify.
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(name,
                                                    source,
                                                    source_hash,
                                                    source_hash_name,
                                                    saltenv)
                    except CommandExecutionError as exc:
                        return '', {}, exc.strerror
                else:
                    msg = (
                        'Unable to verify upstream hash of source file {0}, '
                        'please set source_hash or set skip_verify to True'
                        .format(source)
                    )
                    return '', {}, msg
    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__['cp.is_cached'](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get('hash_type', 'sha256')
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {'hsum': cached_sum, 'hash_type': htype}
            elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
                # Cached copy is stale; re-download below.
                cache_refetch = True
            else:
                sfn = cached_dest
        # If we didn't have the template or remote file, or the file has been
        # updated upstream, cache (or re-cache) it now.
        if not sfn or cache_refetch:
            try:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum.get('hsum'))
            except Exception as exc:
                return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )
        if template:
            # Render the cached source through the requested engine; the
            # rendered output becomes the new sfn and its hash replaces
            # the original source hash.
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__['grains'],
                    opts=__opts__,
                    **kwargs)
            else:
                return sfn, {}, ('Specified template format {0} is not supported'
                                 ).format(template)
            if data['result']:
                sfn = data['data']
                hsum = get_hash(sfn, form='sha256')
                source_sum = {'hash_type': 'sha256',
                              'hsum': hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data['data']
    return sfn, source_sum, ''
def extract_hash(hash_fn,
                 hash_type='sha256',
                 file_name='',
                 source='',
                 source_hash_name=None):
    '''
    Parse the hash file at *hash_fn* and return the best-matching hash as
    a dict with ``hsum``/``hash_type`` keys, or None if nothing matched.

    Match priority: a line naming *source_hash_name*, then one naming
    *file_name*'s basename, then one naming *source*'s basename; failing
    all of those, the first hash of the expected length found anywhere in
    the file (the "partial" match).

    hash_type
        Restrict matching to hashes of this algorithm's length; an
        unsupported value falls back to matching any known length.
    '''
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                'file.extract_hash: Unsupported hash_type \'%s\', falling '
                'back to matching any supported hash_type', hash_type
            )
            hash_type = ''
        # Regex quantifier accepting any supported hash length.
        hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)
    filename_separators = string.whitespace + r'\/'
    if source_hash_name:
        if not isinstance(source_hash_name, six.string_types):
            source_hash_name = str(source_hash_name)
        # Negative index of the character just before the name, used to
        # confirm the name is a whole token and not a suffix.
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            'file.extract_hash: Extracting %s hash for file matching '
            'source_hash_name \'%s\'',
            'any supported' if not hash_type else hash_type,
            source_hash_name
        )
    if file_name:
        if not isinstance(file_name, six.string_types):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, six.string_types):
            source = str(source)
        urlparsed_source = _urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1
    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            'file.extract_hash: %s %s hash for file matching%s: %s',
            'If no source_hash_name match found, will extract'
            if source_hash_name
            else 'Extracting',
            'any supported' if not hash_type else hash_type,
            '' if len(basename_searches) == 1 else ' either of the following',
            ', '.join(basename_searches)
        )
    # `partial` remembers the first hash seen anywhere, as a fallback.
    partial = None
    found = {}
    with salt.utils.files.fopen(hash_fn, 'r') as fp_:
        for line in fp_:
            line = line.strip()
            # Hash of the expected length, not embedded in a longer
            # hex/alnum run (negative look-around on both sides).
            hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # The length of the hash in the line does not map
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {'hsum': matched_hsum,
                                   'hash_type': matched_type}
            if matched is None:
                log.debug(
                    'file.extract_hash: In line \'%s\', no %shash found',
                    line,
                    '' if not hash_type else hash_type + ' '
                )
                continue
            if partial is None:
                partial = matched
            # Record a categorized match (closure over the found dict).
            def _add_to_matches(found, line, match_type, value, matched):
                log.debug(
                    'file.extract_hash: Line \'%s\' matches %s \'%s\'',
                    line, match_type, value
                )
                found.setdefault(match_type, []).append(matched)
            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the name itself may contain them.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(found, line, 'source_hash_name',
                                            source_hash_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r'\s+',
                              line):
                    _add_to_matches(found, line, 'source_hash_name',
                                    source_hash_name, matched)
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Same boundary check as above; can't rsplit
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(found, line, 'file_name',
                                            file_name, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r'\s+', line):
                    _add_to_matches(found, line, 'file_name',
                                    file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, 'source',
                                            source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r'\s+', line):
                    _add_to_matches(found, line, 'source', source, matched)
                    hash_matched = True
            if not hash_matched:
                log.debug(
                    'file.extract_hash: Line \'%s\' contains %s hash '
                    '\'%s\', but line did not meet the search criteria',
                    line, matched['hash_type'], matched['hsum']
                )
    # Return the highest-priority category that produced a match.
    for found_type, found_str in (('source_hash_name', source_hash_name),
                                  ('file_name', file_name),
                                  ('source', source)):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    'file.extract_hash: Multiple %s matches for %s: %s',
                    found_type,
                    found_str,
                    ', '.join(
                        ['{0} ({1})'.format(x['hsum'], x['hash_type'])
                         for x in found[found_type]]
                    )
                )
            ret = found[found_type][0]
            log.debug(
                'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
                ret['hash_type'], ret['hsum'], found_str
            )
            return ret
    if partial:
        log.debug(
            'file.extract_hash: Returning the partially identified %s hash '
            '\'%s\'', partial['hash_type'], partial['hsum']
        )
        return partial
    log.debug('file.extract_hash: No matches, returning None')
    return None
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
    '''
    Check and (unless ``__opts__['test']``) set the ownership, mode, and
    filesystem attributes of *name*.

    ret
        An existing state-style return dict to extend, or a falsy value to
        start a fresh one.

    Returns a tuple ``(ret, perms)`` where *perms* records the pre-change
    ('l'-prefixed) and changed ('c'-prefixed) values.

    BUGFIX: the group-change detection previously tested ``user != ''``
    where it must test ``group != ''``, so an empty-string group (meaning
    "don't manage the group") was not honored when user was set.
    '''
    name = os.path.expanduser(name)
    lsattr_cmd = salt.utils.path.which('lsattr')
    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        orig_comment = ret['comment']
        ret['comment'] = []
    # Capture current ownership/mode.
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = salt.utils.files.normalize_mode(cur['mode'])
    is_dir = os.path.isdir(name)
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        # Temporarily strip chattr attributes so chmod/chown can succeed;
        # they are restored near the end of this function.
        perms['lattrs'] = ''.join(lsattr(name).get('name', ''))
        if perms['lattrs']:
            chattr(name, operator='remove', attributes=perms['lattrs'])
    # Mode changes (skipped for unfollowed symlinks, whose mode is moot).
    if mode is not None:
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode
    # Detect required user/group changes (Windows compares by SID/uid).
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(perms['luser'])
            ) or (
            not salt.utils.platform.is_windows() and user != perms['luser']
        ):
            perms['cuser'] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(perms['lgroup'])
            ) or (
            not salt.utils.platform.is_windows() and group != perms['lgroup']
        ):
            perms['cgroup'] = group
    # Apply ownership changes in a single chown call.
    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except OSError:
                ret['result'] = False
    # Verify the user change took effect ('' means "don't manage").
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (salt.utils.platform.is_windows() and
                user_to_uid(user) != user_to_uid(
                    get_user(name, follow_symlinks=follow_symlinks)) and
                user != ''
            ) or (
            not salt.utils.platform.is_windows() and
            user != get_user(name, follow_symlinks=follow_symlinks) and
            user != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user
    # Verify the group change took effect (was erroneously `user != ''`).
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (salt.utils.platform.is_windows() and
                group_to_gid(group) != group_to_gid(
                    get_group(name, follow_symlinks=follow_symlinks)) and
                group != '') or (
            not salt.utils.platform.is_windows() and
            group != get_group(name, follow_symlinks=follow_symlinks) and
            group != ''
        ):
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and group != '':
            ret['changes']['group'] = group
    # Re-attach any original comment and flatten the comment list.
    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None
    # Restore the chattr attributes stripped earlier.
    if not salt.utils.platform.is_windows() and not is_dir and lsattr_cmd:
        if perms['lattrs']:
            chattr(name, operator='add', attributes=perms['lattrs'])
    # Reconcile requested chattr attributes (files only).
    if attrs is not None and not is_dir:
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                if __opts__['test'] is True:
                    ret['changes']['attrs'] = attrs
                else:
                    if diff_attrs[0] is not None:
                        chattr(name, operator="add", attributes=diff_attrs[0])
                    if diff_attrs[1] is not None:
                        chattr(name, operator="remove", attributes=diff_attrs[1])
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if cmp_attrs[0] is not None or cmp_attrs[1] is not None:
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change attributes to {0}'.format(attrs)
                        )
                    else:
                        ret['changes']['attrs'] = attrs
    return ret, perms
def check_managed(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        **kwargs):
    '''
    Dry-run check of whether the managed file ``name`` is in the desired
    state.

    Returns ``(True, msg)`` when no changes are needed, ``(None, msg)``
    when changes would be made (msg lists them), and ``(False, msg)`` on
    error while preparing the source.
    '''
    source, source_hash = source_list(source,
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Need the cached/rendered source to compare against.
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    # Ownership/mode of files under the temp dir is not meaningful.
    if name.startswith(tempfile.gettempdir()):
        for key in ['user', 'group', 'mode']:
            changes.pop(key, None)
    __clean_tmp(sfn)
    if changes:
        log.info(changes)
        comments = ['The following values are set to be changed:\n']
        comments.extend('{0}: {1}\n'.format(key, val)
                        for key, val in six.iteritems(changes))
        return None, ''.join(comments)
    return True, 'The file {0} is in the correct state'.format(name)
def check_managed_changes(
        name,
        source,
        source_hash,
        source_hash_name,
        user,
        group,
        mode,
        attrs,
        template,
        context,
        defaults,
        saltenv,
        contents=None,
        skip_verify=False,
        keep_mode=False,
        **kwargs):
    '''
    Return the dict of changes that managing file ``name`` would make
    (empty when already in the desired state), or ``(False, comments)``
    when preparing the source fails.

    keep_mode
        When True and the source is local/salt://, take the desired mode
        from the source file itself rather than the ``mode`` argument.
    '''
    source, source_hash = source_list(source,
                                      source_hash,
                                      saltenv)
    sfn = ''
    source_sum = None
    if contents is None:
        # Need the cached/rendered source to compare against.
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs)
        if comments:
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            # Inherit the source file's mode for salt:// and local sources.
            if _urlparse(source).scheme in ('salt', 'file') \
                    or source.startswith('/'):
                try:
                    mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
                except Exception as exc:
                    log.warning('Unable to stat %s: %s', sfn, exc)
    changes = check_file_meta(name, sfn, source, source_sum, user,
                              group, mode, attrs, saltenv, contents)
    __clean_tmp(sfn)
    return changes
def check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents=None):
    '''
    Compare the on-disk file ``name`` against the desired content and
    metadata, returning a dict of pending changes (empty if in sync).

    sfn / source / source_sum
        The cached source path, source URI, and its hash dict; used to
        produce a content diff when the checksum differs.
    contents
        Inline desired contents; when given, diffed via a temp file.

    BUGFIX: the attrs comparison previously relied on operator precedence
    (``attrs is not None and A or B``) that could record
    ``changes['attrs'] = None`` and call ``_cmp_attrs`` with ``attrs=None``;
    the condition is now explicitly parenthesized and short-circuited.
    '''
    lsattr_cmd = salt.utils.path.which('lsattr')
    changes = {}
    if not source_sum:
        source_sum = {}
    lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
    if not lstats:
        # Destination does not exist at all; nothing further to compare.
        changes['newfile'] = name
        return changes
    # Content comparison via checksum (source-based management).
    if 'hsum' in source_sum:
        if source_sum['hsum'] != lstats['sum']:
            if not sfn and source:
                sfn = __salt__['cp.cache_file'](
                    source,
                    saltenv,
                    source_hash=source_sum['hsum'])
            if sfn:
                try:
                    changes['diff'] = get_diff(
                        sfn, name, template=True, show_filenames=False)
                except CommandExecutionError as exc:
                    changes['diff'] = exc.strerror
            else:
                changes['sum'] = 'Checksum differs'
    # Content comparison for inline contents: write them to a temp file
    # and diff against the destination.
    if contents is not None:
        tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
                                       text=True)
        if salt.utils.platform.is_windows():
            contents = os.linesep.join(
                _splitlines_preserving_trailing_newline(contents))
        with salt.utils.files.fopen(tmp, 'w') as tmp_:
            tmp_.write(salt.utils.stringutils.to_str(contents))
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            # Lazy %-formatting for the logger (was an eager .format call).
            log.error('Failed to diff files: %s', exc)
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__['config.option']('obfuscate_templates'):
                changes['diff'] = '<Obfuscated Template>'
            else:
                changes['diff'] = differences
    # Ownership/mode (POSIX only; Windows perms are handled elsewhere).
    if not salt.utils.platform.is_windows():
        if (user is not None
                and user != lstats['user']
                and user != lstats['uid']):
            changes['user'] = user
        if (group is not None
                and group != lstats['group']
                and group != lstats['gid']):
            changes['group'] = group
        smode = salt.utils.files.normalize_mode(lstats['mode'])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes['mode'] = mode
        # chattr attributes — only meaningful when lsattr exists and the
        # caller actually requested attributes.
        if lsattr_cmd and attrs is not None:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs[0] is not None or diff_attrs[1] is not None:
                changes['attrs'] = attrs
    return changes
def get_diff(file1,
             file2,
             saltenv='base',
             show_filenames=True,
             show_changes=True,
             template=False,
             source_hash_file1=None,
             source_hash_file2=None):
    '''
    Return a unified diff of two files (local paths or salt:// sources),
    or the empty string when they are identical.

    show_filenames
        Include the file names as diff headers.
    show_changes
        When False, return a ``<show_changes=False>`` placeholder instead
        of the actual diff.
    template
        When True and ``obfuscate_templates`` is configured, return an
        ``<Obfuscated Template>`` placeholder.
    source_hash_file1 / source_hash_file2
        Optional hashes used when caching remote sources.

    Raises:
        CommandExecutionError: when a file cannot be cached or read.
    '''
    files = (file1, file2)
    source_hashes = (source_hash_file1, source_hash_file2)
    paths = []
    errors = []
    # Cache both inputs locally (a no-op for already-local paths) and
    # collect any failures so they can be reported together.
    for filename, source_hash in zip(files, source_hashes):
        try:
            cached_path = __salt__['cp.cache_file'](filename,
                                                    saltenv,
                                                    source_hash=source_hash)
            if cached_path is False:
                errors.append(
                    u'File {0} not found'.format(
                        salt.utils.stringutils.to_unicode(filename)
                    )
                )
                continue
            paths.append(cached_path)
        except MinionError as exc:
            errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
            continue
    if errors:
        raise CommandExecutionError(
            'Failed to cache one or more files',
            info=errors
        )
    args = []
    # (The unused enumerate index from the original loop was removed.)
    for filename in files:
        try:
            with salt.utils.files.fopen(filename, 'r') as fp_:
                args.append(fp_.readlines())
        except (IOError, OSError) as exc:
            raise CommandExecutionError(
                'Failed to read {0}: {1}'.format(
                    salt.utils.stringutils.to_str(filename),
                    exc.strerror
                )
            )
    if args[0] != args[1]:
        if template and __salt__['config.option']('obfuscate_templates'):
            ret = u'<Obfuscated Template>'
        elif not show_changes:
            ret = u'<show_changes=False>'
        else:
            # Binary content gets a "replace binary" message instead of a
            # text diff.
            bdiff = _binary_replace(*files)
            if bdiff:
                ret = bdiff
            else:
                if show_filenames:
                    args.extend(
                        [salt.utils.stringutils.to_str(x) for x in files]
                    )
                ret = salt.utils.locales.sdecode(
                    ''.join(difflib.unified_diff(*args))
                )
        return ret
    return u''
def manage_file(name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None,
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors='strict',
**kwargs):
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
else:
name_sum = None
if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3}). If the \'source_hash\' value '
'refers to a remote file with multiple possible '
'matches, then it may be necessary to set '
'\'source_hash_name\'.'.format(
source_sum['hash_type'],
source,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
if __salt__['config.option']('obfuscate_templates'):
ret['changes']['diff'] = '<Obfuscated Template>'
elif not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
try:
ret['changes']['diff'] = get_diff(
real_name, sfn, show_filenames=False)
except CommandExecutionError as exc:
ret['changes']['diff'] = exc.strerror
try:
salt.utils.files.copyfile(sfn,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
if contents is not None:
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
try:
differences = get_diff(
real_name, tmp, show_filenames=False,
show_changes=show_changes, template=True)
except CommandExecutionError as exc:
ret.setdefault('warnings', []).append(
'Failed to detect changes to file: {0}'.format(exc.strerror)
)
differences = ''
if differences:
ret['changes']['diff'] = differences
try:
salt.utils.files.copyfile(tmp,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(tmp)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
__clean_tmp(tmp)
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
if not skip_verify and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
try:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
ret['changes']['diff'] = \
'Replace symbolic link with regular file'
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)
if ret['changes']:
ret['comment'] = u'File {0} updated'.format(
salt.utils.locales.sdecode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = u'File {0} is in the correct state'.format(
salt.utils.locales.sdecode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else:
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret,
'{0} drive not present'.format(drive))
if dir_mode is None and mode is not None:
# listed via a shell.
mode_list = [x for x in str(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = str(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1121
makedirs_(name,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
kwargs.get('win_inheritance'))
# pylint: enable=E1121
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
else: # source != True
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
if touch(name):
ret['changes']['new'] = 'file {0} created'.format(name)
ret['comment'] = 'Empty file'
else:
return _error(
ret, 'Empty file {0} not created'.format(name)
)
else:
if not __opts__['test']:
if touch(name):
ret['changes']['diff'] = 'New file'
else:
return _error(
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
# Copy into place
salt.utils.files.copyfile(tmp,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(tmp)
# Now copy the file contents if there is a source file
elif sfn:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(sfn)
# This is a new file, if no mode specified, use the umask to figure
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs)
if not ret['comment']:
ret['comment'] = 'File ' + name + ' updated'
if __opts__['test']:
ret['comment'] = 'File ' + name + ' not updated'
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File ' + name + ' is in the correct state'
if sfn:
__clean_tmp(sfn)
return ret
def mkdir(dir_path,
          user=None,
          group=None,
          mode=None):
    '''
    Ensure that the directory ``dir_path`` exists, creating it (and any
    missing parents) with the requested user/group/mode when absent.

    Always returns ``True``.
    '''
    target = os.path.normpath(os.path.expanduser(dir_path))
    if os.path.isdir(target):
        return True
    # Create missing directories with the requested ownership/mode so that
    # parents made on behalf of a caller (e.g. makedirs=True) carry the
    # same user and group -- principle of least surprise.
    makedirs_perms(target, user, group, mode)
    return True
def makedirs_(path,
              user=None,
              group=None,
              mode=None):
    '''
    Ensure that the *parent* directory chain of ``path`` exists, creating
    every missing ancestor with the given ownership and mode.  ``path``
    itself is not created.  Returns a status message when there is
    nothing to do.
    '''
    path = os.path.expanduser(path)
    if mode:
        mode = salt.utils.files.normalize_mode(mode)
    parent = os.path.normpath(os.path.dirname(path))
    if os.path.isdir(parent):
        # Nothing to create.
        msg = "Directory '{0}' already exists".format(parent)
        log.debug(msg)
        return msg
    if os.path.exists(parent):
        msg = "The path '{0}' already exists and is not a directory".format(
            parent
        )
        log.debug(msg)
        return msg
    # Walk upwards, collecting every missing ancestor until an existing
    # directory (or the filesystem root) is reached.
    pending = []
    while not os.path.isdir(parent):
        pending.append(parent)
        previous, parent = parent, os.path.dirname(parent)
        if previous == parent:
            # os.path.dirname() hit a fixed point without finding an
            # existing directory -- only possible for relative paths.
            raise SaltInvocationError(
                'Recursive creation for path \'{0}\' would result in an '
                'infinite loop. Please use an absolute path.'.format(parent)
            )
    # Create from the topmost missing ancestor downwards.
    for new_dir in reversed(pending):
        log.debug('Creating directory: %s', new_dir)
        mkdir(new_dir, user=user, group=group, mode=mode)
def makedirs_perms(name,
                   user=None,
                   group=None,
                   mode='0755'):
    '''
    Recursively create the directory ``name`` (``mkdir -p`` style),
    applying the requested ownership and mode to every directory created
    along the way via check_perms().
    '''
    name = os.path.expanduser(name)
    head, tail = os.path.split(name)
    if not tail:
        # ``name`` ended in a separator; split again to get a real leaf.
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        try:
            # Build the missing ancestors first.
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # Tolerate a concurrent creator winning the race.
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:
            # e.g. "xxx/newdir/." -- the recursive call already made it.
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
def get_devmm(name):
    '''
    Return the ``(major, minor)`` device numbers of a block or character
    device node, or ``(0, 0)`` when ``name`` is not a device file.
    '''
    name = os.path.expanduser(name)
    if not (is_chrdev(name) or is_blkdev(name)):
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    '''
    Check whether ``name`` is a character device.  A missing path is
    reported as ``False``; any other stat() failure propagates.
    '''
    try:
        st = os.stat(os.path.expanduser(name))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # A nonexistent path is simply "not a character device".
        return False
    return stat.S_ISCHR(st.st_mode)
def mknod_chrdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    Create a character device node at ``name`` with the given major/minor
    numbers, then apply ownership/permissions via check_perms().

    Honors test mode (``__opts__['test']``): nothing is created and
    ``result`` is ``None``.  If the path already exists it is left
    untouched and reported in ``comment``.  Creating device nodes via
    os.mknod() typically requires root privileges.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Parse the mode string as octal (stripping any 0/0o/0O prefix)
            # and OR in the character-device file-type bit.  os.mknod()
            # returns None on success.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # Only "already exists" is tolerated; other errors propagate.
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): the mode is passed to check_perms as an int built from
    # the decimal digits of the mode string (e.g. '0660' -> 660);
    # presumably check_perms normalizes this form -- confirm.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    '''
    Check whether ``name`` is a block device.  A missing path is reported
    as ``False``; any other stat() failure propagates.
    '''
    try:
        st = os.stat(os.path.expanduser(name))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # A nonexistent path is simply "not a block device".
        return False
    return stat.S_ISBLK(st.st_mode)
def mknod_blkdev(name,
                 major,
                 minor,
                 user=None,
                 group=None,
                 mode='0660'):
    '''
    Create a block device node at ``name`` with the given major/minor
    numbers, then apply ownership/permissions via check_perms().

    Honors test mode (``__opts__['test']``): nothing is created and
    ``result`` is ``None``.  If the path already exists it is left
    untouched and reported in ``comment``.  Creating device nodes via
    os.mknod() typically requires root privileges.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Parse the mode string as octal (stripping any 0/0o/0O prefix)
            # and OR in the block-device file-type bit.  os.mknod() returns
            # None on success.
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # Only "already exists" is tolerated; other errors propagate.
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): mode is handed to check_perms as the decimal-digit int
    # form of the mode string (e.g. '0660' -> 660) -- confirm check_perms
    # expects this.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def is_fifo(name):
    '''
    Check whether ``name`` is a FIFO (named pipe).  A missing path is
    reported as ``False``; any other stat() failure propagates.
    '''
    try:
        st = os.stat(os.path.expanduser(name))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        # A nonexistent path is simply "not a FIFO".
        return False
    return stat.S_ISFIFO(st.st_mode)
def mknod_fifo(name,
               user=None,
               group=None,
               mode='0660'):
    '''
    Create a FIFO (named pipe) at ``name``, then apply ownership and
    permissions via check_perms().

    Honors test mode (``__opts__['test']``): nothing is created and
    ``result`` is ``None``.  An existing path is left untouched and
    reported in ``comment``.
    '''
    name = os.path.expanduser(name)
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating FIFO name: {0}'.format(name))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            # Parse the mode string as octal (stripping any 0/0o/0O
            # prefix); os.mkfifo() returns None on success.
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except OSError as exc:
        # Only "already exists" is tolerated; other errors propagate.
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # NOTE(review): mode is handed to check_perms as the decimal-digit int
    # form of the mode string (e.g. '0660' -> 660) -- confirm check_perms
    # expects this.
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
    return ret
def mknod(name,
          ntype,
          major=0,
          minor=0,
          user=None,
          group=None,
          mode='0600'):
    '''
    Create a special file node, dispatching on ``ntype``:
    ``'c'`` -> character device, ``'b'`` -> block device,
    ``'p'`` -> FIFO/pipe.  Any other type raises SaltInvocationError.
    '''
    # Make sure the parent directories exist before creating the node.
    makedirs_(name, user, group)
    if ntype == 'c':
        return mknod_chrdev(name, major, minor, user, group, mode)
    if ntype == 'b':
        return mknod_blkdev(name, major, minor, user, group, mode)
    if ntype == 'p':
        # FIFOs have no major/minor numbers.
        return mknod_fifo(name, user, group, mode)
    raise SaltInvocationError(
        'Node type unavailable: \'{0}\'. Available node types are '
        'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
    )
def list_backups(path, limit=None):
    '''
    Return metadata about the minion backups kept for ``path``, newest
    first, keyed by an integer index (0 == most recent).  ``limit``
    caps the number of entries returned.
    '''
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit=None means "no limit"; slicing with None below keeps all.
        pass
    except ValueError:
        log.error("file.list_backups: 'limit' value must be numeric")
        limit = None
    bkroot = _get_bkroot()
    parent_dir, leaf = os.path.split(path)
    if salt.utils.platform.is_windows():
        # Drive-letter colons are not valid in directory names.
        src_dir = parent_dir.replace(':', '_')
    else:
        src_dir = parent_dir[1:]
    bkdir = os.path.join(bkroot, src_dir)
    if not os.path.isdir(bkdir):
        return {}
    if salt.utils.platform.is_windows():
        # Colons are illegal in Windows filenames, so timestamps use dashes.
        strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(leaf)
        str_format = '%a %b %d %Y %H-%M-%S.%f'
    else:
        strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(leaf)
        str_format = '%a %b %d %Y %H:%M:%S.%f'
    files = {}
    for fname in os.listdir(bkdir):
        if not os.path.isfile(os.path.join(bkdir, fname)):
            continue
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # Not one of our backup files; ignore it.
            continue
        location = os.path.join(bkdir, fname)
        files[timestamp] = {
            'Backup Time': timestamp.strftime(str_format),
            'Size': os.stat(location).st_size,
            'Location': location,
        }
    newest_first = sorted(files, reverse=True)[:limit]
    return dict(enumerate(files[key] for key in newest_first))


list_backup = salt.utils.functools.alias_function(list_backups, 'list_backup')
def list_backups_dir(path, limit=None):
    '''
    Return metadata about the minion backups kept for the files of the
    directory ``path``, grouped per original file name, newest first
    within each group.  ``limit`` caps the entries kept per file.
    '''
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit=None means "no limit"; the [:limit] slice below keeps all.
        pass
    except ValueError:
        log.error('file.list_backups_dir: \'limit\' value must be numeric')
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    bkdir = os.path.join(bkroot, parent_dir[1:])
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    # Map each original file name (the part before the first '_') to the
    # number of backup entries present for it; the listing is sorted so
    # groupby() sees equal keys adjacently.
    f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
    ff = os.listdir(bkdir)
    for i, n in six.iteritems(f):
        ssfile = {}
        for x in sorted(ff):
            basename = x.split('_')[0]
            if i == basename:
                strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Not a backup produced by the minion; skip it.
                    continue
                ssfile.setdefault(timestamp, {})['Backup Time'] = \
                    timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
                location = os.path.join(bkdir, x)
                ssfile[timestamp]['Size'] = os.stat(location).st_size
                ssfile[timestamp]['Location'] = location
        # Re-key the newest backups of this file as indices 0..n-1.
        sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    '''
    Restore the backup of ``path`` identified by ``backup_id`` (the
    integer index returned by list_backups).  The current file is itself
    backed up before being overwritten.
    '''
    path = os.path.expanduser(path)
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # Reject ids such as '007' whose int() round-trip changes length.
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        entry = list_backups(path)[int(backup_id)]
    except ValueError:
        # backup_id was not numeric at all.
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret
    # Preserve the file we are about to overwrite.
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(entry['Location'], path)
    except IOError as exc:
        ret['comment'] = \
            'Unable to restore {0} to {1}: ' \
            '{2}'.format(entry['Location'], path, exc)
        return ret
    ret['result'] = True
    ret['comment'] = 'Successfully restored {0} to ' \
                     '{1}'.format(entry['Location'], path)
    if not salt.utils.platform.is_windows():
        # Re-apply the restored file's current ownership (best effort).
        try:
            fstat = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)
    return ret
def delete_backup(path, backup_id):
    '''
    Delete the backup of ``path`` identified by ``backup_id`` (the
    integer index returned by list_backups).
    '''
    path = os.path.expanduser(path)
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        # Reject ids such as '007' whose int() round-trip changes length.
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        entry = list_backups(path)[int(backup_id)]
    except ValueError:
        # backup_id was not numeric at all.
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret
    try:
        os.remove(entry['Location'])
    except IOError as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(entry['Location'],
                                                            exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(entry['Location'])
    return ret


remove_backup = salt.utils.functools.alias_function(delete_backup, 'remove_backup')
def grep(path,
         pattern,
         *opts):
    '''
    Run the system ``grep`` over ``path`` searching for ``pattern``.
    Each additional command-line option must be passed as its own
    argument (e.g. ``'-i'``, ``'-B2'``), never several in one string.
    '''
    path = os.path.expanduser(path)
    flags = []
    for opt in opts:
        try:
            pieces = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string options (e.g. ints) must be stringified first.
            pieces = salt.utils.args.shlex_split(str(opt))
        if len(pieces) > 1:
            raise SaltInvocationError(
                'Passing multiple command line arguments in a single string '
                'is not supported, please pass the following arguments '
                'separately: {0}'.format(opt)
            )
        flags.extend(pieces)
    # python_shell=False: argv list execution, no shell interpolation.
    try:
        return __salt__['cmd.run_all'](['grep'] + flags + [pattern, path],
                                       python_shell=False)
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)
def open_files(by_pid=False):
    '''
    Report open files on the system by scanning ``/proc`` (Linux-only).

    by_pid=False (default): returns ``{filename: [pids...]}``.
    by_pid=True: returns ``{pid: [filenames...]}``.
    '''
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            # Numeric entries under /proc are process IDs.
            pids[int(pfile)] = []
        except ValueError:
            pass
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            # The process exited while we were scanning.
            continue
        # Collect candidate fd paths for the process and every thread,
        # plus each thread's executable.
        fd_ = []
        for fpath in os.listdir('{0}/fd'.format(ppath)):
            fd_.append('{0}/fd/{1}'.format(ppath, fpath))
        for tid in tids:
            try:
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue
            for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))
        fd_ = sorted(set(fd_))
        # Resolve each fd symlink and keep only targets that still stat()
        # cleanly (drops sockets, pipes and deleted files).
        for fdpath in fd_:
            try:
                name = os.path.realpath(fdpath)
                os.stat(name)
            except OSError:
                continue
            if name not in files:
                files[name] = [pid]
            else:
                files[name].append(pid)
                files[name] = sorted(set(files[name]))
            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))
    if by_pid:
        return pids
    return files
def pardir():
    '''
    Return the string the OS uses to refer to the parent directory
    ('..' on both POSIX and Windows).
    '''
    return os.pardir
def normpath(path):
    '''
    Return ``path`` with redundant separators and up-level references
    collapsed (see os.path.normpath); purely lexical, no filesystem
    access.
    '''
    return os.path.normpath(path)
def basename(path):
    '''
    Return the final component of ``path`` -- everything after the last
    path separator (equivalent to os.path.basename).
    '''
    _, leaf = os.path.split(path)
    return leaf
def dirname(path):
    '''
    Return the directory component of ``path`` -- everything before the
    last path separator (equivalent to os.path.dirname).
    '''
    head, _ = os.path.split(path)
    return head
def join(*args):
    '''
    Join one or more path components using the OS path separator
    (see os.path.join).
    '''
    return os.path.join(*args)
def move(src, dst):
    '''
    Move/rename ``src`` to ``dst``.  Both paths must be absolute
    (after ~ expansion); otherwise SaltInvocationError is raised.
    Returns a result dict on success; failures raise
    CommandExecutionError.
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    if not os.path.isabs(src):
        raise SaltInvocationError('Source path must be absolute.')
    if not os.path.isabs(dst):
        raise SaltInvocationError('Destination path must be absolute.')
    try:
        shutil.move(src, dst)
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )
    return {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }
def diskusage(path):
    '''
    Recursively compute the disk usage of ``path`` in bytes.  Hard links
    are counted once (deduplicated by inode number); files that vanish
    mid-walk are skipped.  A plain file returns its own size; a missing
    path yields 0.
    '''
    if os.path.isfile(path):
        # Single file: its size is the answer.
        return os.stat(path).st_size
    total = 0
    seen_inodes = set()
    for dirpath, _, filenames in os.walk(path):
        for fname in filenames:
            try:
                st = os.stat(os.path.join(dirpath, fname))
            except OSError:
                # Raced with a delete, or a dangling symlink.
                continue
            if st.st_ino not in seen_inodes:
                seen_inodes.add(st.st_ino)
                total += st.st_size
    return total
| true | true |
f7145d3436320d6def2071605cdc3fc5a509c911 | 2,682 | py | Python | catalog/bindings/gmd/cubic_spline_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/cubic_spline_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/cubic_spline_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_curve_segment_type import AbstractCurveSegmentType
from bindings.gmd.coordinates import Coordinates
from bindings.gmd.curve_interpolation_type import CurveInterpolationType
from bindings.gmd.point_property import PointProperty
from bindings.gmd.point_rep import PointRep
from bindings.gmd.pos import Pos
from bindings.gmd.pos_list import PosList
from bindings.gmd.vector_type import VectorType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CubicSplineType(AbstractCurveSegmentType):
    """Generated binding for the GML ``CubicSplineType`` curve segment.

    The control points may be supplied either as repeated
    ``pos``/``pointProperty``/``pointRep`` elements (at least two, in
    document order) or as a single ``posList``/``coordinates`` element.
    ``vectorAtStart`` and ``vectorAtEnd`` are required tangent vectors at
    the two ends of the segment.  ``interpolation`` and ``degree`` are
    fixed XML attributes (cubic-spline interpolation, degree 3) and are
    therefore excluded from ``__init__``.
    """

    # Control points as direct coordinate tuples (gml:pos), min. two.
    pos: List[Pos] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Control points given by reference/containment (gml:pointProperty).
    point_property: List[PointProperty] = field(
        default_factory=list,
        metadata={
            "name": "pointProperty",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    point_rep: List[PointRep] = field(
        default_factory=list,
        metadata={
            "name": "pointRep",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Alternative compact encodings of all control points at once.
    pos_list: Optional[PosList] = field(
        default=None,
        metadata={
            "name": "posList",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    coordinates: Optional[Coordinates] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Required tangent vectors at the start and end of the spline.
    vector_at_start: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtStart",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    vector_at_end: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtEnd",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    # Fixed attribute values mandated by the schema (not settable).
    interpolation: CurveInterpolationType = field(
        init=False,
        default=CurveInterpolationType.CUBIC_SPLINE,
        metadata={
            "type": "Attribute",
        },
    )
    degree: int = field(
        init=False,
        default=3,
        metadata={
            "type": "Attribute",
        },
    )
| 28.83871 | 77 | 0.561894 | from dataclasses import dataclass, field
from typing import List, Optional
from bindings.gmd.abstract_curve_segment_type import AbstractCurveSegmentType
from bindings.gmd.coordinates import Coordinates
from bindings.gmd.curve_interpolation_type import CurveInterpolationType
from bindings.gmd.point_property import PointProperty
from bindings.gmd.point_rep import PointRep
from bindings.gmd.pos import Pos
from bindings.gmd.pos_list import PosList
from bindings.gmd.vector_type import VectorType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class CubicSplineType(AbstractCurveSegmentType):
    """Generated binding for the GML ``CubicSplineType`` curve segment.

    The control points may be supplied either as repeated
    ``pos``/``pointProperty``/``pointRep`` elements (at least two, in
    document order) or as a single ``posList``/``coordinates`` element.
    ``vectorAtStart`` and ``vectorAtEnd`` are required tangent vectors at
    the two ends of the segment.  ``interpolation`` and ``degree`` are
    fixed XML attributes (cubic-spline interpolation, degree 3) and are
    therefore excluded from ``__init__``.
    """

    # Control points as direct coordinate tuples (gml:pos), min. two.
    pos: List[Pos] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Control points given by reference/containment (gml:pointProperty).
    point_property: List[PointProperty] = field(
        default_factory=list,
        metadata={
            "name": "pointProperty",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    point_rep: List[PointRep] = field(
        default_factory=list,
        metadata={
            "name": "pointRep",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "min_occurs": 2,
            "sequential": True,
        },
    )
    # Alternative compact encodings of all control points at once.
    pos_list: Optional[PosList] = field(
        default=None,
        metadata={
            "name": "posList",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    coordinates: Optional[Coordinates] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
        },
    )
    # Required tangent vectors at the start and end of the spline.
    vector_at_start: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtStart",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    vector_at_end: Optional[VectorType] = field(
        default=None,
        metadata={
            "name": "vectorAtEnd",
            "type": "Element",
            "namespace": "http://www.opengis.net/gml",
            "required": True,
        },
    )
    # Fixed attribute values mandated by the schema (not settable).
    interpolation: CurveInterpolationType = field(
        init=False,
        default=CurveInterpolationType.CUBIC_SPLINE,
        metadata={
            "type": "Attribute",
        },
    )
    degree: int = field(
        init=False,
        default=3,
        metadata={
            "type": "Attribute",
        },
    )
| true | true |
f7145dbe062462ea587231c7a6d56ded0ad5f8e1 | 323 | py | Python | examples/02_Example_WaterwaySearch/TerminalColors.py | jaywilhelm/OpenUxAS | 76b08d94c4c51ca51d9f79c9db03d7344e9d6552 | [
"NASA-1.3"
] | 13 | 2019-09-19T01:07:23.000Z | 2022-01-06T17:25:48.000Z | src/TerminalColors.py | JTEnglish/UAVHeading-CollisionAvoidance | 97e732616b6243184d64455e143ffe798840273a | [
"MIT"
] | 3 | 2019-06-10T06:10:52.000Z | 2020-07-21T16:10:41.000Z | src/TerminalColors.py | JTEnglish/UAVHeading-CollisionAvoidance | 97e732616b6243184d64455e143ffe798840273a | [
"MIT"
] | 3 | 2020-02-12T06:13:36.000Z | 2021-02-14T03:00:34.000Z | '''
Class: TerminalColors
Credit: https://stackoverflow.com/questions/287871/print-in-terminal-with-colors
'''
class TerminalColors:
    """ANSI escape sequences for colored/styled terminal output.

    Usage: print(TerminalColors.WARNING + 'text' + TerminalColors.ENDC).
    Credit: https://stackoverflow.com/questions/287871/print-in-terminal-with-colors
    """
    # Fix: stray dataset-statistics text had been fused onto the UNDERLINE
    # line ("... | 23.071429 | 81 | ..."), turning the class body into a
    # str | float expression that raises TypeError at import time.
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
class TerminalColors:
    """ANSI escape sequences for colored/styled terminal output.

    Usage: print(TerminalColors.WARNING + 'text' + TerminalColors.ENDC).
    """
    # Fix: stray dataset-metadata text had been fused onto the UNDERLINE
    # line ("... | true | true |"), making the class body a syntax error.
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
f7145e9a3b17f8a481672804c82a177d305100f7 | 3,194 | py | Python | pictures/tests.py | David5627/My_Gallary | cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3 | [
"MIT"
] | null | null | null | pictures/tests.py | David5627/My_Gallary | cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3 | [
"MIT"
] | null | null | null | pictures/tests.py | David5627/My_Gallary | cfbdcb13586f3d132993f9ceb1aa84c2f0ca61b3 | [
"MIT"
] | null | null | null | from django.test import TestCase
# Create your tests here.
from .models import Image, Category, Location
class TestImage(TestCase):
    """Unit tests for the Image model and its helper methods."""

    def setUp(self):
        self.location = Location(locationName='Kiambu')
        self.location.saveLocation()
        self.category = Category(categoryName='job')
        self.category.saveCategory()
        self.testInstance = Image(id=1, imageName='IMG.jpg',
                                  imageDescription=' a test image',
                                  imageLocation=self.location,
                                  imageCategory=self.category)

    def test_instance(self):
        self.assertTrue(isinstance(self.testInstance, Image))

    def test_save_image(self):
        self.testInstance.saveImage()
        savedImages = Image.objects.all()
        self.assertTrue(len(savedImages) > 0)

    def test_delete_image(self):
        self.testInstance.deleteImage()
        images = Image.objects.all()
        self.assertTrue(len(images) == 0)

    def test_update_image(self):
        self.testInstance.saveImage()
        self.testInstance.updateImage(self.testInstance.id, 'images/img.jpg')
        # Fix: the filter previously queried 'images/test.jpg', a path that
        # is never written above, so the assertion could never pass.
        imgUpdt = Image.objects.filter(image='images/img.jpg')
        self.assertTrue(len(imgUpdt) > 0)

    def test_get_image_by_id(self):
        # NOTE(review): assertTrue's second argument is a failure *message*;
        # this does not actually compare imageF with image.
        imageF = self.testInstance.getimageById(self.testInstance.id)
        image = Image.objects.filter(id=self.testInstance.id)
        self.assertTrue(imageF, image)

    def test_search_image_by_location(self):
        # Fix: the result was bound to foundImages but asserted through the
        # undefined name found_images (NameError at test run).
        self.testInstance.saveImage()
        foundImages = self.testInstance.filterimageByLocation(imageLocation='Kiambu')
        self.assertTrue(len(foundImages) == 1)

    def test_search_image_by_category(self):
        # Fix: the result was bound to foundImages but asserted through the
        # undefined name found_img (NameError at test run).
        category = 'food'
        foundImages = self.testInstance.searchImage(category)
        self.assertTrue(len(foundImages) > 1)

    def tearDown(self):
        Image.objects.all().delete()
        Location.objects.all().delete()
        Category.objects.all().delete()
class TestLocation(TestCase):
    """Unit tests for the Location model."""

    def setUp(self):
        # NOTE(review): TestImage.setUp constructs Location(locationName=...);
        # confirm which keyword matches the model's actual field name.
        self.location = Location(name='kiambu')
        self.location.saveLocation()

    def test_instance(self):
        self.assertTrue(isinstance(self.location, Location))

    def test_save_location(self):
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 0)

    def test_get_locations(self):
        # saveLocation() is also called in setUp, so after this second save
        # the test expects more than one stored location.
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 1)

    def test_delete_location(self):
        self.location.deleteLocation()
        location = Location.objects.all()
        self.assertTrue(len(location) == 0)
class CategoryTestClass(TestCase):
    """Unit tests for the Category model."""
    # Fix: stray dataset-statistics text had been fused onto the final
    # assertion line, making the class body invalid.

    def setUp(self):
        # NOTE(review): TestImage.setUp constructs Category(categoryName=...);
        # confirm which keyword matches the model's actual field name.
        self.category = Category(name='job')
        self.category.saveCategory()

    def test_instance(self):
        self.assertTrue(isinstance(self.category, Category))

    def test_save_category(self):
        self.category.saveCategory()
        categories = Category.objects.all()
        self.assertTrue(len(categories) > 0)

    def test_delete_category(self):
        self.category.deleteCategory()
        category = Category.objects.all()
        self.assertTrue(len(category) == 0)
from .models import Image, Category, Location
class TestImage(TestCase):
    """Unit tests for the Image model and its helper methods."""

    def setUp(self):
        self.location = Location(locationName='Kiambu')
        self.location.saveLocation()
        self.category = Category(categoryName='job')
        self.category.saveCategory()
        self.testInstance = Image(id=1, imageName='IMG.jpg',
                                  imageDescription=' a test image',
                                  imageLocation=self.location,
                                  imageCategory=self.category)

    def test_instance(self):
        self.assertTrue(isinstance(self.testInstance, Image))

    def test_save_image(self):
        self.testInstance.saveImage()
        savedImages = Image.objects.all()
        self.assertTrue(len(savedImages) > 0)

    def test_delete_image(self):
        self.testInstance.deleteImage()
        images = Image.objects.all()
        self.assertTrue(len(images) == 0)

    def test_update_image(self):
        self.testInstance.saveImage()
        self.testInstance.updateImage(self.testInstance.id, 'images/img.jpg')
        # Fix: the filter previously queried 'images/test.jpg', a path that
        # is never written above, so the assertion could never pass.
        imgUpdt = Image.objects.filter(image='images/img.jpg')
        self.assertTrue(len(imgUpdt) > 0)

    def test_get_image_by_id(self):
        # NOTE(review): assertTrue's second argument is a failure *message*;
        # this does not actually compare imageF with image.
        imageF = self.testInstance.getimageById(self.testInstance.id)
        image = Image.objects.filter(id=self.testInstance.id)
        self.assertTrue(imageF, image)

    def test_search_image_by_location(self):
        # Fix: the result was bound to foundImages but asserted through the
        # undefined name found_images (NameError at test run).
        self.testInstance.saveImage()
        foundImages = self.testInstance.filterimageByLocation(imageLocation='Kiambu')
        self.assertTrue(len(foundImages) == 1)

    def test_search_image_by_category(self):
        # Fix: the result was bound to foundImages but asserted through the
        # undefined name found_img (NameError at test run).
        category = 'food'
        foundImages = self.testInstance.searchImage(category)
        self.assertTrue(len(foundImages) > 1)

    def tearDown(self):
        Image.objects.all().delete()
        Location.objects.all().delete()
        Category.objects.all().delete()
class TestLocation(TestCase):
    """Unit tests for the Location model."""

    def setUp(self):
        # NOTE(review): TestImage.setUp constructs Location(locationName=...);
        # confirm which keyword matches the model's actual field name.
        self.location = Location(name='kiambu')
        self.location.saveLocation()

    def test_instance(self):
        self.assertTrue(isinstance(self.location, Location))

    def test_save_location(self):
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 0)

    def test_get_locations(self):
        # saveLocation() is also called in setUp, so after this second save
        # the test expects more than one stored location.
        self.location.saveLocation()
        locations = Location.getLocations()
        self.assertTrue(len(locations) > 1)

    def test_delete_location(self):
        self.location.deleteLocation()
        location = Location.objects.all()
        self.assertTrue(len(location) == 0)
class CategoryTestClass(TestCase):
    """Unit tests for the Category model."""
    # Fix: stray dataset-metadata text had been fused onto the final
    # assertion line, making the class body invalid.

    def setUp(self):
        # NOTE(review): TestImage.setUp constructs Category(categoryName=...);
        # confirm which keyword matches the model's actual field name.
        self.category = Category(name='job')
        self.category.saveCategory()

    def test_instance(self):
        self.assertTrue(isinstance(self.category, Category))

    def test_save_category(self):
        self.category.saveCategory()
        categories = Category.objects.all()
        self.assertTrue(len(categories) > 0)

    def test_delete_category(self):
        self.category.deleteCategory()
        category = Category.objects.all()
        self.assertTrue(len(category) == 0)
f7145f89446bea1ed70f31be8e13fd069d3d268f | 16,419 | py | Python | venv/lib/python2.7/site-packages/sklearn/base.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/base.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | null | null | null | venv/lib/python2.7/site-packages/sklearn/base.py | bopopescu/fbserver | e812dbc4dc0cbf2fda19473015a3d7e253718a19 | [
"Apache-2.0"
] | 1 | 2020-07-23T19:26:19.000Z | 2020-07-23T19:26:19.000Z | """Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
###############################################################################
def clone(estimator, safe=True):
    """Constructs a new estimator with the same parameters.

    Clone does a deep copy of the model in an estimator
    without actually copying attached data. It yields a new estimator
    with the same parameters that has not been fit on any data.
    Lists/tuples/sets/frozensets are cloned element-wise (dicts are not
    handled).

    Parameters
    ----------
    estimator: estimator object, or list, tuple or set of objects
        The estimator or group of estimators to be cloned

    safe: boolean, optional
        If safe is false, clone will fall back to a deepcopy on objects
        that are not estimators.

    Returns
    -------
    A new unfitted object (or collection) of the same class with equal
    constructor parameters.

    Raises
    ------
    TypeError
        If ``estimator`` lacks ``get_params`` and ``safe`` is True.
    RuntimeError
        If the clone's constructor did not faithfully store a parameter.
    """
    estimator_type = type(estimator)
    # XXX: not handling dictionaries
    if estimator_type in (list, tuple, set, frozenset):
        return estimator_type([clone(e, safe=safe) for e in estimator])
    elif not hasattr(estimator, 'get_params'):
        if not safe:
            return copy.deepcopy(estimator)
        else:
            raise TypeError("Cannot clone object '%s' (type %s): "
                            "it does not seem to be a scikit-learn estimator "
                            "it does not implement a 'get_params' methods."
                            % (repr(estimator), type(estimator)))
    klass = estimator.__class__
    new_object_params = estimator.get_params(deep=False)
    # Parameters themselves are cloned with safe=False so plain values
    # (ints, arrays, ...) are deep-copied rather than rejected.
    for name, param in six.iteritems(new_object_params):
        new_object_params[name] = clone(param, safe=False)
    new_object = klass(**new_object_params)
    params_set = new_object.get_params(deep=False)
    # quick sanity check of the parameters of the clone
    for name in new_object_params:
        param1 = new_object_params[name]
        param2 = params_set[name]
        if isinstance(param1, np.ndarray):
            # For most ndarrays, we do not test for complete equality
            # (too expensive); only shape/dtype and the first/last
            # elements are compared.
            if not isinstance(param2, type(param1)):
                equality_test = False
            elif (param1.ndim > 0
                    and param1.shape[0] > 0
                    and isinstance(param2, np.ndarray)
                    and param2.ndim > 0
                    and param2.shape[0] > 0):
                equality_test = (
                    param1.shape == param2.shape
                    and param1.dtype == param2.dtype
                    # We have to use '.flat' for 2D arrays
                    and param1.flat[0] == param2.flat[0]
                    and param1.flat[-1] == param2.flat[-1]
                )
            else:
                equality_test = np.all(param1 == param2)
        elif sparse.issparse(param1):
            # For sparse matrices equality doesn't work
            if not sparse.issparse(param2):
                equality_test = False
            elif param1.size == 0 or param2.size == 0:
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.size == 0
                    and param2.size == 0
                )
            else:
                # Compare class, the first/last stored values, the number
                # of stored values and the shape.
                equality_test = (
                    param1.__class__ == param2.__class__
                    and param1.data[0] == param2.data[0]
                    and param1.data[-1] == param2.data[-1]
                    and param1.nnz == param2.nnz
                    and param1.shape == param2.shape
                )
        else:
            equality_test = new_object_params[name] == params_set[name]
        if not equality_test:
            raise RuntimeError('Cannot clone object %s, as the constructor '
                               'does not seem to set parameter %s' %
                               (estimator, name))
    return new_object
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if not name in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if not key in valid_params:
raise ValueError('Invalid parameter %s ' 'for estimator %s'
% (key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples,)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the regression
sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0, lower values are worse.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples,)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
from .cluster.bicluster.utils import get_indices
return get_indices(self.rows_[i], self.columns_[i])
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
from .cluster.bicluster.utils import get_shape
return get_shape(self.rows_[i], self.columns_[i])
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .cluster.bicluster.utils import get_submatrix
return get_submatrix(self.rows_[i], self.columns_[i], data)
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
"""Returns the final estimator if there is any."""
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
estimator = _get_sub_estimator(estimator)
return isinstance(estimator, ClassifierMixin)
| 36.006579 | 79 | 0.554053 |
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
items=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
@classmethod
def _get_param_names(cls):
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
out = dict()
for key in self._get_param_names():
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if not name in valid_params:
raise ValueError('Invalid parameter %s for estimator %s' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if not key in valid_params:
raise ValueError('Invalid parameter %s ' 'for estimator %s'
% (key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
def score(self, X, y, sample_weight=None):
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
def score(self, X, y, sample_weight=None):
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class ClusterMixin(object):
def fit_predict(self, X, y=None):
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
@property
def biclusters_(self):
return self.rows_, self.columns_
def get_indices(self, i):
from .cluster.bicluster.utils import get_indices
return get_indices(self.rows_[i], self.columns_[i])
def get_shape(self, i):
from .cluster.bicluster.utils import get_shape
return get_shape(self.rows_[i], self.columns_[i])
def get_submatrix(self, i, data):
from .cluster.bicluster.utils import get_submatrix
return get_submatrix(self.rows_[i], self.columns_[i], data)
###############################################################################
class TransformerMixin(object):
def fit_transform(self, X, y=None, **fit_params):
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
# this is just a tag for the moment
###############################################################################
# XXX: Temporary solution to figure out if an estimator is a classifier
def _get_sub_estimator(estimator):
if hasattr(estimator, 'estimator'):
# GridSearchCV and other CV-tuned estimators
return _get_sub_estimator(estimator.estimator)
if hasattr(estimator, 'steps'):
# Pipeline
return _get_sub_estimator(estimator.steps[-1][1])
return estimator
def is_classifier(estimator):
estimator = _get_sub_estimator(estimator)
return isinstance(estimator, ClassifierMixin)
| true | true |
f71461118a36638bf9f86bc877bea372a4e45f9a | 689 | py | Python | app.py | JoeDReynolds/HW_13 | 8fc15c37554069ff51e1d29685384e6e521a4b2a | [
"ADSL"
] | null | null | null | app.py | JoeDReynolds/HW_13 | 8fc15c37554069ff51e1d29685384e6e521a4b2a | [
"ADSL"
] | null | null | null | app.py | JoeDReynolds/HW_13 | 8fc15c37554069ff51e1d29685384e6e521a4b2a | [
"ADSL"
] | null | null | null | # import necessary libraries
from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
# create instance of Flask app
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# create route that renders index.html template
@app.route("/")
def index():
mars_data = mongo.db.mars_db.find_one()
return render_template("index.html", mars_data=mars_data)
@app.route("/scrape")
def scraper():
mongo.db.marsdata.drop()
results = scrape_mars.scrape()
mongo.db.marsdata.insert_one(results)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True) | 23.758621 | 62 | 0.740203 |
from flask import Flask, render_template, jsonify, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
mars_data = mongo.db.mars_db.find_one()
return render_template("index.html", mars_data=mars_data)
@app.route("/scrape")
def scraper():
mongo.db.marsdata.drop()
results = scrape_mars.scrape()
mongo.db.marsdata.insert_one(results)
return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True) | true | true |
f71461fff2ddfcf30af051a048b0f75af416145e | 3,596 | py | Python | ma.py | nishishailesh/moving_average_clin_lab | c8ee448ca16b0d3845c42cafa070dafd307594dc | [
"MIT"
] | null | null | null | ma.py | nishishailesh/moving_average_clin_lab | c8ee448ca16b0d3845c42cafa070dafd307594dc | [
"MIT"
] | null | null | null | ma.py | nishishailesh/moving_average_clin_lab | c8ee448ca16b0d3845c42cafa070dafd307594dc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import sys
import fcntl
import logging
import time
import io
import datetime
import decimal
import statistics
from astm_bidirectional_common import my_sql , file_mgmt, print_to_log
#For mysql password
sys.path.append('/var/gmcs_config')
import astm_var
####Settings section start#####
logfile_name='/var/log/ma.log'
log=1
n_size=50
####Settings section end#####
'''
select sample_id,result,avg(result)
over (ROWS BETWEEN 10 PRECEDING AND CURRENT ROW)
from result where result>0 and examination_id=5031 order by sample_id desc limit 40
'''
last_sample_id_dict={}
logging.basicConfig(filename=logfile_name,level=logging.DEBUG,format='%(asctime)s %(message)s')
if(log==0):
logging.disable(logging.DEBUG)
print_to_log("Moving Average Logging Test","[OK]")
def check_if_new_result_arrived(ms,examination_id):
global last_sample_id_dict
prepared_sql='select max(sample_id) from result where examination_id=%s and result>0'
data_tpl=(examination_id,)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
if(cur!=None):
r=ms.get_single_row(cur)
print_to_log("max sample_id for {}".format(examination_id),r[0])
ms.close_cursor(cur)
if(examination_id in last_sample_id_dict):
if(last_sample_id_dict[examination_id]==r[0]):
print_to_log("Last sample id is not changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
return False
else:
print_to_log("Last sample id is changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
prepared_sql_sample_data='select * from result where examination_id=%s and sample_id=%s'
data_tpl_sample_data=(examination_id,r[0])
cur_sample_data=ms.run_query_with_log(prepared_sql_sample_data,data_tpl_sample_data)
r_sample_data=ms.get_single_row(cur_sample_data)
return r_sample_data[0],r_sample_data[2] #sample id and result
else:
print_to_log("Examination not in dict:{}".format(last_sample_id_dict),examination_id)
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
return 0,0,0
def calculate_moving_average(ms,examination_id):
chk=check_if_new_result_arrived(ms,examination_id)
if(chk==False):
print_to_log("Last sample id is not changed.. nothing to do for:",examination_id)
return
#prepared_sql='select avg(result) from result where examination_id=%s and result>0 order by sample_id desc limit %s'
prepared_sql='select result from result where examination_id=%s and result>0 order by sample_id desc limit %s'
data_tpl=(examination_id,n_size)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
r_tuple=()
if(cur!=None):
r=ms.get_single_row(cur)
while(r!=None):
r_tuple=r_tuple+(decimal.Decimal(r[0]),)
r=ms.get_single_row(cur)
ms.close_cursor(cur)
r_avg=statistics.mean(r_tuple)
dt=datetime.datetime.now()
print_to_log("datetime",dt.strftime("%Y-%m-%d-%H-%M-%S"))
prepared_sql_insert='insert into moving_average (examination_id,date_time,avg_value,sample_id,value) values(%s,%s,%s,%s,%s)'
data_tpl_insert=(examination_id,dt,r_avg,chk[0],chk[1])
curi=ms.run_query_with_log(prepared_sql_insert,data_tpl_insert)
ms=my_sql()
ms.get_link(astm_var.my_host,astm_var.my_user,astm_var.my_pass,astm_var.my_db)
while True:
calculate_moving_average(ms,5031)
time.sleep(10)
ms.close_link()
| 35.60396 | 128 | 0.743326 |
import sys
import fcntl
import logging
import time
import io
import datetime
import decimal
import statistics
from astm_bidirectional_common import my_sql , file_mgmt, print_to_log
sys.path.append('/var/gmcs_config')
import astm_var
(logging.DEBUG)
print_to_log("Moving Average Logging Test","[OK]")
def check_if_new_result_arrived(ms,examination_id):
global last_sample_id_dict
prepared_sql='select max(sample_id) from result where examination_id=%s and result>0'
data_tpl=(examination_id,)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
if(cur!=None):
r=ms.get_single_row(cur)
print_to_log("max sample_id for {}".format(examination_id),r[0])
ms.close_cursor(cur)
if(examination_id in last_sample_id_dict):
if(last_sample_id_dict[examination_id]==r[0]):
print_to_log("Last sample id is not changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
return False
else:
print_to_log("Last sample id is changed {}".format(last_sample_id_dict),"{}:{}".format(examination_id,r[0]))
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
prepared_sql_sample_data='select * from result where examination_id=%s and sample_id=%s'
data_tpl_sample_data=(examination_id,r[0])
cur_sample_data=ms.run_query_with_log(prepared_sql_sample_data,data_tpl_sample_data)
r_sample_data=ms.get_single_row(cur_sample_data)
return r_sample_data[0],r_sample_data[2]
else:
print_to_log("Examination not in dict:{}".format(last_sample_id_dict),examination_id)
last_sample_id_dict.update({examination_id:r[0]})
print_to_log("updated dictionary",format(last_sample_id_dict))
return 0,0,0
def calculate_moving_average(ms,examination_id):
chk=check_if_new_result_arrived(ms,examination_id)
if(chk==False):
print_to_log("Last sample id is not changed.. nothing to do for:",examination_id)
return
prepared_sql='select result from result where examination_id=%s and result>0 order by sample_id desc limit %s'
data_tpl=(examination_id,n_size)
cur=ms.run_query_with_log(prepared_sql,data_tpl)
r_tuple=()
if(cur!=None):
r=ms.get_single_row(cur)
while(r!=None):
r_tuple=r_tuple+(decimal.Decimal(r[0]),)
r=ms.get_single_row(cur)
ms.close_cursor(cur)
r_avg=statistics.mean(r_tuple)
dt=datetime.datetime.now()
print_to_log("datetime",dt.strftime("%Y-%m-%d-%H-%M-%S"))
prepared_sql_insert='insert into moving_average (examination_id,date_time,avg_value,sample_id,value) values(%s,%s,%s,%s,%s)'
data_tpl_insert=(examination_id,dt,r_avg,chk[0],chk[1])
curi=ms.run_query_with_log(prepared_sql_insert,data_tpl_insert)
ms=my_sql()
ms.get_link(astm_var.my_host,astm_var.my_user,astm_var.my_pass,astm_var.my_db)
while True:
calculate_moving_average(ms,5031)
time.sleep(10)
ms.close_link()
| true | true |
f7146227c802ba11eb67d4ee45f43ada79d84b3d | 3,553 | py | Python | app/selenium_ui/jsm/pages/customer_selectors.py | mapit-plugin/dc-app-performance-toolkit | 75d7562c7ffc925c8ba8dfbe81db08af85fadcfa | [
"Apache-2.0"
] | 1 | 2021-09-17T04:34:03.000Z | 2021-09-17T04:34:03.000Z | app/selenium_ui/jsm/pages/customer_selectors.py | mapit-plugin/dc-app-performance-toolkit | 75d7562c7ffc925c8ba8dfbe81db08af85fadcfa | [
"Apache-2.0"
] | null | null | null | app/selenium_ui/jsm/pages/customer_selectors.py | mapit-plugin/dc-app-performance-toolkit | 75d7562c7ffc925c8ba8dfbe81db08af85fadcfa | [
"Apache-2.0"
] | 1 | 2020-12-30T11:12:58.000Z | 2020-12-30T11:12:58.000Z | from util.conf import JSM_SETTINGS
from selenium.webdriver.common.by import By
class UrlManager:
def __init__(self, portal_id=None, request_key=None):
self.host = JSM_SETTINGS.server_url
self.login_params = '/servicedesk/customer/user/login'
self.portal_params = f'/servicedesk/customer/portal/{portal_id}'
self.request_params = f'/servicedesk/customer/portal/{portal_id}/{request_key}'
self.my_requests = '/servicedesk/customer/user/requests'
self.all_requests = '/servicedesk/customer/user/requests?reporter=all'
def login_url(self):
return f'{self.host}{self.login_params}'
def portal_url(self):
return f'{self.host}{self.portal_params}'
def request_url(self):
return f'{self.host}{self.request_params}'
def my_requests_url(self):
return f'{self.host}{self.my_requests}'
def all_requests_url(self):
return f'{self.host}{self.all_requests}'
class LoginPageLocators:
login_url = UrlManager().login_url()
search_input_field = (By.ID, 'sd-customer-portal-smart-search-input')
welcome_logged_in_page = (By.CSS_SELECTOR, "div.cv-help-center-container")
login_field = (By.ID, 'os_username')
password_field = (By.ID, 'os_password')
login_submit_button = (By.ID, 'js-login-submit')
class TopPanelSelectors:
profile_icon = (By.XPATH, '//a[@href="#dropdown2-header"]')
profile_button = (By.CSS_SELECTOR, 'a.js-profile')
logout_button = (By.CSS_SELECTOR, 'a.js-logout')
class CustomerPortalsSelectors:
browse_portals_button = (By.CSS_SELECTOR, 'button.cv-smart-portal-browse-portals')
full_portals_list = (By.CSS_SELECTOR, 'ul.cv-smart-portal-all-portals-list')
portal_from_list = (By.CSS_SELECTOR, '"ul.cv-smart-portal-all-portals-list>li>a>span"')
class CustomerPortalSelectors:
portal_title = (By.CSS_SELECTOR, '.cv-page-title-text')
request_type = (By.CSS_SELECTOR, 'li>span.js-cv-request-type>a')
create_request_button = (By.XPATH, "//button[contains(text(),'Create')]")
summary_field = (By.ID, 'summary')
description_field = (By.ID, 'description')
required_dropdown_field = (By.CSS_SELECTOR, "#s2id_components>ul.select2-choices")
required_dropdown_list = (By.ID, 'select2-drop')
required_dropdown_element = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
required_calendar_button = (By.CSS_SELECTOR, 'button#trigger-duedate')
required_calendar_input_field = (By.CSS_SELECTOR, 'input#duedate')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
class RequestSelectors:
request_url = UrlManager().request_url()
request_option = (By.CLASS_NAME, 'cv-request-options')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]")
share_request_button = (By.CSS_SELECTOR, 'a.js-share-request')
share_request_search_field = (By.ID, 's2id_participants')
share_request_dropdown = (By.ID, 'select2-drop')
share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
share_request_dropdown_one_elem = (By.CSS_SELECTOR,
'#select2-drop>ul.select2-results>li>div>span.user-picker-display-name')
share_request_modal_button = (By.XPATH, "//button[contains(text(),'Share')]")
class RequestsSelectors:
my_requests_url = UrlManager().my_requests_url()
requests_label = (By.XPATH, "//h2[contains(text(),'Requests')]")
| 39.921348 | 111 | 0.710104 | from util.conf import JSM_SETTINGS
from selenium.webdriver.common.by import By
class UrlManager:
def __init__(self, portal_id=None, request_key=None):
self.host = JSM_SETTINGS.server_url
self.login_params = '/servicedesk/customer/user/login'
self.portal_params = f'/servicedesk/customer/portal/{portal_id}'
self.request_params = f'/servicedesk/customer/portal/{portal_id}/{request_key}'
self.my_requests = '/servicedesk/customer/user/requests'
self.all_requests = '/servicedesk/customer/user/requests?reporter=all'
def login_url(self):
return f'{self.host}{self.login_params}'
def portal_url(self):
return f'{self.host}{self.portal_params}'
def request_url(self):
return f'{self.host}{self.request_params}'
def my_requests_url(self):
return f'{self.host}{self.my_requests}'
def all_requests_url(self):
return f'{self.host}{self.all_requests}'
class LoginPageLocators:
login_url = UrlManager().login_url()
search_input_field = (By.ID, 'sd-customer-portal-smart-search-input')
welcome_logged_in_page = (By.CSS_SELECTOR, "div.cv-help-center-container")
login_field = (By.ID, 'os_username')
password_field = (By.ID, 'os_password')
login_submit_button = (By.ID, 'js-login-submit')
class TopPanelSelectors:
profile_icon = (By.XPATH, '//a[@href="#dropdown2-header"]')
profile_button = (By.CSS_SELECTOR, 'a.js-profile')
logout_button = (By.CSS_SELECTOR, 'a.js-logout')
class CustomerPortalsSelectors:
browse_portals_button = (By.CSS_SELECTOR, 'button.cv-smart-portal-browse-portals')
full_portals_list = (By.CSS_SELECTOR, 'ul.cv-smart-portal-all-portals-list')
portal_from_list = (By.CSS_SELECTOR, '"ul.cv-smart-portal-all-portals-list>li>a>span"')
class CustomerPortalSelectors:
portal_title = (By.CSS_SELECTOR, '.cv-page-title-text')
request_type = (By.CSS_SELECTOR, 'li>span.js-cv-request-type>a')
create_request_button = (By.XPATH, "//button[contains(text(),'Create')]")
summary_field = (By.ID, 'summary')
description_field = (By.ID, 'description')
required_dropdown_field = (By.CSS_SELECTOR, "#s2id_components>ul.select2-choices")
required_dropdown_list = (By.ID, 'select2-drop')
required_dropdown_element = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
required_calendar_button = (By.CSS_SELECTOR, 'button#trigger-duedate')
required_calendar_input_field = (By.CSS_SELECTOR, 'input#duedate')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
class RequestSelectors:
request_url = UrlManager().request_url()
request_option = (By.CLASS_NAME, 'cv-request-options')
comment_request_field = (By.CSS_SELECTOR, 'textarea#comment-on-request')
add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]")
share_request_button = (By.CSS_SELECTOR, 'a.js-share-request')
share_request_search_field = (By.ID, 's2id_participants')
share_request_dropdown = (By.ID, 'select2-drop')
share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li')
share_request_dropdown_one_elem = (By.CSS_SELECTOR,
'#select2-drop>ul.select2-results>li>div>span.user-picker-display-name')
share_request_modal_button = (By.XPATH, "//button[contains(text(),'Share')]")
class RequestsSelectors:
my_requests_url = UrlManager().my_requests_url()
requests_label = (By.XPATH, "//h2[contains(text(),'Requests')]")
| true | true |
f71462edb7ac3e4f02fba779f9139da6a78624ba | 6,058 | py | Python | tests/evaluators_test.py | gaussalgo/adaptor | 8d8ae1b7694108f4bde78c127fe9ff97fa6b9470 | [
"MIT"
] | 11 | 2022-01-25T13:44:15.000Z | 2022-03-16T12:46:58.000Z | tests/evaluators_test.py | gaussalgo/adaptor | 8d8ae1b7694108f4bde78c127fe9ff97fa6b9470 | [
"MIT"
] | 3 | 2022-01-29T18:19:01.000Z | 2022-02-01T15:34:44.000Z | tests/evaluators_test.py | gaussalgo/adaptor | 8d8ae1b7694108f4bde78c127fe9ff97fa6b9470 | [
"MIT"
] | 1 | 2022-02-17T17:11:40.000Z | 2022-02-17T17:11:40.000Z | from adaptor.evaluators.generative import GenerativeEvaluator
from adaptor.evaluators.sequence_classification import SeqClassificationEvaluator
from adaptor.evaluators.token_classification import TokenClassificationEvaluator
from adaptor.lang_module import LangModule
from adaptor.objectives.objective_base import Objective
from adaptor.objectives.seq2seq import Sequence2Sequence
from utils import paths, test_base_models
def assert_evaluator_logs(lang_module: LangModule, objective: Objective, split: str) -> None:
    """Run one batch through `objective` and check every evaluator of `split` logs a float."""
    batch = next(iter(objective.get_dataset(split, objective_i=0, device="cpu")))
    # Passing labels makes the HF model compute its own loss; the Objective
    # recomputes it independently below.
    model_outputs = lang_module(**batch)
    loss = objective.compute_loss(model_outputs, batch["labels"], batch, split)
    assert loss.item()
    logged = objective.per_objective_log(split)
    # every log key must be namespaced with the objective's identifier
    assert all(str(objective) in key for key in logged.keys())
    for evaluator in objective.evaluators[split]:
        # each evaluator of this split must have produced a float entry
        assert any(str(evaluator) in key and isinstance(value, float)
                   for key, value in logged.items())
gen_lang_module = LangModule(test_base_models["translation_mono"])
gen_lang_module_multi = LangModule(test_base_models["translation_multi"]["model"])
def assert_gen_evaluator_logs(evaluator: GenerativeEvaluator, split: str) -> None:
    """Wrap `evaluator` in a Sequence2Sequence objective and verify its logs."""
    objective = Sequence2Sequence(
        gen_lang_module,
        texts_or_path=paths["texts"]["translation"],
        labels_or_path=paths["labels"]["translation"],
        batch_size=1,
        train_evaluators=[evaluator],
        val_evaluators=[evaluator],
    )
    assert_evaluator_logs(gen_lang_module, objective, split)
def assert_gen_evaluator_logs_mbart(evaluator: GenerativeEvaluator, split: str) -> None:
    """Like assert_gen_evaluator_logs, but with the multilingual model and explicit language ids."""
    objective = Sequence2Sequence(
        gen_lang_module_multi,
        texts_or_path=paths["texts"]["translation"],
        labels_or_path=paths["labels"]["translation"],
        batch_size=1,
        train_evaluators=[evaluator],
        val_evaluators=[evaluator],
        source_lang_id=test_base_models["translation_multi"]["test_src_lang"],
        target_lang_id=test_base_models["translation_multi"]["test_tgt_lang"],
    )
    assert_evaluator_logs(gen_lang_module_multi, objective, split)
def assert_ner_evaluator_logs(evaluator: TokenClassificationEvaluator, split: str) -> None:
    """Wrap `evaluator` in a TokenClassification objective and verify its logs."""
    from adaptor.objectives.classification import TokenClassification

    module = LangModule(test_base_models["token_classification"])
    objective = TokenClassification(
        module,
        texts_or_path=paths["texts"]["ner"],
        labels_or_path=paths["labels"]["ner"],
        batch_size=1,
        train_evaluators=[evaluator],
        val_evaluators=[evaluator],
    )
    assert_evaluator_logs(module, objective, split)
def assert_classification_evaluator_logs(evaluator: SeqClassificationEvaluator, split: str) -> None:
    """Wrap `evaluator` in a SequenceClassification objective and verify its logs."""
    from adaptor.objectives.classification import SequenceClassification

    module = LangModule(test_base_models["sequence_classification"])
    objective = SequenceClassification(
        module,
        texts_or_path=paths["texts"]["classification"],
        labels_or_path=paths["labels"]["classification"],
        batch_size=1,
        train_evaluators=[evaluator],
        val_evaluators=[evaluator],
    )
    assert_evaluator_logs(module, objective, split)
def test_bleu():
    # BLEU computed with autoregressive decoding (use_generate=True).
    from adaptor.evaluators.generative import BLEU
    assert_gen_evaluator_logs(BLEU(use_generate=True, decides_convergence=True), "train")
def test_bleu_mbart():
    # Same BLEU check, but on the multilingual (mBART-style) model path.
    from adaptor.evaluators.generative import BLEU
    assert_gen_evaluator_logs_mbart(BLEU(use_generate=True, decides_convergence=True), "train")
def test_rouge():
    # ROUGE evaluated on teacher-forced outputs (use_generate=False).
    from adaptor.evaluators.generative import ROUGE
    assert_gen_evaluator_logs(ROUGE(use_generate=False, decides_convergence=True), "train")
def test_bertscore():
    # BERTScore evaluated on teacher-forced outputs (use_generate=False).
    from adaptor.evaluators.generative import BERTScore
    assert_gen_evaluator_logs(BERTScore(use_generate=False, decides_convergence=True), "train")
def test_meteor():
    # METEOR with default decoding settings.
    from adaptor.evaluators.generative import METEOR
    assert_gen_evaluator_logs(METEOR(decides_convergence=True), "train")
def test_prism():
    """
    PRISM downloads relatively big model, we omit that by default.
    """
    # Intentionally disabled to avoid a large model download in CI;
    # uncomment locally to run the full check.
    # from adaptor.evaluators.generative import PRISM
    # assert_gen_evaluator_logs(PRISM(use_cuda=False, language="en", decides_convergence=True), "train")
def test_divergence():
    """
    Default JS_Divergence uses PRISM - note that this test will download PRISM model
    """
    # Intentionally disabled for the same reason as test_prism (PRISM download).
    # from adaptor.evaluators.generative import JS_Divergence
    # assert_gen_evaluator_logs(JS_Divergence(decides_convergence=True), "train")
def test_token_fscore():
    # Mean F-score over token-classification (NER) labels.
    from adaptor.evaluators.token_classification import MeanFScore
    assert_ner_evaluator_logs(MeanFScore(decides_convergence=True), "train")
def test_sequence_accuracy():
    # Plain accuracy for sequence classification; not used as a convergence signal.
    from adaptor.evaluators.sequence_classification import SequenceAccuracy
    assert_classification_evaluator_logs(SequenceAccuracy(decides_convergence=False), "train")
| 44.544118 | 113 | 0.680753 | from adaptor.evaluators.generative import GenerativeEvaluator
from adaptor.evaluators.sequence_classification import SeqClassificationEvaluator
from adaptor.evaluators.token_classification import TokenClassificationEvaluator
from adaptor.lang_module import LangModule
from adaptor.objectives.objective_base import Objective
from adaptor.objectives.seq2seq import Sequence2Sequence
from utils import paths, test_base_models
def assert_evaluator_logs(lang_module: LangModule, objective: Objective, split: str) -> None:
dataset_sample = next(iter(objective.get_dataset(split, objective_i=0, device="cpu")))
outputs = lang_module(**dataset_sample)
loss = objective.compute_loss(outputs, dataset_sample["labels"], dataset_sample, split)
assert loss.item()
log = objective.per_objective_log(split)
assert all(str(objective) in k for k in log.keys())
for split_evaluator in objective.evaluators[split]:
# assert that each evaluator of given split was logged and has a value of expected type
assert any(str(split_evaluator) in k and isinstance(v, float) for k, v in log.items())
gen_lang_module = LangModule(test_base_models["translation_mono"])
gen_lang_module_multi = LangModule(test_base_models["translation_multi"]["model"])
def assert_gen_evaluator_logs(evaluator: GenerativeEvaluator, split: str) -> None:
gen_objective = Sequence2Sequence(gen_lang_module,
texts_or_path=paths["texts"]["translation"],
labels_or_path=paths["labels"]["translation"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(gen_lang_module, gen_objective, split)
def assert_gen_evaluator_logs_mbart(evaluator: GenerativeEvaluator, split: str) -> None:
gen_objective = Sequence2Sequence(gen_lang_module_multi,
texts_or_path=paths["texts"]["translation"],
labels_or_path=paths["labels"]["translation"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator],
source_lang_id=test_base_models["translation_multi"]["test_src_lang"],
target_lang_id=test_base_models["translation_multi"]["test_tgt_lang"])
assert_evaluator_logs(gen_lang_module_multi, gen_objective, split)
def assert_ner_evaluator_logs(evaluator: TokenClassificationEvaluator, split: str) -> None:
from adaptor.objectives.classification import TokenClassification
lang_module = LangModule(test_base_models["token_classification"])
gen_objective = TokenClassification(lang_module,
texts_or_path=paths["texts"]["ner"],
labels_or_path=paths["labels"]["ner"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(lang_module, gen_objective, split)
def assert_classification_evaluator_logs(evaluator: SeqClassificationEvaluator, split: str) -> None:
from adaptor.objectives.classification import SequenceClassification
lang_module = LangModule(test_base_models["sequence_classification"])
gen_objective = SequenceClassification(lang_module,
texts_or_path=paths["texts"]["classification"],
labels_or_path=paths["labels"]["classification"],
batch_size=1,
train_evaluators=[evaluator],
val_evaluators=[evaluator])
assert_evaluator_logs(lang_module, gen_objective, split)
def test_bleu():
from adaptor.evaluators.generative import BLEU
assert_gen_evaluator_logs(BLEU(use_generate=True, decides_convergence=True), "train")
def test_bleu_mbart():
from adaptor.evaluators.generative import BLEU
assert_gen_evaluator_logs_mbart(BLEU(use_generate=True, decides_convergence=True), "train")
def test_rouge():
from adaptor.evaluators.generative import ROUGE
assert_gen_evaluator_logs(ROUGE(use_generate=False, decides_convergence=True), "train")
def test_bertscore():
from adaptor.evaluators.generative import BERTScore
assert_gen_evaluator_logs(BERTScore(use_generate=False, decides_convergence=True), "train")
def test_meteor():
from adaptor.evaluators.generative import METEOR
assert_gen_evaluator_logs(METEOR(decides_convergence=True), "train")
def test_prism():
# from adaptor.evaluators.generative import PRISM
# assert_gen_evaluator_logs(PRISM(use_cuda=False, language="en", decides_convergence=True), "train")
def test_divergence():
# from adaptor.evaluators.generative import JS_Divergence
# assert_gen_evaluator_logs(JS_Divergence(decides_convergence=True), "train")
def test_token_fscore():
from adaptor.evaluators.token_classification import MeanFScore
assert_ner_evaluator_logs(MeanFScore(decides_convergence=True), "train")
def test_sequence_accuracy():
from adaptor.evaluators.sequence_classification import SequenceAccuracy
assert_classification_evaluator_logs(SequenceAccuracy(decides_convergence=False), "train")
| true | true |
f714644c0e16e7716dae2a067aae906a9e263d99 | 23,086 | py | Python | script_helper/Script/Network.py | jupiterman/Data-Transfer-Neural-Way | a38140aab141e4749aedc30899714ad4028a6a8a | [
"Apache-2.0"
] | 1 | 2020-02-17T06:38:58.000Z | 2020-02-17T06:38:58.000Z | script_helper/Script/Network.py | minihat/Neural-Style-Transfer | d900a5552c78f81450c3918640aa3e9210a57488 | [
"Apache-2.0"
] | null | null | null | script_helper/Script/Network.py | minihat/Neural-Style-Transfer | d900a5552c78f81450c3918640aa3e9210a57488 | [
"Apache-2.0"
] | 1 | 2018-02-07T12:59:04.000Z | 2018-02-07T12:59:04.000Z | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
"""
Neural Style Transfer with Keras 2.0.5
Based on:
https://github.com/fchollet/keras/blob/master/examples/neural_style_transfer.py
-----------------------------------------------------------------------------------------------------------------------
"""
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,
help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
parser.add_argument("--style_masks", type=str, default=None, nargs='+',
help='Masks for style images')
parser.add_argument("--content_mask", type=str, default=None,
help='Masks for the content image')
parser.add_argument("--color_mask", type=str, default=None,
help='Mask for color preservation')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
help="Scale the weighing of the style")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation weight")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
help="Number of iterations")
parser.add_argument("--model", default="vgg16", type=str,
help="Choices are 'vgg16' and 'vgg19'")
parser.add_argument("--content_loss_type", default=0, type=int,
help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--rescale_image", dest="rescale_image", default="False", type=str,
help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str,
help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str,
help="Maintain aspect ratio of loaded images")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
parser.add_argument("--pool_type", dest="pool", default="max", type=str,
help='Pooling type. Can be "ave" for average pooling or "max" for max pooling')
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
help='Preserve original color in image')
parser.add_argument('--min_improvement', default=0.0, type=float,
help='Defines minimum improvement required to continue script')
def str_to_bool(v):
    """Interpret common truthy strings ("true", "yes", "t", "1") case-insensitively."""
    return v.lower() in {"true", "yes", "t", "1"}
''' Arguments '''
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.syle_image_paths
result_prefix = args.result_prefix
style_image_paths = []
for style_image_path in style_reference_image_paths:
style_image_paths.append(style_image_path)
# Collect optional per-style mask paths and validate their count.
style_masks_present = args.style_masks is not None
mask_paths = []
if style_masks_present:
    mask_paths = list(args.style_masks)
    # Bug fix: the original assert message formatted len(style_masks_present),
    # i.e. len() of a bool, which would raise TypeError instead of printing
    # the intended diagnostic. Use the mask path list instead.
    assert len(style_image_paths) == len(mask_paths), "Wrong number of style masks provided.\n" \
                                                      "Number of style images = %d, \n" \
                                                      "Number of style mask paths = %d." % \
                                                      (len(style_image_paths), len(mask_paths))
content_mask_present = args.content_mask is not None
content_mask_path = args.content_mask
color_mask_present = args.color_mask is not None
rescale_image = str_to_bool(args.rescale_image)
maintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)
preserve_color = str_to_bool(args.color)
# these are the weights of the different loss components
content_weight = args.content_weight
total_variation_weight = args.tv_weight
style_weights = []
if len(style_image_paths) != len(args.style_weight):
print("Mismatch in number of style images provided and number of style weights provided. \n"
"Found %d style images and %d style weights. \n"
"Equally distributing weights to all other styles." % (len(style_image_paths), len(args.style_weight)))
weight_sum = sum(args.style_weight) * args.style_scale
count = len(style_image_paths)
for i in range(len(style_image_paths)):
style_weights.append(weight_sum / count)
else:
for style_weight in args.style_weight:
style_weights.append(style_weight * args.style_scale)
# Decide pooling function
pooltype = str(args.pool).lower()
assert pooltype in ["ave", "max"], 'Pooling argument is wrong. Needs to be either "ave" or "max".'
pooltype = 1 if pooltype == "ave" else 0
read_mode = "gray" if args.init_image == "gray" else "color"
# dimensions of the generated picture.
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
assert args.content_loss_type in [0, 1, 2], "Content Loss Type must be one of 0, 1 or 2"
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path, load_dims=False, read_mode="color"):
    """Open, resize and normalize an image into a 4D network-input tensor.

    When load_dims is True, also records the original image dimensions and
    aspect ratio in module-level globals (img_WIDTH/img_HEIGHT/aspect_ratio)
    and derives the working size (img_width/img_height) from --image_size.
    """
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio

    mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode)  # Prevents crashes due to PNG images (ARGB)

    if mode == "L":
        # Expand the 1 channel grayscale to 3 channel grayscale image
        temp = np.zeros(img.shape + (3,), dtype=np.uint8)
        temp[:, :, 0] = img
        temp[:, :, 1] = img.copy()
        temp[:, :, 2] = img.copy()
        img = temp

    if load_dims:
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH
        img_width = args.img_size
        if maintain_aspect_ratio:
            img_height = int(img_width * aspect_ratio)
        else:
            img_height = args.img_size

    img = imresize(img, (img_width, img_height)).astype('float32')

    # RGB -> BGR, then subtract per-channel constants (the usual VGG/ImageNet means)
    img = img[:, :, ::-1]
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68

    # channels-first layout for the Theano dim ordering
    if K.image_dim_ordering() == "th":
        img = img.transpose((2, 0, 1)).astype('float32')

    # add the leading batch dimension
    img = np.expand_dims(img, axis=0)
    return img
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Invert preprocess_image: reshape, re-add channel means, return uint8 RGB."""
    if K.image_dim_ordering() == "th":
        x = x.reshape((3, img_width, img_height)).transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))

    # re-add the per-channel constants subtracted during preprocessing
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        x[:, :, channel] += mean

    x = x[:, :, ::-1]  # BGR -> RGB
    return np.clip(x, 0, 255).astype('uint8')
# util function to preserve image color
def original_color_transform(content, generated, mask=None):
    """Transfer the content image's chroma (CbCr) onto the generated image.

    Works in YCbCr space so only luminance comes from the stylized result;
    when `mask` is given, chroma is copied only where mask == 1.
    """
    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')  # Convert to YCbCr color space

    if mask is None:
        generated[:, :, 1:] = content[:, :, 1:]  # Generated CbCr = Content CbCr
    else:
        # Vectorized replacement of the original per-pixel double loop:
        # copy CbCr only at pixels where the mask is exactly 1.
        selected = (mask == 1)
        generated[selected, 1:] = content[selected, 1:]

    generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')  # Convert to RGB color space
    return generated
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask, binarize it to {0, 1} and optionally tile it over channels.

    shape: 4-tuple of the tensor the mask will multiply (batch, ...dims...).
    return_mask_img: if True, return the 2D mask; otherwise a per-channel tensor.
    """
    if K.image_dim_ordering() == "th":
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape

    mask = imread(mask_path, mode="L")  # Grayscale mask load
    mask = imresize(mask, (width, height)).astype('float32')

    # Perform binarization of mask.
    # Bug fix: the original used `mask > 128`, which left pixels of exactly 128
    # untouched, so the mask was not strictly binary after normalization.
    mask[mask <= 127] = 0
    mask[mask > 127] = 255

    peak = np.amax(mask)  # avoid shadowing the builtin `max`
    if peak > 0:  # guard against divide-by-zero for an all-black mask
        mask /= peak

    if return_mask_img:
        return mask

    # Broadcast the 2D mask across every channel of the target tensor shape.
    mask_shape = shape[1:]
    mask_tensor = np.empty(mask_shape)

    for i in range(channels):
        if K.image_dim_ordering() == "th":
            mask_tensor[i, :, :] = mask
        else:
            mask_tensor[:, :, i] = mask

    return mask_tensor
def pooling_func(x):
    """Apply the globally selected 2x2 pooling layer (1 = average, otherwise max)."""
    layer_cls = AveragePooling2D if pooltype == 1 else MaxPooling2D
    return layer_cls((2, 2), strides=(2, 2))(x)
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))
style_reference_images = []
for style_path in style_image_paths:
style_reference_images.append(K.variable(preprocess_image(style_path)))
# this will contain our generated image
if K.image_dim_ordering() == 'th':
combination_image = K.placeholder((1, 3, img_width, img_height))
else:
combination_image = K.placeholder((1, img_width, img_height, 3))
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2 # Content and Output image not considered
# combine the various images into a single Keras tensor
input_tensor = K.concatenate(image_tensors, axis=0)
if K.image_dim_ordering() == "th":
shape = (nb_tensors, 3, img_width, img_height)
else:
shape = (nb_tensors, img_width, img_height, 3)
ip = Input(tensor=input_tensor, batch_shape=shape)
# build the VGG16 network with our 3 images as input
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)
x = pooling_func(x)
model = Model(ip, x)
if K.image_dim_ordering() == "th":
if args.model == "vgg19":
weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
if args.model == "vgg19":
weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights)
if K.backend() == 'tensorflow' and K.image_dim_ordering() == "th":
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions
# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    """Return the Gram matrix (feature-wise outer product) of a 3D feature tensor."""
    assert K.ndim(x) == 3
    # flatten to (channels, width*height) regardless of dim ordering
    if K.image_dim_ordering() == "th":
        flat = K.batch_flatten(x)
    else:
        flat = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(flat, K.transpose(flat))
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination, mask_path=None, nb_channels=None):
    """Gram-matrix style loss between a style feature map and the combination image.

    Optionally restricts the loss to masked regions: a global content mask
    (content_mask_path) gates the combination features, and a per-style mask
    (mask_path) gates the style features (and the combination too, when no
    content mask was given).
    """
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3

    if content_mask_path is not None:
        # gate the combination features by the content mask (gradient blocked)
        content_mask = K.variable(load_mask(content_mask_path, nb_channels))
        combination = combination * K.stop_gradient(content_mask)
        del content_mask

    if mask_path is not None:
        # gate the style features by this style's own mask
        style_mask = K.variable(load_mask(mask_path, nb_channels))
        style = style * K.stop_gradient(style_mask)
        if content_mask_path is None:
            # only apply the style mask to the combination when no content mask did
            combination = combination * K.stop_gradient(style_mask)
        del style_mask

    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_width * img_height
    # normalization per Gatys et al.: 1 / (4 * channels^2 * size^2)
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """Squared-error content loss, scaled according to --content_loss_type (0, 1 or 2)."""
    axis = 0 if K.image_dim_ordering() == "th" else -1
    try:
        channels = K.int_shape(base)[axis]
    except TypeError:
        # static shape unknown; fall back to the symbolic shape
        channels = K.shape(base)[axis]
    size = img_width * img_height

    if args.content_loss_type == 1:
        scale = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        scale = 1. / (channels * size)
    else:
        scale = 1.

    return scale * K.sum(K.square(combination - base))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total-variation regularizer: penalizes differences between neighbouring pixels."""
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == 'th':
        row_diff = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        col_diff = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        row_diff = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        col_diff = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(row_diff + col_diff, 1.25))
# combine these loss functions into a single scalar
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer] # 'conv5_2' or 'conv4_2'
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
combination_features)
style_masks = []
if style_masks_present:
style_masks = mask_paths # If mask present, pass dictionary of masks to style loss
else:
style_masks = [None for _ in range(nb_style_images)] # If masks not present, pass None to the style loss
channel_index = 1 if K.image_dim_ordering() == "th" else -1
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
layer_features = outputs_dict[layer_name]
shape = shape_dict[layer_name]
combination_features = layer_features[nb_tensors - 1, :, :, :]
style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
sl = []
for j in range(nb_style_images):
sl.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
for j in range(nb_style_images):
loss += (style_weights[j] / len(feature_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Evaluate the combined loss and its gradient for a flat image vector `x`."""
    if K.image_dim_ordering() == 'th':
        batch = x.reshape((1, 3, img_width, img_height))
    else:
        batch = x.reshape((1, img_width, img_height, 3))

    results = f_outputs([batch])
    loss_value = results[0]
    grad_parts = results[1:]
    # the gradient may come back as one tensor or several pieces
    if len(grad_parts) == 1:
        grad_values = grad_parts[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_parts).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Caches the (loss, gradient) pair from one eval_loss_and_grads call so that
    scipy's L-BFGS — which wants separate loss and gradient callbacks — only
    computes them once per optimizer step.
    """

    def __init__(self):
        self.loss_value = None
        # Bug fix: the original initialized `self.grads_values` (note the extra
        # 's') while loss()/grads() used `self.grad_values`, leaving a dead
        # attribute. Use one consistent name.
        self.grad_values = None

    def loss(self, x):
        # must not be called twice in a row without an intervening grads()
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        # loss() must have been called first to populate the cache
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        # reset the cache for the next optimizer step
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
if "content" in args.init_image or "gray" in args.init_image:
x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif "noise" in args.init_image:
x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
if K.image_dim_ordering() == "th":
x = x.transpose((0, 3, 1, 2))
else:
print("Using initial image : ", args.init_image)
x = preprocess_image(args.init_image, read_mode=read_mode)
# We require original image if we are to preserve color in YCbCr mode
if preserve_color:
content = imread(base_image_path, mode="YCbCr")
content = imresize(content, (img_width, img_height))
if color_mask_present:
if K.image_dim_ordering() == "th":
color_mask_shape = (None, None, img_width, img_height)
else:
color_mask_shape = (None, img_width, img_height, None)
color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)
else:
color_mask = None
else:
color_mask = None
num_iter = args.num_iter
prev_min_val = -1

improvement_threshold = float(args.min_improvement)

for i in range(num_iter):
    print("Starting iteration %d of %d" % ((i + 1), num_iter))
    start_time = time.time()

    # one L-BFGS step (at most 20 function evaluations) over the flattened image
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)

    if prev_min_val == -1:
        prev_min_val = min_val

    improvement = (prev_min_val - min_val) / prev_min_val * 100

    print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
    prev_min_val = min_val
    # save current generated image
    img = deprocess_image(x.copy())

    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)

    if not rescale_image:
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)

    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))

    # Bug fix: the original used `is not 0.0`, a float *identity* comparison
    # that is effectively always True (the parsed threshold is a fresh float
    # object), so early stopping could trigger even with a threshold of 0.
    # Compare by value instead.
    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." % (
                improvement, improvement_threshold))
            exit()
| 36.878594 | 152 | 0.668067 | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from scipy.misc import imread, imresize, imsave, fromimage, toimage
from scipy.optimize import fmin_l_bfgs_b
import numpy as np
import time
import argparse
import warnings
from keras.models import Model
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, AveragePooling2D, MaxPooling2D
from keras import backend as K
from keras.utils.data_utils import get_file
from keras.utils.layer_utils import convert_all_kernels_in_model
THEANO_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
TH_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_th_dim_ordering_th_kernels_notop.h5'
TF_19_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5'
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
help='Path to the image to transform.')
parser.add_argument('syle_image_paths', metavar='ref', nargs='+', type=str,
help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
help='Prefix for the saved results.')
parser.add_argument("--style_masks", type=str, default=None, nargs='+',
help='Masks for style images')
parser.add_argument("--content_mask", type=str, default=None,
help='Masks for the content image')
parser.add_argument("--color_mask", type=str, default=None,
help='Mask for color preservation')
parser.add_argument("--image_size", dest="img_size", default=400, type=int,
help='Minimum image size')
parser.add_argument("--content_weight", dest="content_weight", default=0.025, type=float,
help="Weight of content")
parser.add_argument("--style_weight", dest="style_weight", nargs='+', default=[1], type=float,
help="Weight of style, can be multiple for multiple styles")
parser.add_argument("--style_scale", dest="style_scale", default=1.0, type=float,
help="Scale the weighing of the style")
parser.add_argument("--total_variation_weight", dest="tv_weight", default=8.5e-5, type=float,
help="Total Variation weight")
parser.add_argument("--num_iter", dest="num_iter", default=10, type=int,
help="Number of iterations")
parser.add_argument("--model", default="vgg16", type=str,
help="Choices are 'vgg16' and 'vgg19'")
parser.add_argument("--content_loss_type", default=0, type=int,
help='Can be one of 0, 1 or 2. Readme contains the required information of each mode.')
parser.add_argument("--rescale_image", dest="rescale_image", default="False", type=str,
help="Rescale image after execution to original dimentions")
parser.add_argument("--rescale_method", dest="rescale_method", default="bilinear", type=str,
help="Rescale image algorithm")
parser.add_argument("--maintain_aspect_ratio", dest="maintain_aspect_ratio", default="True", type=str,
help="Maintain aspect ratio of loaded images")
parser.add_argument("--content_layer", dest="content_layer", default="conv5_2", type=str,
help="Content layer used for content loss.")
parser.add_argument("--init_image", dest="init_image", default="content", type=str,
help="Initial image used to generate the final image. Options are 'content', 'noise', or 'gray'")
parser.add_argument("--pool_type", dest="pool", default="max", type=str,
help='Pooling type. Can be "ave" for average pooling or "max" for max pooling')
parser.add_argument('--preserve_color', dest='color', default="False", type=str,
help='Preserve original color in image')
parser.add_argument('--min_improvement', default=0.0, type=float,
help='Defines minimum improvement required to continue script')
def str_to_bool(v):
    """Interpret a command-line string as a boolean flag (case-insensitive)."""
    truthy = {"true", "yes", "t", "1"}
    return v.lower() in truthy
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_paths = args.syle_image_paths
result_prefix = args.result_prefix
style_image_paths = []
for style_image_path in style_reference_image_paths:
style_image_paths.append(style_image_path)
style_masks_present = args.style_masks is not None
mask_paths = []
if style_masks_present:
for mask_path in args.style_masks:
mask_paths.append(mask_path)
if style_masks_present:
assert len(style_image_paths) == len(mask_paths), "Wrong number of style masks provided.\n" \
"Number of style images = %d, \n" \
"Number of style mask paths = %d." % \
(len(style_image_paths), len(style_masks_present))
content_mask_present = args.content_mask is not None
content_mask_path = args.content_mask
color_mask_present = args.color_mask is not None
rescale_image = str_to_bool(args.rescale_image)
maintain_aspect_ratio = str_to_bool(args.maintain_aspect_ratio)
preserve_color = str_to_bool(args.color)
content_weight = args.content_weight
total_variation_weight = args.tv_weight
style_weights = []
if len(style_image_paths) != len(args.style_weight):
print("Mismatch in number of style images provided and number of style weights provided. \n"
"Found %d style images and %d style weights. \n"
"Equally distributing weights to all other styles." % (len(style_image_paths), len(args.style_weight)))
weight_sum = sum(args.style_weight) * args.style_scale
count = len(style_image_paths)
for i in range(len(style_image_paths)):
style_weights.append(weight_sum / count)
else:
for style_weight in args.style_weight:
style_weights.append(style_weight * args.style_scale)
pooltype = str(args.pool).lower()
assert pooltype in ["ave", "max"], 'Pooling argument is wrong. Needs to be either "ave" or "max".'
pooltype = 1 if pooltype == "ave" else 0
read_mode = "gray" if args.init_image == "gray" else "color"
img_width = img_height = 0
img_WIDTH = img_HEIGHT = 0
aspect_ratio = 0
assert args.content_loss_type in [0, 1, 2], "Content Loss Type must be one of 0, 1 or 2"
def preprocess_image(image_path, load_dims=False, read_mode="color"):
    """Load an image and convert it to a VGG-ready float32 tensor.

    Reads the image (grayscale is expanded to 3 identical channels),
    optionally records the original dimensions in module globals
    (img_WIDTH/img_HEIGHT/aspect_ratio and the working img_width/img_height),
    resizes to the working size, converts RGB -> BGR and subtracts the
    ImageNet per-channel means expected by the VGG weights.
    """
    global img_width, img_height, img_WIDTH, img_HEIGHT, aspect_ratio
    mode = "RGB" if read_mode == "color" else "L"
    img = imread(image_path, mode=mode)
    if mode == "L":
        # Grayscale: replicate the single channel into an RGB-shaped array.
        temp = np.zeros(img.shape + (3,), dtype=np.uint8)
        temp[:, :, 0] = img
        temp[:, :, 1] = img.copy()
        temp[:, :, 2] = img.copy()
        img = temp
    if load_dims:
        # Remember original size so the result can be rescaled back later.
        img_WIDTH = img.shape[0]
        img_HEIGHT = img.shape[1]
        aspect_ratio = float(img_HEIGHT) / img_WIDTH
        img_width = args.img_size
        if maintain_aspect_ratio:
            img_height = int(img_width * aspect_ratio)
        else:
            img_height = args.img_size
    img = imresize(img, (img_width, img_height)).astype('float32')
    # RGB -> BGR (VGG was trained on BGR inputs).
    img = img[:, :, ::-1]
    # Subtract ImageNet channel means.
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68
    if K.image_dim_ordering() == "th":
        # Theano ordering: channels first.
        img = img.transpose((2, 0, 1)).astype('float32')
    # Add the batch dimension.
    img = np.expand_dims(img, axis=0)
    return img
def deprocess_image(x):
    """Invert preprocess_image: flat tensor -> displayable uint8 RGB image.

    Reshapes to (H, W, 3), adds back the ImageNet channel means,
    converts BGR -> RGB and clips to [0, 255].
    """
    if K.image_dim_ordering() == "th":
        x = x.reshape((3, img_width, img_height))
        x = x.transpose((1, 2, 0))
    else:
        x = x.reshape((img_width, img_height, 3))
    # Add back the means removed during preprocessing.
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    # BGR -> RGB.
    x = x[:, :, ::-1]
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def original_color_transform(content, generated, mask=None):
    """Transfer the chrominance of `content` onto `generated`.

    Both images are handled in YCbCr space: the Cb/Cr channels of the
    generated image are replaced by those of the content image, keeping
    only the stylized luminance. If `mask` is given, the replacement is
    restricted to pixels where mask == 1.
    """
    generated = fromimage(toimage(generated, mode='RGB'), mode='YCbCr')
    if mask is None:
        generated[:, :, 1:] = content[:, :, 1:]
    else:
        # Vectorized replacement of the original per-pixel Python loop:
        # select masked pixels in one boolean-indexing pass.
        sel = (mask == 1)
        generated[sel, 1:] = content[sel, 1:]
    generated = fromimage(toimage(generated, mode='YCbCr'), mode='RGB')
    return generated
def load_mask(mask_path, shape, return_mask_img=False):
    """Load a grayscale mask image, binarize it and normalize to [0, 1].

    `shape` is the 4-D tensor shape the mask must match (batch, ...).
    If `return_mask_img` is True the 2-D (width, height) mask is returned;
    otherwise the mask is broadcast across all channels into a tensor of
    shape `shape[1:]`.
    """
    if K.image_dim_ordering() == "th":
        _, channels, width, height = shape
    else:
        _, width, height, channels = shape

    mask = imread(mask_path, mode="L")  # grayscale
    mask = imresize(mask, (width, height)).astype('float32')

    # Binarize around mid-gray. BUG FIX: the original thresholds
    # (<= 127 -> 0, > 128 -> 255) left pixels equal to 128 untouched,
    # producing a non-binary mask value after normalization.
    mask[mask <= 127] = 0
    mask[mask >= 128] = 255

    peak = np.amax(mask)  # renamed from `max` to avoid shadowing the builtin
    mask /= peak

    if return_mask_img: return mask

    mask_shape = shape[1:]
    mask_tensor = np.empty(mask_shape)
    # Copy the 2-D mask into every channel.
    for i in range(channels):
        if K.image_dim_ordering() == "th":
            mask_tensor[i, :, :] = mask
        else:
            mask_tensor[:, :, i] = mask
    return mask_tensor
def pooling_func(x):
    """Apply a 2x2 stride-2 pooling layer; global `pooltype` selects
    average pooling (1) or max pooling (anything else)."""
    if pooltype == 1:
        return AveragePooling2D((2, 2), strides=(2, 2))(x)
    else:
        return MaxPooling2D((2, 2), strides=(2, 2))(x)
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))
style_reference_images = []
for style_path in style_image_paths:
style_reference_images.append(K.variable(preprocess_image(style_path)))
if K.image_dim_ordering() == 'th':
combination_image = K.placeholder((1, 3, img_width, img_height))
else:
combination_image = K.placeholder((1, img_width, img_height, 3))
image_tensors = [base_image]
for style_image_tensor in style_reference_images:
image_tensors.append(style_image_tensor)
image_tensors.append(combination_image)
nb_tensors = len(image_tensors)
nb_style_images = nb_tensors - 2
input_tensor = K.concatenate(image_tensors, axis=0)
if K.image_dim_ordering() == "th":
shape = (nb_tensors, 3, img_width, img_height)
else:
shape = (nb_tensors, img_width, img_height, 3)
ip = Input(tensor=input_tensor, batch_shape=shape)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv4_4', padding='same')(x)
x = pooling_func(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_1', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_2', padding='same')(x)
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_3', padding='same')(x)
if args.model == "vgg19":
x = Convolution2D(512, (3, 3), activation='relu', name='conv5_4', padding='same')(x)
x = pooling_func(x)
model = Model(ip, x)
if K.image_dim_ordering() == "th":
if args.model == "vgg19":
weights = get_file('vgg19_weights_th_dim_ordering_th_kernels_notop.h5', TH_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_th_dim_ordering_th_kernels_notop.h5', THEANO_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
if args.model == "vgg19":
weights = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_19_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
else:
weights = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5', TF_WEIGHTS_PATH_NO_TOP, cache_subdir='models')
model.load_weights(weights)
if K.backend() == 'tensorflow' and K.image_dim_ordering() == "th":
warnings.warn('You are using the TensorFlow backend, yet you '
'are using the Theano '
'image dimension ordering convention '
'(`image_dim_ordering="th"`). '
'For best performance, set '
'`image_dim_ordering="tf"` in '
'your Keras config '
'at ~/.keras/keras.json.')
convert_all_kernels_in_model(model)
print('Model loaded.')
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
shape_dict = dict([(layer.name, layer.output_shape) for layer in model.layers])
def gram_matrix(x):
    """Return the Gram matrix (channel-by-channel feature correlations)
    of a 3-D feature tensor, flattened so channels index the rows."""
    assert K.ndim(x) == 3
    if K.image_dim_ordering() == "th":
        features = K.batch_flatten(x)
    else:
        # Move channels first before flattening so rows are channels.
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    gram = K.dot(features, K.transpose(features))
    return gram
def style_loss(style, combination, mask_path=None, nb_channels=None):
    """Gatys-style loss: squared distance between the Gram matrices of the
    style features and the combination features, with optional style/content
    masks restricting where the style is applied."""
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3

    if content_mask_path is not None:
        # Mask the combination features with the (gradient-stopped) content mask.
        content_mask = K.variable(load_mask(content_mask_path, nb_channels))
        combination = combination * K.stop_gradient(content_mask)
        del content_mask

    if mask_path is not None:
        # Mask the style features; also mask the combination unless a
        # content mask already did.
        style_mask = K.variable(load_mask(mask_path, nb_channels))
        style = style * K.stop_gradient(style_mask)
        if content_mask_path is None:
            combination = combination * K.stop_gradient(style_mask)
        del style_mask

    S = gram_matrix(style)
    C = gram_matrix(combination)
    channels = 3
    size = img_width * img_height
    # Standard normalization from Gatys et al.
    return K.sum(K.square(S - C)) / (4. * (channels ** 2) * (size ** 2))
def content_loss(base, combination):
    """Squared-error content loss with a normalization selected by
    args.content_loss_type (0: none, 1: 1/(2*sqrt(C)*sqrt(HW)), 2: 1/(C*HW))."""
    channel_dim = 0 if K.image_dim_ordering() == "th" else -1

    try:
        channels = K.int_shape(base)[channel_dim]
    except TypeError:
        # Static shape unavailable; fall back to a symbolic shape.
        channels = K.shape(base)[channel_dim]
    size = img_width * img_height

    if args.content_loss_type == 1:
        multiplier = 1. / (2. * (channels ** 0.5) * (size ** 0.5))
    elif args.content_loss_type == 2:
        multiplier = 1. / (channels * size)
    else:
        multiplier = 1.

    return multiplier * K.sum(K.square(combination - base))
def total_variation_loss(x):
    """Total-variation regularizer: penalizes differences between
    neighboring pixels to keep the generated image locally smooth."""
    assert K.ndim(x) == 4
    if K.image_dim_ordering() == 'th':
        a = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, 1:, :img_height - 1])
        b = K.square(x[:, :, :img_width - 1, :img_height - 1] - x[:, :, :img_width - 1, 1:])
    else:
        a = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, 1:, :img_height - 1, :])
        b = K.square(x[:, :img_width - 1, :img_height - 1, :] - x[:, :img_width - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))
loss = K.variable(0.)
layer_features = outputs_dict[args.content_layer]
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[nb_tensors - 1, :, :, :]
loss += content_weight * content_loss(base_image_features,
combination_features)
style_masks = []
if style_masks_present:
style_masks = mask_paths
else:
style_masks = [None for _ in range(nb_style_images)]
channel_index = 1 if K.image_dim_ordering() == "th" else -1
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
layer_features = outputs_dict[layer_name]
shape = shape_dict[layer_name]
combination_features = layer_features[nb_tensors - 1, :, :, :]
style_reference_features = layer_features[1:nb_tensors - 1, :, :, :]
sl = []
for j in range(nb_style_images):
sl.append(style_loss(style_reference_features[j], combination_features, style_masks[j], shape))
for j in range(nb_style_images):
loss += (style_weights[j] / len(feature_layers)) * sl[j]
loss += total_variation_weight * total_variation_loss(combination_image)
grads = K.gradients(loss, combination_image)
outputs = [loss]
if type(grads) in {list, tuple}:
outputs += grads
else:
outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Run the compiled Keras function on a flat image vector and return
    (scalar loss, flat float64 gradient) for scipy's L-BFGS-B."""
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
    outs = f_outputs([x])
    loss_value = outs[0]
    # Gradients may come back as one array or as a list of arrays.
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
class Evaluator(object):
    """Adapter for scipy.optimize.fmin_l_bfgs_b, which wants separate
    loss and gradient callables: one call to eval_loss_and_grads computes
    both, loss() caches the gradient and grads() releases it."""

    def __init__(self):
        self.loss_value = None
        # BUG FIX: the original initialized `self.grads_values` (note the
        # extra 's'), leaving the attribute actually used below unset.
        self.grad_values = None

    def loss(self, x):
        """Compute loss at x; cache the gradient for the next grads() call."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradient cached by the matching loss() call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()
if "content" in args.init_image or "gray" in args.init_image:
x = preprocess_image(base_image_path, True, read_mode=read_mode)
elif "noise" in args.init_image:
x = np.random.uniform(0, 255, (1, img_width, img_height, 3)) - 128.
if K.image_dim_ordering() == "th":
x = x.transpose((0, 3, 1, 2))
else:
print("Using initial image : ", args.init_image)
x = preprocess_image(args.init_image, read_mode=read_mode)
if preserve_color:
content = imread(base_image_path, mode="YCbCr")
content = imresize(content, (img_width, img_height))
if color_mask_present:
if K.image_dim_ordering() == "th":
color_mask_shape = (None, None, img_width, img_height)
else:
color_mask_shape = (None, img_width, img_height, None)
color_mask = load_mask(args.color_mask, color_mask_shape, return_mask_img=True)
else:
color_mask = None
else:
color_mask = None
num_iter = args.num_iter
prev_min_val = -1
improvement_threshold = float(args.min_improvement)

# One L-BFGS-B round per iteration; save the intermediate image and
# early-stop when the relative improvement drops below the threshold.
for i in range(num_iter):
    print("Starting iteration %d of %d" % ((i + 1), num_iter))
    start_time = time.time()

    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(), fprime=evaluator.grads, maxfun=20)

    if prev_min_val == -1:
        prev_min_val = min_val  # first iteration: nothing to compare to

    improvement = (prev_min_val - min_val) / prev_min_val * 100

    print('Current loss value:', min_val, " Improvement : %0.3f" % improvement, "%")
    prev_min_val = min_val
    img = deprocess_image(x.copy())

    if preserve_color and content is not None:
        img = original_color_transform(content, img, mask=color_mask)

    if not rescale_image:
        img_ht = int(img_width * aspect_ratio)
        print("Rescaling Image to (%d, %d)" % (img_width, img_ht))
        img = imresize(img, (img_width, img_ht), interp=args.rescale_method)

    if rescale_image:
        print("Rescaling Image to (%d, %d)" % (img_WIDTH, img_HEIGHT))
        img = imresize(img, (img_WIDTH, img_HEIGHT), interp=args.rescale_method)

    fname = result_prefix + '_at_iteration_%d.png' % (i + 1)
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i + 1, end_time - start_time))

    # BUG FIX: `is not 0.0` is an identity test against a float literal and
    # is effectively always True; use value comparison so --min_improvement 0
    # actually disables early stopping.
    if improvement_threshold != 0.0:
        if improvement < improvement_threshold and improvement != 0.0:
            print("Improvement (%f) is less than improvement threshold (%f). Early stopping script." % (
                improvement, improvement_threshold))
            exit()
| true | true |
f7146499f43a1ececce716dc775d27d50a4ee29c | 846 | py | Python | test/test_item_option.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | test/test_item_option.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | test/test_item_option.py | gstingy/uc_python_api | 9a0bd3f6e63f616586681518e44fe37c6bae2bba | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_option import ItemOption
class TestItemOption(unittest.TestCase):
    """Generated unit-test stub for the ItemOption model (swagger-codegen)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testItemOption(self):
        """
        Construct and validate an ItemOption instance (stub, not yet implemented).
        """
        # FIXME: construct object with mandatory attributes with example values
        #model = ultracart.models.item_option.ItemOption()
        pass
if __name__ == '__main__':
unittest.main()
| 18.8 | 79 | 0.680851 |
from __future__ import absolute_import
import os
import sys
import unittest
import ultracart
from ultracart.rest import ApiException
from ultracart.models.item_option import ItemOption
class TestItemOption(unittest.TestCase):
    """Generated unit-test stub for the ItemOption model (swagger-codegen)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testItemOption(self):
        # Stub: construction/validation of ItemOption not yet implemented.
        pass
if __name__ == '__main__':
unittest.main()
| true | true |
f714679fa4b4036479edd5366153bd136b63a604 | 8,720 | py | Python | pta_sim/pint_sim.py | Hazboun6/pta_sim | cf8676e23056586ecb35a030dbaad45a1f985764 | [
"MIT"
] | 1 | 2019-05-22T10:35:49.000Z | 2019-05-22T10:35:49.000Z | pta_sim/pint_sim.py | Hazboun6/pta_sim | cf8676e23056586ecb35a030dbaad45a1f985764 | [
"MIT"
] | 1 | 2021-11-15T17:48:32.000Z | 2021-11-15T17:48:32.000Z | pta_sim/pint_sim.py | Hazboun6/pta_sim | cf8676e23056586ecb35a030dbaad45a1f985764 | [
"MIT"
] | 2 | 2019-05-23T13:55:53.000Z | 2021-06-23T13:15:22.000Z | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from pint.residuals import resids
import pint.toa as toa
from pint import models
__all__ = ['make_ideal',
'createfourierdesignmatrix_red',
'add_rednoise',
'add_dm_rednoise',
'add_efac',
'add_equad',
'add_ecorr']
def make_ideal(toas, model, iterations=2):
    """Shift a pint TOAs object so its residuals against `model` vanish.

    Repeats the subtraction `iterations` times because a single pass may
    leave small residuals after the TOA adjustment.
    """
    for _ in range(iterations):
        current = resids(toas, model)
        toas.adjust_TOAs(TimeDelta(-1.0 * current.time_resids))
def createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,
                                  logf=False, fmin=None, fmax=None,
                                  pshift=False, modes=None):
    """
    Construct fourier design matrix from eq 11 of Lentati et al, 2013

    Parameters
    ----------
    toas : array
        Vector of time series in seconds.
    nmodes : int
        Number of fourier coefficients to use.
    Tspan : float
        Option to us some other Tspan [s]
    logf : bool
        Use log frequency spacing.
    fmin : float
        Lower sampling frequency.
    fmax : float
        Upper sampling frequency.
    pshift : bool
        Option to add random phase shift.
    modes : array
        Option to provide explicit list or array of sampling frequencies.

    Returns
    -------
    F : array
        fourier design matrix, [NTOAs x 2 nfreqs].
    f : array
        Sampling frequencies, [2 nfreqs].
    """
    span = Tspan if Tspan is not None else toas.max() - toas.min()

    # Choose the sampling frequencies.
    if modes is not None:
        # Caller supplied them explicitly.
        f = modes
        nmodes = len(f)
    elif fmin is None and fmax is None and not logf:
        # Default: harmonics of 1/span, so partially overlapping data sets
        # share identical frequencies.
        f = np.arange(1, nmodes + 1) / span
    else:
        lo = 1 / span if fmin is None else fmin
        hi = nmodes / span if fmax is None else fmax
        if logf:
            f = np.logspace(np.log10(lo), np.log10(hi), nmodes)
        else:
            f = np.linspace(lo, hi, nmodes)

    # Optional random phase offset per basis function.
    if pshift:
        phase = np.random.uniform(0.0, 2 * np.pi, nmodes)
    else:
        phase = np.zeros(nmodes)

    # Interleave sine (even columns) and cosine (odd columns) modes.
    arg = 2 * np.pi * toas[:, None] * f[None, :] + phase[None, :]
    F = np.zeros((len(toas), 2 * nmodes))
    F[:, ::2] = np.sin(arg)
    F[:, 1::2] = np.cos(arg)

    return F, np.repeat(f, 2)
def add_rednoise(TOAs, A, gamma, components=30,
                 seed=None, modes=None, Tspan=None):
    """Add red noise with P(f) = A^2 / (12 pi^2) (f * year)^-gamma,
    using `components` Fourier bases.
    Optionally take a pseudorandom-number-generator seed."""
    day_in_sec = 86400
    year_in_sec = 365.25 * day_in_sec
    fyr = 1 / year_in_sec

    if seed is not None:
        np.random.seed(seed)
    if modes is not None:
        print('Must use linear spacing.')

    toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec  # days -> sec
    # NOTE(review): a caller-supplied Tspan is overwritten here -- confirm intent.
    Tspan = toas.max() - toas.min()

    # BUG FIX: pass `components` through; previously the argument was
    # accepted but ignored (the design matrix always used its default).
    F, freqs = createfourierdesignmatrix_red(toas, nmodes=components,
                                             Tspan=Tspan, modes=modes)
    # Power-law prior evaluated at the sampling frequencies.
    prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3

    y = np.sqrt(prior) * np.random.randn(freqs.size)
    dt = np.dot(F, y) * u.s
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,
                    seed=None, modes=None, Tspan=None, useDM=False):
    """Add chromatic red noise with P(f) = A^2 / (12 pi^2) (f * year)^-gamma,
    scaled per-TOA by (rf_ref / radio_freq)^2, using `components` Fourier bases.
    If `useDM` is True, rf_ref is replaced by the DM constant 4.15e3.
    Optionally take a pseudorandom-number-generator seed."""
    radio_freqs = TOAs.table['freq']
    if useDM:
        rf_ref = 4.15e3
    chrom = rf_ref**2 / radio_freqs**2  # per-TOA chromatic scaling

    day_in_sec = 86400
    year_in_sec = 365.25 * day_in_sec
    fyr = 1 / year_in_sec

    if seed is not None:
        np.random.seed(seed)

    toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec  # days -> sec
    # NOTE(review): a caller-supplied Tspan is overwritten here -- confirm intent.
    Tspan = toas.max() - toas.min()

    # BUG FIX: pass `components` through; previously the argument was
    # accepted but ignored (the design matrix always used its default).
    F, freqs = createfourierdesignmatrix_red(toas, nmodes=components,
                                             Tspan=Tspan, modes=modes)
    prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3

    y = np.sqrt(prior) * np.random.randn(freqs.size)
    dt = chrom.quantity.value * np.dot(F, y) * u.s
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_equad(TOAs, equad, flagid=None, flags=None, seed=None):
    """Add white quadrature noise of rms `equad` [s], optionally with a
    different value per backend flag (`flagid`/`flags`).
    Optionally take a pseudorandom-number-generator seed."""
    if seed is not None:
        np.random.seed(seed)

    equadvec = np.zeros(TOAs.ntoas)

    # equad must be scalar when no per-flag values are supplied.
    if flags is None:
        if not np.isscalar(equad):
            raise ValueError('ERROR: If flags is None, equad must be a scalar')
        else:
            equadvec = np.ones(TOAs.ntoas) * equad

    if flags is not None and flagid is not None and not np.isscalar(equad):
        if len(equad) == len(flags):
            # BUG FIX: look up the requested `flagid`; the original
            # hard-coded 'f' and ignored the parameter (add_ecorr already
            # used flagid correctly). Also hoist the array build out of
            # the per-flag loop.
            toa_flagvals = np.array([f[flagid] for f
                                     in TOAs.table['flags'].data])
            for ct, flag in enumerate(flags):
                equadvec[toa_flagvals == flag] = equad[ct]

    equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)
    TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))
def add_efac(TOAs, efac, flagid=None, flags=None, seed=None):
    """Add white noise scaled by `efac` times each TOA's uncertainty,
    optionally with a different value per backend flag (`flagid`/`flags`).
    Optionally take a pseudorandom-number-generator seed."""
    if seed is not None:
        np.random.seed(seed)

    efacvec = np.zeros(TOAs.ntoas)

    # efac must be scalar when no per-flag values are supplied.
    if flags is None:
        if not np.isscalar(efac):
            raise ValueError('ERROR: If flags is None, efac must be a scalar')
        else:
            efacvec = np.ones(TOAs.ntoas) * efac

    if flags is not None and flagid is not None and not np.isscalar(efac):
        if len(efac) == len(flags):
            # BUG FIX: look up the requested `flagid`; the original
            # hard-coded 'f' and ignored the parameter (add_ecorr already
            # used flagid correctly). Also hoist the array build out of
            # the per-flag loop.
            toa_flagvals = np.array([f[flagid] for f
                                     in TOAs.table['flags'].data])
            for ct, flag in enumerate(flags):
                efacvec[toa_flagvals == flag] = efac[ct]

    dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)
    TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def quantize(times, flags=None, dt=1.0):
    """Group TOAs into observing epochs and build the epoch design matrix.

    TOAs are sorted; a new epoch starts whenever a TOA is at least `dt`
    away from the *first* TOA of the current epoch. Returns the epoch
    mean times, optionally the first flag value of each epoch, and the
    (ntoas x nepochs) 0/1 membership matrix U.
    """
    order = np.argsort(times)

    # Partition sorted indices into epochs.
    epochs = [[order[0]]]
    epoch_start = times[order[0]]
    for idx in order[1:]:
        if times[idx] - epoch_start < dt:
            epochs[-1].append(idx)
        else:
            epochs.append([idx])
            epoch_start = times[idx]

    avetoas = np.array([np.mean(times[members]) for members in epochs], 'd')

    # Membership matrix: U[i, j] = 1 iff TOA i belongs to epoch j.
    U = np.zeros((len(times), len(epochs)), 'd')
    for col, members in enumerate(epochs):
        U[members, col] = 1

    if flags is not None:
        aveflags = np.array([flags[members[0]] for members in epochs])
        return avetoas, aveflags, U
    return avetoas, U
def add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):
    """Add correlated quadrature noise of rms `ecorr` [s],
    with coarse-graining time `coarsegrain` [days].
    Optionally take a pseudorandom-number-generator seed."""
    if seed is not None:
        np.random.seed(seed)

    times = np.array(TOAs.table['tdbld'], dtype='float64')
    # Group TOAs into epochs; with flags, also get each epoch's flag value.
    # NOTE(review): if flags is given but flagid is None, neither branch
    # runs and `t`/`U` are undefined below (NameError) -- confirm callers
    # always pass both.
    if flags is None:
        t, U = quantize(times, dt=coarsegrain.to('day').value)
    elif flags is not None and flagid is not None:
        flagvals = np.array([f[flagid] for f in TOAs.table['flags'].data])
        t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)

    # default ecorr value (one entry per epoch)
    ecorrvec = np.zeros(len(t))

    # ecorr must be scalar when no per-flag values are supplied.
    if flags is None:
        if not np.isscalar(ecorr):
            raise ValueError('ERROR: If flags is None, ecorr must be a scalar')
        else:
            ecorrvec = np.ones(len(t)) * ecorr

    if flags is not None and flagid is not None and not np.isscalar(ecorr):
        if len(ecorr) == len(flags):
            # `f` holds the per-epoch flag values returned by quantize.
            for ct, flag in enumerate(flags):
                ind = flag == np.array(f)
                ecorrvec[ind] = ecorr[ct]

    # One random draw per epoch, broadcast to member TOAs through U.
    ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s
    TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))
| 31.142857 | 84 | 0.592431 |
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from pint.residuals import resids
import pint.toa as toa
from pint import models
__all__ = ['make_ideal',
'createfourierdesignmatrix_red',
'add_rednoise',
'add_dm_rednoise',
'add_efac',
'add_equad',
'add_ecorr']
def make_ideal(toas, model, iterations=2):
for ii in range(iterations):
rs=resids(toas, model)
toas.adjust_TOAs(TimeDelta(-1.0*rs.time_resids))
def createfourierdesignmatrix_red(toas, nmodes=30, Tspan=None,
logf=False, fmin=None, fmax=None,
pshift=False, modes=None):
T = Tspan if Tspan is not None else toas.max() - toas.min()
if modes is not None:
nmodes = len(modes)
f = modes
elif fmin is None and fmax is None and not logf:
f = 1.0 * np.arange(1, nmodes + 1) / T
else:
if fmin is None:
fmin = 1 / T
if fmax is None:
fmax = nmodes / T
if logf:
f = np.logspace(np.log10(fmin), np.log10(fmax), nmodes)
else:
f = np.linspace(fmin, fmax, nmodes)
ranphase = (np.random.uniform(0.0, 2 * np.pi, nmodes)
if pshift else np.zeros(nmodes))
Ffreqs = np.repeat(f, 2)
N = len(toas)
F = np.zeros((N, 2 * nmodes))
F[:,::2] = np.sin(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
F[:,1::2] = np.cos(2*np.pi*toas[:,None]*f[None,:] +
ranphase[None,:])
return F, Ffreqs
def add_rednoise(TOAs, A, gamma, components=30,
seed=None, modes=None, Tspan=None):
nobs = len(TOAs.table)
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
if modes is not None:
print('Must use linear spacing.')
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_dm_rednoise(TOAs, A, gamma, components=30, rf_ref=1400,
seed=None, modes=None, Tspan=None, useDM=False):
nobs = len(TOAs.table)
radio_freqs = TOAs.table['freq']
if useDM:
rf_ref = 4.15e3
chrom = rf_ref**2 / radio_freqs**2
day_in_sec = 86400
year_in_sec = 365.25*day_in_sec
fyr = 1 / year_in_sec
if seed is not None:
np.random.seed(seed)
toas = np.array(TOAs.table['tdbld'], dtype='float64') * day_in_sec
Tspan = toas.max() - toas.min()
F, freqs = createfourierdesignmatrix_red(toas,Tspan=Tspan,modes=modes)
prior = A**2 * (freqs/fyr)**(-gamma) / (12 * np.pi**2 * Tspan) * year_in_sec**3
y = np.sqrt(prior) * np.random.randn(freqs.size)
dt = chrom.quantity.value * np.dot(F,y) * u.s
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def add_equad(TOAs, equad, flagid=None, flags=None, seed=None):
if seed is not None:
np.random.seed(seed)
equadvec = np.zeros(TOAs.ntoas)
if flags is None:
if not np.isscalar(equad):
raise ValueError('ERROR: If flags is None, equad must be a scalar')
else:
equadvec = np.ones(TOAs.ntoas) * equad
if flags is not None and flagid is not None and not np.isscalar(equad):
if len(equad) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
equadvec[ind] = equad[ct]
equadvec = equadvec * u.s * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(equadvec.to('day')))
def add_efac(TOAs, efac, flagid=None, flags=None, seed=None):
if seed is not None:
np.random.seed(seed)
efacvec = np.zeros(TOAs.ntoas)
if flags is None:
if not np.isscalar(efac):
raise ValueError('ERROR: If flags is None, efac must be a scalar')
else:
efacvec = np.ones(TOAs.ntoas) * efac
if flags is not None and flagid is not None and not np.isscalar(efac):
if len(efac) == len(flags):
for ct, flag in enumerate(flags):
ind = flag == np.array([f['f'] for f
in TOAs.table['flags'].data])
efacvec[ind] = efac[ct]
dt = efacvec * TOAs.get_errors().to('s') * np.random.randn(TOAs.ntoas)
TOAs.adjust_TOAs(TimeDelta(dt.to('day')))
def quantize(times, flags=None, dt=1.0):
isort = np.argsort(times)
bucket_ref = [times[isort[0]]]
bucket_ind = [[isort[0]]]
for i in isort[1:]:
if times[i] - bucket_ref[-1] < dt:
bucket_ind[-1].append(i)
else:
bucket_ref.append(times[i])
bucket_ind.append([i])
avetoas = np.array([np.mean(times[l]) for l in bucket_ind],'d')
if flags is not None:
aveflags = np.array([flags[l[0]] for l in bucket_ind])
U = np.zeros((len(times),len(bucket_ind)),'d')
for i,l in enumerate(bucket_ind):
U[l,i] = 1
if flags is not None:
return avetoas, aveflags, U
else:
return avetoas, U
def add_ecorr(TOAs, ecorr, flagid=None, flags=None, coarsegrain=1*u.s, seed=None):
    """Add epoch-correlated white noise (ECORR) to the TOAs in place.

    TOAs falling within ``coarsegrain`` of each other share a single
    Gaussian deviate scaled by the ECORR amplitude, so the noise is
    fully correlated within an epoch and independent between epochs.

    :param TOAs: PINT TOAs object to perturb (modified in place).
    :param ecorr: ECORR amplitude [s]; a scalar, or a sequence matching
        ``flags`` for per-backend amplitudes.
    :param flagid: name of the TOA flag used to select backends.
    :param flags: flag values to match, one per entry of ``ecorr``.
    :param coarsegrain: epoch width used to group TOAs (astropy Quantity).
    :param seed: optional RNG seed for reproducibility.
    """
    if seed is not None:
        np.random.seed(seed)
    # TDB arrival times in days, used to bin TOAs into epochs.
    times = np.array(TOAs.table['tdbld'], dtype='float64')
    if flags is None:
        t, U = quantize(times, dt=coarsegrain.to('day').value)
    elif flags is not None and flagid is not None:
        flagvals = np.array([f[flagid] for f in TOAs.table['flags'].data])
        t, f, U = quantize(times, flagvals, dt=coarsegrain.to('day').value)
    # NOTE(review): if flags is not None but flagid is None, neither
    # branch above runs and `t`/`U` stay unbound -- NameError below.
    ecorrvec = np.zeros(len(t))
    if flags is None:
        if not np.isscalar(ecorr):
            raise ValueError('ERROR: If flags is None, ecorr must be a scalar')
        else:
            ecorrvec = np.ones(len(t)) * ecorr
    if flags is not None and flagid is not None and not np.isscalar(ecorr):
        if len(ecorr) == len(flags):
            for ct, flag in enumerate(flags):
                # Select the epochs whose first TOA carries this flag value
                # (`f` is the per-epoch flag array returned by quantize).
                ind = flag == np.array(f)
                ecorrvec[ind] = ecorr[ct]
    # One random deviate per epoch, mapped back onto the TOAs through U.
    ecorrvec = np.dot(U * ecorrvec, np.random.randn(U.shape[1])) * u.s
    TOAs.adjust_TOAs(TimeDelta(ecorrvec.to('day')))
| true | true |
f71467a510667cf3558e0f2dd126bccf19a330a0 | 8,753 | py | Python | data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/137656/blundercheck-master/combine/data_prep/prepare_pgmodel.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
from pandas import *
from numpy import *
from djeval import *
import csv, code
import pickle as pickle
from sklearn.externals import joblib
NUM_GAMES=50000
def shell():
    """Drop into an interactive Python console for debugging.

    The console is given a *copy* of the module globals (plus any
    locals).  The previous version handed `code.InteractiveConsole` the
    live ``globals()`` dict and updated it with ``locals()``, mutating
    the module namespace as a side effect, while also shadowing the
    ``vars`` builtin.  Copying first keeps the module namespace intact.
    """
    namespace = dict(globals())
    namespace.update(locals())
    console = code.InteractiveConsole(namespace)
    console.interact()
msg("Hi! Reading eheaders")
eheaders_filename = '/data/eheaders.p'
# Pickle files must be opened in *binary* mode under Python 3; opening
# with 'r' makes pickle.load fail on the text stream.  The context
# manager also guarantees the handle is closed after loading.
with open(eheaders_filename, 'rb') as eheaders_file:
    eheaders = pickle.load(eheaders_file)
elos = eheaders['elos']            # {(gamenum, side): elo}
result = eheaders['result']
checkmate = eheaders['checkmate']
openings = eheaders['openings']    # gamenum -> opening name
ocount = eheaders['opening_count']  # opening name -> occurrence count
msg("Hi! Reading crunched movescores from %s" % sys.argv[1])
crunched_path = sys.argv[1]
crunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])
do_gb = False
if do_gb:
msg("Hi! Reading GB scores from %s" % sys.argv[2])
gb_path = sys.argv[2]
gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])
msg("Hi! Reading depthstats")
depthstats_path = '/data/depthstats.csv'
columns = [
'gamenum',
'side',
'mean_depth',
'mean_seldepth',
'mean_depths_agreeing_ratio',
'mean_deepest_agree_ratio',
'pct_sanemoves',
'gamelength',
'mean_num_bestmoves',
'mean_num_bestmove_changes',
'mean_bestmove_depths_agreeing',
'mean_deepest_change',
'mean_deepest_change_ratio',
]
depthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
depthstats_df = depthstats_df.set_index(['gamenum', 'side'])
# we have the gamelength column in another df, drop it here to avoid conflicts
depthstats_df.drop('gamelength', axis=1, inplace=True)
do_material = True
if do_material:
msg("Hi! Reading material")
material_path = '/data/material.csv'
columns = [
'gamenum',
'material_break_0',
'material_break_1',
'material_break_2',
'material_break_3',
'material_break_4',
'opening_length',
'midgame_length',
'endgame_length',
'mean_acwsa',
'mean_acwsa_0',
'mean_acwsa_1',
'mean_acwsa_2',
'mean_acwsa_3',
'mean_acwsa_4',
'mean_acwsa_5',
'mean_acwsa_6',
'mean_acwsa_7',
'mean_acwsa_8',
'mean_acwsa_9',
]
material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
material_df = material_df.set_index(['gamenum'])
material_df = material_df.reindex(list(range(1, NUM_GAMES+1)))
material_df = material_df.fillna(material_df.mean())
msg("Reading ELOscored data")
eloscored_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
'elopath_min',
'elopath_max',
]
eloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)
eloscored_df = eloscored_df.set_index(['gamenum'])
msg("Reading ELOscored data 4")
eloscored4_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]
eloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)
eloscored4_df = eloscored4_df.set_index(['gamenum'])
msg("Reading ELOscored data 10")
eloscored10_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]
eloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)
eloscored10_df = eloscored10_df.set_index(['gamenum'])
do_movemodel=True
if do_movemodel:
msg("Hi! Reading moveaggs")
move_aggs = joblib.load('/data/move_aggs.p')
move_aggs.fillna(move_aggs.mean(), inplace=True)
move_aggs = move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
msg("Hi! Reading wmoveaggs")
wmove_aggs = joblib.load('/data/wmove_aggs.p')
wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)
wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)
wmove_aggs = wmove_aggs['moveelo_weighted']
do_elochunk = False
if do_elochunk:
ch_agg_df = joblib.load('/data/chunk_aggs.p')
ch_agg_df.index = ch_agg_df.index.droplevel('elo')
ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]
msg("Hi! Setting up playergame rows")
if do_elochunk:
elorange_cols = list(ch_agg_df.columns.values)
msg("elorange cols are %s" % elorange_cols)
msg('Preparing ELO df')
elo_rows = [[x[0][0], x[0][1], x[1]] for x in list(elos.items())]
elo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])
elo_df.set_index(['gamenum','side'], inplace=True)
msg('Joining DFs')
supplemental_dfs = [depthstats_df, elo_df, crunched_df]
if do_movemodel:
supplemental_dfs.extend([move_aggs, wmove_aggs])
if do_elochunk:
supplemental_dfs.append(ch_agg_df)
mega_df = concat(supplemental_dfs, axis=1)
if do_material:
mega_df = mega_df.join(material_df, how='outer')
mega_df = mega_df.join(eloscored_df, how='outer')
mega_df = mega_df.join(eloscored4_df, how='outer')
mega_df = mega_df.join(eloscored10_df, how='outer')
if do_gb:
mega_df = mega_df.join(gb_df, how='outer')
yy_df = mega_df
msg("hi, columns are %s" % yy_df.columns)
# TODO confirm that all columns are there
def opening_feature(opening):
    """Collapse infrequent openings into catch-all categories.

    Openings seen fewer than 20 times map to 'rare', fewer than 200 to
    'uncommon'; frequent openings keep their own name.  Frequencies come
    from the module-level ``ocount`` table.
    """
    frequency = ocount[opening]
    if frequency < 20:
        return 'rare'
    if frequency < 200:
        return 'uncommon'
    return opening
msg("Hi! Computing additional features")
yy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]
yy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]
yy_df['any_grit'] = (yy_df['grit'] > 0)
yy_df['major_grit'] = (yy_df['grit'] > 5)
yy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53
yy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)
yy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)
yy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']
yy_df['noblunders'] = (yy_df['blunderrate'] == 0)
yy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)
yy_df['early_lead'] = yy_df['early_lead'].clip(0,100)
yy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)
yy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)
# prepare opponent_df with selected info about opponent
opponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio', 'pct_sanemoves']
if do_elochunk:
opponent_columns.extend(elorange_cols)
opponent_df = yy_df[opponent_columns]
opponent_df = opponent_df.reset_index()
opponent_df['side'] = opponent_df['side'] * -1
opponent_df.set_index(['gamenum', 'side'], inplace=True)
opponent_df.columns = ['opponent_' + x for x in opponent_df.columns]
yy_df = concat([yy_df, opponent_df], axis=1)
# more derived columns that use opponent comparisons
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)
yy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)
yy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)
yy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)
yy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])
yy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']
msg("Hi! Computing dummy variables")
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features]).astype(np.int8)
yy_df = yy_df.join(dummies)
# fill in missing values
msg("Hi! Filling in missing values")
full_index = pandas.MultiIndex.from_product([list(range(1,NUM_GAMES + 1)), [1,-1]], names=['gamenum', 'side'])
yy_df = yy_df.reindex(full_index)
yy_elo = yy_df['elo'].copy(True)
yy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)
yy_df.fillna(False, inplace=True)
yy_df['elo'] = yy_elo
# stupid patch for some stupid opening feature that got assigned to False by fillna ?!!?!?!?
yy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'
msg("Hi! Writing yy_df to disk")
yy_df.to_pickle(sys.argv[3])
msg("Column counts are:")
counts = yy_df.count(axis=0)
print(counts)
| 35.294355 | 280 | 0.69302 |
from pandas import *
from numpy import *
from djeval import *
import csv, code
import pickle as pickle
from sklearn.externals import joblib
NUM_GAMES=50000
def shell():
vars = globals()
vars.update(locals())
shell = code.InteractiveConsole(vars)
shell.interact()
msg("Hi! Reading eheaders")
eheaders_filename = '/data/eheaders.p'
eheaders_file = open(eheaders_filename, 'r')
eheaders = pickle.load(eheaders_file)
elos = eheaders['elos']
result = eheaders['result']
checkmate = eheaders['checkmate']
openings = eheaders['openings']
ocount = eheaders['opening_count']
msg("Hi! Reading crunched movescores from %s" % sys.argv[1])
crunched_path = sys.argv[1]
crunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])
do_gb = False
if do_gb:
msg("Hi! Reading GB scores from %s" % sys.argv[2])
gb_path = sys.argv[2]
gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])
msg("Hi! Reading depthstats")
depthstats_path = '/data/depthstats.csv'
columns = [
'gamenum',
'side',
'mean_depth',
'mean_seldepth',
'mean_depths_agreeing_ratio',
'mean_deepest_agree_ratio',
'pct_sanemoves',
'gamelength',
'mean_num_bestmoves',
'mean_num_bestmove_changes',
'mean_bestmove_depths_agreeing',
'mean_deepest_change',
'mean_deepest_change_ratio',
]
depthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
depthstats_df = depthstats_df.set_index(['gamenum', 'side'])
depthstats_df.drop('gamelength', axis=1, inplace=True)
do_material = True
if do_material:
msg("Hi! Reading material")
material_path = '/data/material.csv'
columns = [
'gamenum',
'material_break_0',
'material_break_1',
'material_break_2',
'material_break_3',
'material_break_4',
'opening_length',
'midgame_length',
'endgame_length',
'mean_acwsa',
'mean_acwsa_0',
'mean_acwsa_1',
'mean_acwsa_2',
'mean_acwsa_3',
'mean_acwsa_4',
'mean_acwsa_5',
'mean_acwsa_6',
'mean_acwsa_7',
'mean_acwsa_8',
'mean_acwsa_9',
]
material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
material_df = material_df.set_index(['gamenum'])
material_df = material_df.reindex(list(range(1, NUM_GAMES+1)))
material_df = material_df.fillna(material_df.mean())
msg("Reading ELOscored data")
eloscored_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
'elopath_min',
'elopath_max',
]
eloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)
eloscored_df = eloscored_df.set_index(['gamenum'])
msg("Reading ELOscored data 4")
eloscored4_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]
eloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)
eloscored4_df = eloscored4_df.set_index(['gamenum'])
msg("Reading ELOscored data 10")
eloscored10_cols = [
'gamenum',
'final_elo',
'final_ply',
'final_num_games',
'final_elo_stdev',
]
eloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]
eloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)
eloscored10_df = eloscored10_df.set_index(['gamenum'])
do_movemodel=True
if do_movemodel:
msg("Hi! Reading moveaggs")
move_aggs = joblib.load('/data/move_aggs.p')
move_aggs.fillna(move_aggs.mean(), inplace=True)
move_aggs = move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']]
msg("Hi! Reading wmoveaggs")
wmove_aggs = joblib.load('/data/wmove_aggs.p')
wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)
wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)
wmove_aggs = wmove_aggs['moveelo_weighted']
do_elochunk = False
if do_elochunk:
ch_agg_df = joblib.load('/data/chunk_aggs.p')
ch_agg_df.index = ch_agg_df.index.droplevel('elo')
ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]
msg("Hi! Setting up playergame rows")
if do_elochunk:
elorange_cols = list(ch_agg_df.columns.values)
msg("elorange cols are %s" % elorange_cols)
msg('Preparing ELO df')
elo_rows = [[x[0][0], x[0][1], x[1]] for x in list(elos.items())]
elo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])
elo_df.set_index(['gamenum','side'], inplace=True)
msg('Joining DFs')
supplemental_dfs = [depthstats_df, elo_df, crunched_df]
if do_movemodel:
supplemental_dfs.extend([move_aggs, wmove_aggs])
if do_elochunk:
supplemental_dfs.append(ch_agg_df)
mega_df = concat(supplemental_dfs, axis=1)
if do_material:
mega_df = mega_df.join(material_df, how='outer')
mega_df = mega_df.join(eloscored_df, how='outer')
mega_df = mega_df.join(eloscored4_df, how='outer')
mega_df = mega_df.join(eloscored10_df, how='outer')
if do_gb:
mega_df = mega_df.join(gb_df, how='outer')
yy_df = mega_df
msg("hi, columns are %s" % yy_df.columns)
def opening_feature(opening):
if ocount[opening] < 20:
return 'rare'
if ocount[opening] < 200:
return 'uncommon'
return opening
msg("Hi! Computing additional features")
yy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]
yy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]
yy_df['any_grit'] = (yy_df['grit'] > 0)
yy_df['major_grit'] = (yy_df['grit'] > 5)
yy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53
yy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)
yy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)
yy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']
yy_df['noblunders'] = (yy_df['blunderrate'] == 0)
yy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)
yy_df['early_lead'] = yy_df['early_lead'].clip(0,100)
yy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)
yy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)
opponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio', 'pct_sanemoves']
if do_elochunk:
opponent_columns.extend(elorange_cols)
opponent_df = yy_df[opponent_columns]
opponent_df = opponent_df.reset_index()
opponent_df['side'] = opponent_df['side'] * -1
opponent_df.set_index(['gamenum', 'side'], inplace=True)
opponent_df.columns = ['opponent_' + x for x in opponent_df.columns]
yy_df = concat([yy_df, opponent_df], axis=1)
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)
yy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)
yy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)
yy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)
yy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])
yy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']
msg("Hi! Computing dummy variables")
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features]).astype(np.int8)
yy_df = yy_df.join(dummies)
msg("Hi! Filling in missing values")
full_index = pandas.MultiIndex.from_product([list(range(1,NUM_GAMES + 1)), [1,-1]], names=['gamenum', 'side'])
yy_df = yy_df.reindex(full_index)
yy_elo = yy_df['elo'].copy(True)
yy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)
yy_df.fillna(False, inplace=True)
yy_df['elo'] = yy_elo
yy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'
msg("Hi! Writing yy_df to disk")
yy_df.to_pickle(sys.argv[3])
msg("Column counts are:")
counts = yy_df.count(axis=0)
print(counts)
| true | true |
f71467e65dae3f982a9af5237ac320ca8270123d | 9,283 | py | Python | src/transformers/models/mctct/configuration_mctct.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/mctct/configuration_mctct.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | src/transformers/models/mctct/configuration_mctct.py | shangz-ai/transformers | 75259b44bf2e2b98b5a4d431fb400b7190342a01 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""M-CTC-T model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class MCTCTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MCTCTModel`]. It is used to instantiate an
    M-CTC-T model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the M-CTC-T
    [speechbrain/m-ctc-t-large](https://huggingface.co/speechbrain/m-ctc-t-large) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 8065):
            Vocabulary size of the M-CTC-T model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MCTCTModel`].
        hidden_size (`int`, *optional*, defaults to 1536):
            Dimension of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 36):
            Number of hidden layers in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 6144):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        attention_head_dim (`int`, *optional*, defaults to 384):
            Dimensions of each attention head for each attention layer in the Transformer encoder.
        max_position_embeddings (`int`, *optional*, defaults to 920):
            The maximum sequence length that this model might ever be used with (after log-mel spectrogram extraction).
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        layerdrop (`float`, *optional*, defaults to 0.3):
            The probability of dropping an encoder layer during training. The default 0.3 value is used in the original
            implementation.
        hidden_act (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.3):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.3):
            The dropout ratio for the attention probabilities.
        pad_token_id (`int`, *optional*, defaults to 1):
            The tokenizer index of the pad token.
        bos_token_id (`int`, *optional*, defaults to 0):
            The tokenizer index of the bos token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The tokenizer index of the eos token.
        conv_glu_dim (`int`, *optional*, defaults to 1):
            The dimension of the output of the `Conv1dSubsampler` layer in which GLU is applied on. Though the original
            Flashlight code uses the value of 2, here it's adapted to 1 due to transposition differences.
        conv_dropout (`float`, *optional*, defaults to 0.3):
            The probability of randomly dropping the `Conv1dSubsampler` layer during training.
        num_conv_layers (`int`, *optional*, defaults to 1):
            Number of convolution layers before applying transformer encoder layers.
        conv_kernel (`List[int]`, *optional*, defaults to `[7]`):
            The kernel size of the 1D convolution applied before transformer layers. `len(conv_kernel)` must be equal
            to `num_conv_layers`.
        conv_stride (`List[int]`, *optional*, defaults to `[3]`):
            The stride length of the 1D convolution applied before transformer layers. `len(conv_stride)` must be equal
            to `num_conv_layers`.
        input_feat_per_channel (`int`, *optional*, defaults to 80):
            Feature dimensions of the channels of the input to the Conv1D layer.
        input_channels (`int`, *optional*, defaults to 1):
            Number of input channels of the input to the Conv1D layer.
        conv_channels (`List[int]`, *optional*, defaults to None):
            Channel sizes of intermediate Conv1D layers.
        ctc_loss_reduction (`str`, *optional*, defaults to `"sum"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`MCTCTForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`MCTCTForCTC`].

    Example:

    ```python
    >>> from transformers import MCTCTModel, MCTCTConfig

    >>> # Initializing a M-CTC-T mctct-large style configuration
    >>> configuration = MCTCTConfig()

    >>> # Initializing a model from the mctct-large style configuration
    >>> model = MCTCTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # Stored as lists rather than tuples so the config can be exported to
        # JSON (tuples break the config-serialization tests).
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        # One kernel size must be provided per convolution layer.
        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 49.908602 | 119 | 0.679845 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
}
class MCTCTConfig(PretrainedConfig):
model_type = "mctct"
def __init__(
self,
vocab_size=8065,
hidden_size=1536,
num_hidden_layers=36,
intermediate_size=6144,
num_attention_heads=4,
attention_head_dim=384,
max_position_embeddings=920,
layer_norm_eps=1e-5,
layerdrop=0.3,
hidden_act="relu",
initializer_range=0.02,
hidden_dropout_prob=0.3,
attention_probs_dropout_prob=0.3,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
conv_glu_dim=1,
conv_dropout=0.3,
num_conv_layers=1,
conv_kernel=(7,),
conv_stride=(3,),
input_feat_per_channel=80,
input_channels=1,
conv_channels=None,
ctc_loss_reduction="sum",
ctc_zero_infinity=False,
**kwargs
):
super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.layerdrop = layerdrop
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.conv_glu_dim = conv_glu_dim
self.conv_dropout = conv_dropout
self.num_conv_layers = num_conv_layers
self.input_feat_per_channel = input_feat_per_channel
self.input_channels = input_channels
self.conv_channels = conv_channels
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
self.conv_kernel = list(conv_kernel)
self.conv_stride = list(conv_stride)
if len(self.conv_kernel) != self.num_conv_layers:
raise ValueError(
"Configuration for convolutional module is incorrect. "
"It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`."
)
| true | true |
f71468717fc5e61acbe354ad1694025b5b1bf250 | 1,649 | py | Python | .venv/lib/python3.8/site-packages/opencensus/stats/measurement.py | MarkusMeyer13/graph-teams-presence | c302b79248f31623a1b209e098afc4f85d96228d | [
"MIT"
] | null | null | null | .venv/lib/python3.8/site-packages/opencensus/stats/measurement.py | MarkusMeyer13/graph-teams-presence | c302b79248f31623a1b209e098afc4f85d96228d | [
"MIT"
] | 1 | 2021-07-28T09:45:24.000Z | 2021-07-28T09:45:24.000Z | .venv/lib/python3.8/site-packages/opencensus/stats/measurement.py | MarkusMeyer13/graph-teams-presence | c302b79248f31623a1b209e098afc4f85d96228d | [
"MIT"
] | null | null | null | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Measurement(object):
    """Pairing of a measure with a single recorded value.

    :type measure: :class: '~opencensus.stats.measure.Measure'
    :param measure: the measure this measurement was recorded against

    :type value: int or float
    :param value: the recorded value
    """

    def __init__(self, measure, value):
        self._value = value
        self._measure = measure

    @property
    def measure(self):
        """The measure of the current measurement"""
        return self._measure

    @property
    def value(self):
        """The value of the current measurement"""
        return self._value
class MeasurementInt(Measurement):
    """Measurement carrying an integer value."""

    def __init__(self, measure, value):
        Measurement.__init__(self, measure, value)
class MeasurementFloat(Measurement):
    """Measurement carrying a floating-point value."""

    def __init__(self, measure, value):
        Measurement.__init__(self, measure, value)
| 32.333333 | 77 | 0.681019 |
class Measurement(object):
def __init__(self, measure, value):
self._measure = measure
self._value = value
@property
def value(self):
return self._value
@property
def measure(self):
return self._measure
class MeasurementInt(Measurement):
def __init__(self, measure, value):
super(MeasurementInt, self).__init__(measure, value)
class MeasurementFloat(Measurement):
def __init__(self, measure, value):
super(MeasurementFloat, self).__init__(measure, value)
| true | true |
f71468baccb7f26415744498bbf3284f96465119 | 26,158 | py | Python | tests/system/test_integration.py | jhonnysanchezillisaca/apm-server | eeae18ef1551769bd03998e6798aadc94dda0a3d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/system/test_integration.py | jhonnysanchezillisaca/apm-server | eeae18ef1551769bd03998e6798aadc94dda0a3d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/system/test_integration.py | jhonnysanchezillisaca/apm-server | eeae18ef1551769bd03998e6798aadc94dda0a3d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import os
import unittest
from apmserver import ElasticTest, ExpvarBaseTest
from apmserver import ClientSideElasticTest, SmapIndexBaseTest, SmapCacheBaseTest
from apmserver import SplitIndicesTest
from beat.beat import INTEGRATION_TESTS
import json
import time
class Test(ElasticTest):
    """Basic server/ES integration: onboarding doc, template, and document
    round-trips compared against approved fixtures."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_onboarding_doc(self):
        """
        This test starts the beat and checks that the onboarding doc has been published to ES
        """
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        self.wait_until(
            lambda: (self.es.count(index=self.index_name)['count'] == 1)
        )
        # Makes sure no error or warnings were logged
        self.assert_no_logged_warnings()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_template(self):
        """
        This test starts the beat and checks that the template has been loaded to ES
        """
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        templates = self.es.indices.get_template(self.index_name)
        assert len(templates) == 1
        t = templates[self.index_name]
        total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
        assert total_fields_limit == "2000", total_fields_limit

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_transaction(self):
        """
        This test starts the beat with a loaded template and sends transaction data to elasticsearch.
        It verifies that all data make it into ES, means data is compatible with the template
        and data are in expected format.
        """
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()

        # compare existing ES documents for transactions with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        # BUG FIX: the failure message used rs['count'], which is not a key of
        # an ES search response; a failing assertion would raise KeyError
        # instead of reporting the count. Use the actual hit total.
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'transaction.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'transaction')

        # compare existing ES documents for spans with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "span"}}})
        assert rs['hits']['total'] == 5, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'spans.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'span')

        self.check_backend_transaction_sourcemap(count=5)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_mark_navigation_timing(self):
        """Navigation-timing marks must be mapped as scaled_float; no other
        mark may pick up that type."""
        self.load_docs_with_template(self.get_transaction_payload_path(), self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        mappings = self.es.indices.get_field_mapping(index=self.index_name, fields="transaction.marks.*")
        found_other = False
        for name, metric in mappings[self.index_name]["mappings"]["doc"].items():
            for mapping in metric["mapping"].values():
                mtype = mapping["type"]
                if name.startswith("transaction.marks.navigationTiming."):
                    assert mtype == "scaled_float", name + " mapped as " + mtype + ", not scaled_float"
                else:
                    # only navigation timing marks are scaled floats for now
                    assert mtype != "scaled_float", name + " mapped as scaled_float"
                    found_other = True
        assert found_other, "no non-scaled_float marks found"

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_error(self):
        """
        This test starts the beat with a loaded template and sends error data to elasticsearch.
        It verifies that all data make it into ES means data is compatible with the template.
        """
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url, 'error', 4)
        self.assert_no_logged_warnings()

        # compare existing ES documents for errors with new ones
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        # BUG FIX: rs['count'] does not exist on a search response (KeyError
        # on assertion failure); report the real hit total instead.
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'error.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'error')

        self.check_backend_error_sourcemap(count=4)

    def check_docs(self, approved, received, doc_type):
        """Match every received ES doc against the approved fixture by id and
        compare the relevant sub-documents field by field."""
        for rec_entry in received:
            checked = False
            rec = rec_entry['_source']
            rec_id = rec[doc_type]['id']
            for appr_entry in approved:
                appr = appr_entry['_source']
                if rec_id == appr[doc_type]['id']:
                    checked = True
                    self.assert_docs(rec[doc_type], appr[doc_type])
                    self.assert_docs(rec['context'], appr['context'])
                    self.assert_docs(rec['@timestamp'], appr['@timestamp'])
                    self.assert_docs(rec['processor'], appr['processor'])
            # idiom: compare truthiness directly rather than `== True`
            assert checked, "New entry with id {}".format(rec_id)

    def assert_docs(self, received, approved):
        assert approved == received, "expected:\n{}\nreceived:\n{}".format(self.dump(approved), self.dump(received))

    def dump(self, data):
        """Pretty-print a document for readable assertion diffs."""
        return json.dumps(data, indent=4, separators=(',', ': '))
class RumEnabledIntegrationTest(ClientSideElasticTest):
    """With RUM enabled: library_frame tagging, event enrichment (client IP,
    user agent) and grouping-key behavior."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        self.load_docs_with_template(self.get_error_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     4)
        self.check_library_frames({"true": 1, "false": 1, "empty": 2}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_library_frames({"true": 5, "false": 1, "empty": 0}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     9)
        self.check_library_frames({"true": 1, "false": 0, "empty": 1}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.check_library_frames({"true": 1, "false": 1, "empty": 0}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_backend_event(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions', 'transaction', 9)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})

        assert "ip" in rs['hits']['hits'][0]["_source"]["context"]["system"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_rum_event(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})

        hits = rs['hits']['hits']
        for hit in hits:
            assert "ip" in hit["_source"]["context"]["user"], rs['hits']
            assert "user-agent" in hit["_source"]["context"]["user"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_grouping_key_for_error(self):
        # upload the same error, once via rum, once via backend endpoint
        # check they don't have the same grouping key, as the
        # `rum.exclude_from_grouping` should only be applied to the rum error.
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     2)

        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        docs = rs['hits']['hits']
        grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
        grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
        assert grouping_key1 != grouping_key2

    def check_library_frames(self, library_frames, event):
        """Aggregate library_frame counters over all docs of `event` type and
        compare against the expected distribution."""
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": event}}})
        l_frames = {"true": 0, "false": 0, "empty": 0}
        for doc in rs['hits']['hits']:
            if "error" in doc["_source"]:
                err = doc["_source"]["error"]
                if "exception" in err:
                    self.count_library_frames(err["exception"], l_frames)
                if "log" in err:
                    self.count_library_frames(err["log"], l_frames)
            elif "span" in doc["_source"]:
                span = doc["_source"]["span"]
                self.count_library_frames(span, l_frames)
        assert l_frames == library_frames, "found {}, expected {}".format(
            l_frames, library_frames)

    def count_library_frames(self, doc, lf):
        """Tally the `library_frame` flag over all stacktrace frames of `doc`
        into the `lf` counter dict (mutated in place)."""
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            # BUG FIX: dict.has_key() is Python-2-only (removed in Python 3);
            # the `in` operator is equivalent and works on both.
            if "library_frame" in frame:
                # keep the exact original equality test against True
                k = "true" if frame["library_frame"] == True else "false"
                lf[k] += 1
            else:
                lf["empty"] += 1
class SplitIndicesIntegrationTest(SplitIndicesTest):
    # Verifies per-event-type index routing: errors, transactions and spans
    # each land in their own dated index under the test-apm-* prefix.
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_split_docs_into_separate_indices(self):
        """Ingest 4 errors and 9 transaction docs, then check total count and
        per-index counts for each processor.event type."""
        # load error and transaction document to ES
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     4,
                                     query_index="test-apm*")
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     9,
                                     query_index="test-apm*")
        # check that every document is indexed once (incl.1 onboarding doc)
        assert 14 == self.es.count(index="test-apm*")['count']
        # check that documents are split into separate indices
        ct = self.es.count(
            index="test-apm-error-12-12-2017",
            body={"query": {"term": {"processor.event": "error"}}}
        )['count']
        assert 4 == ct
        ct = self.es.count(
            index="test-apm-transaction-12-12-2017",
            body={"query": {"term": {"processor.event": "transaction"}}}
        )['count']
        assert 4 == ct
        ct = self.es.count(
            index="test-apm-span-12-12-2017",
            body={"query": {"term": {"processor.event": "span"}}}
        )['count']
        assert 5 == ct
class SourcemappingIntegrationTest(ClientSideElasticTest):
    # End-to-end sourcemap handling: upload, application to RUM/backend
    # events, duplicate handling, and mapping-cache behavior. The order of
    # upload / wait / ingest / query calls in each test is significant.
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        """Backend errors get sourcemapped when a matching map was uploaded."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_backend_error_sourcemap()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_duplicated_sourcemap_warning(self):
        """Re-uploading the same sourcemap logs 'Overriding'; a third upload
        makes the fetch find multiple maps for one caching key."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'

        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps()

        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(2)
        assert self.log_contains(
            "Overriding sourcemap"), "A log should be written when a sourcemap is overwritten"

        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(3)
        assert self.log_contains(
            "Multiple sourcemaps found"), "the 3rd fetch should query ES and find that there are 2 sourcemaps with the same caching key"

        self.assert_no_logged_warnings(
            ["WARN.*Overriding sourcemap", "WARN.*Multiple sourcemaps"])

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        """RUM errors get sourcemapped; upload path is normalized first."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        """Backend transactions/spans get sourcemapped for the right service
        version."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_backend_transaction_sourcemap()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        """RUM transactions/spans get sourcemapped for the right service
        version."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_rum_transaction_sourcemap(True)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_sourcemap(self):
        """Without any uploaded sourcemap the error is flagged accordingly."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_matching_sourcemap(self):
        """A sourcemap that matches no frames behaves like no sourcemap."""
        r = self.upload_sourcemap('bundle_no_mapping.js.map')
        self.assert_no_logged_warnings()
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.test_no_sourcemap()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_fetch_latest_of_multiple_sourcemaps(self):
        """When two sourcemaps share a caching key, the latest upload wins and
        the cache entry is invalidated."""
        # upload sourcemap file that finds no matchings
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle_no_mapping.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap found for")

        # remove existing document
        self.es.delete_by_query(index=self.index_name,
                                body={"query": {"term": {"processor.name": 'error'}}})
        self.wait_until(
            lambda: (self.es.count(index=self.index_name, body={
                "query": {"term": {"processor.name": 'error'}}}
            )['count'] == 0)
        )

        # upload second sourcemap file with same key,
        # that actually leads to proper matchings
        # this also tests that the cache gets invalidated,
        # as otherwise the former sourcemap would be taken from the cache.
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps(expected_ct=2)

        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(True, count=1)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_mapping_cache_usage(self):
        """After the map is cached, deleting it from ES must not break
        mapping: the cached copy is used."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()

        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))

        # insert document,
        # fetching sourcemap without errors, so it must be fetched from cache
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingIntegrationChangedConfigTest(SmapIndexBaseTest):
    # Same RUM-error sourcemap flow as above, but against a non-default
    # sourcemap index configuration (provided by SmapIndexBaseTest).
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error_changed_index(self):
        """Sourcemaps still apply when stored in a customized index."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingCacheIntegrationTest(SmapCacheBaseTest):
    # Runs with a short sourcemap-cache expiration (via SmapCacheBaseTest):
    # once the cache entry expires, a deleted map must no longer be applied.
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_cache_expiration(self):
        """Expired cache entries are not reused after the map is deleted."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()

        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()

        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))

        # after cache expiration no sourcemap should be found any more
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
    """With expvar disabled, the debug endpoint must not be served."""
    config_overrides = {"expvar_enabled": "false"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar disabled, should 404"""
        response = self.get_debug_vars()
        status = response.status_code
        assert status == 404, status
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
    """With expvar enabled, the debug endpoint must be served."""
    config_overrides = {"expvar_enabled": "true"}

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        response = self.get_debug_vars()
        status = response.status_code
        assert status == 200, status
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
    """With expvar enabled on a custom path, the endpoint must be served there."""
    config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
    # point the test client at the custom path instead of /debug/vars
    expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        response = self.get_debug_vars()
        status = response.status_code
        assert status == 200, status
class MetricsIntegrationTest(ElasticTest):
    def all_metrics_docs(self):
        """Return the raw ES search response for all metric documents."""
        return self.es.search(index=self.index_name,
                              body={"query": {"term": {"processor.event": "metric"}}})

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_metric_doc(self):
        """Ingest one metricset and verify the normalized CPU percentage field
        is mapped as scaled_float."""
        self.load_docs_with_template(self.get_metricset_payload_path(), self.metrics_url, 'metric', 1)
        mappings = self.es.indices.get_field_mapping(index=self.index_name, fields="system.process.cpu.total.norm.pct")
        expected_type = "scaled_float"
        actual_type = mappings[self.index_name]["mappings"]["doc"]["system.process.cpu.total.norm.pct"]["mapping"]["pct"]["type"]
        assert expected_type == actual_type, "want: {}, got: {}".format(expected_type, actual_type)
class PipelineRegisterTest(ElasticTest):
    """With registration and overwrite enabled, the default ingest pipeline
    must be (re)registered in ES on startup."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "true"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_default_pipeline_registered(self):
        expected_id = "apm_user_agent"
        expected_desc = "Add user agent information for APM events"
        # wait for the server to report successful registration in its log
        self.wait_until(
            lambda: self.log_contains("Pipeline successfully registered"),
            max_timeout=5)
        registered = self.es.ingest.get_pipeline(id=expected_id)
        assert registered[expected_id]['description'] == expected_desc
class PipelineDisableOverwriteTest(ElasticTest):
    """With overwrite disabled, an already registered pipeline is left alone."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "false"
    }

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_overwritten(self):
        # the server logs that it skipped re-registration
        self.wait_until(
            lambda: self.log_contains("Pipeline already registered"),
            max_timeout=5)
class PipelineDisableTest(ElasticTest):
    """With registration disabled (default), no pipeline callback runs."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_registered(self):
        # the server logs that no registration callback was installed
        self.wait_until(
            lambda: self.log_contains("No pipeline callback registered"),
            max_timeout=5)
| 45.571429 | 136 | 0.601155 | import os
import unittest
from apmserver import ElasticTest, ExpvarBaseTest
from apmserver import ClientSideElasticTest, SmapIndexBaseTest, SmapCacheBaseTest
from apmserver import SplitIndicesTest
from beat.beat import INTEGRATION_TESTS
import json
import time
class Test(ElasticTest):
    """Basic server/ES integration: onboarding doc, template, and document
    round-trips compared against approved fixtures."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_onboarding_doc(self):
        """The beat publishes exactly one onboarding doc to ES on startup."""
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        self.wait_until(
            lambda: (self.es.count(index=self.index_name)['count'] == 1)
        )
        self.assert_no_logged_warnings()

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_template(self):
        """The index template is loaded with the expected field limit."""
        self.wait_until(lambda: self.es.indices.exists(self.index_name))
        self.es.indices.refresh(index=self.index_name)
        templates = self.es.indices.get_template(self.index_name)
        assert len(templates) == 1
        t = templates[self.index_name]
        total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
        assert total_fields_limit == "2000", total_fields_limit

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_transaction(self):
        """Ingested transaction payloads are template-compatible and match the
        approved fixtures for transactions and spans."""
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        # BUG FIX: the failure message used rs['count'], which is not a key of
        # an ES search response; a failing assertion would raise KeyError
        # instead of reporting the count. Use the actual hit total.
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'transaction.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'transaction')
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "span"}}})
        assert rs['hits']['total'] == 5, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'spans.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'span')
        self.check_backend_transaction_sourcemap(count=5)

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_mark_navigation_timing(self):
        """Navigation-timing marks must be mapped as scaled_float; no other
        mark may pick up that type."""
        self.load_docs_with_template(self.get_transaction_payload_path(), self.transactions_url, 'transaction', 9)
        self.assert_no_logged_warnings()
        mappings = self.es.indices.get_field_mapping(index=self.index_name, fields="transaction.marks.*")
        found_other = False
        for name, metric in mappings[self.index_name]["mappings"]["doc"].items():
            for mapping in metric["mapping"].values():
                mtype = mapping["type"]
                if name.startswith("transaction.marks.navigationTiming."):
                    assert mtype == "scaled_float", name + " mapped as " + mtype + ", not scaled_float"
                else:
                    # only navigation timing marks are scaled floats for now
                    assert mtype != "scaled_float", name + " mapped as scaled_float"
                    found_other = True
        assert found_other, "no non-scaled_float marks found"

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_load_docs_with_template_and_add_error(self):
        """Ingested error payloads are template-compatible and match the
        approved fixture."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url, 'error', 4)
        self.assert_no_logged_warnings()
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        # BUG FIX: rs['count'] does not exist on a search response (KeyError
        # on assertion failure); report the real hit total instead.
        assert rs['hits']['total'] == 4, "found {} documents".format(
            rs['hits']['total'])
        with open(self._beat_path_join(os.path.dirname(__file__), 'error.approved.json')) as f:
            approved = json.load(f)
        self.check_docs(approved, rs['hits']['hits'], 'error')
        self.check_backend_error_sourcemap(count=4)

    def check_docs(self, approved, received, doc_type):
        """Match every received ES doc against the approved fixture by id and
        compare the relevant sub-documents field by field."""
        for rec_entry in received:
            checked = False
            rec = rec_entry['_source']
            rec_id = rec[doc_type]['id']
            for appr_entry in approved:
                appr = appr_entry['_source']
                if rec_id == appr[doc_type]['id']:
                    checked = True
                    self.assert_docs(rec[doc_type], appr[doc_type])
                    self.assert_docs(rec['context'], appr['context'])
                    self.assert_docs(rec['@timestamp'], appr['@timestamp'])
                    self.assert_docs(rec['processor'], appr['processor'])
            # idiom: compare truthiness directly rather than `== True`
            assert checked, "New entry with id {}".format(rec_id)

    def assert_docs(self, received, approved):
        assert approved == received, "expected:\n{}\nreceived:\n{}".format(self.dump(approved), self.dump(received))

    def dump(self, data):
        """Pretty-print a document for readable assertion diffs."""
        return json.dumps(data, indent=4, separators=(',', ': '))
class RumEnabledIntegrationTest(ClientSideElasticTest):
    """With RUM enabled: library_frame tagging, event enrichment (client IP,
    user agent) and grouping-key behavior."""

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        self.load_docs_with_template(self.get_error_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     4)
        self.check_library_frames({"true": 1, "false": 1, "empty": 2}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_library_frames({"true": 5, "false": 1, "empty": 0}, "error")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     9)
        self.check_library_frames({"true": 1, "false": 0, "empty": 1}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.check_library_frames({"true": 1, "false": 1, "empty": 0}, "span")

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_backend_event(self):
        self.load_docs_with_template(self.get_transaction_payload_path(name="payload.json"),
                                     'http://localhost:8200/v1/transactions', 'transaction', 9)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "transaction"}}})
        assert "ip" in rs['hits']['hits'][0]["_source"]["context"]["system"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_enrich_rum_event(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        hits = rs['hits']['hits']
        for hit in hits:
            assert "ip" in hit["_source"]["context"]["user"], rs['hits']
            assert "user-agent" in hit["_source"]["context"]["user"], rs['hits']

    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_grouping_key_for_error(self):
        # upload the same error once via the backend and once via the RUM
        # endpoint; the grouping keys must differ because
        # `rum.exclude_from_grouping` should only be applied to the rum error.
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     2)
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": "error"}}})
        docs = rs['hits']['hits']
        grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
        grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
        assert grouping_key1 != grouping_key2

    def check_library_frames(self, library_frames, event):
        """Aggregate library_frame counters over all docs of `event` type and
        compare against the expected distribution."""
        rs = self.es.search(index=self.index_name, body={
            "query": {"term": {"processor.event": event}}})
        l_frames = {"true": 0, "false": 0, "empty": 0}
        for doc in rs['hits']['hits']:
            if "error" in doc["_source"]:
                err = doc["_source"]["error"]
                if "exception" in err:
                    self.count_library_frames(err["exception"], l_frames)
                if "log" in err:
                    self.count_library_frames(err["log"], l_frames)
            elif "span" in doc["_source"]:
                span = doc["_source"]["span"]
                self.count_library_frames(span, l_frames)
        assert l_frames == library_frames, "found {}, expected {}".format(
            l_frames, library_frames)

    def count_library_frames(self, doc, lf):
        """Tally the `library_frame` flag over all stacktrace frames of `doc`
        into the `lf` counter dict (mutated in place)."""
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            # BUG FIX: dict.has_key() is Python-2-only (removed in Python 3);
            # the `in` operator is equivalent and works on both.
            if "library_frame" in frame:
                # keep the exact original equality test against True
                k = "true" if frame["library_frame"] == True else "false"
                lf[k] += 1
            else:
                lf["empty"] += 1
class SplitIndicesIntegrationTest(SplitIndicesTest):
    # Verifies per-event-type index routing: errors, transactions and spans
    # each land in their own dated index under the test-apm-* prefix.
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_split_docs_into_separate_indices(self):
        """Ingest 4 errors and 9 transaction docs, then check total count and
        per-index counts for each processor.event type."""
        # load error and transaction document to ES
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     4,
                                     query_index="test-apm*")
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     9,
                                     query_index="test-apm*")
        # check that every document is indexed once (incl.1 onboarding doc)
        assert 14 == self.es.count(index="test-apm*")['count']
        # check that documents are split into separate indices
        ct = self.es.count(
            index="test-apm-error-12-12-2017",
            body={"query": {"term": {"processor.event": "error"}}}
        )['count']
        assert 4 == ct
        ct = self.es.count(
            index="test-apm-transaction-12-12-2017",
            body={"query": {"term": {"processor.event": "transaction"}}}
        )['count']
        assert 4 == ct
        ct = self.es.count(
            index="test-apm-span-12-12-2017",
            body={"query": {"term": {"processor.event": "span"}}}
        )['count']
        assert 5 == ct
class SourcemappingIntegrationTest(ClientSideElasticTest):
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_error(self):
        """Backend errors get sourcemapped when a matching map was uploaded."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     'http://localhost:8200/v1/errors',
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_backend_error_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_duplicated_sourcemap_warning(self):
        """Re-uploading the same sourcemap logs 'Overriding'; a third upload
        makes the fetch find multiple maps for one caching key."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps()
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(2)
        assert self.log_contains(
            "Overriding sourcemap"), "A log should be written when a sourcemap is overwritten"
        self.upload_sourcemap(file_name='bundle.js.map', bundle_filepath=path)
        self.wait_for_sourcemaps(3)
        assert self.log_contains(
            "Multiple sourcemaps found"), "the 3rd fetch should query ES and find that there are 2 sourcemaps with the same caching key"
        self.assert_no_logged_warnings(
            ["WARN.*Overriding sourcemap", "WARN.*Multiple sourcemaps"])
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error(self):
        """RUM errors get sourcemapped; upload path is normalized first."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_backend_transaction(self):
        """Backend transactions/spans get sourcemapped for the right service
        version."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     'http://localhost:8200/v1/transactions',
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_backend_transaction_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_transaction(self):
        """RUM transactions/spans get sourcemapped for the right service
        version."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(file_name='bundle.js.map',
                                  bundle_filepath=path,
                                  service_version='1.0.0')
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.transactions_url,
                                     'transaction',
                                     2)
        self.assert_no_logged_warnings()
        self.check_rum_transaction_sourcemap(True)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_sourcemap(self):
        """Without any uploaded sourcemap the error is flagged accordingly."""
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_no_matching_sourcemap(self):
        """A sourcemap that matches no frames behaves like no sourcemap."""
        r = self.upload_sourcemap('bundle_no_mapping.js.map')
        self.assert_no_logged_warnings()
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.test_no_sourcemap()
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_fetch_latest_of_multiple_sourcemaps(self):
        """With two sourcemaps under the same caching key the latest upload
        wins, which also proves the cache is invalidated by a re-upload."""
        # upload sourcemap file that finds no matchings
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle_no_mapping.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap found for")
        # remove existing document so the second run starts clean
        self.es.delete_by_query(index=self.index_name,
                                body={"query": {"term": {"processor.name": 'error'}}})
        self.wait_until(
            lambda: (self.es.count(index=self.index_name, body={
                "query": {"term": {"processor.name": 'error'}}}
            )['count'] == 0)
        )
        # upload second sourcemap file with same key,
        # that actually leads to proper matchings
        # this also tests that the cache gets invalidated,
        # as otherwise the former sourcemap would be taken from the cache.
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps(expected_ct=2)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(True, count=1)
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_mapping_cache_usage(self):
        """After a first successful mapping the sourcemap is served from the
        in-memory cache, even when the backing ES index is gone."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # insert document,
        # fetching sourcemap without errors, so it must be fetched from cache
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingIntegrationChangedConfigTest(SmapIndexBaseTest):
    """RUM-error sourcemapping flow run against a non-default sourcemap index
    (index configuration presumably comes from SmapIndexBaseTest -- verify)."""
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_rum_error_changed_index(self):
        """Sourcemapping still works when the sourcemap index is customized."""
        # use an uncleaned path to test that path is cleaned in upload
        path = 'http://localhost:8000/test/e2e/../e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        self.check_rum_error_sourcemap(True)
class SourcemappingCacheIntegrationTest(SmapCacheBaseTest):
    """Sourcemap cache expiry behavior (cache TTL presumably configured by
    SmapCacheBaseTest -- verify against the base class)."""
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_sourcemap_cache_expiration(self):
        """Once the cache entry expires, a deleted sourcemap is no longer used."""
        path = 'http://localhost:8000/test/e2e/general-usecase/bundle.js.map'
        r = self.upload_sourcemap(
            file_name='bundle.js.map', bundle_filepath=path)
        assert r.status_code == 202, r.status_code
        self.wait_for_sourcemaps()
        # insert document, which also leads to caching the sourcemap
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.assert_no_logged_warnings()
        # delete sourcemap from ES
        # fetching from ES would lead to an error afterwards
        self.es.indices.delete(index=self.index_name, ignore=[400, 404])
        self.wait_until(lambda: not self.es.indices.exists(self.index_name))
        # after cache expiration no sourcemap should be found any more
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.errors_url,
                                     'error',
                                     1)
        self.check_rum_error_sourcemap(
            False, expected_err="No Sourcemap available for")
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
    """With expvar disabled, the debug-vars endpoint must not exist."""
    config_overrides = {"expvar_enabled": "false"}
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """Endpoint returns 404 when expvar is disabled."""
        r = self.get_debug_vars()
        assert r.status_code == 404, r.status_code
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
    """With expvar enabled, the debug-vars endpoint must be served."""
    config_overrides = {"expvar_enabled": "true"}
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """Endpoint answers 200 when expvar is enabled."""
        r = self.get_debug_vars()
        assert r.status_code == 200, r.status_code
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
    """Expvar served from a custom URL instead of the default /debug/vars."""
    config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
    # Point the test helper at the overridden path.
    expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_expvar_exists(self):
        """Endpoint answers 200 on the custom URL."""
        r = self.get_debug_vars()
        assert r.status_code == 200, r.status_code
class MetricsIntegrationTest(ElasticTest):
    """Metric documents are indexed with the expected field mappings."""
    def all_metrics_docs(self):
        """Return every indexed document with processor.event == 'metric'."""
        return self.es.search(index=self.index_name,
                              body={"query": {"term": {"processor.event": "metric"}}})
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_metric_doc(self):
        """A loaded metricset maps the normalized CPU pct as scaled_float."""
        self.load_docs_with_template(self.get_metricset_payload_path(), self.metrics_url, 'metric', 1)
        mappings = self.es.indices.get_field_mapping(index=self.index_name, fields="system.process.cpu.total.norm.pct")
        expected_type = "scaled_float"
        # get_field_mapping keys the innermost mapping by the field's leaf name ("pct").
        actual_type = mappings[self.index_name]["mappings"]["doc"]["system.process.cpu.total.norm.pct"]["mapping"]["pct"]["type"]
        assert expected_type == actual_type, "want: {}, got: {}".format(expected_type, actual_type)
class PipelineRegisterTest(ElasticTest):
    """Ingest pipeline registration with overwrite enabled."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "true"
    }
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_default_pipeline_registered(self):
        """The default user-agent pipeline ends up registered in Elasticsearch."""
        pipeline_id = "apm_user_agent"
        default_desc = "Add user agent information for APM events"
        loaded_msg = "Pipeline successfully registered"
        self.wait_until(lambda: self.log_contains(loaded_msg),
                        max_timeout=5)
        pipeline = self.es.ingest.get_pipeline(id=pipeline_id)
        assert pipeline[pipeline_id]['description'] == default_desc
class PipelineDisableOverwriteTest(ElasticTest):
    """With overwrite disabled, an already-registered pipeline is left alone."""
    config_overrides = {
        "register_pipeline_enabled": "true",
        "register_pipeline_overwrite": "false"
    }
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_overwritten(self):
        """The server logs that the existing pipeline was kept."""
        loaded_msg = "Pipeline already registered"
        self.wait_until(lambda: self.log_contains(loaded_msg),
                        max_timeout=5)
class PipelineDisableTest(ElasticTest):
    """With pipeline registration disabled, no pipeline callback is installed."""
    @unittest.skipUnless(INTEGRATION_TESTS, "integration test")
    def test_pipeline_not_registered(self):
        """The server logs that no pipeline registration was attempted."""
        loaded_msg = "No pipeline callback registered"
        self.wait_until(lambda: self.log_contains(loaded_msg),
                        max_timeout=5)
| true | true |
f71469034f1006ce1d48aedf70acee846deb393a | 6,692 | py | Python | pyelastic/pendulum.py | tacox5/elastic_pendulum | c2058444ca161a420466b531b008fe247a87db60 | [
"BSD-2-Clause"
] | null | null | null | pyelastic/pendulum.py | tacox5/elastic_pendulum | c2058444ca161a420466b531b008fe247a87db60 | [
"BSD-2-Clause"
] | 8 | 2021-06-11T15:26:47.000Z | 2021-07-29T23:52:01.000Z | pyelastic/pendulum.py | tyler-a-cox/elastic-pendulum | c2058444ca161a420466b531b008fe247a87db60 | [
"BSD-2-Clause"
] | null | null | null | import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
from .settings import *
class ElasticPendulum:
    """Simulate a double spring ("elastic") pendulum: two masses joined by
    two springs, hanging from the origin.

    Initial angles, angular velocities, spring constants, rest lengths and
    masses are configurable via keyword arguments; the angles and spring
    constants default to random values drawn once per instance.
    """
    def __init__(self, **kwargs):
        """Store the initial conditions and build the evaluation time grid.

        Keyword Args:
            alpha_0 : float
                Initial angle of the top pendulum in radians
                (default: uniform random in [-pi, pi])
            beta_0 : float
                Initial angle of the bottom pendulum in radians
                (default: uniform random in [-pi, pi])
            alpha_1 : float, default=0.0
                Initial angular velocity of the top pendulum (rad/s)
            beta_1 : float, default=0.0
                Initial angular velocity of the bottom pendulum (rad/s)
            k1 : float
                Spring constant of the top spring
                (default: uniform random in [35, 55])
            k2 : float
                Spring constant of the bottom spring
                (default: uniform random in [35, 55])
            l1 : float, default=1.0
                Natural (rest) length of the top spring
            l2 : float, default=1.0
                Natural (rest) length of the bottom spring
            m1 : float, default=1.0
                Mass of the top bob
            m2 : float, default=1.0
                Mass of the bottom bob
            a0 : float, default=1.0
                Initial length of the top spring
            a1 : float, default=1.0
                Initial rate of change of the top spring length
            b0 : float, default=1.0
                Initial length of the bottom spring
            b1 : float, default=1.0
                Initial rate of change of the bottom spring length
            t_end : float, default=2
                Length of the simulation in seconds
            fps : int, default=24
                Frame rate; fixes the integrator's output resolution
        """
        prop_defaults = {
            "alpha_0": np.random.uniform(-np.pi, np.pi),
            "beta_0": np.random.uniform(-np.pi, np.pi),
            "alpha_1": 0.0,
            "beta_1": 0.0,
            "k1": np.random.uniform(35, 55),
            "k2": np.random.uniform(35, 55),
            "l1": 1.0,
            "l2": 1.0,
            "m1": 1.0,
            "m2": 1.0,
            "a0": 1.0,
            "b0": 1.0,
            "a1": 1.0,
            "b1": 1.0,
            "t_end": 2,
            "fps": 24,
            "g": GRAVITY,
        }
        # Attach every option (caller-supplied or default) as an attribute.
        for (prop, default) in prop_defaults.items():
            setattr(self, prop, kwargs.get(prop, default))
        # One integrator sample per animation frame.
        self.dt = 1.0 / self.fps
        self.t_eval = np.arange(0, self.t_end, self.dt)
    def _spherical_to_cartesian(self, array, interpolate=True):
        """Convert (angle, length) coordinates to cartesian bob positions.

        Args:
            array : np.ndarray
                Columns are (alpha, beta, a, b): the two angles and the two
                spring lengths, one row per time sample (see integrate()).
            interpolate : boolean, default=True
                If True, also store interp1d interpolators fx1/fy1/fx2/fy2
                for each coordinate, indexed by sample number.
        Returns:
            tuple : x1, x2, y1, y2 position arrays of the two bobs
        """
        # Top bob hangs from the origin; angles are measured from vertical.
        x1 = array[:, 2] * np.sin(array[:, 0])
        x2 = x1 + array[:, 3] * np.sin(array[:, 1])
        y1 = -array[:, 2] * np.cos(array[:, 0])
        y2 = y1 - array[:, 3] * np.cos(array[:, 1])
        if interpolate:
            self.fx1 = interp1d(np.arange(0, x1.shape[0]), x1)
            self.fy1 = interp1d(np.arange(0, x1.shape[0]), y1)
            self.fx2 = interp1d(np.arange(0, x1.shape[0]), x2)
            self.fy2 = interp1d(np.arange(0, x1.shape[0]), y2)
        return x1, x2, y1, y2
    def _alpha_pp(self, t, Y):
        """Angular acceleration of the top pendulum (equation of motion)."""
        alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, _ = Y
        return -(
            self.g * self.m1 * np.sin(alpha_0)
            - self.k2 * self.l2 * np.sin(alpha_0 - beta_0)
            + self.k2 * b0 * np.sin(alpha_0 - beta_0)
            + 2 * self.m1 * a1 * alpha_1
        ) / (self.m1 * a0)
    def _beta_pp(self, t, Y):
        """Angular acceleration of the bottom pendulum (equation of motion)."""
        alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
        return (
            -self.k1 * self.l1 * np.sin(alpha_0 - beta_0)
            + self.k1 * a0 * np.sin(alpha_0 - beta_0)
            - 2.0 * self.m1 * b1 * beta_1
        ) / (self.m1 * b0)
    def _a_pp(self, t, Y):
        """Radial (spring-length) acceleration of the top spring."""
        alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
        return (
            self.k1 * self.l1
            + self.g * self.m1 * np.cos(alpha_0)
            - self.k2 * self.l2 * np.cos(alpha_0 - beta_0)
            + self.k2 * b0 * np.cos(alpha_0 - beta_0)
            + a0 * (-self.k1 + self.m1 * alpha_1 ** 2)
        ) / self.m1
    def _b_pp(self, t, Y):
        """Radial (spring-length) acceleration of the bottom spring."""
        alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
        return (
            self.k2 * self.l2 * self.m1
            + self.k2 * self.l2 * self.m2 * np.cos(alpha_0 - beta_0)
            + self.k1 * self.m2 * a0 * np.cos(alpha_0 - beta_0)
            - b0 * (self.k2 * (self.m1 + self.m2) - self.m1 * self.m2 * beta_1 ** 2)
        ) / (self.m1 * self.m2)
    def _lagrangian(self, t, Y):
        """First-order form of the equations of motion, for solve_ivp.

        Args:
            t : float
                Current integration time (unused: no term depends on t)
            Y : np.ndarray
                State vector (alpha, alpha', beta, beta', a, a', b, b')
        Returns:
            list :
                Time derivative of the state vector
        """
        return [
            Y[1],
            self._alpha_pp(t, Y),
            Y[3],
            self._beta_pp(t, Y),
            Y[5],
            self._a_pp(t, Y),
            Y[7],
            self._b_pp(t, Y),
        ]
    def integrate(self, method="LSODA", interpolate=True):
        """Integrate the equations of motion and store the bob trajectories.

        Args:
            method : str, default=LSODA
                Integrator passed to scipy.integrate.solve_ivp
                (RK45, RK23, DOP853, Radau, BDF or LSODA)
            interpolate : boolean, default=True
                Whether to build interpolators of the results (for animation)
        Returns:
            None
        """
        Y0 = [
            self.alpha_0,
            self.alpha_1,
            self.beta_0,
            self.beta_1,
            self.a0,
            self.a1,
            self.b0,
            self.b1,
        ]
        self.solution = solve_ivp(
            self._lagrangian, [0, self.t_end], Y0, t_eval=self.t_eval, method=method
        )
        # Rows 0, 2, 4, 6 of the solution are alpha, beta, a, b.
        self.x1, self.x2, self.y1, self.y2 = self._spherical_to_cartesian(
            self.solution.y[[0, 2, 4, 6]].T, interpolate=interpolate
        )
| 34.494845 | 90 | 0.51569 | import os
import glob
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from scipy.interpolate import interp1d
from .settings import *
class ElasticPendulum:
def __init__(self, **kwargs):
prop_defaults = {
"alpha_0": np.random.uniform(-np.pi, np.pi),
"beta_0": np.random.uniform(-np.pi, np.pi),
"alpha_1": 0.0,
"beta_1": 0.0,
"k1": np.random.uniform(35, 55),
"k2": np.random.uniform(35, 55),
"l1": 1.0,
"l2": 1.0,
"m1": 1.0,
"m2": 1.0,
"a0": 1.0,
"b0": 1.0,
"a1": 1.0,
"b1": 1.0,
"t_end": 2,
"fps": 24,
"g": GRAVITY,
}
for (prop, default) in prop_defaults.items():
setattr(self, prop, kwargs.get(prop, default))
self.dt = 1.0 / self.fps
self.t_eval = np.arange(0, self.t_end, self.dt)
def _spherical_to_cartesian(self, array, interpolate=True):
x1 = array[:, 2] * np.sin(array[:, 0])
x2 = x1 + array[:, 3] * np.sin(array[:, 1])
y1 = -array[:, 2] * np.cos(array[:, 0])
y2 = y1 - array[:, 3] * np.cos(array[:, 1])
if interpolate:
self.fx1 = interp1d(np.arange(0, x1.shape[0]), x1)
self.fy1 = interp1d(np.arange(0, x1.shape[0]), y1)
self.fx2 = interp1d(np.arange(0, x1.shape[0]), x2)
self.fy2 = interp1d(np.arange(0, x1.shape[0]), y2)
return x1, x2, y1, y2
def _alpha_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, _ = Y
return -(
self.g * self.m1 * np.sin(alpha_0)
- self.k2 * self.l2 * np.sin(alpha_0 - beta_0)
+ self.k2 * b0 * np.sin(alpha_0 - beta_0)
+ 2 * self.m1 * a1 * alpha_1
) / (self.m1 * a0)
def _beta_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
-self.k1 * self.l1 * np.sin(alpha_0 - beta_0)
+ self.k1 * a0 * np.sin(alpha_0 - beta_0)
- 2.0 * self.m1 * b1 * beta_1
) / (self.m1 * b0)
def _a_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k1 * self.l1
+ self.g * self.m1 * np.cos(alpha_0)
- self.k2 * self.l2 * np.cos(alpha_0 - beta_0)
+ self.k2 * b0 * np.cos(alpha_0 - beta_0)
+ a0 * (-self.k1 + self.m1 * alpha_1 ** 2)
) / self.m1
def _b_pp(self, t, Y):
alpha_0, alpha_1, beta_0, beta_1, a0, a1, b0, b1 = Y
return (
self.k2 * self.l2 * self.m1
+ self.k2 * self.l2 * self.m2 * np.cos(alpha_0 - beta_0)
+ self.k1 * self.m2 * a0 * np.cos(alpha_0 - beta_0)
- b0 * (self.k2 * (self.m1 + self.m2) - self.m1 * self.m2 * beta_1 ** 2)
) / (self.m1 * self.m2)
def _lagrangian(self, t, Y):
return [
Y[1],
self._alpha_pp(t, Y),
Y[3],
self._beta_pp(t, Y),
Y[5],
self._a_pp(t, Y),
Y[7],
self._b_pp(t, Y),
]
def integrate(self, method="LSODA", interpolate=True):
Y0 = [
self.alpha_0,
self.alpha_1,
self.beta_0,
self.beta_1,
self.a0,
self.a1,
self.b0,
self.b1,
]
self.solution = solve_ivp(
self._lagrangian, [0, self.t_end], Y0, t_eval=self.t_eval, method=method
)
self.x1, self.x2, self.y1, self.y2 = self._spherical_to_cartesian(
self.solution.y[[0, 2, 4, 6]].T, interpolate=interpolate
)
| true | true |
f71469867882f63d4249d507eeda5f87798b2b79 | 535 | py | Python | restful_test.py | Corey0606/FlaskSite | 3c547b6e69a955d281451f18a9db8dde65013bd3 | [
"CC-BY-3.0"
] | null | null | null | restful_test.py | Corey0606/FlaskSite | 3c547b6e69a955d281451f18a9db8dde65013bd3 | [
"CC-BY-3.0"
] | null | null | null | restful_test.py | Corey0606/FlaskSite | 3c547b6e69a955d281451f18a9db8dde65013bd3 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@Time : 2020/12/11 11:57
@Author : Corey
"""
from flask import Flask, request
from flask_restful import Api, Resource, marshal, fields, reqparse
app = Flask(__name__)
# RESTful interface: wrap the Flask app with Flask-RESTful's Api router.
api = Api(app)
class UserApi(Resource):
    """Example REST resource exposing GET/POST/DELETE with placeholder payloads."""
    def get(self):
        """Fetch: returns a static demo string."""
        return 'get restful api data'
    def post(self):
        """Update: returns a static demo string."""
        return 'update restful api data'
    def delete(self):
        """Delete: returns a static demo string (trailing space is in the literal)."""
        return 'delete restful api data '
api.add_resource(UserApi, '/users', endpoint='user')
if __name__ == '__main__':
app.run() | 19.814815 | 66 | 0.648598 |
from flask import Flask, request
from flask_restful import Api, Resource, marshal, fields, reqparse
app = Flask(__name__)
api = Api(app)
class UserApi(Resource):
def get(self):
return 'get restful api data'
def post(self):
return 'update restful api data'
def delete(self):
return 'delete restful api data '
api.add_resource(UserApi, '/users', endpoint='user')
if __name__ == '__main__':
app.run() | true | true |
f71469a4c65abe3c8976410b9d79a0dd097398f2 | 379 | py | Python | dbaas/physical/admin/vip_instance_group.py | amintasvrp/database-as-a-service | 8221df604f9252ddf877cd2216bdf1e3f76220ba | [
"BSD-3-Clause"
] | 303 | 2015-01-08T10:35:54.000Z | 2022-02-28T08:54:06.000Z | dbaas/physical/admin/vip_instance_group.py | amintasvrp/database-as-a-service | 8221df604f9252ddf877cd2216bdf1e3f76220ba | [
"BSD-3-Clause"
] | 124 | 2015-01-14T12:56:15.000Z | 2022-03-22T20:45:11.000Z | dbaas/physical/admin/vip_instance_group.py | amintasvrp/database-as-a-service | 8221df604f9252ddf877cd2216bdf1e3f76220ba | [
"BSD-3-Clause"
] | 110 | 2015-01-02T11:59:48.000Z | 2022-02-28T08:54:06.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class VipInstanceGroupAdmin(admin.ModelAdmin):
    """Django admin configuration for VIP instance groups.

    Allows searching by name/identifier and by the VIP's infra name, and
    lists/filters groups by their associated VIP.
    """

    # The original code assigned ``search_fields`` twice; only the last
    # assignment took effect, so the dead duplicate was removed. The
    # commented-out ``save_on_top`` dead code was dropped as well.
    search_fields = ("name", "identifier", "vip__infra__name")
    list_display = ("name", "identifier", "vip")
    list_filter = ("vip__infra",)
| 31.583333 | 62 | 0.699208 |
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
class VipInstanceGroupAdmin(admin.ModelAdmin):
search_fields = ("name", "identifier",)
list_display = ("name", "identifier", "vip", )
search_fields = ("name", "identifier", "vip__infra__name")
list_filter = ('vip__infra',)
| true | true |
f71469ad5f5395756c71d0408eb3aba396919d37 | 3,078 | py | Python | denorm/join_defer.py | rivethealth/denorm | c9b9070730e3cc7fbe78927d34db7ffa384aed42 | [
"MIT"
] | 11 | 2021-03-29T14:27:48.000Z | 2022-01-01T00:31:40.000Z | denorm/join_defer.py | rivethealth/denorm | c9b9070730e3cc7fbe78927d34db7ffa384aed42 | [
"MIT"
] | null | null | null | denorm/join_defer.py | rivethealth/denorm | c9b9070730e3cc7fbe78927d34db7ffa384aed42 | [
"MIT"
] | null | null | null | import typing
from pg_sql import SqlId, SqlNumber, SqlObject, SqlString, sql_list
from .format import format
from .join_common import JoinTarget, Key, Structure
from .join_key import KeyConsumer, TargetRefresh
from .sql import SqlTableExpr
from .sql_query import sync_query, upsert_query
from .string import indent
def create_refresh_function(
    id: str,
    structure: Structure,
    refresh: TargetRefresh,
):
    """Yield SQL statements that create the deferred-refresh trigger function.

    The generated plpgsql function analyzes the temp refresh table, runs the
    refresh over the accumulated keys, then clears the refresh table.

    Args:
        id: human-readable identifier, used only in the COMMENT statement.
        structure: naming helper for the generated database objects.
        refresh: produces the refresh SQL over the key table.
    """
    refresh_function = structure.refresh_function()
    refresh_table = structure.refresh_table()
    key_table = structure.key_table()
    refresh_sql = refresh.sql(f"TABLE {key_table}", None)
    yield f"""
CREATE FUNCTION {refresh_function} () RETURNS trigger
LANGUAGE plpgsql AS $$
  BEGIN
    -- analyze
    ANALYZE {refresh_table};
    -- refresh
{indent(str(refresh_sql), 2)}
    -- clear refresh
    DELETE FROM {refresh_table};
    RETURN NULL;
  END;
$$
    """.strip()
    yield f"""
COMMENT ON FUNCTION {refresh_function} IS {SqlString(f'Refresh {id}')}
    """.strip()
def create_setup_function(
    structure: Structure,
    id: str,
    key: Key,
    target: JoinTarget,
):
    """Yield SQL statements that create the session setup function.

    The generated plpgsql function lazily creates (once per session) the
    temp key table and the temp refresh table, plus a deferred constraint
    trigger that fires the refresh function at commit time.

    Args:
        structure: naming helper for the generated database objects.
        id: human-readable identifier, used only in the COMMENT statement.
        key: supplies the key table definition and key column names.
        target: join target (unused in this function's body -- kept for
            interface symmetry; verify before removing).
    """
    key_table = structure.key_table()
    refresh_constraint = structure.refresh_constraint()
    refresh_function = structure.refresh_function()
    refresh_table = structure.refresh_table()
    setup_function = structure.setup_function()
    yield f"""
CREATE FUNCTION {setup_function} () RETURNS void
LANGUAGE plpgsql AS $$
  BEGIN
    IF to_regclass({SqlString(str(refresh_table))}) IS NOT NULL THEN
      RETURN;
    END IF;
    CREATE TEMP TABLE {key_table}
    ON COMMIT DELETE ROWS
    AS {key.definition}
    WITH NO DATA;
    ALTER TABLE {key_table}
      ADD PRIMARY KEY ({sql_list([SqlId(name) for name in key.names])});
    CREATE TEMP TABLE {refresh_table} (
    ) ON COMMIT DELETE ROWS;
    CREATE CONSTRAINT TRIGGER {refresh_constraint} AFTER INSERT ON {refresh_table}
      DEFERRABLE INITIALLY DEFERRED
      FOR EACH ROW EXECUTE PROCEDURE {refresh_function}();
  END
$$
    """.strip()
    yield f"""
COMMENT ON FUNCTION {setup_function} IS {SqlString(f"Set up temp tables for {id}")}
    """.strip()
class DeferredKeys(KeyConsumer):
    """Key consumer that stages changed keys for a deferred refresh.

    Changed keys are upserted into the session's temp key table, and a
    sentinel row is inserted into the refresh table so that the deferred
    constraint trigger runs the refresh at commit time.
    """

    def __init__(self, key: typing.List[str], structure: Structure):
        self._key = key
        self._structure = structure

    def sql(
        self,
        key_query: str,
        table_id: str,
        exprs: typing.Optional[typing.List[SqlTableExpr]] = None,
        last_expr: typing.Optional[str] = None,
    ):
        """Build the SQL that records changed keys and schedules a refresh.

        Args:
            key_query: query producing the changed keys.
            table_id: identifier of the originating table (unused here).
            exprs: optional table expressions prepended to the upsert query.
            last_expr: optional final expression appended as ``_other``.
        """
        # The default was previously a mutable ``[]``; use a None sentinel to
        # avoid the shared-mutable-default pitfall (behavior is unchanged).
        if exprs is None:
            exprs = []
        setup_function = self._structure.setup_function()
        refresh_table = self._structure.refresh_table()
        query = upsert_query(
            columns=self._key,
            key=self._key,
            query=key_query,
            target=self._structure.key_table(),
        )
        for expr in reversed(exprs):
            query.prepend(expr)
        if last_expr is not None:
            query.append(SqlId("_other"), last_expr)
        return f"""
PERFORM {setup_function}();
{query};
INSERT INTO {refresh_table}
SELECT
WHERE NOT EXISTS (TABLE {refresh_table});
    """.strip()
| 24.428571 | 83 | 0.668291 | import typing
from pg_sql import SqlId, SqlNumber, SqlObject, SqlString, sql_list
from .format import format
from .join_common import JoinTarget, Key, Structure
from .join_key import KeyConsumer, TargetRefresh
from .sql import SqlTableExpr
from .sql_query import sync_query, upsert_query
from .string import indent
def create_refresh_function(
id: str,
structure: Structure,
refresh: TargetRefresh,
):
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
key_table = structure.key_table()
refresh_sql = refresh.sql(f"TABLE {key_table}", None)
yield f"""
CREATE FUNCTION {refresh_function} () RETURNS trigger
LANGUAGE plpgsql AS $$
BEGIN
-- analyze
ANALYZE {refresh_table};
-- refresh
{indent(str(refresh_sql), 2)}
-- clear refresh
DELETE FROM {refresh_table};
RETURN NULL;
END;
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {refresh_function} IS {SqlString(f'Refresh {id}')}
""".strip()
def create_setup_function(
structure: Structure,
id: str,
key: Key,
target: JoinTarget,
):
key_table = structure.key_table()
refresh_constraint = structure.refresh_constraint()
refresh_function = structure.refresh_function()
refresh_table = structure.refresh_table()
setup_function = structure.setup_function()
yield f"""
CREATE FUNCTION {setup_function} () RETURNS void
LANGUAGE plpgsql AS $$
BEGIN
IF to_regclass({SqlString(str(refresh_table))}) IS NOT NULL THEN
RETURN;
END IF;
CREATE TEMP TABLE {key_table}
ON COMMIT DELETE ROWS
AS {key.definition}
WITH NO DATA;
ALTER TABLE {key_table}
ADD PRIMARY KEY ({sql_list([SqlId(name) for name in key.names])});
CREATE TEMP TABLE {refresh_table} (
) ON COMMIT DELETE ROWS;
CREATE CONSTRAINT TRIGGER {refresh_constraint} AFTER INSERT ON {refresh_table}
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE {refresh_function}();
END
$$
""".strip()
yield f"""
COMMENT ON FUNCTION {setup_function} IS {SqlString(f"Set up temp tables for {id}")}
""".strip()
class DeferredKeys(KeyConsumer):
def __init__(self, key: typing.List[str], structure: Structure):
self._key = key
self._structure = structure
def sql(
self,
key_query: str,
table_id: str,
exprs: typing.List[SqlTableExpr] = [],
last_expr: typing.Optional[str] = None,
):
setup_function = self._structure.setup_function()
refresh_table = self._structure.refresh_table()
query = upsert_query(
columns=self._key,
key=self._key,
query=key_query,
target=self._structure.key_table(),
)
for expr in reversed(exprs):
query.prepend(expr)
if last_expr is not None:
query.append(SqlId("_other"), last_expr)
return f"""
PERFORM {setup_function}();
{query};
INSERT INTO {refresh_table}
SELECT
WHERE NOT EXISTS (TABLE {refresh_table});
""".strip()
| true | true |
f7146a3b8bd5c67332e642ddcaf6c0846b506b4b | 1,568 | py | Python | harstorage/config/environment.py | beenanner/harstorage | c45e735d9d28cb951e70d0c783d5678996ef31ad | [
"Apache-2.0"
] | null | null | null | harstorage/config/environment.py | beenanner/harstorage | c45e735d9d28cb951e70d0c783d5678996ef31ad | [
"Apache-2.0"
] | null | null | null | harstorage/config/environment.py | beenanner/harstorage | c45e735d9d28cb951e70d0c783d5678996ef31ad | [
"Apache-2.0"
] | null | null | null | import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
import harstorage.lib.app_globals as app_globals
import harstorage.lib.helpers
from harstorage.config.routing import make_map
def load_environment(global_conf, app_conf):
    """Configure the Pylons environment via the ``pylons.config`` object.

    Args:
        global_conf: global (DEFAULT-section) Paste Deploy settings.
        app_conf: application-local Paste Deploy settings.
    Returns:
        The populated PylonsConfig object.
    """
    config = PylonsConfig()
    # Pylons paths: project root plus the conventional sub-directories.
    root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    paths = dict(root=root,
                 controllers=os.path.join(root, "controllers"),
                 static_files=os.path.join(root, "public"),
                 templates=[os.path.join(root, "templates")])
    # Initialize config with the basic options
    config.init_app(global_conf, app_conf, package="harstorage", paths=paths)
    config["routes.map"] = make_map(config)
    config["pylons.app_globals"] = app_globals.Globals(config)
    config["pylons.h"] = harstorage.lib.helpers
    # Setup cache object as early as possible
    import pylons
    pylons.cache._push_object(config["pylons.app_globals"].cache)
    # Create the Mako TemplateLookup, with the default auto-escaping
    config["pylons.app_globals"].mako_lookup = TemplateLookup(
        directories=paths["templates"],
        error_handler=handle_mako_error,
        module_directory=os.path.join(app_conf["cache_dir"], "templates"),
        input_encoding="utf-8",
        default_filters=["escape"],
        imports=["from webhelpers.html import escape"])
    return config
| 34.844444 | 77 | 0.711097 | import os
from mako.lookup import TemplateLookup
from pylons.configuration import PylonsConfig
from pylons.error import handle_mako_error
import harstorage.lib.app_globals as app_globals
import harstorage.lib.helpers
from harstorage.config.routing import make_map
def load_environment(global_conf, app_conf):
config = PylonsConfig()
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, "controllers"),
static_files=os.path.join(root, "public"),
templates=[os.path.join(root, "templates")])
config.init_app(global_conf, app_conf, package="harstorage", paths=paths)
config["routes.map"] = make_map(config)
config["pylons.app_globals"] = app_globals.Globals(config)
config["pylons.h"] = harstorage.lib.helpers
import pylons
pylons.cache._push_object(config["pylons.app_globals"].cache)
config["pylons.app_globals"].mako_lookup = TemplateLookup(
directories=paths["templates"],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf["cache_dir"], "templates"),
input_encoding="utf-8",
default_filters=["escape"],
imports=["from webhelpers.html import escape"])
return config
| true | true |
f7146ac1b88ddd7645aeea7aa509f16d3d4bf02a | 553 | py | Python | code/exampleStrats/forgivingGrimTrigger.py | Texashokies/PrisonersDilemmaTournament | 096c131bc774cc5efd29a3b723f9f73ab5a874be | [
"MIT"
] | null | null | null | code/exampleStrats/forgivingGrimTrigger.py | Texashokies/PrisonersDilemmaTournament | 096c131bc774cc5efd29a3b723f9f73ab5a874be | [
"MIT"
] | null | null | null | code/exampleStrats/forgivingGrimTrigger.py | Texashokies/PrisonersDilemmaTournament | 096c131bc774cc5efd29a3b723f9f73ab5a874be | [
"MIT"
] | null | null | null | # Strategy known as "Forrgviing Grim Trigger" or "Grudger".
# We will cooperate repeatedly until our opponent betrays us twice.
# Then, we will get angry and defect for the rest of time.
# Memory is the number of times the strategy has been wronged
def strategy(history, memory):
    """Forgiving Grim Trigger ("Grudger" with one free pass).

    Cooperate until the opponent has defected against us twice in total,
    then defect forever.  ``memory`` carries the running betrayal count
    between calls and is reset on the first round (empty history).
    Returns (move, betrayal_count) where move is 1 = cooperate, 0 = defect.
    """
    rounds_played = history.shape[1]
    # Start (or restart) the betrayal counter on the very first round.
    betrayals = 0 if rounds_played == 0 else memory
    # Row 1 holds the opponent's moves; a 0 in the last column means they
    # just defected on us.
    if rounds_played >= 1 and history[1, -1] == 0:
        betrayals += 1
    move = 0 if betrayals >= 2 else 1
    return move, betrayals
| 32.529412 | 73 | 0.625678 |
def strategy(history, memory):
wronged = memory
if history.shape[1] ==0:
wronged = 0
if history.shape[1] >= 1 and history[1,-1] == 0:
wronged += 1
if wronged >= 2:
return 0, wronged
else:
return 1, wronged
| true | true |
f7146c422cdc8ea2780342f0d121cce5a78ee0fb | 516 | py | Python | src/apps/datasets/views.py | binfeng1018/competitions-v2 | 173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c | [
"Apache-2.0"
] | 19 | 2018-07-27T19:14:10.000Z | 2021-12-08T16:34:42.000Z | src/apps/datasets/views.py | binfeng1018/competitions-v2 | 173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c | [
"Apache-2.0"
] | 516 | 2017-07-27T15:45:43.000Z | 2022-02-10T07:57:46.000Z | src/apps/datasets/views.py | binfeng1018/competitions-v2 | 173ea6053b7eda5de3a9f1a687dfb0d43bfc4e9c | [
"Apache-2.0"
] | 16 | 2018-01-01T19:07:01.000Z | 2021-09-17T07:59:59.000Z | from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from datasets.models import Data
from utils.data import make_url_sassy
class DataManagement(LoginRequiredMixin, TemplateView):
    """Login-protected page rendering the dataset management template."""
    template_name = 'datasets/management.html'
def download(request, key):
    """Redirect to a URL for the dataset file identified by *key*.

    Raises Http404 (via get_object_or_404) when no Data row matches.
    """
    data = get_object_or_404(Data, key=key)
    # make_url_sassy presumably produces a signed storage URL -- see utils.data.
    return HttpResponseRedirect(make_url_sassy(data.data_file.name))
| 30.352941 | 68 | 0.827519 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.views.generic import TemplateView
from datasets.models import Data
from utils.data import make_url_sassy
class DataManagement(LoginRequiredMixin, TemplateView):
template_name = 'datasets/management.html'
def download(request, key):
data = get_object_or_404(Data, key=key)
return HttpResponseRedirect(make_url_sassy(data.data_file.name))
| true | true |
f7146d93278ba64bba1fb20d47d61c403f8494f0 | 32,956 | py | Python | qa/rpc-tests/fundrawtransaction.py | Neslin247/Draupnir | 1ffd83f4d96be293a6bceb5620d6daf7cb892e42 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction.py | Neslin247/Draupnir | 1ffd83f4d96be293a6bceb5620d6daf7cb892e42 | [
"MIT"
] | null | null | null | qa/rpc-tests/fundrawtransaction.py | Neslin247/Draupnir | 1ffd83f4d96be293a6bceb5620d6daf7cb892e42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
    """Return the first unspent output whose 'amount' equals *amount*.

    Raises AssertionError when no entry in *listunspent* matches.
    """
    match = next((utx for utx in listunspent if utx['amount'] == amount), None)
    if match is None:
        raise AssertionError('Could not find unspent with amount={}'.format(amount))
    return match
class RawTransactionsTest(BitcoinTestFramework):
    def __init__(self):
        """Configure the framework: start from a fresh chain, with 4 nodes."""
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 4
    def setup_network(self, split=False):
        """Start all nodes and connect them in a partial mesh.

        ``split`` is accepted for interface compatibility but ignored;
        the network is always left unsplit.
        """
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
    def run_test(self):
        """Exercise the fundrawtransaction RPC end to end.

        Covers: simple funding with varying input/output counts, exact-match
        change suppression, invalid options/addresses, explicit change address
        and position, multisig funding, watch-only funding, encrypted/locked
        wallets, many-input coin selection, OP_RETURN-only transactions, and
        the feeRate / reserveChangeKey / subtractFeeFromOutputs options.
        Fees are cross-checked against equivalent sendtoaddress/sendmany calls.
        """
        print("Mining blocks...")
        min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        # if the fee's positive delta is higher than this value tests will fail,
        # neg. delta always fail the tests.
        # The size of the signature of every input may be at most 2 bytes larger
        # than a minimum sized signature.
        # = 2 bytes * minRelayTxFeePerByte
        feeTolerance = 2 * min_relay_tx_fee/1000
        self.nodes[2].generate(1)
        self.sync_all()
        self.nodes[0].generate(121)
        self.sync_all()
        # ensure that setting changePosition in fundraw with an exact match is handled properly
        rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500000})
        rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
        assert_equal(rawmatch["changepos"], -1)
        # Seed node 2 with three known-size UTXOs used by the targeted tests below.
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
        self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
        self.nodes[0].generate(1)
        self.sync_all()
        ###############
        # simple test #
        ###############
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 1.0 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 22 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
        ##############################
        # simple test with two coins #
        ##############################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 26 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert(len(dec_tx['vin']) > 0)
        # Funded-but-unsigned inputs carry an empty scriptSig.
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        ################################
        # simple test with two outputs #
        ################################
        inputs = [ ]
        outputs = { self.nodes[0].getnewaddress() : 26, self.nodes[1].getnewaddress() : 25 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert(len(dec_tx['vin']) > 0)
        assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
        #########################################################################
        # test a fundrawtransaction with a VIN greater than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        #####################################################################
        # test a fundrawtransaction with which will not get a change output #
        #####################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        # Draupnir: Fee is exact, do not use tolerance
        outputs = { self.nodes[0].getnewaddress() : Decimal(50) - fee }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
        # Exact-amount spend: no change output should have been appended.
        assert_equal(rawtxfund['changepos'], -1)
        assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
        ####################################################
        # test a fundrawtransaction with an invalid option #
        ####################################################
        utx = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_jsonrpc(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
        ############################################################
        # test a fundrawtransaction with an invalid change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : Decimal(40) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_raises_jsonrpc(-5, "changeAddress must be a valid draupnir address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
        ############################################################
        # test a fundrawtransaction with a provided change address #
        ############################################################
        utx = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
        # Draupnir: Reduce this output so the fee doesn't leave us with no change
        outputs = { self.nodes[0].getnewaddress() : Decimal(25) }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        change = self.nodes[2].getnewaddress()
        # changePosition 2 is out of range for a 2-output tx -> must be rejected.
        assert_raises_jsonrpc(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        out = dec_tx['vout'][0]
        assert_equal(change, out['scriptPubKey']['addresses'][0])
        #########################################################################
        # test a fundrawtransaction with a VIN smaller than the required amount #
        #########################################################################
        utx = get_unspent(self.nodes[2].listunspent(), 10)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
        outputs = { self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        # 4-byte version + 1-byte vin count + 36-byte prevout then script_len
        # Patch a 1-byte "00" scriptSig into the raw hex to check it survives funding.
        rawtx = rawtx[:82] + "0100" + rawtx[84:]
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for i, out in enumerate(dec_tx['vout']):
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
            else:
                assert_equal(i, rawtxfund['changepos'])
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        ###########################################
        # test a fundrawtransaction with two VINs #
        ###########################################
        utx = get_unspent(self.nodes[2].listunspent(), 10)
        utx2 = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 60 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 1)
        assert_equal(len(dec_tx['vout']), 2)
        matchingIns = 0
        for vinOut in dec_tx['vin']:
            for vinIn in inputs:
                if vinIn['txid'] == vinOut['txid']:
                    matchingIns+=1
        assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
        #########################################################
        # test a fundrawtransaction with two VINs and two vOUTs #
        #########################################################
        utx = get_unspent(self.nodes[2].listunspent(), 10)
        utx2 = get_unspent(self.nodes[2].listunspent(), 50)
        inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
        outputs = { self.nodes[0].getnewaddress() : 60, self.nodes[0].getnewaddress() : 10 }
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        fee = rawtxfund['fee']
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        totalOut = 0
        matchingOuts = 0
        for out in dec_tx['vout']:
            totalOut += out['value']
            if out['scriptPubKey']['addresses'][0] in outputs:
                matchingOuts+=1
        assert_equal(matchingOuts, 2)
        assert_equal(len(dec_tx['vout']), 3)
        ##############################################
        # test a fundrawtransaction with invalid vin #
        ##############################################
        listunspent = self.nodes[2].listunspent()
        inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
        outputs = { self.nodes[0].getnewaddress() : 10}
        rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        inputs = []
        outputs = {self.nodes[1].getnewaddress():11}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction with multiple outputs
        inputs = []
        outputs = {self.nodes[1].getnewaddress():110,self.nodes[1].getnewaddress():120,self.nodes[1].getnewaddress():10,self.nodes[1].getnewaddress():130,self.nodes[1].getnewaddress():20,self.nodes[1].getnewaddress():30}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendmany("", outputs)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a 2of2 multisig p2sh transaction
        # create 2of2 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        inputs = []
        outputs = {mSigObj:1.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        #compare fee of a standard pubkeyhash transaction
        # create 4of5 addr
        addr1 = self.nodes[1].getnewaddress()
        addr2 = self.nodes[1].getnewaddress()
        addr3 = self.nodes[1].getnewaddress()
        addr4 = self.nodes[1].getnewaddress()
        addr5 = self.nodes[1].getnewaddress()
        addr1Obj = self.nodes[1].validateaddress(addr1)
        addr2Obj = self.nodes[1].validateaddress(addr2)
        addr3Obj = self.nodes[1].validateaddress(addr3)
        addr4Obj = self.nodes[1].validateaddress(addr4)
        addr5Obj = self.nodes[1].validateaddress(addr5)
        mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])
        inputs = []
        outputs = {mSigObj:1.1}
        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[0].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[0].sendtoaddress(mSigObj, 11)
        signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance)
        ############################################################
        ############################################################
        # spend a 2of2 multisig transaction over fundraw
        # create 2of2 addr
        addr1 = self.nodes[2].getnewaddress()
        addr2 = self.nodes[2].getnewaddress()
        addr1Obj = self.nodes[2].validateaddress(addr1)
        addr2Obj = self.nodes[2].validateaddress(addr2)
        mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])
        # send 1.2 BTC to msig addr
        txId = self.nodes[0].sendtoaddress(mSigObj, 12)
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        oldBalance = self.nodes[1].getbalance()
        inputs = []
        outputs = {self.nodes[1].getnewaddress():11}
        rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[2].fundrawtransaction(rawTx)
        signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
        self.sync_all()
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('11.0000000'), self.nodes[1].getbalance())
        ############################################################
        # locked wallet test
        self.nodes[1].encryptwallet("test")
        self.nodes.pop(1)
        # After pop(1) the list is [node0, node2, node3]; the second argument is
        # the original datadir index, hence the 0/2/3 numbering below.
        stop_node(self.nodes[0], 0)
        stop_node(self.nodes[1], 2)
        stop_node(self.nodes[2], 3)
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
        # This test is not meant to test fee estimation and we'd like
        # to be sure all txs are sent at a consistent desired feerate
        for node in self.nodes:
            node.settxfee(min_relay_tx_fee)
        connect_nodes_bi(self.nodes,0,1)
        connect_nodes_bi(self.nodes,1,2)
        connect_nodes_bi(self.nodes,0,2)
        connect_nodes_bi(self.nodes,0,3)
        self.is_network_split=False
        self.sync_all()
        # drain the keypool
        self.nodes[1].getnewaddress()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        # fund a transaction that requires a new key for the change output
        # creating the key must be impossible because the wallet is locked
        # NOTE(review): this passes `rawtx` (the stale invalid-vin tx built far
        # above), not the `rawTx` created just before — looks like a typo, but
        # correcting it would likely change the expected error (keypool
        # exhaustion rather than "Insufficient funds"); confirm intent against
        # a live daemon before changing.
        assert_raises_jsonrpc(-4, "Insufficient funds", self.nodes[1].fundrawtransaction, rawtx)
        #refill the keypool
        self.nodes[1].walletpassphrase("test", 100)
        self.nodes[1].walletlock()
        assert_raises_jsonrpc(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 12)
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():11}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #now we need to unlock
        self.nodes[1].walletpassphrase("test", 600)
        signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
        self.nodes[1].generate(1)
        self.sync_all()
        # make sure funds are received at node1
        assert_equal(oldBalance+Decimal('500011.00000000'), self.nodes[0].getbalance())
        ###############################################
        # multiple (~19) inputs tx test | Compare fee #
        ###############################################
        #empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 20)
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        inputs = []
        # Draupnir: TX size rounding gives us a fee of 4 RINGS
        outputs = {self.nodes[0].getnewaddress():15,self.nodes[0].getnewaddress():4}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        #create same transaction over sendtoaddress
        txId = self.nodes[1].sendmany("", outputs)
        signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
        #compare fee
        feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
        assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
        #############################################
        # multiple (~19) inputs tx test | sign/send #
        #############################################
        #again, empty node1, send some small coins from node0 to node1
        self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        for i in range(0,20):
            self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 2)
        self.nodes[0].generate(1)
        self.sync_all()
        #fund a tx with ~20 small inputs
        oldBalance = self.nodes[0].getbalance()
        inputs = []
        outputs = {self.nodes[0].getnewaddress():15,self.nodes[0].getnewaddress():4}
        rawTx = self.nodes[1].createrawtransaction(inputs, outputs)
        fundedTx = self.nodes[1].fundrawtransaction(rawTx)
        fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
        txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
        self.sync_all()
        self.nodes[0].generate(1)
        self.sync_all()
        assert_equal(oldBalance+Decimal('500019.00000000'), self.nodes[0].getbalance()) #19+block reward
        #####################################################
        # test fundrawtransaction with OP_RETURN and no vin #
        #####################################################
        rawtx = "0100000000010000000000000000066a047465737400000000"
        dec_tx = self.nodes[2].decoderawtransaction(rawtx)
        assert_equal(len(dec_tx['vin']), 0)
        assert_equal(len(dec_tx['vout']), 1)
        rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
        dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
        assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
        assert_equal(len(dec_tx['vout']), 2) # one change output added
        ##################################################
        # test a fundrawtransaction using only watchonly #
        ##################################################
        watchonly_address = self.nodes[0].getnewaddress()
        watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
        watchonly_amount = Decimal(2000)
        self.nodes[3].importpubkey(watchonly_pubkey, "", True)
        watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
        self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
        self.nodes[0].generate(1)
        self.sync_all()
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 1)
        assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
        assert("fee" in result.keys())
        assert_greater_than(result["changepos"], -1)
        ###############################################################
        # test fundrawtransaction using the entirety of watched funds #
        ###############################################################
        inputs = []
        outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        # Backward compatibility test (2nd param is includeWatching)
        result = self.nodes[3].fundrawtransaction(rawtx, True)
        res_dec = self.nodes[0].decoderawtransaction(result["hex"])
        assert_equal(len(res_dec["vin"]), 2)
        assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
        assert_greater_than(result["fee"], 0)
        assert_greater_than(result["changepos"], -1)
        assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
        # node3 can only partially sign (it merely watches the pubkey);
        # node0 owns the key and completes the signature.
        signedtx = self.nodes[3].signrawtransaction(result["hex"])
        assert(not signedtx["complete"])
        signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
        assert(signedtx["complete"])
        self.nodes[0].sendrawtransaction(signedtx["hex"])
        self.nodes[0].generate(1)
        self.sync_all()
        #######################
        # Test feeRate option #
        #######################
        # Make sure there is exactly one input so coin selection can't skew the result
        assert_equal(len(self.nodes[3].listunspent(1)), 1)
        inputs = []
        outputs = {self.nodes[3].getnewaddress() : 1}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
        result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
        result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
        result_fee_rate = result['fee'] * 1000 / round_tx_size(count_bytes(result['hex']))
        assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
        assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
        #############################
        # Test address reuse option #
        #############################
        result3 = self.nodes[3].fundrawtransaction(rawtx, {"reserveChangeKey": False})
        res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
        changeaddress = ""
        for out in res_dec['vout']:
            if out['value'] > 1.0:
                changeaddress += out['scriptPubKey']['addresses'][0]
        assert(changeaddress != "")
        nextaddr = self.nodes[3].getnewaddress()
        # frt should not have removed the key from the keypool
        assert(changeaddress == nextaddr)
        result3 = self.nodes[3].fundrawtransaction(rawtx)
        res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
        changeaddress = ""
        for out in res_dec['vout']:
            if out['value'] > 1.0:
                changeaddress += out['scriptPubKey']['addresses'][0]
        assert(changeaddress != "")
        nextaddr = self.nodes[3].getnewaddress()
        # Now the change address key should be removed from the keypool
        assert(changeaddress != nextaddr)
        ######################################
        # Test subtractFeeFromOutputs option #
        ######################################
        # Make sure there is exactly one input so coin selection can't skew the result
        assert_equal(len(self.nodes[3].listunspent(1)), 1)
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): 10}
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
                  self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
                  self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
        dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
        output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
        change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
        assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
        assert_equal(result[3]['fee'], result[4]['fee'])
        assert_equal(change[0], change[1])
        assert_equal(output[0], output[1])
        assert_equal(output[0], output[2] + result[2]['fee'])
        assert_equal(change[0] + result[0]['fee'], change[2])
        assert_equal(output[3], output[4] + result[4]['fee'])
        assert_equal(change[3] + result[3]['fee'], change[4])
        inputs = []
        outputs = {self.nodes[2].getnewaddress(): value for value in (10, 11, 12, 13)}
        keys = list(outputs.keys())
        rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
        result = [self.nodes[3].fundrawtransaction(rawtx),
                  # split the fee between outputs 0, 2, and 3, but not output 1
                  self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
        dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
                  self.nodes[3].decoderawtransaction(result[1]['hex'])]
        # Nested list of non-change output amounts for each transaction
        output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
                  for d, r in zip(dec_tx, result)]
        # List of differences in output amounts between normal and subtractFee transactions
        share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
        # output 1 is the same in both transactions
        assert_equal(share[1], 0)
        # the other 3 outputs are smaller as a result of subtractFeeFromOutputs
        assert_greater_than(share[0], 0)
        assert_greater_than(share[2], 0)
        assert_greater_than(share[3], 0)
        # outputs 2 and 3 take the same share of the fee
        assert_equal(share[2], share[3])
        # output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
        assert_greater_than_or_equal(share[0], share[2])
        assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
        # the fee is the same in both transactions
        assert_equal(result[0]['fee'], result[1]['fee'])
        # the total subtracted from the outputs is equal to the fee
        assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
# Script entry point: run the whole fundrawtransaction functional test suite.
if __name__ == '__main__':
    RawTransactionsTest().main()
| 44.176944 | 220 | 0.571155 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self, split=False):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.is_network_split=False
self.sync_all()
def run_test(self):
print("Mining blocks...")
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
# if the fee's positive delta is higher than this value tests will fail,
feeTolerance = 2 * min_relay_tx_fee/1000
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():500000})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 15)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 50)
self.nodes[0].generate(1)
self.sync_all()
ransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
| true | true |
f7146e28902a4ba376e0f04a00b98ce31bf4575b | 28,745 | py | Python | ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py | umbrio/attack_range | 8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6 | [
"Apache-2.0"
] | 12 | 2017-04-04T11:51:09.000Z | 2021-11-05T02:07:58.000Z | ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py | umbrio/attack_range | 8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6 | [
"Apache-2.0"
] | 21 | 2017-03-28T04:32:54.000Z | 2021-09-01T03:52:53.000Z | ansible/roles/search_head/files/timestamp_app/lib/splunklib/searchcommands/internals.py | umbrio/attack_range | 8bf1bbe5f6db051d3c8cf5d3f3c07cc38ca85bf6 | [
"Apache-2.0"
] | 9 | 2018-07-31T04:15:22.000Z | 2020-10-06T13:43:22.000Z | # coding=utf-8
#
# Copyright © 2011-2015 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
try:
from collections import OrderedDict # must be python 2.7
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
import warnings
from . import environment
csv.field_size_limit(10485760) # The default value is 128KB; upping to 10MB. See SPL-12117 for background on this issue
def set_binary_mode(fh):
    """ Helper method to set up binary mode for file handles.
    Emphasis being sys.stdin, sys.stdout, sys.stderr.
    For python3, we want to return .buffer
    For python2+windows we want to set os.O_BINARY

    Returns the handle (or its binary ``.buffer``) ready for byte I/O; a
    non-file argument is returned unchanged.
    """
    # On Python 2 the handle type is the builtin `file`; the conditional
    # expression evaluates only the selected branch, so the `file` name is
    # never looked up on Python 3.
    typefile = TextIOWrapper if sys.version_info >= (3, 0) else file
    # check for file handle
    if not isinstance(fh, typefile):
        return fh
    # check for python3 and buffer
    if sys.version_info >= (3, 0) and hasattr(fh, 'buffer'):
        return fh.buffer
    # check for python3
    elif sys.version_info >= (3, 0):
        pass
    # check for windows python2. SPL-175233 -- python3 stdout is already binary
    elif sys.platform == 'win32':
        # Work around the fact that on Windows '\n' is mapped to '\r\n'. The typical solution is to simply open files in
        # binary mode, but stdout is already open, thus this hack. 'CPython' and 'PyPy' work differently. We assume that
        # all other Python implementations are compatible with 'CPython'. This might or might not be a valid assumption.
        from platform import python_implementation
        implementation = python_implementation()
        if implementation == 'PyPy':
            # PyPy has no working msvcrt.setmode; reopen the descriptor
            # unbuffered in binary write mode instead.
            return os.fdopen(fh.fileno(), 'wb', 0)
        else:
            import msvcrt
            msvcrt.setmode(fh.fileno(), os.O_BINARY)
    return fh
class CommandLineParser(object):
    r""" Parses the arguments to a search command.

    A search command line is described by the following syntax.

    **Syntax**::

       command = command-name *[wsp option] *[wsp [dquote] field-name [dquote]]
       command-name = alpha *( alpha / digit )
       option = option-name [wsp] "=" [wsp] option-value
       option-name = alpha *( alpha / digit / "_" )
       option-value = word / quoted-string
       word = 1*( %01-%08 / %0B / %0C / %0E-1F / %21 / %23-%FF ) ; Any character but DQUOTE and WSP
       quoted-string = dquote *( word / wsp / "\" dquote / dquote dquote ) dquote
       field-name = ( "_" / alpha ) *( alpha / digit / "_" / "." / "-" )

    **Note:**
    This syntax is constrained to an 8-bit character set.

    **Note:**
    This syntax does not show that `field-name` values may be comma-separated when in fact they can be. This is
    because Splunk strips commas from the command line. A custom search command will never see them.

    **Example:**
    countmatches fieldname = word_count pattern = \w+ some_text_field

    Option names are mapped to properties in the targeted ``SearchCommand``. It is the responsibility of the property
    setters to validate the values they receive. Property setters may also produce side effects. For example,
    setting the built-in `log_level` immediately changes the `log_level`.
    """
    @classmethod
    def parse(cls, command, argv):
        """ Splits an argument list into an options dictionary and a fieldname
        list.

        The argument list, `argv`, must be of the form::

            *[option]... *[<field-name>]

        Options are validated and assigned to items in `command.options`. Field names are validated and stored in the
        list of `command.fieldnames`.

        :param command: Search command instance.
        :type command: ``SearchCommand``
        :param argv: List of search command arguments.
        :type argv: ``list``
        :return: ``None``

        :raises SyntaxError: Argument list is incorrectly formed.
        :raises ValueError: Unrecognized option/field name, or an illegal field value.
        """
        debug = environment.splunklib_logger.debug
        command_class = type(command).__name__
        # Prepare: reset prior parse state, then re-join argv so a single
        # anchored regular expression can validate the whole command line.
        debug('Parsing %s command line: %r', command_class, argv)
        command.fieldnames = None
        command.options.reset()
        argv = ' '.join(argv)
        command_args = cls._arguments_re.match(argv)
        if command_args is None:
            raise SyntaxError('Syntax error: {}'.format(argv))
        # Parse options: each name must already exist in command.options; the
        # property setter performs any further validation of the value.
        for option in cls._options_re.finditer(command_args.group('options')):
            name, value = option.group('name'), option.group('value')
            if name not in command.options:
                raise ValueError(
                    'Unrecognized {} command option: {}={}'.format(command.name, name, json_encode_string(value)))
            command.options[name].value = cls.unquote(value)
        missing = command.options.get_missing()
        if missing is not None:
            if len(missing) > 1:
                raise ValueError(
                    'Values for these {} command options are required: {}'.format(command.name, ', '.join(missing)))
            raise ValueError('A value for {} command option {} is required'.format(command.name, missing[0]))
        # Parse field names (possibly quoted); absent fieldnames yield [].
        fieldnames = command_args.group('fieldnames')
        if fieldnames is None:
            command.fieldnames = []
        else:
            command.fieldnames = [cls.unquote(value.group(0)) for value in cls._fieldnames_re.finditer(fieldnames)]
        debug('  %s: %s', command_class, command)
    @classmethod
    def unquote(cls, string):
        """ Removes quotes from a quoted string.

        Splunk search command quote rules are applied. The enclosing double-quotes, if present, are removed. Escaped
        double-quotes ('\"' or '""') are replaced by a single double-quote ('"').

        **NOTE**

        We are not using a json.JSONDecoder because Splunk quote rules are different than JSON quote rules. A
        json.JSONDecoder does not recognize a pair of double-quotes ('""') as an escaped quote ('"') and will
        decode single-quoted strings ("'") in addition to double-quoted ('"') strings.
        """
        if len(string) == 0:
            return ''
        if string[0] == '"':
            # Strip the enclosing quotes; a lone or unterminated quote is an error.
            if len(string) == 1 or string[-1] != '"':
                raise SyntaxError('Poorly formed string literal: ' + string)
            string = string[1:-1]
        if len(string) == 0:
            return ''
        def replace(match):
            # Map '""' -> '"' and '\x' -> 'x'; a trailing lone escape is an error.
            value = match.group(0)
            if value == '""':
                return '"'
            if len(value) < 2:
                raise SyntaxError('Poorly formed string literal: ' + string)
            return value[1]
        result = re.sub(cls._escaped_character_re, replace, string)
        return result
    # region Class variables
    _arguments_re = re.compile(r"""
    ^\s*
    (?P<options>     # Match a leading set of name/value pairs
    (?:
    (?:(?=\w)[^\d]\w*)                   # name
    \s*=\s*                              # =
    (?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*  # value
    )*
    )\s*
    (?P<fieldnames>  # Match a trailing set of field names
    (?:
    (?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*
    )*
    )\s*$
    """, re.VERBOSE | re.UNICODE)
    _escaped_character_re = re.compile(r'(\\.|""|[\\"])')
    _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
    _options_re = re.compile(r"""
    # Captures a set of name/value pairs when used with re.finditer
    (?P<name>(?:(?=\w)[^\d]\w*))                   # name
    \s*=\s*                                        # =
    (?P<value>"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)  # value
    """, re.VERBOSE | re.UNICODE)
    # endregion
class ConfigurationSettingsType(type):
    """ Metaclass for constructing ConfigurationSettings classes.

    Instances of :class:`ConfigurationSettingsType` construct :class:`ConfigurationSettings` classes from classes from
    a base :class:`ConfigurationSettings` class and a dictionary of configuration settings. The settings in the
    dictionary are validated against the settings in the base class. You cannot add settings, you can only change their
    backing-field values and you cannot modify settings without backing-field values. These are considered fixed
    configuration setting values.

    This is an internal class used in two places:

    + :meth:`decorators.Configuration.__call__`
      Adds a ConfigurationSettings attribute to a :class:`SearchCommand` class.

    + :meth:`reporting_command.ReportingCommand.fix_up`
      Adds a ConfigurationSettings attribute to a :meth:`ReportingCommand.map` method, if there is one.
    """
    def __new__(mcs, module, name, bases):
        # Create the class with an empty body ({}); the caller attaches the
        # validated settings afterwards.
        mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
        return mcs
    def __init__(cls, module, name, bases):
        super(ConfigurationSettingsType, cls).__init__(name, bases, None)
        # Report the constructed class as belonging to the requesting module.
        cls.__module__ = module
    @staticmethod
    def validate_configuration_setting(specification, name, value):
        # Returns value if it satisfies specification; raises ValueError otherwise.
        if not isinstance(value, specification.type):
            # specification.type may be a single type or a tuple of types;
            # build a readable name list either way for the error message.
            if isinstance(specification.type, type):
                type_names = specification.type.__name__
            else:
                type_names = ', '.join(imap(lambda t: t.__name__, specification.type))
            raise ValueError('Expected {} value, not {}={}'.format(type_names, name, repr(value)))
        if specification.constraint and not specification.constraint(value):
            raise ValueError('Illegal value: {}={}'.format(name, repr(value)))
        return value
    # A validation rule for one setting: the expected type, an optional value
    # constraint predicate, and the protocol versions that honor the setting.
    specification = namedtuple(
        'ConfigurationSettingSpecification', (
            'type',
            'constraint',
            'supporting_protocols'))
    # P1 [ ] TODO: Review ConfigurationSettingsType.specification_matrix for completeness and correctness
    specification_matrix = {
        'clear_required_fields': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'distributed': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[2]),
        'generates_timeorder': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'generating': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1, 2]),
        'local': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'maxinputs': specification(
            type=int,
            constraint=lambda value: 0 <= value <= six.MAXSIZE,
            supporting_protocols=[2]),
        'overrides_timeorder': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'required_fields': specification(
            type=(list, set, tuple),
            constraint=None,
            supporting_protocols=[1, 2]),
        'requires_preop': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'retainsevents': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'run_in_preview': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[2]),
        'streaming': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'streaming_preop': specification(
            type=(bytes, six.text_type),
            constraint=None,
            supporting_protocols=[1, 2]),
        'type': specification(
            type=(bytes, six.text_type),
            constraint=lambda value: value in ('events', 'reporting', 'streaming'),
            supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
    """ Describes the properties of Splunk CSV streams """
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    # NOTE(review): on Python 3 + Windows a bare '\n' is used instead --
    # presumably because the output stream translates newlines there; confirm
    # against how the writer's output file is opened.
    if sys.version_info >= (3, 0) and sys.platform == 'win32':
        lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL
class InputHeader(dict):
    """ Represents a Splunk input header as a collection of name/value pairs.

    """

    def __str__(self):
        return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])

    def read(self, ifile):
        """ Reads an input header from an input file.

        The input header is read as a sequence of *<name>***:***<value>* pairs separated by a newline. The end of the
        input header is signalled by an empty line or an end-of-file.

        :param ifile: File-like object that supports iteration over lines.

        """
        current_name = None
        current_value = None

        for line in ifile:
            if line == '\n':
                # A blank line terminates the header.
                break
            parts = line.split(':', 1)
            if len(parts) == 2:
                # A new item begins; commit the previous one, dropping its
                # trailing newline.
                if current_name is not None:
                    self[current_name] = current_value[:-1]
                current_name = parts[0]
                current_value = urllib.parse.unquote(parts[1])
            elif current_name is not None:
                # No colon: this line continues the current item's value.
                current_value += urllib.parse.unquote(line)

        if current_name is not None:
            # Commit the final item; it may lack a trailing newline at EOF.
            self[current_name] = current_value[:-1] if current_value[-1] == '\n' else current_value
Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
    """ JSON decoder that surfaces decoded objects as ObjectView instances. """
    def __init__(self):
        JSONDecoder.__init__(self, object_hook=self._object_hook)
    @staticmethod
    def _object_hook(dictionary):
        # Wraps `dictionary` -- and every dict nested inside it -- in
        # ObjectView, using an explicit FIFO work queue instead of recursion.
        # Each queue entry is (parent-dict, key-in-parent, dict-to-process);
        # the root entry has no parent, hence the (None, None, ...) seed.
        object_view = ObjectView(dictionary)
        stack = deque()
        stack.append((None, None, dictionary))
        while len(stack):
            instance, member_name, dictionary = stack.popleft()
            for name, value in six.iteritems(dictionary):
                if isinstance(value, dict):
                    stack.append((dictionary, name, value))
            if instance is not None:
                # Replace the nested dict in its parent with an ObjectView.
                instance[member_name] = ObjectView(dictionary)
        return object_view
class MetadataEncoder(JSONEncoder):
    """ JSON encoder that serializes ObjectView instances via their backing dict. """
    def __init__(self):
        JSONEncoder.__init__(self, separators=MetadataEncoder._separators)
    def default(self, o):
        # ObjectView adopts its dictionary as __dict__, so that dictionary is
        # the natural JSON representation; anything else defers to the base.
        return o.__dict__ if isinstance(o, ObjectView) else JSONEncoder.default(self, o)
    # Compact separators: no whitespace in the serialized output.
    _separators = (',', ':')
class ObjectView(object):
    """ Exposes the items of a dictionary as object attributes.

    The dictionary is adopted directly (not copied) as the instance's
    attribute store, so changes to either are visible through the other.
    """

    def __init__(self, dictionary):
        self.__dict__ = dictionary

    def __repr__(self):
        return repr(vars(self))

    def __str__(self):
        return str(vars(self))
class Recorder(object):
    """ Wraps a file-like object and tees all traffic through it into a
    gzip-compressed recording at ``path + '.gz'``.
    """

    def __init__(self, path, f):
        self._recording = gzip.open(path + '.gz', 'wb')
        self._file = f

    def __getattr__(self, name):
        # Delegate anything not implemented here to the wrapped file.
        return getattr(self._file, name)

    def __iter__(self):
        for item in self._file:
            self._recording.write(item)
            self._recording.flush()
            yield item

    def read(self, size=None):
        if size is None:
            data = self._file.read()
        else:
            data = self._file.read(size)
        self._recording.write(data)
        self._recording.flush()
        return data

    def readline(self, size=None):
        if size is None:
            data = self._file.readline()
        else:
            data = self._file.readline(size)
        # Record only non-empty reads (an empty result signals EOF).
        if data:
            self._recording.write(data)
            self._recording.flush()
        return data

    def record(self, *args):
        # Write to the recording only, not to the wrapped file.
        for chunk in args:
            self._recording.write(chunk)

    def write(self, text):
        self._recording.write(text)
        self._file.write(text)
        self._recording.flush()
class RecordWriter(object):
    """ Base class for writing records, messages, and metrics to a search
    command's output stream.

    Records are staged in an in-memory CSV buffer and counted as *pending*
    until a subclass :meth:`flush` commits them; messages and metrics
    accumulate in the ``_inspector`` ordered dictionary.
    """
    def __init__(self, ofile, maxresultrows=None):
        # Auto-flush threshold: flush(partial=True) fires once this many
        # records are pending (defaults to 50,000).
        self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
        self._ofile = set_binary_mode(ofile)
        self._fieldnames = None
        self._buffer = StringIO()
        self._writer = csv.writer(self._buffer, dialect=CsvDialect)
        self._writerow = self._writer.writerow
        self._finished = False
        self._flushed = False
        self._inspector = OrderedDict()
        self._chunk_count = 0
        self._pending_record_count = 0
        self._committed_record_count = 0
    @property
    def is_flushed(self):
        # True once a chunk has been written to the output stream.
        return self._flushed
    @is_flushed.setter
    def is_flushed(self, value):
        # Normalize any truthy value to a strict bool.
        self._flushed = True if value else False
    @property
    def ofile(self):
        # The underlying output file, forced into binary mode.
        return self._ofile
    @ofile.setter
    def ofile(self, value):
        self._ofile = set_binary_mode(value)
    @property
    def pending_record_count(self):
        # Records staged in the buffer but not yet committed.
        return self._pending_record_count
    @property
    def _record_count(self):
        warnings.warn(
            "_record_count will be deprecated soon. Use pending_record_count instead.",
            PendingDeprecationWarning
        )
        return self.pending_record_count
    @property
    def committed_record_count(self):
        # Records already written out by a flush.
        return self._committed_record_count
    @property
    def _total_record_count(self):
        warnings.warn(
            "_total_record_count will be deprecated soon. Use committed_record_count instead.",
            PendingDeprecationWarning
        )
        return self.committed_record_count
    def write(self, data):
        """ Writes *data* to the output file, UTF-8 encoding text first. """
        bytes_type = bytes if sys.version_info >= (3, 0) else str
        if not isinstance(data, bytes_type):
            data = data.encode('utf-8')
        self.ofile.write(data)
    def flush(self, finished=None, partial=None):
        """ Validates flush arguments and writer state.

        Exactly one of *finished* / *partial* must be a bool; the other must
        be None. Subclasses override this method to perform the actual I/O.
        """
        assert finished is None or isinstance(finished, bool)
        assert partial is None or isinstance(partial, bool)
        assert not (finished is None and partial is None)
        assert finished is None or partial is None
        self._ensure_validity()
    def write_message(self, message_type, message_text, *args, **kwargs):
        """ Queues a (severity, formatted text) message for later output. """
        self._ensure_validity()
        self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))
    def write_record(self, record):
        """ Stages a single record: a mapping of field name to value. """
        self._ensure_validity()
        self._write_record(record)
    def write_records(self, records):
        """ Stages an iterable of records. """
        self._ensure_validity()
        write_record = self._write_record
        for record in records:
            write_record(record)
    def _clear(self):
        # Reset the CSV buffer, the inspector, and the pending-record count.
        self._buffer.seek(0)
        self._buffer.truncate()
        self._inspector.clear()
        self._pending_record_count = 0
    def _ensure_validity(self):
        # Disallow any further writes after a finished flush.
        if self._finished is True:
            assert self._record_count == 0 and len(self._inspector) == 0
            raise RuntimeError('I/O operation on closed record writer')
    def _write_record(self, record):
        fieldnames = self._fieldnames
        if fieldnames is None:
            # Lazily derive the CSV header from the first record's keys; each
            # field gets a companion '__mv_<name>' column carrying the
            # multi-value encoding of that field.
            self._fieldnames = fieldnames = list(record.keys())
            value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
            self._writerow(list(chain.from_iterable(value_list)))
        get_value = record.get
        values = []
        for fieldname in fieldnames:
            value = get_value(fieldname, None)
            if value is None:
                values += (None, None)
                continue
            value_t = type(value)
            if issubclass(value_t, (list, tuple)):
                if len(value) == 0:
                    values += (None, None)
                    continue
                if len(value) > 1:
                    # Multi-value field: sv is the newline-joined plain form,
                    # mv the Splunk encoding '$v1$;$v2$...' with '$' doubled.
                    value_list = value
                    sv = ''
                    mv = '$'
                    for value in value_list:
                        if value is None:
                            sv += '\n'
                            mv += '$;$'
                            continue
                        value_t = type(value)
                        if value_t is not bytes:
                            if value_t is bool:
                                value = str(value.real)
                            elif value_t is six.text_type:
                                value = value
                            elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
                                value = str(value)
                            elif issubclass(value_t, (dict, list, tuple)):
                                value = str(''.join(RecordWriter._iterencode_json(value, 0)))
                            else:
                                # NOTE(review): this yields bytes, so the
                                # 'sv += value + "\n"' below would raise
                                # TypeError on Python 3 for such items --
                                # confirm the intended input types.
                                value = repr(value).encode('utf-8', errors='backslashreplace')
                        sv += value + '\n'
                        mv += value.replace('$', '$$') + '$;$'
                    # Trim the trailing '\n' from sv and the trailing '$;' from mv.
                    values += (sv[:-1], mv[:-2])
                    continue
                # Single-element list/tuple: unwrap and fall through to the
                # scalar handling below.
                value = value[0]
                value_t = type(value)
            if value_t is bool:
                values += (str(value.real), None)
                continue
            if value_t is bytes:
                values += (value, None)
                continue
            if value_t is six.text_type:
                if six.PY2:
                    value = value.encode('utf-8')
                values += (value, None)
                continue
            if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
                values += (str(value), None)
                continue
            if issubclass(value_t, dict):
                values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
                continue
            values += (repr(value), None)
        self._writerow(values)
        self._pending_record_count += 1
        if self.pending_record_count >= self._maxresultrows:
            self.flush(partial=True)
    # Bind a compact JSON iterencoder as a class attribute, preferring the
    # C-accelerated _json.make_encoder when it is available.
    try:
        # noinspection PyUnresolvedReferences
        from _json import make_encoder
    except ImportError:
        # We may be running under PyPy 2.5 which does not include the _json module
        _iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
    else:
        # Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
        from json.encoder import encode_basestring_ascii
        @staticmethod
        def _default(o):
            raise TypeError(repr(o) + ' is not JSON serializable')
        _iterencode_json = make_encoder(
            {},                       # markers (for detecting circular references)
            _default,                 # object_encoder
            encode_basestring_ascii,  # string_encoder
            None,                     # indent
            ':', ',',                 # separators
            False,                    # sort_keys
            False,                    # skip_keys
            True                      # allow_nan
        )
        del make_encoder
class RecordWriterV1(RecordWriter):
    """ RecordWriter for the legacy (protocol v1) output format.

    The first chunk carries queued messages in a header block; messages queued
    after the first chunk are printed to stderr instead.
    """
    def flush(self, finished=None, partial=None):
        RecordWriter.flush(self, finished, partial)  # validates arguments and the state of this instance
        # Write only when there is something to say: pending records, or
        # first-chunk messages.
        if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
            messages = self._inspector.get('messages')
            if self._chunk_count == 0:
                # Messages are written to the messages header when we write the first chunk of data
                # Guarantee: These messages are displayed by splunkweb and the job inspector
                if messages is not None:
                    message_level = RecordWriterV1._message_level.get
                    for level, text in messages:
                        # Emit '<mapped-level>=<text>\r\n'; unknown severities
                        # fall back to the severity string itself.
                        self.write(message_level(level, level))
                        self.write('=')
                        self.write(text)
                        self.write('\r\n')
                # Blank line terminates the messages header.
                self.write('\r\n')
            elif messages is not None:
                # Messages are written to the messages header when we write subsequent chunks of data
                # Guarantee: These messages are displayed by splunkweb and the job inspector, if and only if the
                # command is configured with
                #
                #       stderr_dest = message
                #
                # stderr_dest is a static configuration setting. This means that it can only be set in commands.conf.
                # It cannot be set in code.
                stderr = sys.stderr
                for level, text in messages:
                    print(level, text, file=stderr)
            # Commit the buffered CSV and reset the pending state.
            self.write(self._buffer.getvalue())
            self._chunk_count += 1
            self._committed_record_count += self.pending_record_count
            self._clear()
        self._finished = finished is True
    # Maps message severities to v1 messages-header keys.
    _message_level = {
        'DEBUG': 'debug_message',
        'ERROR': 'error_message',
        'FATAL': 'error_message',
        'INFO': 'info_message',
        'WARN': 'warn_message'
    }
class RecordWriterV2(RecordWriter):
    """ RecordWriter for the chunked (protocol v2 / SCP 2) output format.

    Each chunk is a 'chunked 1.0,<metadata-length>,<body-length>' header line
    followed by JSON metadata and a CSV body.
    """
    def flush(self, finished=None, partial=None):
        RecordWriter.flush(self, finished, partial)  # validates arguments and the state of this instance
        if partial or not finished:
            # Don't flush partial chunks, since the SCP v2 protocol does not
            # provide a way to send partial chunks yet.
            return
        if not self.is_flushed:
            self.write_chunk(finished=True)
    def write_chunk(self, finished=None):
        """ Commits pending records and inspector metadata as one chunk. """
        inspector = self._inspector
        self._committed_record_count += self.pending_record_count
        self._chunk_count += 1
        # TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
        # ChunkedExternProcessor (See SPL-103525)
        #
        # We will need to replace the following block of code with this block:
        #
        # metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
        #
        # if partial is True:
        #     finished = False
        if len(inspector) == 0:
            # An empty inspector is encoded as None so _write_chunk drops it.
            inspector = None
        metadata = [item for item in (('inspector', inspector), ('finished', finished))]
        self._write_chunk(metadata, self._buffer.getvalue())
        self._clear()
    def write_metadata(self, configuration):
        """ Writes a metadata-only chunk describing the command configuration. """
        self._ensure_validity()
        metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
        self._write_chunk(metadata, '')
        self.write('\n')
        self._clear()
    def write_metric(self, name, value):
        """ Records a metric value under the 'metric.<name>' inspector key. """
        self._ensure_validity()
        self._inspector['metric.' + name] = value
    def _clear(self):
        # Also reset the header so the next chunk re-derives its field names.
        super(RecordWriterV2, self)._clear()
        self._fieldnames = None
    def _write_chunk(self, metadata, body):
        if metadata:
            # Drop None-valued entries, then JSON-encode the remainder.
            metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
            if sys.version_info >= (3, 0):
                metadata = metadata.encode('utf-8')
            metadata_length = len(metadata)
        else:
            metadata_length = 0
        if sys.version_info >= (3, 0):
            body = body.encode('utf-8')
        body_length = len(body)
        if not (metadata_length > 0 or body_length > 0):
            # Nothing to send; emit no header at all.
            return
        start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
        self.write(start_line)
        self.write(metadata)
        self.write(body)
        self._ofile.flush()
        self._flushed = True
| 34.017751 | 120 | 0.586085 |
from __future__ import absolute_import, division, print_function
from io import TextIOWrapper
from collections import deque, namedtuple
from splunklib import six
try:
from collections import OrderedDict
except ImportError:
from ..ordereddict import OrderedDict
from splunklib.six.moves import StringIO
from itertools import chain
from splunklib.six.moves import map as imap
from json import JSONDecoder, JSONEncoder
from json.encoder import encode_basestring_ascii as json_encode_string
from splunklib.six.moves import urllib
import csv
import gzip
import os
import re
import sys
import warnings
from . import environment
csv.field_size_limit(10485760)
def set_binary_mode(fh):
    """ Helper method to set up binary mode for file handles.
    Emphasis being sys.stdin, sys.stdout, sys.stderr.
    For python3, we want to return .buffer
    For python2+windows we want to set os.O_BINARY
    """
    is_py3 = sys.version_info >= (3, 0)
    # The text-stream wrapper type differs between Python 3 and Python 2;
    # the 'file' builtin is only referenced on Python 2, where it exists.
    typefile = TextIOWrapper if is_py3 else file
    if not isinstance(fh, typefile):
        # Not a text handle at all -- leave it untouched.
        return fh
    if is_py3:
        # Python 3: the underlying binary stream is exposed as .buffer.
        if hasattr(fh, 'buffer'):
            return fh.buffer
    elif sys.platform == 'win32':
        # Python 2 on Windows maps '\n' to '\r\n' unless the handle is put in
        # binary mode; CPython and PyPy need different treatment.
        from platform import python_implementation
        if python_implementation() == 'PyPy':
            return os.fdopen(fh.fileno(), 'wb', 0)
        import msvcrt
        msvcrt.setmode(fh.fileno(), os.O_BINARY)
    return fh
class CommandLineParser(object):
    r""" Parses the arguments to a search command.

    Option names are mapped to properties in the targeted ``SearchCommand``;
    the property setters validate the values they receive.
    """
    @classmethod
    def parse(cls, command, argv):
        """ Splits an argument list into an options dictionary and a fieldname list.

        Options are validated and assigned to items in `command.options`; field
        names are validated and stored in `command.fieldnames`.

        :param command: Search command instance.
        :param argv: List of search command arguments.
        :return: ``None``
        :raises SyntaxError: Argument list is incorrectly formed.
        :raises ValueError: Unrecognized option name, or a missing required option.
        """
        debug = environment.splunklib_logger.debug
        command_class = type(command).__name__
        # Reset prior parse state, then re-join argv so a single anchored
        # regular expression can validate the whole command line.
        debug('Parsing %s command line: %r', command_class, argv)
        command.fieldnames = None
        command.options.reset()
        argv = ' '.join(argv)
        command_args = cls._arguments_re.match(argv)
        if command_args is None:
            raise SyntaxError('Syntax error: {}'.format(argv))
        # Parse options: each name must already exist in command.options.
        for option in cls._options_re.finditer(command_args.group('options')):
            name, value = option.group('name'), option.group('value')
            if name not in command.options:
                raise ValueError(
                    'Unrecognized {} command option: {}={}'.format(command.name, name, json_encode_string(value)))
            command.options[name].value = cls.unquote(value)
        missing = command.options.get_missing()
        if missing is not None:
            if len(missing) > 1:
                raise ValueError(
                    'Values for these {} command options are required: {}'.format(command.name, ', '.join(missing)))
            raise ValueError('A value for {} command option {} is required'.format(command.name, missing[0]))
        # Parse field names (possibly quoted); absent fieldnames yield [].
        fieldnames = command_args.group('fieldnames')
        if fieldnames is None:
            command.fieldnames = []
        else:
            command.fieldnames = [cls.unquote(value.group(0)) for value in cls._fieldnames_re.finditer(fieldnames)]
        debug('  %s: %s', command_class, command)
    @classmethod
    def unquote(cls, string):
        """ Removes quotes from a quoted string.

        Splunk search command quote rules are applied: enclosing double-quotes,
        if present, are removed, and escaped double-quotes ('\"' or '""') are
        replaced by a single double-quote ('"').
        """
        if len(string) == 0:
            return ''
        if string[0] == '"':
            # Strip enclosing quotes; a lone or unterminated quote is an error.
            if len(string) == 1 or string[-1] != '"':
                raise SyntaxError('Poorly formed string literal: ' + string)
            string = string[1:-1]
        if len(string) == 0:
            return ''
        def replace(match):
            # Map '""' -> '"' and '\x' -> 'x'; a trailing lone escape is an error.
            value = match.group(0)
            if value == '""':
                return '"'
            if len(value) < 2:
                raise SyntaxError('Poorly formed string literal: ' + string)
            return value[1]
        result = re.sub(cls._escaped_character_re, replace, string)
        return result
    # region Class variables
    _arguments_re = re.compile(r"""
    ^\s*
    (?P<options>     # Match a leading set of name/value pairs
    (?:
    (?:(?=\w)[^\d]\w*)                   # name
    \s*=\s*                              # =
    (?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*  # value
    )*
    )\s*
    (?P<fieldnames>  # Match a trailing set of field names
    (?:
    (?:"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)\s*
    )*
    )\s*$
    """, re.VERBOSE | re.UNICODE)
    _escaped_character_re = re.compile(r'(\\.|""|[\\"])')
    _fieldnames_re = re.compile(r"""("(?:\\.|""|[^"])+"|(?:\\.|[^\s"])+)""")
    _options_re = re.compile(r"""
    # Captures a set of name/value pairs when used with re.finditer
    (?P<name>(?:(?=\w)[^\d]\w*))                   # name
    \s*=\s*                                        # =
    (?P<value>"(?:\\.|""|[^"])*"|(?:\\.|[^\s"])+)  # value
    """, re.VERBOSE | re.UNICODE)
class ConfigurationSettingsType(type):
    """ Metaclass for constructing ConfigurationSettings classes.

    Builds a ConfigurationSettings class from a base class; the settings a
    caller supplies are validated against :attr:`specification_matrix`.
    """
    def __new__(mcs, module, name, bases):
        # Create the class with an empty body ({}); settings are attached later.
        mcs = super(ConfigurationSettingsType, mcs).__new__(mcs, str(name), bases, {})
        return mcs
    def __init__(cls, module, name, bases):
        super(ConfigurationSettingsType, cls).__init__(name, bases, None)
        # Report the constructed class as belonging to the requesting module.
        cls.__module__ = module
    @staticmethod
    def validate_configuration_setting(specification, name, value):
        # Returns value if it satisfies specification; raises ValueError otherwise.
        if not isinstance(value, specification.type):
            # specification.type may be a single type or a tuple of types.
            if isinstance(specification.type, type):
                type_names = specification.type.__name__
            else:
                type_names = ', '.join(imap(lambda t: t.__name__, specification.type))
            raise ValueError('Expected {} value, not {}={}'.format(type_names, name, repr(value)))
        if specification.constraint and not specification.constraint(value):
            raise ValueError('Illegal value: {}={}'.format(name, repr(value)))
        return value
    # A validation rule for one setting: expected type, optional constraint
    # predicate, and the protocol versions that honor the setting.
    specification = namedtuple(
        'ConfigurationSettingSpecification', (
            'type',
            'constraint',
            'supporting_protocols'))
    specification_matrix = {
        'clear_required_fields': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'distributed': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[2]),
        'generates_timeorder': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'generating': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1, 2]),
        'local': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'maxinputs': specification(
            type=int,
            constraint=lambda value: 0 <= value <= six.MAXSIZE,
            supporting_protocols=[2]),
        'overrides_timeorder': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'required_fields': specification(
            type=(list, set, tuple),
            constraint=None,
            supporting_protocols=[1, 2]),
        'requires_preop': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'retainsevents': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'run_in_preview': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[2]),
        'streaming': specification(
            type=bool,
            constraint=None,
            supporting_protocols=[1]),
        'streaming_preop': specification(
            type=(bytes, six.text_type),
            constraint=None,
            supporting_protocols=[1, 2]),
        'type': specification(
            type=(bytes, six.text_type),
            constraint=lambda value: value in ('events', 'reporting', 'streaming'),
            supporting_protocols=[2])}
class CsvDialect(csv.Dialect):
    """ Describes the properties of Splunk CSV streams. """
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    # NOTE(review): on Python 3 + Windows a bare '\n' is used instead --
    # presumably because the output stream translates newlines there; confirm
    # against how the writer's output file is opened.
    if sys.version_info >= (3, 0) and sys.platform == 'win32':
        lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL
class InputHeader(dict):
    """ Represents a Splunk input header as a collection of name/value pairs.

    """

    def __str__(self):
        return '\n'.join([name + ':' + value for name, value in six.iteritems(self)])

    def read(self, ifile):
        """ Reads an input header from *ifile*.

        The header is a sequence of ``<name>:<value>`` lines terminated by a
        blank line or end-of-file; colon-less lines continue the prior value.
        """
        current_name = None
        current_value = None

        for line in ifile:
            if line == '\n':
                # A blank line terminates the header.
                break
            parts = line.split(':', 1)
            if len(parts) == 2:
                # A new item begins; commit the previous one, dropping its
                # trailing newline.
                if current_name is not None:
                    self[current_name] = current_value[:-1]
                current_name = parts[0]
                current_value = urllib.parse.unquote(parts[1])
            elif current_name is not None:
                # No colon: this line continues the current item's value.
                current_value += urllib.parse.unquote(line)

        if current_name is not None:
            # Commit the final item; it may lack a trailing newline at EOF.
            self[current_name] = current_value[:-1] if current_value[-1] == '\n' else current_value
Message = namedtuple('Message', ('type', 'text'))
class MetadataDecoder(JSONDecoder):
    """ JSON decoder that surfaces decoded objects as ObjectView instances. """
    def __init__(self):
        JSONDecoder.__init__(self, object_hook=self._object_hook)
    @staticmethod
    def _object_hook(dictionary):
        # Wraps `dictionary` -- and every dict nested inside it -- in
        # ObjectView, using an explicit FIFO work queue instead of recursion.
        # Each queue entry is (parent-dict, key-in-parent, dict-to-process);
        # the root entry has no parent, hence the (None, None, ...) seed.
        object_view = ObjectView(dictionary)
        stack = deque()
        stack.append((None, None, dictionary))
        while len(stack):
            instance, member_name, dictionary = stack.popleft()
            for name, value in six.iteritems(dictionary):
                if isinstance(value, dict):
                    stack.append((dictionary, name, value))
            if instance is not None:
                # Replace the nested dict in its parent with an ObjectView.
                instance[member_name] = ObjectView(dictionary)
        return object_view
class MetadataEncoder(JSONEncoder):
    """ JSON encoder that serializes ObjectView instances via their backing dict. """
    def __init__(self):
        JSONEncoder.__init__(self, separators=MetadataEncoder._separators)
    def default(self, o):
        # ObjectView adopts its dictionary as __dict__, so that dictionary is
        # the natural JSON representation; anything else defers to the base.
        return o.__dict__ if isinstance(o, ObjectView) else JSONEncoder.default(self, o)
    # Compact separators: no whitespace in the serialized output.
    _separators = (',', ':')
class ObjectView(object):
    """ Exposes the items of a dictionary as object attributes.

    The dictionary is adopted directly (not copied) as the instance's
    attribute store, so changes to either are visible through the other.
    """

    def __init__(self, dictionary):
        self.__dict__ = dictionary

    def __repr__(self):
        return repr(vars(self))

    def __str__(self):
        return str(vars(self))
class Recorder(object):
    """ Wraps a file-like object and tees all traffic through it into a
    gzip-compressed recording at ``path + '.gz'``.
    """

    def __init__(self, path, f):
        self._recording = gzip.open(path + '.gz', 'wb')
        self._file = f

    def __getattr__(self, name):
        # Delegate anything not implemented here to the wrapped file.
        return getattr(self._file, name)

    def __iter__(self):
        for item in self._file:
            self._recording.write(item)
            self._recording.flush()
            yield item

    def read(self, size=None):
        if size is None:
            data = self._file.read()
        else:
            data = self._file.read(size)
        self._recording.write(data)
        self._recording.flush()
        return data

    def readline(self, size=None):
        if size is None:
            data = self._file.readline()
        else:
            data = self._file.readline(size)
        # Record only non-empty reads (an empty result signals EOF).
        if data:
            self._recording.write(data)
            self._recording.flush()
        return data

    def record(self, *args):
        # Write to the recording only, not to the wrapped file.
        for chunk in args:
            self._recording.write(chunk)

    def write(self, text):
        self._recording.write(text)
        self._file.write(text)
        self._recording.flush()
class RecordWriter(object):
def __init__(self, ofile, maxresultrows=None):
self._maxresultrows = 50000 if maxresultrows is None else maxresultrows
self._ofile = set_binary_mode(ofile)
self._fieldnames = None
self._buffer = StringIO()
self._writer = csv.writer(self._buffer, dialect=CsvDialect)
self._writerow = self._writer.writerow
self._finished = False
self._flushed = False
self._inspector = OrderedDict()
self._chunk_count = 0
self._pending_record_count = 0
self._committed_record_count = 0
@property
def is_flushed(self):
return self._flushed
@is_flushed.setter
def is_flushed(self, value):
self._flushed = True if value else False
@property
def ofile(self):
return self._ofile
@ofile.setter
def ofile(self, value):
self._ofile = set_binary_mode(value)
@property
def pending_record_count(self):
return self._pending_record_count
@property
def _record_count(self):
warnings.warn(
"_record_count will be deprecated soon. Use pending_record_count instead.",
PendingDeprecationWarning
)
return self.pending_record_count
@property
def committed_record_count(self):
return self._committed_record_count
@property
def _total_record_count(self):
warnings.warn(
"_total_record_count will be deprecated soon. Use committed_record_count instead.",
PendingDeprecationWarning
)
return self.committed_record_count
def write(self, data):
bytes_type = bytes if sys.version_info >= (3, 0) else str
if not isinstance(data, bytes_type):
data = data.encode('utf-8')
self.ofile.write(data)
def flush(self, finished=None, partial=None):
assert finished is None or isinstance(finished, bool)
assert partial is None or isinstance(partial, bool)
assert not (finished is None and partial is None)
assert finished is None or partial is None
self._ensure_validity()
def write_message(self, message_type, message_text, *args, **kwargs):
self._ensure_validity()
self._inspector.setdefault('messages', []).append((message_type, message_text.format(*args, **kwargs)))
def write_record(self, record):
self._ensure_validity()
self._write_record(record)
def write_records(self, records):
self._ensure_validity()
write_record = self._write_record
for record in records:
write_record(record)
def _clear(self):
self._buffer.seek(0)
self._buffer.truncate()
self._inspector.clear()
self._pending_record_count = 0
def _ensure_validity(self):
if self._finished is True:
assert self._record_count == 0 and len(self._inspector) == 0
raise RuntimeError('I/O operation on closed record writer')
def _write_record(self, record):
fieldnames = self._fieldnames
if fieldnames is None:
self._fieldnames = fieldnames = list(record.keys())
value_list = imap(lambda fn: (str(fn), str('__mv_') + str(fn)), fieldnames)
self._writerow(list(chain.from_iterable(value_list)))
get_value = record.get
values = []
for fieldname in fieldnames:
value = get_value(fieldname, None)
if value is None:
values += (None, None)
continue
value_t = type(value)
if issubclass(value_t, (list, tuple)):
if len(value) == 0:
values += (None, None)
continue
if len(value) > 1:
value_list = value
sv = ''
mv = '$'
for value in value_list:
if value is None:
sv += '\n'
mv += '$;$'
continue
value_t = type(value)
if value_t is not bytes:
if value_t is bool:
value = str(value.real)
elif value_t is six.text_type:
value = value
elif isinstance(value, six.integer_types) or value_t is float or value_t is complex:
value = str(value)
elif issubclass(value_t, (dict, list, tuple)):
value = str(''.join(RecordWriter._iterencode_json(value, 0)))
else:
value = repr(value).encode('utf-8', errors='backslashreplace')
sv += value + '\n'
mv += value.replace('$', '$$') + '$;$'
values += (sv[:-1], mv[:-2])
continue
value = value[0]
value_t = type(value)
if value_t is bool:
values += (str(value.real), None)
continue
if value_t is bytes:
values += (value, None)
continue
if value_t is six.text_type:
if six.PY2:
value = value.encode('utf-8')
values += (value, None)
continue
if isinstance(value, six.integer_types) or value_t is float or value_t is complex:
values += (str(value), None)
continue
if issubclass(value_t, dict):
values += (str(''.join(RecordWriter._iterencode_json(value, 0))), None)
continue
values += (repr(value), None)
self._writerow(values)
self._pending_record_count += 1
if self.pending_record_count >= self._maxresultrows:
self.flush(partial=True)
try:
# noinspection PyUnresolvedReferences
from _json import make_encoder
except ImportError:
# We may be running under PyPy 2.5 which does not include the _json module
_iterencode_json = JSONEncoder(separators=(',', ':')).iterencode
else:
# Creating _iterencode_json this way yields a two-fold performance improvement on Python 2.7.9 and 2.7.10
from json.encoder import encode_basestring_ascii
@staticmethod
def _default(o):
raise TypeError(repr(o) + ' is not JSON serializable')
_iterencode_json = make_encoder(
{}, # markers (for detecting circular references)
_default, # object_encoder
encode_basestring_ascii, # string_encoder
None, # indent
':', ',', # separators
False, # sort_keys
False, # skip_keys
True # allow_nan
)
del make_encoder
class RecordWriterV1(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if self.pending_record_count > 0 or (self._chunk_count == 0 and 'messages' in self._inspector):
messages = self._inspector.get('messages')
if self._chunk_count == 0:
# Messages are written to the messages header when we write the first chunk of data
# Guarantee: These messages are displayed by splunkweb and the job inspector
if messages is not None:
message_level = RecordWriterV1._message_level.get
for level, text in messages:
self.write(message_level(level, level))
self.write('=')
self.write(text)
self.write('\r\n')
self.write('\r\n')
elif messages is not None:
# Messages are written to the messages header when we write subsequent chunks of data
# Guarantee: These messages are displayed by splunkweb and the job inspector, if and only if the
# command is configured with
#
# stderr_dest = message
#
# stderr_dest is a static configuration setting. This means that it can only be set in commands.conf.
# It cannot be set in code.
stderr = sys.stderr
for level, text in messages:
print(level, text, file=stderr)
self.write(self._buffer.getvalue())
self._chunk_count += 1
self._committed_record_count += self.pending_record_count
self._clear()
self._finished = finished is True
_message_level = {
'DEBUG': 'debug_message',
'ERROR': 'error_message',
'FATAL': 'error_message',
'INFO': 'info_message',
'WARN': 'warn_message'
}
class RecordWriterV2(RecordWriter):
def flush(self, finished=None, partial=None):
RecordWriter.flush(self, finished, partial) # validates arguments and the state of this instance
if partial or not finished:
# Don't flush partial chunks, since the SCP v2 protocol does not
# provide a way to send partial chunks yet.
return
if not self.is_flushed:
self.write_chunk(finished=True)
def write_chunk(self, finished=None):
inspector = self._inspector
self._committed_record_count += self.pending_record_count
self._chunk_count += 1
# TODO: DVPL-6448: splunklib.searchcommands | Add support for partial: true when it is implemented in
# ChunkedExternProcessor (See SPL-103525)
#
# We will need to replace the following block of code with this block:
#
# metadata = [item for item in (('inspector', inspector), ('finished', finished), ('partial', partial))]
#
# if partial is True:
# finished = False
if len(inspector) == 0:
inspector = None
metadata = [item for item in (('inspector', inspector), ('finished', finished))]
self._write_chunk(metadata, self._buffer.getvalue())
self._clear()
def write_metadata(self, configuration):
self._ensure_validity()
metadata = chain(six.iteritems(configuration), (('inspector', self._inspector if self._inspector else None),))
self._write_chunk(metadata, '')
self.write('\n')
self._clear()
def write_metric(self, name, value):
self._ensure_validity()
self._inspector['metric.' + name] = value
def _clear(self):
super(RecordWriterV2, self)._clear()
self._fieldnames = None
def _write_chunk(self, metadata, body):
if metadata:
metadata = str(''.join(self._iterencode_json(dict([(n, v) for n, v in metadata if v is not None]), 0)))
if sys.version_info >= (3, 0):
metadata = metadata.encode('utf-8')
metadata_length = len(metadata)
else:
metadata_length = 0
if sys.version_info >= (3, 0):
body = body.encode('utf-8')
body_length = len(body)
if not (metadata_length > 0 or body_length > 0):
return
start_line = 'chunked 1.0,%s,%s\n' % (metadata_length, body_length)
self.write(start_line)
self.write(metadata)
self.write(body)
self._ofile.flush()
self._flushed = True
| true | true |
f7146fa04286aa454a33161690efd3bc2e7b2b70 | 5,851 | py | Python | src/compas/robots/model/tool.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/robots/model/tool.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | src/compas/robots/model/tool.py | ricardoavelino/compas | e3c7f004b8839f96bf01f9f6b21a75786c3f59fa | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import Frame
from compas.geometry import Transformation
from compas.robots.model.robot import RobotModel
class ToolModel(RobotModel):
"""Represents a tool to be attached to the robot's flange.
Attributes
----------
visual : :class:`~compas.datastructures.Mesh`
The visual mesh of the tool.
frame : :class:`~compas.geometry.Frame`
The frame of the tool in tool0 frame.
collision : :class:`~compas.datastructures.Mesh`
The collision mesh representation of the tool.
name : str
The name of the `ToolModel`. Defaults to 'attached_tool'.
link_name : str
The name of the `Link` to which the tool is attached. Defaults to ``None``.
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
"""
def __init__(self, visual, frame_in_tool0_frame, collision=None, name="attached_tool", link_name=None):
collision = collision or visual
super(ToolModel, self).__init__(name)
self.add_link("attached_tool_link", visual_mesh=visual, collision_mesh=collision)
self._rebuild_tree()
self._create(self.root, Transformation())
self.frame = frame_in_tool0_frame
self.link_name = link_name
@classmethod
def from_robot_model(cls, robot, frame_in_tool0_frame, link_name=None):
"""Creates a ``ToolModel`` from a :class:`~compas.robots.RobotModel` instance.
Parameters
----------
robot : :class:`~compas.robots.RobotModel`
frame_in_tool0_frame : str
The frame of the tool in tool0 frame.
link_name : str
The name of the `Link` to which the tool is attached.
Defaults to ``None``.
"""
data = robot.data
data['frame'] = frame_in_tool0_frame.data
data['link_name'] = link_name
return cls.from_data(data)
@property
def data(self):
"""Returns the data dictionary that represents the tool.
Returns
-------
dict
The tool data.
"""
return self._get_data()
def _get_data(self):
data = super(ToolModel, self)._get_data()
data['frame'] = self.frame.data
data['link_name'] = self.link_name
return data
@data.setter
def data(self, data):
self._set_data(data)
def _set_data(self, data):
super(ToolModel, self)._set_data(data)
self.frame = Frame.from_data(data['frame'])
self.name = self.name or 'attached_tool'
self.link_name = data['link_name'] if 'link_name' in data else None
@classmethod
def from_data(cls, data):
"""Construct a `ToolModel` from its data representation.
To be used in conjunction with the :meth:`to_data` method.
Parameters
----------
data : dict
The data dictionary.
Returns
-------
:class:`ToolModel`
The constructed `ToolModel`.
"""
tool = cls(None, None)
tool.data = data
return tool
def from_tcf_to_t0cf(self, frames_tcf):
"""Converts a list of frames at the robot's tool tip (tcf frame) to frames at the robot's flange (tool0 frame).
Parameters
----------
frames_tcf : list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's tool tip (tcf).
Returns
-------
list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's flange (tool0).
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
>>> frames_tcf = [Frame((-0.309, -0.046, -0.266), (0.276, 0.926, -0.256), (0.879, -0.136, 0.456))]
>>> tool.from_tcf_to_t0cf(frames_tcf)
[Frame(Point(-0.363, 0.003, -0.147), Vector(0.388, -0.351, -0.852), Vector(0.276, 0.926, -0.256))]
"""
Te = Transformation.from_frame_to_frame(self.frame, Frame.worldXY())
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_tcf]
def from_t0cf_to_tcf(self, frames_t0cf):
"""Converts frames at the robot's flange (tool0 frame) to frames at the robot's tool tip (tcf frame).
Parameters
----------
frames_t0cf : list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's flange (tool0).
Returns
-------
list[:class:`~compas.geometry.Frame`]
Frames (in WCF) at the robot's tool tip (tcf).
Examples
--------
>>> import compas
>>> from compas.datastructures import Mesh
>>> from compas.geometry import Frame
>>> mesh = Mesh.from_stl(compas.get('cone.stl'))
>>> frame = Frame([0.14, 0, 0], [0, 1, 0], [0, 0, 1])
>>> tool = ToolModel(mesh, frame)
>>> frames_t0cf = [Frame((-0.363, 0.003, -0.147), (0.388, -0.351, -0.852), (0.276, 0.926, -0.256))]
>>> tool.from_t0cf_to_tcf(frames_t0cf)
[Frame(Point(-0.309, -0.046, -0.266), Vector(0.276, 0.926, -0.256), Vector(0.879, -0.136, 0.456))]
"""
Te = Transformation.from_frame_to_frame(Frame.worldXY(), self.frame)
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_t0cf]
| 33.820809 | 119 | 0.592719 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compas.geometry import Frame
from compas.geometry import Transformation
from compas.robots.model.robot import RobotModel
class ToolModel(RobotModel):
def __init__(self, visual, frame_in_tool0_frame, collision=None, name="attached_tool", link_name=None):
collision = collision or visual
super(ToolModel, self).__init__(name)
self.add_link("attached_tool_link", visual_mesh=visual, collision_mesh=collision)
self._rebuild_tree()
self._create(self.root, Transformation())
self.frame = frame_in_tool0_frame
self.link_name = link_name
@classmethod
def from_robot_model(cls, robot, frame_in_tool0_frame, link_name=None):
data = robot.data
data['frame'] = frame_in_tool0_frame.data
data['link_name'] = link_name
return cls.from_data(data)
@property
def data(self):
return self._get_data()
def _get_data(self):
data = super(ToolModel, self)._get_data()
data['frame'] = self.frame.data
data['link_name'] = self.link_name
return data
@data.setter
def data(self, data):
self._set_data(data)
def _set_data(self, data):
super(ToolModel, self)._set_data(data)
self.frame = Frame.from_data(data['frame'])
self.name = self.name or 'attached_tool'
self.link_name = data['link_name'] if 'link_name' in data else None
@classmethod
def from_data(cls, data):
tool = cls(None, None)
tool.data = data
return tool
def from_tcf_to_t0cf(self, frames_tcf):
Te = Transformation.from_frame_to_frame(self.frame, Frame.worldXY())
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_tcf]
def from_t0cf_to_tcf(self, frames_t0cf):
Te = Transformation.from_frame_to_frame(Frame.worldXY(), self.frame)
return [Frame.from_transformation(Transformation.from_frame(f) * Te) for f in frames_t0cf]
| true | true |
f714701d95b7e531e20aa26475084c4b139b2eb8 | 417 | py | Python | mainapp/migrations/0004_auto_20181224_1636.py | ploggingdev/finitecoins | 60f69cc563e1a26be8c659d4400579025219a223 | [
"MIT"
] | null | null | null | mainapp/migrations/0004_auto_20181224_1636.py | ploggingdev/finitecoins | 60f69cc563e1a26be8c659d4400579025219a223 | [
"MIT"
] | 5 | 2020-02-11T23:31:23.000Z | 2021-06-10T21:03:24.000Z | mainapp/migrations/0004_auto_20181224_1636.py | ploggingdev/finitecoins | 60f69cc563e1a26be8c659d4400579025219a223 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.4 on 2018-12-24 16:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0003_game_description_html'),
]
operations = [
migrations.AlterField(
model_name='game',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| 21.947368 | 75 | 0.613909 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0003_game_description_html'),
]
operations = [
migrations.AlterField(
model_name='game',
name='description',
field=models.CharField(blank=True, max_length=1000, null=True),
),
]
| true | true |
f7147066a672f718ec342f39cba6fdc815170b9d | 2,054 | py | Python | motion_detector/main.py | Mark-Kinyua/python_public | 25c4eff3a6f93c35a949f94a2f9c3df3202a3113 | [
"MIT"
] | null | null | null | motion_detector/main.py | Mark-Kinyua/python_public | 25c4eff3a6f93c35a949f94a2f9c3df3202a3113 | [
"MIT"
] | null | null | null | motion_detector/main.py | Mark-Kinyua/python_public | 25c4eff3a6f93c35a949f94a2f9c3df3202a3113 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
# A motion detecetor, yup... lol.
# Remember to use an old python version < 3.6
image_path = 'room_people.jpg' # Photo
# The model was already formulated, just need to loaad it into the system.
prototxt_path = 'models/MobileNetSSD_deploy.prototxt' # Load Model
model_path = 'models/MobileNetSSD_deploy.caffemodel'
min_confidence = 0.2
# Things it can identify
classes = ["background","aeroplane","bicycle","bird","boat","bottle","bus","car","cat","chair","cow","diningtable","dog","horse",
"motorbike","person","pottedplant","sheep","sofa","train","tvmonitor"]
np.random.seed(543210) # Same Colors
colors = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
# img = cv2.imread(image_path)
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
height, width = img.shape[0], img.shape[1]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007, (300,300), 130)
net.setInput(blob)
detected_objects = net.forward()
for i in range(detected_objects.shape[2]):
confidence = detected_objects[0][0][i][2]
if confidence > min_confidence:
class_index = int(detected_objects[0,0,i,1])
upper_left_x = int(detected_objects[0, 0, i, 3] * width)
upper_left_y = int(detected_objects[0, 0, i, 3] * height)
lower_right_x = int(detected_objects[0, 0, i, 5] * width)
lower_right_y = int(detected_objects[0, 0, i, 6] * height)
prediction_text = f"{classes[class_index]}: {confidence:.2f}%"
cv2.rectangle(img, (upper_left_x, upper_left_y), (lower_right_x, lower_right_y), colors[class_index], 3)
cv2.putText(img, prediction_text, (upper_left_x,
upper_left_y- 15 if upper_left_y > 30 else upper_left_y + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[class_index], 2)
cv2.imshow("Detected Objects", img)
cv2.waitKey(5)
cv2.destroyAllWindows()
cap.release()
| 32.09375 | 129 | 0.656767 | import numpy as np
import cv2
image_path = 'room_people.jpg'
prototxt_path = 'models/MobileNetSSD_deploy.prototxt'
model_path = 'models/MobileNetSSD_deploy.caffemodel'
min_confidence = 0.2
classes = ["background","aeroplane","bicycle","bird","boat","bottle","bus","car","cat","chair","cow","diningtable","dog","horse",
"motorbike","person","pottedplant","sheep","sofa","train","tvmonitor"]
np.random.seed(543210)
colors = np.random.uniform(0, 255, size=(len(classes), 3))
net = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
height, width = img.shape[0], img.shape[1]
blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 0.007, (300,300), 130)
net.setInput(blob)
detected_objects = net.forward()
for i in range(detected_objects.shape[2]):
confidence = detected_objects[0][0][i][2]
if confidence > min_confidence:
class_index = int(detected_objects[0,0,i,1])
upper_left_x = int(detected_objects[0, 0, i, 3] * width)
upper_left_y = int(detected_objects[0, 0, i, 3] * height)
lower_right_x = int(detected_objects[0, 0, i, 5] * width)
lower_right_y = int(detected_objects[0, 0, i, 6] * height)
prediction_text = f"{classes[class_index]}: {confidence:.2f}%"
cv2.rectangle(img, (upper_left_x, upper_left_y), (lower_right_x, lower_right_y), colors[class_index], 3)
cv2.putText(img, prediction_text, (upper_left_x,
upper_left_y- 15 if upper_left_y > 30 else upper_left_y + 15),
cv2.FONT_HERSHEY_SIMPLEX, 0.6, colors[class_index], 2)
cv2.imshow("Detected Objects", img)
cv2.waitKey(5)
cv2.destroyAllWindows()
cap.release()
| true | true |
f714719a188b3cff193941c546d8d5b9cbeb3c7f | 227 | py | Python | chia/wallet/puzzles/rom_bootstrap_generator.py | ForestCrazy/chia-blockchain-remote-plot | 0ba838b7a8ea2b5410d438ac70295df699a30dae | [
"Apache-2.0"
] | 11,902 | 2019-12-05T00:14:29.000Z | 2022-03-31T23:25:37.000Z | chia/wallet/puzzles/rom_bootstrap_generator.py | jcteng/ext9-blockchain | 46506bc5778e14cbc373de39438b0c6f794a49c5 | [
"Apache-2.0"
] | 5,246 | 2019-12-05T04:00:03.000Z | 2022-03-31T21:33:30.000Z | chia/wallet/puzzles/rom_bootstrap_generator.py | jcteng/ext9-blockchain | 46506bc5778e14cbc373de39438b0c6f794a49c5 | [
"Apache-2.0"
] | 2,149 | 2019-12-05T11:12:53.000Z | 2022-03-31T06:08:34.000Z | from chia.types.blockchain_format.program import SerializedProgram
from .load_clvm import load_clvm
MOD = SerializedProgram.from_bytes(load_clvm("rom_bootstrap_generator.clvm").as_bin())
def get_generator():
return MOD
| 22.7 | 86 | 0.814978 | from chia.types.blockchain_format.program import SerializedProgram
from .load_clvm import load_clvm
MOD = SerializedProgram.from_bytes(load_clvm("rom_bootstrap_generator.clvm").as_bin())
def get_generator():
return MOD
| true | true |
f71471f3e75074020a7c0fdf86353776fcede027 | 3,324 | py | Python | src/Yowsup/ConnectionIO/connectionengine.py | philklc/yowsup | a1736ccbdadfccbf9066964f3a9cb51f3337c840 | [
"MIT"
] | 1 | 2018-12-27T23:35:52.000Z | 2018-12-27T23:35:52.000Z | src/Yowsup/ConnectionIO/connectionengine.py | philklc/yowsup | a1736ccbdadfccbf9066964f3a9cb51f3337c840 | [
"MIT"
] | null | null | null | src/Yowsup/ConnectionIO/connectionengine.py | philklc/yowsup | a1736ccbdadfccbf9066964f3a9cb51f3337c840 | [
"MIT"
] | null | null | null | '''
Copyright (c) <2012> Tarek Galal <tare2.galal@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR
A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import socket;
import sys
from bintreenode import BinTreeNodeReader, BinTreeNodeWriter
from Yowsup.Common.debugger import Debugger
from ioexceptions import ConnectionClosedException
class ConnectionEngine(socket.socket):
def __init__(self):
Debugger.attach(self)
self.reader = BinTreeNodeReader(self)
self.writer = BinTreeNodeWriter(self)
self.readSize = 1;
self.buf = [];
self.maxBufRead = 0;
self.connected = 0
self.jid = ""
super(ConnectionEngine,self).__init__(socket.AF_INET, socket.SOCK_STREAM);
def getId(self):
return self.id
def setId(self, idx):
self.id = idx
def flush(self):
'''FLUSH'''
self.write();
def getBuffer(self):
return self.buffer;
def reset(self):
self.buffer = "";
def write(self,data):
if type(data) is int:
try:
self.sendall(chr(data));
except:
raise ConnectionClosedException("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
else:
tmp = "";
for d in data:
tmp += chr(d)
try:
self.sendall(tmp);
except:
raise ConnectionClosedException("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
def setReadSize(self,size):
self.readSize = size;
def read(self, socketOnly = 0):
x = ""
try:
x = self.recv(self.readSize);
except:
raise ConnectionClosedException("socket read crashed, reason %s " % sys.exc_info()[1])
self._d("socket read crashed, reason %s " % sys.exc_info()[1])
#x= self.recvX(self.readSize);
if len(x) == 1:
#Utilities.debug("GOT "+str(ord((x))));
return ord(x);
else:
raise ConnectionClosedException("Got 0 bytes, connection closed");
#return x;
def read2(self,b,off,length):
'''reads into a buffer'''
if off < 0 or length < 0 or (off+length)>len(b):
raise Exception("Out of bounds");
if length == 0:
return 0;
if b is None:
raise Exception("XNull pointerX");
count = 0;
while count < length:
#self.read();
#print "OKIIIIIIIIIIII";
#exit();
b[off+count]=self.read(0);
count= count+1;
return count;
| 25.569231 | 93 | 0.691637 |
import socket;
import sys
from bintreenode import BinTreeNodeReader, BinTreeNodeWriter
from Yowsup.Common.debugger import Debugger
from ioexceptions import ConnectionClosedException
class ConnectionEngine(socket.socket):
def __init__(self):
Debugger.attach(self)
self.reader = BinTreeNodeReader(self)
self.writer = BinTreeNodeWriter(self)
self.readSize = 1;
self.buf = [];
self.maxBufRead = 0;
self.connected = 0
self.jid = ""
super(ConnectionEngine,self).__init__(socket.AF_INET, socket.SOCK_STREAM);
def getId(self):
return self.id
def setId(self, idx):
self.id = idx
def flush(self):
self.write();
def getBuffer(self):
return self.buffer;
def reset(self):
self.buffer = "";
def write(self,data):
if type(data) is int:
try:
self.sendall(chr(data));
except:
raise ConnectionClosedException("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 1 write crashed, reason: %s" % sys.exc_info()[1])
else:
tmp = "";
for d in data:
tmp += chr(d)
try:
self.sendall(tmp);
except:
raise ConnectionClosedException("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
self._d("socket 2 write crashed, reason: %s" % sys.exc_info()[1])
def setReadSize(self,size):
self.readSize = size;
def read(self, socketOnly = 0):
x = ""
try:
x = self.recv(self.readSize);
except:
raise ConnectionClosedException("socket read crashed, reason %s " % sys.exc_info()[1])
self._d("socket read crashed, reason %s " % sys.exc_info()[1])
if len(x) == 1:
return ord(x);
else:
raise ConnectionClosedException("Got 0 bytes, connection closed");
def read2(self,b,off,length):
if off < 0 or length < 0 or (off+length)>len(b):
raise Exception("Out of bounds");
if length == 0:
return 0;
if b is None:
raise Exception("XNull pointerX");
count = 0;
while count < length:
b[off+count]=self.read(0);
count= count+1;
return count;
| true | true |
f7147205a286f1365d68f46e5f1a2e5554c58b41 | 1,577 | py | Python | share/rpcauth/rpcauth.py | bitcointallkcoin/bitcointalkcoin | 1c4edf2a6397531581663a9d3110a53fee87ca0b | [
"MIT"
] | null | null | null | share/rpcauth/rpcauth.py | bitcointallkcoin/bitcointalkcoin | 1c4edf2a6397531581663a9d3110a53fee87ca0b | [
"MIT"
] | null | null | null | share/rpcauth/rpcauth.py | bitcointallkcoin/bitcointalkcoin | 1c4edf2a6397531581663a9d3110a53fee87ca0b | [
"MIT"
] | 1 | 2020-02-27T15:51:47.000Z | 2020-02-27T15:51:47.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Talkcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to talkcoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
| 33.553191 | 134 | 0.714648 |
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
return hexlify(urandom(size)).decode()
def generate_password():
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to talkcoin.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
| true | true |
f71472389ed45e2198b8808678490dfe4d7a408f | 787 | py | Python | script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/sklearn_like_toolkit/warpper/skClf_wrapper/skMultinomial_NBClf.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | from hyperopt import hp
from sklearn.naive_bayes import MultinomialNB as _skMultinomialNB
from script.sklearn_like_toolkit.warpper.base.BaseWrapperClf import BaseWrapperClf
from script.sklearn_like_toolkit.warpper.base.MixIn import MetaBaseWrapperClfWithABC
class skMultinomial_NBClf(BaseWrapperClf, _skMultinomialNB, metaclass=MetaBaseWrapperClfWithABC):
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
_skMultinomialNB.__init__(self, alpha, fit_prior, class_prior)
BaseWrapperClf.__init__(self)
HyperOpt_space = {
'alpha': hp.loguniform('alpha', -8, 1),
}
tuning_grid = {
'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],
# 'class_prior': None,
# 'fit_prior': True
}
| 35.772727 | 98 | 0.70521 | from hyperopt import hp
from sklearn.naive_bayes import MultinomialNB as _skMultinomialNB
from script.sklearn_like_toolkit.warpper.base.BaseWrapperClf import BaseWrapperClf
from script.sklearn_like_toolkit.warpper.base.MixIn import MetaBaseWrapperClfWithABC
class skMultinomial_NBClf(BaseWrapperClf, _skMultinomialNB, metaclass=MetaBaseWrapperClfWithABC):
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
_skMultinomialNB.__init__(self, alpha, fit_prior, class_prior)
BaseWrapperClf.__init__(self)
HyperOpt_space = {
'alpha': hp.loguniform('alpha', -8, 1),
}
tuning_grid = {
'alpha': [0.00001, 0.0001, 0.001, 0.01, 0.1, 1.0, 10.0],
}
| true | true |
f71472b726fa211be0ca1ee4ef01ed7553c09623 | 8,432 | py | Python | pandas/tests/indexes/multi/test_formats.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:18.000Z | 2022-03-01T03:48:47.000Z | pandas/tests/indexes/multi/test_formats.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 1 | 2021-12-01T03:10:17.000Z | 2021-12-23T20:27:21.000Z | pandas/tests/indexes/multi/test_formats.py | umangino/pandas | c492672699110fe711b7f76ded5828ff24bce5ab | [
"BSD-3-Clause"
] | 2 | 2022-02-27T04:02:19.000Z | 2022-03-01T03:49:21.000Z | import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
def test_format(idx):
idx.format()
idx[:0].format()
def test_format_integer_names():
    # format(names=True) must cope with level names that are not strings.
    index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1])
    index.format(names=True)
def test_format_sparse_config(idx):
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
# GH1538
with pd.option_context("display.multi_sparse", False):
result = idx.format()
assert result[1] == "foo two"
warnings.filters = warn_filters
def test_format_sparse_display():
index = MultiIndex(
levels=[[0, 1], [0, 1], [0, 1], [0]],
codes=[
[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
],
)
result = index.format()
assert result[3] == "1 0 0 0"
def test_repr_with_unicode_data():
    # repr of a MultiIndex holding non-ASCII values must show the characters
    # themselves, never a backslash escape sequence.
    with pd.option_context("display.encoding", "UTF-8"):
        frame = pd.DataFrame({"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
        index = frame.set_index(["a", "b"]).index
        assert "\\" not in repr(index)
def test_repr_roundtrip_raises():
    """eval(repr(mi)) is unsupported for MultiIndex and must raise TypeError."""
    mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])
    with pytest.raises(TypeError, match="Must pass both levels and codes"):
        eval(repr(mi))
def test_unicode_string_with_unicode():
    """str() of a MultiIndex containing unicode values must not raise."""
    frame = pd.DataFrame({"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
    str(frame.set_index(["a", "b"]).index)
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
class TestRepr:
    """Exact-match tests for ``MultiIndex.__repr__`` output.

    The ``expected`` triple-quoted strings below are byte-exact fixtures —
    their internal indentation and padding are part of the expected output
    and must not be re-formatted.
    """

    def test_unicode_repr_issues(self):
        """repr() of levels / level values containing non-ASCII text must not raise."""
        levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
        codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
        index = MultiIndex(levels=levels, codes=codes)
        repr(index.levels)
        repr(index.get_level_values(1))

    def test_repr_max_seq_items_equal_to_n(self, idx):
        """When max_seq_items equals the index length there is no truncation."""
        # display.max_seq_items == n
        with pd.option_context("display.max_seq_items", 6):
            result = idx.__repr__()
            expected = """\
MultiIndex([('foo', 'one'),
            ('foo', 'two'),
            ('bar', 'one'),
            ('baz', 'two'),
            ('qux', 'one'),
            ('qux', 'two')],
           names=['first', 'second'])"""
            assert result == expected

    def test_repr(self, idx):
        """Full repr, truncated repr (max_seq_items=5) and minimal repr (=1)."""
        result = idx[:1].__repr__()
        expected = """\
MultiIndex([('foo', 'one')],
           names=['first', 'second'])"""
        assert result == expected

        result = idx.__repr__()
        expected = """\
MultiIndex([('foo', 'one'),
            ('foo', 'two'),
            ('bar', 'one'),
            ('baz', 'two'),
            ('qux', 'one'),
            ('qux', 'two')],
           names=['first', 'second'])"""
        assert result == expected

        # truncated in the middle, with a trailing length= marker
        with pd.option_context("display.max_seq_items", 5):
            result = idx.__repr__()
            expected = """\
MultiIndex([('foo', 'one'),
            ('foo', 'two'),
            ...
            ('qux', 'one'),
            ('qux', 'two')],
           names=['first', 'second'], length=6)"""
            assert result == expected

        # display.max_seq_items == 1
        with pd.option_context("display.max_seq_items", 1):
            result = idx.__repr__()
            expected = """\
MultiIndex([...
            ('qux', 'two')],
           names=['first', ...], length=6)"""
            assert result == expected

    def test_rjust(self, narrow_multi_index):
        """Tuple elements are right-justified so columns line up across rows."""
        mi = narrow_multi_index
        result = mi[:1].__repr__()
        expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
           names=['a', 'b', 'dti'])"""
        assert result == expected

        # mixed-width values ('a' vs 'abc', 9 vs 10) force right-justification
        result = mi[::500].__repr__()
        expected = """\
MultiIndex([( 'a',  9, '2000-01-01 00:00:00'),
            ( 'a',  9, '2000-01-01 00:08:20'),
            ('abc', 10, '2000-01-01 00:16:40'),
            ('abc', 10, '2000-01-01 00:25:00')],
           names=['a', 'b', 'dti'])"""
        assert result == expected

        result = mi.__repr__()
        expected = """\
MultiIndex([( 'a',  9, '2000-01-01 00:00:00'),
            ( 'a',  9, '2000-01-01 00:00:01'),
            ( 'a',  9, '2000-01-01 00:00:02'),
            ( 'a',  9, '2000-01-01 00:00:03'),
            ( 'a',  9, '2000-01-01 00:00:04'),
            ( 'a',  9, '2000-01-01 00:00:05'),
            ( 'a',  9, '2000-01-01 00:00:06'),
            ( 'a',  9, '2000-01-01 00:00:07'),
            ( 'a',  9, '2000-01-01 00:00:08'),
            ( 'a',  9, '2000-01-01 00:00:09'),
            ...
            ('abc', 10, '2000-01-01 00:33:10'),
            ('abc', 10, '2000-01-01 00:33:11'),
            ('abc', 10, '2000-01-01 00:33:12'),
            ('abc', 10, '2000-01-01 00:33:13'),
            ('abc', 10, '2000-01-01 00:33:14'),
            ('abc', 10, '2000-01-01 00:33:15'),
            ('abc', 10, '2000-01-01 00:33:16'),
            ('abc', 10, '2000-01-01 00:33:17'),
            ('abc', 10, '2000-01-01 00:33:18'),
            ('abc', 10, '2000-01-01 00:33:19')],
           names=['a', 'b', 'dti'], length=2000)"""
        assert result == expected

    def test_tuple_width(self, wide_multi_index):
        """Very wide tuples are elided per-row with a trailing '...'."""
        mi = wide_multi_index
        result = mi[:1].__repr__()
        expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
           names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
        assert result == expected

        result = mi[:10].__repr__()
        expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
            ('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
            ('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
            ('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
            ('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
            ('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
            ('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
            ('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
            ('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
            ('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
           names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
        assert result == expected

        # full repr combines per-row elision with right-justified columns
        result = mi.__repr__()
        expected = """\
MultiIndex([(  'a',  9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
            (  'a',  9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
            (  'a',  9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
            (  'a',  9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
            (  'a',  9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
            (  'a',  9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
            (  'a',  9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
            (  'a',  9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
            (  'a',  9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
            (  'a',  9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
            ...
            ('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
            ('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
            ('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
            ('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
            ('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
            ('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
            ('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
            ('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
            ('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
            ('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
           names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
        assert result == expected
| 36.66087 | 95 | 0.468098 | import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
def test_format(idx):
idx.format()
idx[:0].format()
def test_format_integer_names():
index = MultiIndex(
levels=[[0, 1], [0, 1]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=[0, 1]
)
index.format(names=True)
def test_format_sparse_config(idx):
warn_filters = warnings.filters
warnings.filterwarnings("ignore", category=FutureWarning, module=".*format")
with pd.option_context("display.multi_sparse", False):
result = idx.format()
assert result[1] == "foo two"
warnings.filters = warn_filters
def test_format_sparse_display():
index = MultiIndex(
levels=[[0, 1], [0, 1], [0, 1], [0]],
codes=[
[0, 0, 0, 1, 1, 1],
[0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0],
],
)
result = index.format()
assert result[3] == "1 0 0 0"
def test_repr_with_unicode_data():
with pd.option_context("display.encoding", "UTF-8"):
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
index = pd.DataFrame(d).set_index(["a", "b"]).index
assert "\\" not in repr(index)
def test_repr_roundtrip_raises():
mi = MultiIndex.from_product([list("ab"), range(3)], names=["first", "second"])
msg = "Must pass both levels and codes"
with pytest.raises(TypeError, match=msg):
eval(repr(mi))
def test_unicode_string_with_unicode():
d = {"a": ["\u05d0", 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}
idx = pd.DataFrame(d).set_index(["a", "b"]).index
str(idx)
def test_repr_max_seq_item_setting(idx):
# GH10182
idx = idx.repeat(50)
with pd.option_context("display.max_seq_items", None):
repr(idx)
assert "..." not in str(idx)
class TestRepr:
def test_unicode_repr_issues(self):
levels = [Index(["a/\u03c3", "b/\u03c3", "c/\u03c3"]), Index([0, 1])]
codes = [np.arange(3).repeat(2), np.tile(np.arange(2), 3)]
index = MultiIndex(levels=levels, codes=codes)
repr(index.levels)
repr(index.get_level_values(1))
def test_repr_max_seq_items_equal_to_n(self, idx):
# display.max_seq_items == n
with pd.option_context("display.max_seq_items", 6):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
def test_repr(self, idx):
result = idx[:1].__repr__()
expected = """\
MultiIndex([('foo', 'one')],
names=['first', 'second'])"""
assert result == expected
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
('bar', 'one'),
('baz', 'two'),
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'])"""
assert result == expected
with pd.option_context("display.max_seq_items", 5):
result = idx.__repr__()
expected = """\
MultiIndex([('foo', 'one'),
('foo', 'two'),
...
('qux', 'one'),
('qux', 'two')],
names=['first', 'second'], length=6)"""
assert result == expected
# display.max_seq_items == 1
with pd.option_context("display.max_seq_items", 1):
result = idx.__repr__()
expected = """\
MultiIndex([...
('qux', 'two')],
names=['first', ...], length=6)"""
assert result == expected
def test_rjust(self, narrow_multi_index):
mi = narrow_multi_index
result = mi[:1].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi[::500].__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:08:20'),
('abc', 10, '2000-01-01 00:16:40'),
('abc', 10, '2000-01-01 00:25:00')],
names=['a', 'b', 'dti'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00'),
( 'a', 9, '2000-01-01 00:00:01'),
( 'a', 9, '2000-01-01 00:00:02'),
( 'a', 9, '2000-01-01 00:00:03'),
( 'a', 9, '2000-01-01 00:00:04'),
( 'a', 9, '2000-01-01 00:00:05'),
( 'a', 9, '2000-01-01 00:00:06'),
( 'a', 9, '2000-01-01 00:00:07'),
( 'a', 9, '2000-01-01 00:00:08'),
( 'a', 9, '2000-01-01 00:00:09'),
...
('abc', 10, '2000-01-01 00:33:10'),
('abc', 10, '2000-01-01 00:33:11'),
('abc', 10, '2000-01-01 00:33:12'),
('abc', 10, '2000-01-01 00:33:13'),
('abc', 10, '2000-01-01 00:33:14'),
('abc', 10, '2000-01-01 00:33:15'),
('abc', 10, '2000-01-01 00:33:16'),
('abc', 10, '2000-01-01 00:33:17'),
('abc', 10, '2000-01-01 00:33:18'),
('abc', 10, '2000-01-01 00:33:19')],
names=['a', 'b', 'dti'], length=2000)"""
assert result == expected
def test_tuple_width(self, wide_multi_index):
mi = wide_multi_index
result = mi[:1].__repr__()
expected = """MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi[:10].__repr__()
expected = """\
MultiIndex([('a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
('a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
('a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
('a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
('a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
('a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
('a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
('a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
('a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
('a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'])"""
assert result == expected
result = mi.__repr__()
expected = """\
MultiIndex([( 'a', 9, '2000-01-01 00:00:00', '2000-01-01 00:00:00', ...),
( 'a', 9, '2000-01-01 00:00:01', '2000-01-01 00:00:01', ...),
( 'a', 9, '2000-01-01 00:00:02', '2000-01-01 00:00:02', ...),
( 'a', 9, '2000-01-01 00:00:03', '2000-01-01 00:00:03', ...),
( 'a', 9, '2000-01-01 00:00:04', '2000-01-01 00:00:04', ...),
( 'a', 9, '2000-01-01 00:00:05', '2000-01-01 00:00:05', ...),
( 'a', 9, '2000-01-01 00:00:06', '2000-01-01 00:00:06', ...),
( 'a', 9, '2000-01-01 00:00:07', '2000-01-01 00:00:07', ...),
( 'a', 9, '2000-01-01 00:00:08', '2000-01-01 00:00:08', ...),
( 'a', 9, '2000-01-01 00:00:09', '2000-01-01 00:00:09', ...),
...
('abc', 10, '2000-01-01 00:33:10', '2000-01-01 00:33:10', ...),
('abc', 10, '2000-01-01 00:33:11', '2000-01-01 00:33:11', ...),
('abc', 10, '2000-01-01 00:33:12', '2000-01-01 00:33:12', ...),
('abc', 10, '2000-01-01 00:33:13', '2000-01-01 00:33:13', ...),
('abc', 10, '2000-01-01 00:33:14', '2000-01-01 00:33:14', ...),
('abc', 10, '2000-01-01 00:33:15', '2000-01-01 00:33:15', ...),
('abc', 10, '2000-01-01 00:33:16', '2000-01-01 00:33:16', ...),
('abc', 10, '2000-01-01 00:33:17', '2000-01-01 00:33:17', ...),
('abc', 10, '2000-01-01 00:33:18', '2000-01-01 00:33:18', ...),
('abc', 10, '2000-01-01 00:33:19', '2000-01-01 00:33:19', ...)],
names=['a', 'b', 'dti_1', 'dti_2', 'dti_3'], length=2000)"""
assert result == expected
| true | true |
f71474cfffbbc364e0e62d899c3954f135303af3 | 8,393 | py | Python | included_dependencies/cloudscraper/interpreters/native.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 82 | 2020-03-28T02:24:38.000Z | 2022-03-30T04:18:42.000Z | included_dependencies/cloudscraper/interpreters/native.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 118 | 2020-03-14T17:34:11.000Z | 2022-03-30T07:07:45.000Z | included_dependencies/cloudscraper/interpreters/native.py | AlexRiina/FanFicFare | 2cd6f53f766e74052c6ca7ab5c2eabff24f59742 | [
"Apache-2.0"
] | 30 | 2020-06-20T15:31:53.000Z | 2022-03-06T06:23:55.000Z | from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
# ------------------------------------------------------------------------------- #
# Maps AST operator node types to their arithmetic implementations.
_OP_MAP = {
    ast.Add: op.add,
    ast.Sub: op.sub,
    ast.Mult: op.mul,
    ast.Div: op.truediv,
    ast.Invert: op.neg,
}

# ------------------------------------------------------------------------------- #


class Calc(ast.NodeVisitor):
    """Minimal arithmetic evaluator for expressions produced by the JSFuck decoder.

    Walks a parsed Python expression and computes its value using only the
    operators registered in ``_OP_MAP``.
    """

    def visit_BinOp(self, node):
        # Recursively evaluate both operands, then apply the mapped operator.
        return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))

    # ------------------------------------------------------------------------------- #

    def visit_Constant(self, node):
        # Python 3.8+ parses numeric literals as ast.Constant; relying on the
        # deprecated ast.Num compatibility shim emits warnings and is slated
        # for removal, so handle Constant explicitly.
        return node.value

    # ------------------------------------------------------------------------------- #

    def visit_Num(self, node):
        # Backwards compatibility: Python < 3.8 parses numbers as ast.Num.
        return node.n

    # ------------------------------------------------------------------------------- #

    def visit_Expr(self, node):
        return self.visit(node.value)

    # ------------------------------------------------------------------------------- #

    @classmethod
    def doMath(cls, expression):
        """Parse *expression* (a string) and return its numeric value."""
        tree = ast.parse(expression)
        return cls().visit(tree.body[0])
# ------------------------------------------------------------------------------- #
class Parentheses(object):
    """Remove the minimum number of parentheses needed to balance a string.

    ``fix`` explores the deletion search space depth-first and returns every
    distinct balanced string reachable by removing parentheses, in discovery
    order.
    """

    def fix(self, s):
        """Return all balanced variants of *s* reachable by minimal deletions."""
        results = []
        self.visited = {s}
        self.dfs(s, self.invalid(s), results)
        return results

    # ------------------------------------------------------------------------------- #

    def dfs(self, s, n, res):
        # n == 0 means the candidate is already balanced: record it.
        if n == 0:
            res.append(s)
            return
        # Try deleting each parenthesis; recurse only when the deletion
        # strictly reduces the imbalance (prunes the search space).
        for pos, ch in enumerate(s):
            if ch not in ('(', ')'):
                continue
            candidate = s[:pos] + s[pos + 1:]
            if candidate in self.visited:
                continue
            remaining = self.invalid(candidate)
            if remaining < n:
                self.visited.add(candidate)
                self.dfs(candidate, remaining, res)

    # ------------------------------------------------------------------------------- #

    def invalid(self, s):
        """Count of parentheses that must be deleted to balance *s*."""
        opened = unmatched_close = 0
        for ch in s:
            if ch == '(':
                opened += 1
            elif ch == ')':
                opened -= 1
            # A close with nothing open is permanently unmatched.
            if opened < 0:
                unmatched_close += 1
                opened = 0
        return opened + unmatched_close
# ------------------------------------------------------------------------------- #
class ChallengeInterpreter(JavaScriptInterpreter):
    """Pure-Python solver for Cloudflare's legacy IUAM JavaScript challenge.

    Rather than running a JavaScript engine, it extracts the JSFuck
    arithmetic from the challenge page with regular expressions and
    evaluates it with the ``Calc`` and ``Parentheses`` helpers above.
    """

    def __init__(self):
        # 'native' is the interpreter name this engine is identified by.
        super(ChallengeInterpreter, self).__init__('native')

    # ------------------------------------------------------------------------------- #

    def eval(self, body, domain):
        """Solve the challenge embedded in *body* (page HTML) for *domain*.

        Returns the answer formatted to 10 decimal places, as the
        challenge form expects. Raises ``CloudflareSolveError`` when the
        page does not match the expected patterns.
        """
        # JavaScript arithmetic operators appearing in the challenge.
        operators = {
            '+': op.add,
            '-': op.sub,
            '*': op.mul,
            '/': op.truediv
        }

        # ------------------------------------------------------------------------------- #

        def flatten(lists):
            # Recursively flatten arbitrarily nested lists into one flat list.
            return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]

        # ------------------------------------------------------------------------------- #

        def jsfuckToNumber(jsFuck):
            # Decode a JSFuck expression (built from []()+! tokens) into an int.
            # "Clean Up" JSFuck
            jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
            jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
            # Drop whatever unbalanced parentheses the substitutions left behind.
            jsFuck = Parentheses().fix(jsFuck)[0]
            # Hackery Parser for Math
            # Top-level '+' separates digit groups: each group is summed and the
            # group results are concatenated as decimal digits.
            stack = []
            bstack = []
            for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
                if i == '+':
                    stack.append(bstack)
                    bstack = []
                    continue
                bstack.append(i)
            stack.append(bstack)
            return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))

        # ------------------------------------------------------------------------------- #

        def divisorMath(payload, needle, domain):
            # Evaluate an "X / Y" term where Y may itself depend on a character
            # of the target domain (Cloudflare's domain-dependent factor).
            jsfuckMath = payload.split('/')
            if needle in jsfuckMath[1]:
                # Denominator calls a function of a domain character: combine
                # the leading number with ord(domain[index]).
                expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
                expression_value = operators[expression[1]](
                    float(jsfuckToNumber(expression[0])),
                    float(ord(domain[jsfuckToNumber(jsfuckMath[1][
                        jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
                    ])]))
                )
            else:
                expression_value = jsfuckToNumber(jsfuckMath[1])
            expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
            return expression_value

        # ------------------------------------------------------------------------------- #

        def challengeSolve(body, domain):
            jschl_answer = 0
            # Locate the setTimeout payload: the working variable name, its
            # initial value, and the sequence of compound assignments.
            try:
                jsfuckChallenge = re.search(
                    r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
                    r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
                    body,
                    re.DOTALL | re.MULTILINE
                ).groupdict()
            except AttributeError:
                raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
            # Optional 'k' indirection: an index selecting a hidden <div>
            # whose text is another JSFuck expression.
            kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
            if kJSFUCK:
                try:
                    kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
                except IndexError:
                    raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
                try:
                    kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
                except IndexError:
                    raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
                try:
                    r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
                    kValues = {}
                    for m in r.finditer(body):
                        kValues[int(m.group('id'))] = m.group('jsfuck')
                    jsfuckChallenge['k'] = kValues[kJSFUCK]
                except (AttributeError, IndexError):
                    raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
            # Iterate the "<var> <op>= <expr>;" assignments in order.
            jsfuckChallenge['challenge'] = re.finditer(
                r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
                    jsfuckChallenge['variable'],
                    jsfuckChallenge['variable']
                ),
                jsfuckChallenge['challenge']
            )
            # ------------------------------------------------------------------------------- #
            if '/' in jsfuckChallenge['init']:
                val = jsfuckChallenge['init'].split('/')
                jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
            else:
                jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
            # ------------------------------------------------------------------------------- #
            for expressionMatch in jsfuckChallenge['challenge']:
                oper, expression = expressionMatch.groups()
                if '/' in expression:
                    expression_value = divisorMath(expression, 'function(p)', domain)
                else:
                    if 'Element' in expression:
                        # NOTE(review): relies on jsfuckChallenge['k'] having been
                        # set above; if the page had no 'k' assignment this would
                        # raise KeyError rather than CloudflareSolveError — confirm
                        # whether that page variant can occur.
                        expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
                    else:
                        expression_value = jsfuckToNumber(expression)
                jschl_answer = operators[oper](jschl_answer, expression_value)
            # ------------------------------------------------------------------------------- #
            # if not jsfuckChallenge['k'] and '+ t.length' in body:
            #     jschl_answer += len(domain)
            # ------------------------------------------------------------------------------- #
            # Cloudflare compares against toFixed(10), hence the fixed format.
            return '{0:.10f}'.format(jschl_answer)

        # ------------------------------------------------------------------------------- #

        return challengeSolve(body, domain)


# ------------------------------------------------------------------------------- #

# Instantiated at import time; presumably the JavaScriptInterpreter base class
# registers the instance under its name ('native') — confirm in the
# interpreters package __init__.
ChallengeInterpreter()
| 35.867521 | 124 | 0.398546 | from __future__ import absolute_import
import ast
import re
import operator as op
import pyparsing
from ..exceptions import CloudflareSolveError
from . import JavaScriptInterpreter
_OP_MAP = {
ast.Add: op.add,
ast.Sub: op.sub,
ast.Mult: op.mul,
ast.Div: op.truediv,
ast.Invert: op.neg,
}
class Calc(ast.NodeVisitor):
def visit_BinOp(self, node):
return _OP_MAP[type(node.op)](self.visit(node.left), self.visit(node.right))
def visit_Num(self, node):
return node.n
def visit_Expr(self, node):
return self.visit(node.value)
@classmethod
def doMath(cls, expression):
tree = ast.parse(expression)
calc = cls()
return calc.visit(tree.body[0])
class Parentheses(object):
def fix(self, s):
res = []
self.visited = set([s])
self.dfs(s, self.invalid(s), res)
return res
def dfs(self, s, n, res):
if n == 0:
res.append(s)
return
for i in range(len(s)):
if s[i] in ['(', ')']:
s_new = s[:i] + s[i + 1:]
if s_new not in self.visited and self.invalid(s_new) < n:
self.visited.add(s_new)
self.dfs(s_new, self.invalid(s_new), res)
def invalid(self, s):
plus = minus = 0
memo = {"(": 1, ")": -1}
for c in s:
plus += memo.get(c, 0)
minus += 1 if plus < 0 else 0
plus = max(0, plus)
return plus + minus
class ChallengeInterpreter(JavaScriptInterpreter):
def __init__(self):
super(ChallengeInterpreter, self).__init__('native')
def eval(self, body, domain):
operators = {
'+': op.add,
'-': op.sub,
'*': op.mul,
'/': op.truediv
}
def flatten(lists):
return sum(map(flatten, lists), []) if isinstance(lists, list) else [lists]
def jsfuckToNumber(jsFuck):
jsFuck = jsFuck.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0')
jsFuck = jsFuck.lstrip('+').replace('(+', '(').replace(' ', '')
jsFuck = Parentheses().fix(jsFuck)[0]
stack = []
bstack = []
for i in flatten(pyparsing.nestedExpr().parseString(jsFuck).asList()):
if i == '+':
stack.append(bstack)
bstack = []
continue
bstack.append(i)
stack.append(bstack)
return int(''.join([str(Calc.doMath(''.join(i))) for i in stack]))
def divisorMath(payload, needle, domain):
jsfuckMath = payload.split('/')
if needle in jsfuckMath[1]:
expression = re.findall(r"^(.*?)(.)\(function", jsfuckMath[1])[0]
expression_value = operators[expression[1]](
float(jsfuckToNumber(expression[0])),
float(ord(domain[jsfuckToNumber(jsfuckMath[1][
jsfuckMath[1].find('"("+p+")")}') + len('"("+p+")")}'):-2
])]))
)
else:
expression_value = jsfuckToNumber(jsfuckMath[1])
expression_value = jsfuckToNumber(jsfuckMath[0]) / float(expression_value)
return expression_value
def challengeSolve(body, domain):
jschl_answer = 0
try:
jsfuckChallenge = re.search(
r"setTimeout\(function\(\){\s+var.*?f,\s*(?P<variable>\w+).*?:(?P<init>\S+)};"
r".*?\('challenge-form'\);.*?;(?P<challenge>.*?a\.value)\s*=\s*\S+\.toFixed\(10\);",
body,
re.DOTALL | re.MULTILINE
).groupdict()
except AttributeError:
raise CloudflareSolveError('There was an issue extracting "jsfuckChallenge" from the Cloudflare challenge.')
kJSFUCK = re.search(r'(;|)\s*k.=(?P<kJSFUCK>\S+);', jsfuckChallenge['challenge'], re.S | re.M)
if kJSFUCK:
try:
kJSFUCK = jsfuckToNumber(kJSFUCK.group('kJSFUCK'))
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kJSFUCK" from the Cloudflare challenge.')
try:
kID = re.search(r"\s*k\s*=\s*'(?P<kID>\S+)';", body).group('kID')
except IndexError:
raise CloudflareSolveError('There was an issue extracting "kID" from the Cloudflare challenge.')
try:
r = re.compile(r'<div id="{}(?P<id>\d+)">\s*(?P<jsfuck>[^<>]*)</div>'.format(kID))
kValues = {}
for m in r.finditer(body):
kValues[int(m.group('id'))] = m.group('jsfuck')
jsfuckChallenge['k'] = kValues[kJSFUCK]
except (AttributeError, IndexError):
raise CloudflareSolveError('There was an issue extracting "kValues" from the Cloudflare challenge.')
jsfuckChallenge['challenge'] = re.finditer(
r'{}.*?([+\-*/])=(.*?);(?=a\.value|{})'.format(
jsfuckChallenge['variable'],
jsfuckChallenge['variable']
),
jsfuckChallenge['challenge']
)
if '/' in jsfuckChallenge['init']:
val = jsfuckChallenge['init'].split('/')
jschl_answer = jsfuckToNumber(val[0]) / float(jsfuckToNumber(val[1]))
else:
jschl_answer = jsfuckToNumber(jsfuckChallenge['init'])
for expressionMatch in jsfuckChallenge['challenge']:
oper, expression = expressionMatch.groups()
if '/' in expression:
expression_value = divisorMath(expression, 'function(p)', domain)
else:
if 'Element' in expression:
expression_value = divisorMath(jsfuckChallenge['k'], '"("+p+")")}', domain)
else:
expression_value = jsfuckToNumber(expression)
jschl_answer = operators[oper](jschl_answer, expression_value)
return '{0:.10f}'.format(jschl_answer)
return challengeSolve(body, domain)
ChallengeInterpreter()
| true | true |
f714758643422dfaacbc46a0e387395c3f0c97c1 | 10,009 | py | Python | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py | StewartW/aws-deployment-framework | 7511241664c946ce3b045db211a4931b1dbaac6d | [
"Apache-2.0"
] | 1 | 2021-11-28T09:27:16.000Z | 2021-11-28T09:27:16.000Z | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py | StewartW/aws-deployment-framework | 7511241664c946ce3b045db211a4931b1dbaac6d | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/initial_commit/bootstrap_repository/adf-build/shared/schema_validation.py | StewartW/aws-deployment-framework | 7511241664c946ce3b045db211a4931b1dbaac6d | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
Schema Validation for Deployment map files
"""
from schema import Schema, And, Use, Or, Optional, Regex
from logger import configure_logger
LOGGER = configure_logger(__name__)
# Where pipeline notifications go: an explicit target plus the delivery
# mechanism (a Lambda function or a chat bot).
NOTIFICATION_PROPS = {
    Optional("target"): str,
    Optional("type") : Or("lambda", "chat_bot")
}
# Pipeline Params
PARAM_SCHEMA = {
    # Either a plain endpoint string or the richer notification mapping above.
    Optional("notification_endpoint"): Or(str, NOTIFICATION_PROPS),
    # Schedule expression for timed pipeline executions.
    Optional("schedule"): str,
    Optional("restart_execution_on_update"): bool,
    # Only the "default" pipeline type is currently supported.
    Optional("pipeline_type", default="default"): Or("default"),
}
# A 12-digit AWS account id. Accepts int or str input and normalises it to a
# string before matching, so quoted and unquoted ids both validate — except
# ids with a leading zero, which must be quoted (see error text below).
AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
    And(
        Or(int, str),
        Use(str),
        Regex(
            AWS_ACCOUNT_ID_REGEX_STR,
            error=(
                "The specified account id is incorrect. "
                "This typically happens when you specify the account id as a "
                "number, while the account id starts with a zero. If this is "
                "the case, please wrap the account id in quotes to make it a "
                "string. An AWS Account Id is a number of 12 digits, which "
                "should start with a zero if the Account Id has a zero at "
                "the start too. "
                "The number shown to not match the regular expression could "
                "be interpreted as an octal number due to the leading zero. "
                "Therefore, it might not match the account id as specified "
                "in the deployment map."
            )
        )
    )
)
# CodeCommit Source
CODECOMMIT_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
Optional("repository"): str,
Optional("branch"): str,
Optional("poll_for_changes"): bool,
Optional("owner"): str,
Optional("role"): str,
Optional("trigger_on_changes"): bool,
Optional("output_artifact_format", default=None): Or("CODEBUILD_CLONE_REF", "CODE_ZIP", None)
}
CODECOMMIT_SOURCE = {
"provider": 'codecommit',
"properties": CODECOMMIT_SOURCE_PROPS
}
# GitHub Source
GITHUB_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"oauth_token_path": str,
"json_field": str,
Optional("trigger_on_changes"): bool,
}
GITHUB_SOURCE = {
"provider": 'github',
"properties": GITHUB_SOURCE_PROPS
}
# CodeStar Source
CODESTAR_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"codestar_connection_path": str
}
CODESTAR_SOURCE = {
"provider": 'codestar',
"properties": CODESTAR_SOURCE_PROPS
}
# S3 Source
S3_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
"bucket_name": str,
"object_key": str,
Optional("trigger_on_changes"): bool,
}
S3_SOURCE = {
"provider": 's3',
"properties": S3_SOURCE_PROPS
}
# CodeBuild
CODEBUILD_IMAGE_PROPS = {
"repository_arn": str, # arn:aws:ecr:region:012345678910:repository/test
Optional("tag"): str, # defaults to latest
}
CODEBUILD_PROPS = {
Optional("image"): Or(str, CODEBUILD_IMAGE_PROPS),
Optional("size"): Or('small', 'medium', 'large'),
Optional("spec_filename"): str,
Optional("environment_variables"): {Optional(str): Or(str, bool, int, object)},
Optional("role"): str,
Optional("timeout"): int,
Optional("privileged"): bool,
Optional("spec_inline"): object,
}
DEFAULT_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("enabled"): bool,
Optional("properties"): CODEBUILD_PROPS
}
STAGE_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("properties"): CODEBUILD_PROPS
}
# Jenkins
JENKINS_PROPS = {
Optional("project_name"): str,
Optional("server_url"): str,
Optional("provider_name"): str
}
JENKINS_BUILD = {
Optional("provider"): 'jenkins',
Optional("enabled"): bool,
Optional("properties"): JENKINS_PROPS
}
# CloudFormation
PARAM_OVERRIDE_SCHEMA = {
"inputs": str,
"param": str,
"key_name": str
}
# CloudFormation deploy actions are accepted in either upper or lower case;
# derive the lowercase variants instead of listing each twice.
_CLOUDFORMATION_ACTION_NAMES = (
    'CHANGE_SET_EXECUTE',
    'CHANGE_SET_REPLACE',
    'CREATE_UPDATE',
    'DELETE_ONLY',
    'REPLACE_ON_FAILURE',
)
CLOUDFORMATION_ACTIONS = Or(
    *_CLOUDFORMATION_ACTION_NAMES,
    *[name.lower() for name in _CLOUDFORMATION_ACTION_NAMES]
)
CLOUDFORMATION_PROPS = {
Optional("stack_name"): str,
Optional("template_filename"): str,
Optional("root_dir"): str,
Optional("role"): str,
Optional("action"): CLOUDFORMATION_ACTIONS,
Optional("outputs"): str,
Optional("change_set_approval"): bool,
Optional("param_overrides"): [PARAM_OVERRIDE_SCHEMA]
}
# No need for a stage schema since CFN takes all optional props
DEFAULT_CLOUDFORMATION_DEPLOY = {
"provider": 'cloudformation',
Optional("properties"): CLOUDFORMATION_PROPS
}
# CodeDeploy
CODEDEPLOY_PROPS = {
"application_name": str,
"deployment_group_name": str,
Optional("role"): str
}
STAGE_CODEDEPLOY_DEPLOY = {
Optional("provider"): 'codedeploy',
"properties": CODEDEPLOY_PROPS
}
DEFAULT_CODEDEPLOY_DEPLOY = {
"provider": 'codedeploy',
Optional("properties"): CODEDEPLOY_PROPS
}
# S3
S3_DEPLOY_PROPS = {
"bucket_name": str,
"object_key": str,
Optional("extract"): bool,
Optional("role"): str
}
STAGE_S3_DEPLOY = {
Optional("provider"): 's3',
"properties": S3_DEPLOY_PROPS
}
DEFAULT_S3_DEPLOY = {
"provider": 's3',
Optional("properties"): S3_DEPLOY_PROPS
}
# Service Catalog
SERVICECATALOG_PROPS = {
"product_id": str,
Optional("configuration_file_path"): str
}
STAGE_SERVICECATALOG_DEPLOY = {
Optional("provider"): 'service_catalog',
"properties": SERVICECATALOG_PROPS
}
DEFAULT_SERVICECATALOG_DEPLOY = {
"provider": 'service_catalog',
Optional("properties"): SERVICECATALOG_PROPS
}
# Lambda
LAMBDA_PROPS = {
"function_name": str,
Optional("input"): Or(str, object),
Optional("role"): str
}
STAGE_LAMBDA_INVOKE = {
Optional("provider"): 'lambda',
"properties": LAMBDA_PROPS
}
DEFAULT_LAMBDA_INVOKE = {
"provider": 'lambda',
Optional("properties"): LAMBDA_PROPS
}
# Approval
APPROVAL_PROPS = {
Optional("message"): str,
Optional("notification_endpoint"): str,
Optional("sns_topic_arn"): str
}
DEFAULT_APPROVAL = {
"provider": 'approval',
"properties": APPROVAL_PROPS
}
# Core Schema
PROVIDER_SOURCE_SCHEMAS = {
'codecommit': Schema(CODECOMMIT_SOURCE),
'github': Schema(GITHUB_SOURCE),
's3': Schema(S3_SOURCE),
'codestar': Schema(CODESTAR_SOURCE),
}
PROVIDER_BUILD_SCHEMAS = {
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
'jenkins': Schema(JENKINS_BUILD),
}
PROVIDER_DEPLOY_SCHEMAS = {
'cloudformation': Schema(DEFAULT_CLOUDFORMATION_DEPLOY),
's3': Schema(DEFAULT_S3_DEPLOY),
'codedeploy': Schema(DEFAULT_CODEDEPLOY_DEPLOY),
'lambda': Schema(DEFAULT_LAMBDA_INVOKE),
'service_catalog': Schema(DEFAULT_SERVICECATALOG_DEPLOY),
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
}
# Two-stage validation: first the coarse shape (provider name + properties
# dict), then the provider-specific schema looked up in the matching
# PROVIDER_*_SCHEMAS table.
PROVIDER_SCHEMA = {
    'source': And(
        {
            'provider': Or('codecommit', 'github', 's3', 'codestar'),
            'properties': dict,
        },
        lambda x: PROVIDER_SOURCE_SCHEMAS[x['provider']].validate(x), #pylint: disable=W0108
    ),
    Optional('build'): And(
        {
            Optional('provider'): Or('codebuild', 'jenkins'),
            Optional('enabled'): bool,
            Optional('properties'): dict,
        },
        # Build provider defaults to CodeBuild when omitted.
        lambda x: PROVIDER_BUILD_SCHEMAS[x.get('provider', 'codebuild')].validate(x), #pylint: disable=W0108
    ),
    Optional('deploy'): And(
        {
            'provider': Or(
                'cloudformation', 's3', 'codedeploy', 'lambda',
                'service_catalog', 'codebuild'
            ),
            Optional('enabled'): bool,
            Optional('properties'): dict,
        },
        lambda x: PROVIDER_DEPLOY_SCHEMAS[x['provider']].validate(x), #pylint: disable=W0108
    ),
    }
REGION_SCHEMA = Or(
str,
list
)
TARGET_LIST_SCHEMA = [Or(
str,
int
)]
TARGET_WAVE_SCHEME = {
Optional("size", default=50): int,
}
# Pipeline Params
TARGET_SCHEMA = {
Optional("path"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("tags"): {And(str, Regex(r"\A.{1,128}\Z")): And(str, Regex(r"\A.{0,256}\Z"))},
Optional("target"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("name"): str,
Optional("provider"): Or('lambda', 's3', 'codedeploy', 'cloudformation', 'service_catalog', 'approval', 'codebuild', 'jenkins'),
Optional("properties"): Or(CODEBUILD_PROPS, JENKINS_PROPS, CLOUDFORMATION_PROPS, CODEDEPLOY_PROPS, S3_DEPLOY_PROPS, SERVICECATALOG_PROPS, LAMBDA_PROPS, APPROVAL_PROPS),
Optional("regions"): REGION_SCHEMA,
Optional("exclude", default=[]): [str],
Optional("wave", default={"size": 50}): TARGET_WAVE_SCHEME
}
COMPLETION_TRIGGERS_SCHEMA = {
"pipelines": [str]
}
PIPELINE_TRIGGERS_SCHEMA = {
Optional("code_artifact"): {
"repository": str,
Optional("package"): str,
}
}
TRIGGERS_SCHEMA = {
Optional("on_complete"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggered_by"): [PIPELINE_TRIGGERS_SCHEMA],
}
PIPELINE_SCHEMA = {
"name": And(str, len),
"default_providers": PROVIDER_SCHEMA,
Optional("params"): PARAM_SCHEMA,
Optional("tags"): dict,
Optional("targets"): [Or(str, int, TARGET_SCHEMA, TARGET_LIST_SCHEMA)],
Optional("regions"): REGION_SCHEMA,
Optional("completion_trigger"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggers"): TRIGGERS_SCHEMA
}
TOP_LEVEL_SCHEMA = {
"pipelines": [PIPELINE_SCHEMA],
# Allow any toplevel key starting with "x-" or "x_".
# ADF will ignore these, but users can use them to define anchors in one place.
Optional(Regex('^[x][-_].*')): object
}
class SchemaValidation:
    """
    Validates a parsed deployment map against TOP_LEVEL_SCHEMA.

    The validated document (with schema defaults filled in, e.g. the
    Optional(..., default=...) entries above) is stored in
    ``self.validated``; ``Schema.validate`` raises on a mismatch.
    """
    def __init__(self, map_input: dict):
        self.validated = Schema(TOP_LEVEL_SCHEMA).validate(map_input)
| 28.194366 | 172 | 0.654911 |
from schema import Schema, And, Use, Or, Optional, Regex
from logger import configure_logger
LOGGER = configure_logger(__name__)
NOTIFICATION_PROPS = {
Optional("target"): str,
Optional("type") : Or("lambda", "chat_bot")
}
PARAM_SCHEMA = {
Optional("notification_endpoint"): Or(str, NOTIFICATION_PROPS),
Optional("schedule"): str,
Optional("restart_execution_on_update"): bool,
Optional("pipeline_type", default="default"): Or("default"),
}
AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
And(
Or(int, str),
Use(str),
Regex(
AWS_ACCOUNT_ID_REGEX_STR,
error=(
"The specified account id is incorrect. "
"This typically happens when you specify the account id as a "
"number, while the account id starts with a zero. If this is "
"the case, please wrap the account id in quotes to make it a "
"string. An AWS Account Id is a number of 12 digits, which "
"should start with a zero if the Account Id has a zero at "
"the start too. "
"The number shown to not match the regular expression could "
"be interpreted as an octal number due to the leading zero. "
"Therefore, it might not match the account id as specified "
"in the deployment map."
)
)
)
)
CODECOMMIT_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
Optional("repository"): str,
Optional("branch"): str,
Optional("poll_for_changes"): bool,
Optional("owner"): str,
Optional("role"): str,
Optional("trigger_on_changes"): bool,
Optional("output_artifact_format", default=None): Or("CODEBUILD_CLONE_REF", "CODE_ZIP", None)
}
CODECOMMIT_SOURCE = {
"provider": 'codecommit',
"properties": CODECOMMIT_SOURCE_PROPS
}
GITHUB_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"oauth_token_path": str,
"json_field": str,
Optional("trigger_on_changes"): bool,
}
GITHUB_SOURCE = {
"provider": 'github',
"properties": GITHUB_SOURCE_PROPS
}
CODESTAR_SOURCE_PROPS = {
Optional("repository"): str,
Optional("branch"): str,
"owner": str,
"codestar_connection_path": str
}
CODESTAR_SOURCE = {
"provider": 'codestar',
"properties": CODESTAR_SOURCE_PROPS
}
S3_SOURCE_PROPS = {
"account_id": AWS_ACCOUNT_ID_SCHEMA,
"bucket_name": str,
"object_key": str,
Optional("trigger_on_changes"): bool,
}
S3_SOURCE = {
"provider": 's3',
"properties": S3_SOURCE_PROPS
}
CODEBUILD_IMAGE_PROPS = {
"repository_arn": str,
Optional("tag"): str,
}
CODEBUILD_PROPS = {
Optional("image"): Or(str, CODEBUILD_IMAGE_PROPS),
Optional("size"): Or('small', 'medium', 'large'),
Optional("spec_filename"): str,
Optional("environment_variables"): {Optional(str): Or(str, bool, int, object)},
Optional("role"): str,
Optional("timeout"): int,
Optional("privileged"): bool,
Optional("spec_inline"): object,
}
DEFAULT_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("enabled"): bool,
Optional("properties"): CODEBUILD_PROPS
}
STAGE_CODEBUILD_BUILD = {
Optional("provider"): 'codebuild',
Optional("properties"): CODEBUILD_PROPS
}
JENKINS_PROPS = {
Optional("project_name"): str,
Optional("server_url"): str,
Optional("provider_name"): str
}
JENKINS_BUILD = {
Optional("provider"): 'jenkins',
Optional("enabled"): bool,
Optional("properties"): JENKINS_PROPS
}
PARAM_OVERRIDE_SCHEMA = {
"inputs": str,
"param": str,
"key_name": str
}
CLOUDFORMATION_ACTIONS = Or(
'CHANGE_SET_EXECUTE',
'CHANGE_SET_REPLACE',
'CREATE_UPDATE',
'DELETE_ONLY',
'REPLACE_ON_FAILURE',
'change_set_execute',
'change_set_replace',
'create_update',
'delete_only',
'replace_on_failure'
)
CLOUDFORMATION_PROPS = {
Optional("stack_name"): str,
Optional("template_filename"): str,
Optional("root_dir"): str,
Optional("role"): str,
Optional("action"): CLOUDFORMATION_ACTIONS,
Optional("outputs"): str,
Optional("change_set_approval"): bool,
Optional("param_overrides"): [PARAM_OVERRIDE_SCHEMA]
}
DEFAULT_CLOUDFORMATION_DEPLOY = {
"provider": 'cloudformation',
Optional("properties"): CLOUDFORMATION_PROPS
}
CODEDEPLOY_PROPS = {
"application_name": str,
"deployment_group_name": str,
Optional("role"): str
}
STAGE_CODEDEPLOY_DEPLOY = {
Optional("provider"): 'codedeploy',
"properties": CODEDEPLOY_PROPS
}
DEFAULT_CODEDEPLOY_DEPLOY = {
"provider": 'codedeploy',
Optional("properties"): CODEDEPLOY_PROPS
}
S3_DEPLOY_PROPS = {
"bucket_name": str,
"object_key": str,
Optional("extract"): bool,
Optional("role"): str
}
STAGE_S3_DEPLOY = {
Optional("provider"): 's3',
"properties": S3_DEPLOY_PROPS
}
DEFAULT_S3_DEPLOY = {
"provider": 's3',
Optional("properties"): S3_DEPLOY_PROPS
}
SERVICECATALOG_PROPS = {
"product_id": str,
Optional("configuration_file_path"): str
}
STAGE_SERVICECATALOG_DEPLOY = {
Optional("provider"): 'service_catalog',
"properties": SERVICECATALOG_PROPS
}
DEFAULT_SERVICECATALOG_DEPLOY = {
"provider": 'service_catalog',
Optional("properties"): SERVICECATALOG_PROPS
}
LAMBDA_PROPS = {
"function_name": str,
Optional("input"): Or(str, object),
Optional("role"): str
}
STAGE_LAMBDA_INVOKE = {
Optional("provider"): 'lambda',
"properties": LAMBDA_PROPS
}
DEFAULT_LAMBDA_INVOKE = {
"provider": 'lambda',
Optional("properties"): LAMBDA_PROPS
}
APPROVAL_PROPS = {
Optional("message"): str,
Optional("notification_endpoint"): str,
Optional("sns_topic_arn"): str
}
DEFAULT_APPROVAL = {
"provider": 'approval',
"properties": APPROVAL_PROPS
}
PROVIDER_SOURCE_SCHEMAS = {
'codecommit': Schema(CODECOMMIT_SOURCE),
'github': Schema(GITHUB_SOURCE),
's3': Schema(S3_SOURCE),
'codestar': Schema(CODESTAR_SOURCE),
}
PROVIDER_BUILD_SCHEMAS = {
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
'jenkins': Schema(JENKINS_BUILD),
}
PROVIDER_DEPLOY_SCHEMAS = {
'cloudformation': Schema(DEFAULT_CLOUDFORMATION_DEPLOY),
's3': Schema(DEFAULT_S3_DEPLOY),
'codedeploy': Schema(DEFAULT_CODEDEPLOY_DEPLOY),
'lambda': Schema(DEFAULT_LAMBDA_INVOKE),
'service_catalog': Schema(DEFAULT_SERVICECATALOG_DEPLOY),
'codebuild': Schema(DEFAULT_CODEBUILD_BUILD),
}
PROVIDER_SCHEMA = {
'source': And(
{
'provider': Or('codecommit', 'github', 's3', 'codestar'),
'properties': dict,
},
lambda x: PROVIDER_SOURCE_SCHEMAS[x['provider']].validate(x),
),
Optional('build'): And(
{
Optional('provider'): Or('codebuild', 'jenkins'),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_BUILD_SCHEMAS[x.get('provider', 'codebuild')].validate(x),
),
Optional('deploy'): And(
{
'provider': Or(
'cloudformation', 's3', 'codedeploy', 'lambda',
'service_catalog', 'codebuild'
),
Optional('enabled'): bool,
Optional('properties'): dict,
},
lambda x: PROVIDER_DEPLOY_SCHEMAS[x['provider']].validate(x),
),
}
REGION_SCHEMA = Or(
str,
list
)
TARGET_LIST_SCHEMA = [Or(
str,
int
)]
TARGET_WAVE_SCHEME = {
Optional("size", default=50): int,
}
TARGET_SCHEMA = {
Optional("path"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("tags"): {And(str, Regex(r"\A.{1,128}\Z")): And(str, Regex(r"\A.{0,256}\Z"))},
Optional("target"): Or(str, int, TARGET_LIST_SCHEMA),
Optional("name"): str,
Optional("provider"): Or('lambda', 's3', 'codedeploy', 'cloudformation', 'service_catalog', 'approval', 'codebuild', 'jenkins'),
Optional("properties"): Or(CODEBUILD_PROPS, JENKINS_PROPS, CLOUDFORMATION_PROPS, CODEDEPLOY_PROPS, S3_DEPLOY_PROPS, SERVICECATALOG_PROPS, LAMBDA_PROPS, APPROVAL_PROPS),
Optional("regions"): REGION_SCHEMA,
Optional("exclude", default=[]): [str],
Optional("wave", default={"size": 50}): TARGET_WAVE_SCHEME
}
COMPLETION_TRIGGERS_SCHEMA = {
"pipelines": [str]
}
PIPELINE_TRIGGERS_SCHEMA = {
Optional("code_artifact"): {
"repository": str,
Optional("package"): str,
}
}
TRIGGERS_SCHEMA = {
Optional("on_complete"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggered_by"): [PIPELINE_TRIGGERS_SCHEMA],
}
PIPELINE_SCHEMA = {
"name": And(str, len),
"default_providers": PROVIDER_SCHEMA,
Optional("params"): PARAM_SCHEMA,
Optional("tags"): dict,
Optional("targets"): [Or(str, int, TARGET_SCHEMA, TARGET_LIST_SCHEMA)],
Optional("regions"): REGION_SCHEMA,
Optional("completion_trigger"): COMPLETION_TRIGGERS_SCHEMA,
Optional("triggers"): TRIGGERS_SCHEMA
}
TOP_LEVEL_SCHEMA = {
"pipelines": [PIPELINE_SCHEMA],
Optional(Regex('^[x][-_].*')): object
}
class SchemaValidation:
def __init__(self, map_input: dict):
self.validated = Schema(TOP_LEVEL_SCHEMA).validate(map_input)
| true | true |
f71475d212b144b5142445b5a0db415640369fb9 | 23 | py | Python | instance/config.py | randilfernando/bot | a193c557a9ce3d9bc9d542e29e50f3077ba716df | [
"MIT"
] | null | null | null | instance/config.py | randilfernando/bot | a193c557a9ce3d9bc9d542e29e50f3077ba716df | [
"MIT"
] | 3 | 2020-04-15T16:06:19.000Z | 2020-04-15T16:07:49.000Z | instance/config.py | randilfernando/bot | a193c557a9ce3d9bc9d542e29e50f3077ba716df | [
"MIT"
] | 1 | 2021-01-27T10:32:47.000Z | 2021-01-27T10:32:47.000Z | INTENT_THRESHOLD = 0.5
| 11.5 | 22 | 0.782609 | INTENT_THRESHOLD = 0.5
| true | true |
f7147793a2e6c2dd68fdd7d5efb9db0e5d179701 | 14,417 | py | Python | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
@package mi.dataset.parser A collection of parsers that strip data blocks
out of files and feed them into the system.
@file mi/dataset/parser.py
@author Steve Foley
@brief Base classes for data set agent parsers
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
    """
    String keys used in dataset driver / parser configuration dictionaries.

    PARTICLE_MODULE and PARTICLE_CLASS are read by Parser.__init__ to
    dynamically import the configured particle class.
    """
    PARTICLE_MODULE = "particle_module"
    PARTICLE_CLASS = "particle_class"
    PARTICLE_CLASSES_DICT = "particle_classes_dict"
    DIRECTORY = "directory"
    STORAGE_DIRECTORY = "storage_directory"
    PATTERN = "pattern"
    FREQUENCY = "frequency"
    FILE_MOD_WAIT_TIME = "file_mod_wait_time"
    HARVESTER = "harvester"
    PARSER = "parser"
    MODULE = "module"
    CLASS = "class"
    URI = "uri"
    CLASS_ARGS = "class_args"
class Parser(object):
    """
    Abstract base class describing the API needed for plugin poller objects.
    Subclasses must override get_records().
    """
    def __init__(self, config, stream_handle, state, sieve_fn,
                 state_callback, publish_callback, exception_callback=None):
        """
        @param config The configuration parameters to feed into the parser
        @param stream_handle An already open file-like filehandle
        @param state The location in the file to start parsing from.
           This reflects what has already been published.
        @param sieve_fn A sieve function handed to the chunker to
           appropriately filter out the data
        @param state_callback The callback method from the agent driver
           (ultimately the agent) to call back when a state needs to be
           updated
        @param publish_callback The callback from the agent driver (and
           ultimately from the agent) where we send our sample particle to
           be published into ION
        @param exception_callback The callback from the agent driver (and
           ultimately from the agent) where we send our error events to
           be published into ION
        """
        self._chunker = StringChunker(sieve_fn)
        self._stream_handle = stream_handle
        self._state = state
        self._state_callback = state_callback
        self._publish_callback = publish_callback
        self._exception_callback = exception_callback
        self._config = config
        # Build class from module and class name, then set the state
        if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
            if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
                self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
                                                   fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
                # if there is more than one particle class for this parser, this cannot be used, need to hard code the
                # particle class in the driver (getattr then raises TypeError
                # on the non-string value and _particle_class is left None)
                try:
                    self._particle_class = getattr(self._particle_module,
                                                   config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
                except TypeError:
                    self._particle_class = None
            else:
                log.warn("Particle class is specified in config, but no particle module is specified in config")
    def get_records(self, max_count):
        """
        Returns a list of particles (following the instrument driver structure).
        @param max_count The maximum number of records to return
        """
        raise NotImplementedException("get_records() not overridden!")
    def _publish_sample(self, samples):
        """
        Publish the samples with the given publishing callback.
        @param samples A single data particle or a list of data particles
           to publish up to the system
        """
        if isinstance(samples, list):
            self._publish_callback(samples)
        else:
            self._publish_callback([samples])
    def _extract_sample(self, particle_class, regex, raw_data, timestamp):
        """
        Extract sample from a response line if present and publish
        parsed particle
        @param particle_class The class to instantiate for this specific
            data particle. Parameterizing this allows for simple, standard
            behavior from this routine
        @param regex The regular expression that matches a data sample; if regex
            is None then process every line
        @param raw_data data to input into this particle.
        @param timestamp the internal (NTP) timestamp to attach to the particle
        @retval return a raw particle if a sample was found, else None
        """
        particle = None
        try:
            if regex is None or regex.match(raw_data):
                particle = particle_class(raw_data, internal_timestamp=timestamp,
                                          preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
                # need to actually parse the particle fields to find out if there are errors
                particle.generate()
                encoding_errors = particle.get_encoding_errors()
                if encoding_errors:
                    log.warn("Failed to encode: %s", encoding_errors)
                    raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
        except (RecoverableSampleException, SampleEncodingException) as e:
            # report through the exception callback (parsing continues) or
            # re-raise when no callback was supplied
            log.error("Sample exception detected: %s raw data: %s", e, raw_data)
            if self._exception_callback:
                self._exception_callback(e)
            else:
                raise e
        return particle
class BufferLoadingParser(Parser):
    """
    This class loads data values into a record buffer, then offers up
    records from this buffer as they are requested. Parsers don't have
    to operate this way, but it can keep memory in check and smooth out
    stream inputs if they don't all come at once.
    """
    def __init__(self, config, stream_handle, state, sieve_fn,
                 state_callback, publish_callback, exception_callback=None):
        """
        Set up the record buffer / EOF bookkeeping and delegate to the
        base class; the parameters are identical to Parser.__init__.
        """
        # buffer of (particle, state) tuples produced by parse_chunks()
        self._record_buffer = []
        self._timestamp = 0.0
        # set True by get_block() once the stream handle hits EOF
        self.file_complete = False
        super(BufferLoadingParser, self).__init__(config, stream_handle, state,
                                                  sieve_fn, state_callback,
                                                  publish_callback,
                                                  exception_callback)
    def get_records(self, num_records):
        """
        Go ahead and execute the data parsing loop up to a point. This involves
        getting data from the file, stuffing it in to the chunker, then parsing
        it and publishing.
        @param num_records The number of records to gather
        @retval Return the list of particles requested, [] if none available
        """
        if num_records <= 0:
            return []
        try:
            while len(self._record_buffer) < num_records:
                self._load_particle_buffer()
        except EOFError:
            # end of file reached: verify nothing unexplained is left in the
            # chunker, then fall through to return whatever was buffered
            self._process_end_of_file()
        return self._yank_particles(num_records)
    def _process_end_of_file(self):
        """
        Confirm that the chunker does not have any extra bytes left at the end
        of the file.
        @throws UnexpectedDataException if unexplained bytes remain
        """
        (nd_timestamp, non_data) = self._chunker.get_next_non_data()
        (timestamp, chunk) = self._chunker.get_next_data()
        if non_data and len(non_data) > 0:
            log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
            raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
        elif chunk and len(chunk) > 0:
            log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
            raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk)
    def _yank_particles(self, num_records):
        """
        Get particles out of the buffer and publish them. Update the state
        of what has been published, too.
        @param num_records The number of particles to remove from the buffer
        @retval A list with num_records elements from the buffer. If num_records
            cannot be collected (perhaps due to an EOF), the list will have the
            elements it was able to collect.
        """
        if len(self._record_buffer) < num_records:
            num_to_fetch = len(self._record_buffer)
        else:
            num_to_fetch = num_records
        log.trace("Yanking %s records of %s requested",
                  num_to_fetch,
                  num_records)
        return_list = []
        records_to_return = self._record_buffer[:num_to_fetch]
        self._record_buffer = self._record_buffer[num_to_fetch:]
        if len(records_to_return) > 0:
            self._state = records_to_return[-1][1] # state side of tuple of last entry
            # strip the state info off of them now that we have what we need
            for item in records_to_return:
                log.debug("Record to return: %s", item)
                return_list.append(item[0])
            self._publish_sample(return_list)
            log.trace("Sending parser state [%s] to driver", self._state)
            file_ingested = False
            if self.file_complete and len(self._record_buffer) == 0:
                # file has been read completely and all records pulled out of the record buffer
                file_ingested = True
            self._state_callback(self._state, file_ingested) # push new state to driver
        return return_list
    def _load_particle_buffer(self):
        """
        Load up the internal record buffer with some particles based on a
        gather from the get_block method.
        """
        while self.get_block():
            result = self.parse_chunks()
            self._record_buffer.extend(result)
    def get_block(self, size=1024):
        """
        Get a block of characters for processing
        @param size The size of the block to try to read
        @retval The length of data retrieved
        @throws EOFError when the end of the file is reached
        """
        # read in some more data
        data = self._stream_handle.read(size)
        if data:
            self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
            return len(data)
        else: # EOF
            self.file_complete = True
            raise EOFError
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state (ie "(sample, state)"). An empty list if
            nothing was parsed.
        """
        raise NotImplementedException("Must write parse_chunks()!")
class SimpleParser(Parser):
    """
    A parser that does not use state, the chunker, or sieve functions:
    the whole file is parsed in one pass (parse_file) into an in-memory
    record buffer, and get_records() drains that buffer on demand.
    """
    def __init__(self, config, stream_handle, exception_callback):
        """
        Initialize the simple parser, which does not use state or the chunker
        and sieve functions.
        @param config: The parser configuration dictionary
        @param stream_handle: The stream handle of the file to parse
        @param exception_callback: The callback to use when an exception occurs
        """
        # the record buffer which will store all parsed particles
        self._record_buffer = []
        # a flag indicating if the file has been parsed or not
        self._file_parsed = False
        super(SimpleParser, self).__init__(config,
                                           stream_handle,
                                           None,  # state not used
                                           None,  # sieve_fn not used
                                           None,  # state_callback not used
                                           None,  # publish_callback not used
                                           exception_callback)
    def parse_file(self):
        """
        This method must be overridden. This method should open and read the
        file and parse the data within, and at the end of this method
        self._record_buffer will be filled with all the particles in the file.
        """
        raise NotImplementedException("parse_file() not overridden!")
    def get_records(self, number_requested=1):
        """
        Initiate parsing the file if it has not been done already, and take
        particles off the front of the record buffer, returning as many as
        requested if they are available.
        @param number_requested the number of records requested to be returned
        @return an array of particles, with a length of the number requested
            or less
        """
        if number_requested <= 0:
            return []
        if not self._file_parsed:
            self.parse_file()
            self._file_parsed = True
        # Drain with a single slice rather than repeated pop(0): each pop(0)
        # shifts the entire remaining buffer, making the old loop O(k * n);
        # slicing returns the same records in the same order in O(k).
        particles_to_return = self._record_buffer[:number_requested]
        self._record_buffer = self._record_buffer[number_requested:]
        return particles_to_return
| 43.820669 | 119 | 0.634667 |
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
PARTICLE_MODULE = "particle_module"
PARTICLE_CLASS = "particle_class"
PARTICLE_CLASSES_DICT = "particle_classes_dict"
DIRECTORY = "directory"
STORAGE_DIRECTORY = "storage_directory"
PATTERN = "pattern"
FREQUENCY = "frequency"
FILE_MOD_WAIT_TIME = "file_mod_wait_time"
HARVESTER = "harvester"
PARSER = "parser"
MODULE = "module"
CLASS = "class"
URI = "uri"
CLASS_ARGS = "class_args"
class Parser(object):
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
self._chunker = StringChunker(sieve_fn)
self._stream_handle = stream_handle
self._state = state
self._state_callback = state_callback
self._publish_callback = publish_callback
self._exception_callback = exception_callback
self._config = config
if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
try:
self._particle_class = getattr(self._particle_module,
config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
except TypeError:
self._particle_class = None
else:
log.warn("Particle class is specified in config, but no particle module is specified in config")
def get_records(self, max_count):
raise NotImplementedException("get_records() not overridden!")
def _publish_sample(self, samples):
if isinstance(samples, list):
self._publish_callback(samples)
else:
self._publish_callback([samples])
def _extract_sample(self, particle_class, regex, raw_data, timestamp):
particle = None
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
particle.generate()
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
except (RecoverableSampleException, SampleEncodingException) as e:
log.error("Sample exception detected: %s raw data: %s", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
return particle
class BufferLoadingParser(Parser):
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
self._record_buffer = []
self._timestamp = 0.0
self.file_complete = False
super(BufferLoadingParser, self).__init__(config, stream_handle, state,
sieve_fn, state_callback,
publish_callback,
exception_callback)
def get_records(self, num_records):
if num_records <= 0:
return []
try:
while len(self._record_buffer) < num_records:
self._load_particle_buffer()
except EOFError:
self._process_end_of_file()
return self._yank_particles(num_records)
def _process_end_of_file(self):
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
(timestamp, chunk) = self._chunker.get_next_data()
if non_data and len(non_data) > 0:
log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
elif chunk and len(chunk) > 0:
log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk)
def _yank_particles(self, num_records):
if len(self._record_buffer) < num_records:
num_to_fetch = len(self._record_buffer)
else:
num_to_fetch = num_records
log.trace("Yanking %s records of %s requested",
num_to_fetch,
num_records)
return_list = []
records_to_return = self._record_buffer[:num_to_fetch]
self._record_buffer = self._record_buffer[num_to_fetch:]
if len(records_to_return) > 0:
self._state = records_to_return[-1][1]
for item in records_to_return:
log.debug("Record to return: %s", item)
return_list.append(item[0])
self._publish_sample(return_list)
log.trace("Sending parser state [%s] to driver", self._state)
file_ingested = False
if self.file_complete and len(self._record_buffer) == 0:
file_ingested = True
self._state_callback(self._state, file_ingested)
return return_list
def _load_particle_buffer(self):
while self.get_block():
result = self.parse_chunks()
self._record_buffer.extend(result)
def get_block(self, size=1024):
data = self._stream_handle.read(size)
if data:
self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
return len(data)
else:
self.file_complete = True
raise EOFError
def parse_chunks(self):
raise NotImplementedException("Must write parse_chunks()!")
class SimpleParser(Parser):
def __init__(self, config, stream_handle, exception_callback):
self._record_buffer = []
self._file_parsed = False
super(SimpleParser, self).__init__(config,
stream_handle,
None,
None,
None,
None,
exception_callback)
def parse_file(self):
raise NotImplementedException("parse_file() not overridden!")
def get_records(self, number_requested=1):
particles_to_return = []
if number_requested > 0:
if self._file_parsed is False:
self.parse_file()
self._file_parsed = True
while len(particles_to_return) < number_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
| true | true |
f7147794bdcdc9985403994d39b45297ef13f697 | 4,281 | py | Python | python/algo_mdo_tit_for_2_tat.py | Mark-MDO47/PrisonDilemmaTourney | 8be7127c2c8b506429031dc1b9a2e441370307f4 | [
"Unlicense"
] | null | null | null | python/algo_mdo_tit_for_2_tat.py | Mark-MDO47/PrisonDilemmaTourney | 8be7127c2c8b506429031dc1b9a2e441370307f4 | [
"Unlicense"
] | null | null | null | python/algo_mdo_tit_for_2_tat.py | Mark-MDO47/PrisonDilemmaTourney | 8be7127c2c8b506429031dc1b9a2e441370307f4 | [
"Unlicense"
] | null | null | null | # Author: Mark Olson 2021-11-06 https://github.com/Mark-MDO47/PrisonDilemmaTourney
#
# algo_mdo_tit_for_2_tat.py - Prisoner's Dilemma tournament algorithm file
#
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT within the last two moves, it returns choices.DEFECT this move
# else it returns choices.COOPERATE this move
#
# For an algorithm python routine in a file (i.e. with filename algo_mdo_something.py), the calling sequence is
# algo_mdo_something(selfHist, oppHist, ID))
# I recommend adding your initials (mine are mdo) to your file/algorithm name so we don't have name collisions
# NOTE that the function name is the same as the python filename with the *.py removed
# This template file is named algorithm_template.py so the function name is algorithm_template
# Each call to the algorithm will have the following for parameters:
# list of history all the choices made by both parties in reverse order (latest choice before this is [0], prev [1])
# Thus the opponent choice made in previous move, assuming this isn't the first move, is oppChoices[0].
# if len(oppChoices) > 0, there was at least one prior move.
# note: len(oppChoices) should be identical to len(myChoices)
# value of each entry in xxxHist is one of choices.DEFECT or choices.COOPERATE
#
# The algorithm will return
# choices.DEFECT or choices.COOPERATE
#
# See https://en.wikipedia.org/wiki/Prisoner%27s_dilemma
# See https://cs.stanford.edu/people/eroberts/courses/soco/projects/1998-99/game-theory/axelrod.html
#
# Merrill Flood and Melvin Dresher from RAND corporation framed the concept in 1950 to show why two completely rational
# individuals might not cooperate, even if it appears that it is in their best interests to do so.
#
# There are many scenarios that can be mapped to this concept, but the famous mapping by Albert W. Tucker called the
# "Prisoner's Dilemma" revolves around two prisoners, "A" and "B", guilty of the same crime and being held in
# separate interrogation rooms.
#
# Due to weak evidence held by the police, if both cooperate (do not betray the other), that will lead to a small sentence
# for each of them. If one cooperates and the other defects, the defector gets off free and the cooperator gets a
# large sentence. If they both defect, they both get an intermediate sentence.
# (spoiler alert) If the game is played exactly one time, the game-theory best choice for each player is to
# defect (or betray the other player).
#
# Robert Axelrod, professor of political science at the University of Michigan, held a tournament of competing
# strategies for the famous Prisoner's Dilemma in 1980.
#
# He had the insight that if the game is played many times in succession, then the history of play allows each player
# to take into account the "reputation" of the other player in making their choice of behavior.
# He invited some game theorists to submit algorithms that would be competed against each other in a computer tournament.
# Later he held another tournament and invited anyone to submit algorithms.
# The "Tit-For-Tat" algorithm seemed to do the best.
import sys
import PrisonersDilemmaTournament as choices # pick up choices.DEFECT and choices.COOPERATE
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT in the last two moves, we return choices.DEFECT this move
# else we return choices.COOPERATE this move
#
# note: the function name should be exactly the same as the filename but without the ".py"
# note: len(selfHist) and len(oppHist) should always be the same
#
def algo_mdo_tit_for_2_tat(selfHist, oppHist, ID):
    """
    Tit-for-two-tats strategy.

    Cooperates on the first two moves; thereafter defects only if the
    opponent defected on at least one of the last two moves, otherwise
    cooperates.

    @param selfHist list of this algorithm's own prior choices, most recent first
    @param oppHist  list of the opponent's prior choices, most recent first
    @param ID       identifier for this player (unused by this strategy)
    @return choices.COOPERATE or choices.DEFECT
    """
    if len(selfHist) <= 1:  # first two moves: no two-move history yet
        return choices.COOPERATE
    if (choices.DEFECT == oppHist[0]) or (choices.DEFECT == oppHist[1]):
        return choices.DEFECT
    # Opponent cooperated on both of the last two moves. Return the constant
    # explicitly instead of echoing oppHist[0] (the previous code): that
    # matches the documented algorithm and stays correct even if the history
    # ever held a value other than the two expected constants.
    return choices.COOPERATE
if __name__ == "__main__":
    sys.stderr.write("ERROR - algo_mdo_tit_for_2_tat.py is not intended to be run stand-alone\n")
    exit(-1)
| 57.08 | 122 | 0.749825 |
#
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT within the last two moves, it returns choices.DEFECT this move
# else it returns choices.COOPERATE this move
#
# For an algorithm python routine in a file (i.e. with filename algo_mdo_something.py), the calling sequence is
# algo_mdo_something(selfHist, oppHist, ID))
# I recommend adding your initials (mine are mdo) to your file/algorithm name so we don't have name collisions
# if len(oppChoices) > 0, there was at least one prior move.
# note: len(oppChoices) should be identical to len(myChoices)
# value of each entry in xxxHist is one of choices.DEFECT or choices.COOPERATE
#
# The algorithm will return
# choices.DEFECT or choices.COOPERATE
#
# See https://en.wikipedia.org/wiki/Prisoner%27s_dilemma
# See https://cs.stanford.edu/people/eroberts/courses/soco/projects/1998-99/game-theory/axelrod.html
#
# Merrill Flood and Melvin Dresher from RAND corporation framed the concept in 1950 to show why two completely rational
# individuals might not cooperate, even if it appears that it is in their best interests to do so.
#
# There are many scenarios that can be mapped to this concept, but the famous mapping by Albert W. Tucker called the
# "Prisoner's Dilemma" revolves around two prisoners, "A" and "B", guilty of the same crime and being held in
#
# He had the insight that if the game is played many times in succession, then the history of play allows each player
# to take into account the "reputation" of the other player in making their choice of behavior.
# He invited some game theorists to submit algorithms that would be competed against each other in a computer tournament.
# Later he held another tournament and invited anyone to submit algorithms.
# The "Tit-For-Tat" algorithm seemed to do the best.
import sys
import PrisonersDilemmaTournament as choices # pick up choices.DEFECT and choices.COOPERATE
# The algo_mdo_tit_for_2_tat algorithm behaves as follows:
# On the first two moves it returns choices.COOPERATE
# On all subsequent moves:
# if the opponent did choices.DEFECT in the last two moves, we return choices.DEFECT this move
# else we return choices.COOPERATE this move
#
# note: the function name should be exactly the same as the filename but without the ".py"
# note: len(selfHist) and len(oppHist) should always be the same
#
def algo_mdo_tit_for_2_tat(selfHist, oppHist, ID):
    """Tit-for-two-tats strategy.

    Cooperates on the first two moves; afterwards defects only if the
    opponent defected on either of their last two moves. oppHist[0] and
    oppHist[1] are taken to be the opponent's two most recent moves
    (newest first -- assumption based on the index usage here; confirm
    against the tournament driver's history ordering).

    Args:
        selfHist: list of this player's prior moves (choices.DEFECT / choices.COOPERATE).
        oppHist:  list of the opponent's prior moves; same length as selfHist.
        ID:       player identifier assigned by the tournament (unused here).

    Returns:
        choices.DEFECT or choices.COOPERATE for this move.
    """
    if len(selfHist) <= 1:  # first two moves: not enough history to judge
        return choices.COOPERATE
    else:
        if (choices.DEFECT == oppHist[1]) or (choices.DEFECT == oppHist[0]):
            return choices.DEFECT
        else:
            # Fix: return COOPERATE explicitly instead of echoing oppHist[0].
            # The old "return oppHist[0]" only worked because a non-DEFECT
            # entry is necessarily COOPERATE; being explicit matches the
            # documented contract and survives any future third choice value.
            return choices.COOPERATE
if __name__ == "__main__":
    # This module only supplies a strategy function; refuse direct execution.
    sys.stderr.write("ERROR - algo_mdo_tit_for_2_tat.py is not intended to be run stand-alone\n")
    # Fix: use sys.exit instead of the exit() builtin -- exit() is injected by
    # the site module and is absent under `python -S` or in frozen builds.
    sys.exit(-1)
| true | true |
f714788157a6abe32fba2b5d294f2ebd1935e271 | 93 | py | Python | app/config.py | SilentFan/EvenThing | b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876 | [
"MIT"
] | null | null | null | app/config.py | SilentFan/EvenThing | b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876 | [
"MIT"
] | 1 | 2015-10-23T14:43:09.000Z | 2015-10-23T14:43:09.000Z | app/config.py | SilentFan/EvenThing | b32054a3c9b53d8dfbdc6a808ff9d00d88d1c876 | [
"MIT"
] | null | null | null | __author__ = 'meli'
HOST = '0.0.0.0'  # bind address: listen on all network interfaces
STATIC_PATH = "../static"  # static assets directory; relative path, presumably resolved against the app package dir -- verify
TEPLATE_PATH = "../templates" | 18.6 | 29 | 0.645161 | __author__ = 'meli'
HOST = '0.0.0.0'
STATIC_PATH = "../static"
TEPLATE_PATH = "../templates" | true | true |
f7147b09abce445b07986e1ce4221073dcb14461 | 2,384 | py | Python | dynd/nd/test/test_functional.py | mwiebe/dynd-python | 45ffecaf7887761a5634140f0ed120b33ace58a3 | [
"BSD-2-Clause"
] | 93 | 2015-01-29T14:00:57.000Z | 2021-11-23T14:37:27.000Z | dynd/nd/test/test_functional.py | ContinuumIO/dynd-python | bae7afb8eb604b0bce09befc9e896c8ec8357aaa | [
"BSD-2-Clause"
] | 143 | 2015-01-04T12:30:24.000Z | 2016-09-29T18:36:22.000Z | dynd/nd/test/test_functional.py | ContinuumIO/dynd-python | bae7afb8eb604b0bce09befc9e896c8ec8357aaa | [
"BSD-2-Clause"
] | 20 | 2015-06-08T11:54:46.000Z | 2021-03-09T07:57:25.000Z | import sys
# unittest2 backports the Python 2.7 unittest API to older interpreters.
if sys.version_info >= (2, 7):
    import unittest
else:
    import unittest2 as unittest
from dynd import annotate, nd, ndt
@unittest.skip('Test disabled since callables were reworked')
class TestApply(unittest.TestCase):
    """Tests for nd.functional.apply (currently skipped: callables reworked)."""
    def test_object(self):
        """Check the callable types apply(jit=False) builds for 0/1/2 annotations."""
        # No annotation: both slots of the callable type stay fully generic.
        @nd.functional.apply(jit = False)
        def f(x):
            return x
        self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
        # One annotation: the first slot of ndt.callable becomes int32.
        @nd.functional.apply(jit = False)
        @annotate(ndt.int32)
        def f(x):
            return x
        self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.scalar))
        self.assertEqual(0, f(0))
        self.assertEqual(1, f(1))
        # Two annotations: both slots become int32.
        @nd.functional.apply(jit = False)
        @annotate(ndt.int32, ndt.int32)
        def f(x):
            return x
        self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.int32))
        self.assertEqual(0, f(0))
        self.assertEqual(1, f(1))
    # NOTE(review): the numba variant below is disabled by being wrapped in a
    # bare string literal (dead code); kept verbatim.
    """
    def test_numba(self):
        try:
            import numba
        except ImportError as error:
            raise unittest.SkipTest(error)
        @nd.functional.apply(jit = True)
        def f(x):
            return x
        self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
        self.assertEqual(0, f(0))
        @nd.functional.apply(jit = True)
        def f(x):
            return x
        self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
        self.assertEqual(0, f(0))
    """
@unittest.skip('Test disabled since callables were reworked')
class TestElwise(unittest.TestCase):
    """Element-wise lifting of a scalar callable (skipped: callables reworked)."""
    def test_unary(self):
        @nd.functional.elwise
        @annotate(ndt.int32)
        def f(x):
            return x * 2
        # Disabled expectation: f([1, 2, 3]) would equal nd.array([2, 4, 6]).
@unittest.skip('Test disabled since callables were reworked')
class TestReduction(unittest.TestCase):
    """Reduction over a binary callable (skipped: callables reworked)."""
    def test_unary(self):
        @nd.functional.reduction
        @annotate(ndt.int32)
        def f(x, y):
            # Equivalent to max(x, y) for integer operands.
            return x if x > y else y
        self.assertEqual(3, f([1, 2, 3]))
        self.assertEqual(6, f([[1, 2, 3], [4, 5, 6]]))
"""
def multigen(func):
return lambda x: x
class TestMultidispatch(unittest.TestCase):
def test_unary(self):
@nd.functional.multidispatch()
def callables():
yield 5
print callables(3)
"""
if __name__ == '__main__':
    # Verbosity 2 lists each test (all currently skipped) by name.
    unittest.main(verbosity=2)
| 25.361702 | 70 | 0.588926 | import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
from dynd import annotate, nd, ndt
@unittest.skip('Test disabled since callables were reworked')
class TestApply(unittest.TestCase):
def test_object(self):
@nd.functional.apply(jit = False)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.scalar, ndt.scalar))
@nd.functional.apply(jit = False)
@annotate(ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.scalar))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
@nd.functional.apply(jit = False)
@annotate(ndt.int32, ndt.int32)
def f(x):
return x
self.assertEqual(f.type, ndt.callable(ndt.int32, ndt.int32))
self.assertEqual(0, f(0))
self.assertEqual(1, f(1))
@unittest.skip('Test disabled since callables were reworked')
class TestElwise(unittest.TestCase):
def test_unary(self):
@nd.functional.elwise
@annotate(ndt.int32)
def f(x):
return 2 * x
@unittest.skip('Test disabled since callables were reworked')
class TestReduction(unittest.TestCase):
def test_unary(self):
@nd.functional.reduction
@annotate(ndt.int32)
def f(x, y):
return max(x, y)
self.assertEqual(3, f([1, 2, 3]))
self.assertEqual(6, f([[1, 2, 3], [4, 5, 6]]))
if __name__ == '__main__':
unittest.main(verbosity=2)
| true | true |
f7147bee2514ba0ec8ce68a3df5e77e31205a226 | 214 | py | Python | test.py | BlackPhoenixSlo/vislice | 48fc9160bd857656cd383c7dd0e562bfae5ebf3a | [
"MIT"
] | null | null | null | test.py | BlackPhoenixSlo/vislice | 48fc9160bd857656cd383c7dd0e562bfae5ebf3a | [
"MIT"
] | null | null | null | test.py | BlackPhoenixSlo/vislice | 48fc9160bd857656cd383c7dd0e562bfae5ebf3a | [
"MIT"
] | null | null | null | import math
def pra(n):
for i in range (2,(int)(math.sqrt(n))):
if (n%i==0):
return False
return True
def izpisi():
for i in range (2,200):
if (pra(i)):
print(i)
| 19.454545 | 43 | 0.481308 | import math
def pra(n):
for i in range (2,(int)(math.sqrt(n))):
if (n%i==0):
return False
return True
def izpisi():
for i in range (2,200):
if (pra(i)):
print(i)
| true | true |
f7147c16c55d6759428fde88ad86145632dfa7ae | 5,378 | py | Python | backend/app/literature/crud/cross_reference_crud.py | alliance-genome/agr_literature_service | 2278316422d5c3ab65e21bb97d91e861e48853c5 | [
"MIT"
] | null | null | null | backend/app/literature/crud/cross_reference_crud.py | alliance-genome/agr_literature_service | 2278316422d5c3ab65e21bb97d91e861e48853c5 | [
"MIT"
] | 39 | 2021-10-18T17:02:49.000Z | 2022-03-28T20:56:24.000Z | backend/app/literature/crud/cross_reference_crud.py | alliance-genome/agr_literature_service | 2278316422d5c3ab65e21bb97d91e861e48853c5 | [
"MIT"
] | 1 | 2021-10-21T00:11:18.000Z | 2021-10-21T00:11:18.000Z | from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
    """Persist a new cross reference; raise 409 if the curie already exists."""
    payload = jsonable_encoder(cross_reference)
    curie = payload['curie']
    existing = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
    if existing:
        raise HTTPException(status_code=status.HTTP_409_CONFLICT,
                            detail=f"CrossReference with curie {curie} already exists")
    new_row = create_obj(db, CrossReferenceModel, payload)
    db.add(new_row)
    db.commit()
    return "created"
def destroy(db: Session, curie: str) -> None:
    """Delete the cross reference identified by ``curie``; raise 404 if absent."""
    row = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
    if row is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Cross Reference with curie {curie} not found")
    db.delete(row)
    db.commit()
    return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
    """Apply a partial update to a cross reference; raise 404 if absent."""
    updates = jsonable_encoder(cross_reference_update)
    row = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
    if row is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Cross Reference with curie {curie} not found")
    # Resolve reference/resource linkage before copying plain fields
    # (presumably maps curies to foreign keys -- confirm in reference_resource).
    add_reference_resource(db, row, cross_reference_update, non_fatal=True)
    for attr, new_value in updates.items():
        setattr(row, attr, new_value)
    row.date_updated = datetime.utcnow()
    db.commit()
    return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
    """Return a JSON-ready dict for one cross reference.

    Replaces resource_id / reference_id foreign keys with their curies,
    optionally attaches author/editor ids (when ``indirect`` is False), and
    expands URLs/pages from the ResourceDescriptor matching the curie prefix.
    Raises 404 when the curie is unknown.
    """
    cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
    if not cross_reference:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"CrossReference with the curie {curie} is not available")
    cross_reference_data = jsonable_encoder(cross_reference)
    # Swap internal FK ids for public curies; .first().curie assumes the FK
    # target row exists (would AttributeError on a dangling id -- verify).
    if cross_reference_data['resource_id']:
        cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
    del cross_reference_data['resource_id']
    if cross_reference_data['reference_id']:
        cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
    del cross_reference_data['reference_id']
    author_ids = []
    editor_ids = []
    # Only the non-indirect view carries author/editor id lists.
    if not indirect:
        for author in cross_reference.authors:
            author_ids.append(author.author_id)
        for editor in cross_reference.editors:
            editor_ids.append(editor.editor_id)
        cross_reference_data['author_ids'] = author_ids
        cross_reference_data['editor_ids'] = editor_ids
    # Curie format is "<db_prefix>:<local_id>"; split(":", 1) keeps any later
    # colons inside local_id.
    [db_prefix, local_id] = curie.split(":", 1)
    resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
    if resource_descriptor:
        # "[%s]" is the placeholder token inside descriptor URL templates.
        default_url = resource_descriptor.default_url.replace("[%s]", local_id)
        cross_reference_data['url'] = default_url
        if cross_reference_data['pages']:
            pages_data = []
            for cr_page in cross_reference_data['pages']:
                # Find the matching page template; unmatched pages get url "".
                page_url = ""
                for rd_page in resource_descriptor.pages:
                    if rd_page.name == cr_page:
                        page_url = rd_page.url
                        break
                pages_data.append({"name": cr_page,
                                   "url": page_url.replace("[%s]", local_id)})
            cross_reference_data['pages'] = pages_data
    elif cross_reference_data['pages']:
        # No descriptor for this prefix: expose page names without URLs.
        pages_data = []
        for cr_page in cross_reference_data['pages']:
            pages_data.append({"name": cr_page})
        cross_reference_data['pages'] = pages_data
    return cross_reference_data
def show_changesets(db: Session, curie: str):
    """Return the audit history (transaction info + changeset) for a cross reference."""
    row = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
    if row is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"Cross Reference with curie {curie} is not available")
    return [
        {
            'transaction': {
                'id': version.transaction.id,
                'issued_at': version.transaction.issued_at,
                'user_id': version.transaction.user_id,
            },
            'changeset': version.changeset,
        }
        for version in row.versions
    ]
| 42.015625 | 170 | 0.697285 | from sqlalchemy.orm import Session
from datetime import datetime
from fastapi import HTTPException
from fastapi import status
from fastapi.encoders import jsonable_encoder
from literature.schemas import CrossReferenceSchema
from literature.schemas import CrossReferenceSchemaUpdate
from literature.models import CrossReferenceModel
from literature.models import ReferenceModel
from literature.models import ResourceModel
from literature.models import ResourceDescriptorModel
from literature.crud.reference_resource import create_obj, add_reference_resource
def create(db: Session, cross_reference: CrossReferenceSchema) -> str:
cross_reference_data = jsonable_encoder(cross_reference)
if db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == cross_reference_data['curie']).first():
raise HTTPException(status_code=status.HTTP_409_CONFLICT,
detail=f"CrossReference with curie {cross_reference_data['curie']} already exists")
db_obj = create_obj(db, CrossReferenceModel, cross_reference_data)
db.add(db_obj)
db.commit()
return "created"
def destroy(db: Session, curie: str) -> None:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
db.delete(cross_reference)
db.commit()
return None
def patch(db: Session, curie: str, cross_reference_update: CrossReferenceSchemaUpdate) -> dict:
cross_reference_data = jsonable_encoder(cross_reference_update)
cross_reference_db_obj = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference_db_obj:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} not found")
add_reference_resource(db, cross_reference_db_obj, cross_reference_update, non_fatal=True)
for field, value in cross_reference_data.items():
setattr(cross_reference_db_obj, field, value)
cross_reference_db_obj.date_updated = datetime.utcnow()
db.commit()
return {"message": "updated"}
def show(db: Session, curie: str, indirect=True) -> dict:
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"CrossReference with the curie {curie} is not available")
cross_reference_data = jsonable_encoder(cross_reference)
if cross_reference_data['resource_id']:
cross_reference_data['resource_curie'] = db.query(ResourceModel.curie).filter(ResourceModel.resource_id == cross_reference_data['resource_id']).first().curie
del cross_reference_data['resource_id']
if cross_reference_data['reference_id']:
cross_reference_data['reference_curie'] = db.query(ReferenceModel.curie).filter(ReferenceModel.reference_id == cross_reference_data['reference_id']).first().curie
del cross_reference_data['reference_id']
author_ids = []
editor_ids = []
if not indirect:
for author in cross_reference.authors:
author_ids.append(author.author_id)
for editor in cross_reference.editors:
editor_ids.append(editor.editor_id)
cross_reference_data['author_ids'] = author_ids
cross_reference_data['editor_ids'] = editor_ids
[db_prefix, local_id] = curie.split(":", 1)
resource_descriptor = db.query(ResourceDescriptorModel).filter(ResourceDescriptorModel.db_prefix == db_prefix).first()
if resource_descriptor:
default_url = resource_descriptor.default_url.replace("[%s]", local_id)
cross_reference_data['url'] = default_url
if cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
page_url = ""
for rd_page in resource_descriptor.pages:
if rd_page.name == cr_page:
page_url = rd_page.url
break
pages_data.append({"name": cr_page,
"url": page_url.replace("[%s]", local_id)})
cross_reference_data['pages'] = pages_data
elif cross_reference_data['pages']:
pages_data = []
for cr_page in cross_reference_data['pages']:
pages_data.append({"name": cr_page})
cross_reference_data['pages'] = pages_data
return cross_reference_data
def show_changesets(db: Session, curie: str):
cross_reference = db.query(CrossReferenceModel).filter(CrossReferenceModel.curie == curie).first()
if not cross_reference:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,
detail=f"Cross Reference with curie {curie} is not available")
history = []
for version in cross_reference.versions:
tx = version.transaction
history.append({'transaction': {'id': tx.id,
'issued_at': tx.issued_at,
'user_id': tx.user_id},
'changeset': version.changeset})
return history
| true | true |
f7147c364dc2a6ceaf51a2404c99136f6f4bc427 | 3,123 | py | Python | tensorflow_graphics/nn/metric/tests/fscore_test.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/nn/metric/tests/fscore_test.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/nn/metric/tests/fscore_test.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | 1 | 2020-04-11T10:37:36.000Z | 2020-04-11T10:37:36.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the fscore metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
  """Draw a uniform [0, 1) random tensor of the given shape."""
  return np.random.uniform(0.0, 1.0, size=tensor_shape)
def random_tensor_shape():
  """Return a random shape as a list: 1-5 dimensions, each of extent 1-9."""
  rank = 1 + np.random.randint(5)
  return np.random.randint(1, 10, size=rank).tolist()
def binary_precision_function(ground_truth, predictions):
  """Precision restricted to the positive class (label 1)."""
  return precision.evaluate(ground_truth, predictions, classes=[1])
def binary_recall_function(ground_truth, predictions):
  """Recall restricted to the positive class (label 1)."""
  return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
  """End-to-end checks of fscore.evaluate with binary precision/recall."""

  @parameterized.parameters(
      # Precision = 0.5, Recall = 0.25.
      ((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
      # Precision = 1, Recall = 1.
      ((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
      # Precision = 0, Recall = 0.
      ((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
  def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
    """fscore matches 2*P*R/(P+R) on hand-computed label vectors."""
    # Tile the 1-D presets across a random batch shape so the batched code
    # path is exercised, not just the scalar math.
    tensor_shape = random_tensor_shape()
    ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
    predicted_labels = np.tile(predictions, tensor_shape + [1])
    expected = np.tile(expected_fscore, tensor_shape)
    result = fscore.evaluate(
        ground_truth_labels,
        predicted_labels,
        precision_function=binary_precision_function,
        recall_function=binary_recall_function)
    self.assertAllClose(expected, result)

  @parameterized.parameters(
      ("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
      ("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
  )
  def test_evaluate_shape_exception_raised(self, error_msg, *shape):
    """Tests that the shape exception is raised."""
    self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)

  @parameterized.parameters(
      ((1, 5, 3), (2, 5, 1)),
      ((None, 2, 6), (4, 2, None)),
      ((3, 1, 1, 2), (3, 5, 8, 2)),
  )
  def test_evaluate_shape_exception_not_raised(self, *shapes):
    """Tests that the shape exceptions are not raised."""
    self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| 34.7 | 80 | 0.707973 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow_graphics.nn.metric import fscore
from tensorflow_graphics.nn.metric import precision
from tensorflow_graphics.nn.metric import recall
from tensorflow_graphics.util import test_case
def random_tensor(tensor_shape):
return np.random.uniform(low=0.0, high=1.0, size=tensor_shape)
def random_tensor_shape():
tensor_size = np.random.randint(5) + 1
return np.random.randint(1, 10, size=(tensor_size)).tolist()
def binary_precision_function(ground_truth, predictions):
return precision.evaluate(ground_truth, predictions, classes=[1])
def binary_recall_function(ground_truth, predictions):
return recall.evaluate(ground_truth, predictions, classes=[1])
class FscoreTest(test_case.TestCase):
@parameterized.parameters(
((0, 1, 1, 1, 1), (1, 1, 0, 0, 0), 2 * (0.5 * 0.25) / (0.5 + 0.25)),
((0, 0, 0, 1, 1, 1, 0, 1), (0, 0, 0, 1, 1, 1, 0, 1), 1),
((0, 1, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0))
def test_evaluate_preset(self, ground_truth, predictions, expected_fscore):
tensor_shape = random_tensor_shape()
ground_truth_labels = np.tile(ground_truth, tensor_shape + [1])
predicted_labels = np.tile(predictions, tensor_shape + [1])
expected = np.tile(expected_fscore, tensor_shape)
result = fscore.evaluate(
ground_truth_labels,
predicted_labels,
precision_function=binary_precision_function,
recall_function=binary_recall_function)
self.assertAllClose(expected, result)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (1, 5, 3), (4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 4), (2, 4, 5)),
)
def test_evaluate_shape_exception_raised(self, error_msg, *shape):
self.assert_exception_is_raised(fscore.evaluate, error_msg, shape)
@parameterized.parameters(
((1, 5, 3), (2, 5, 1)),
((None, 2, 6), (4, 2, None)),
((3, 1, 1, 2), (3, 5, 8, 2)),
)
def test_evaluate_shape_exception_not_raised(self, *shapes):
self.assert_exception_is_not_raised(fscore.evaluate, shapes)
if __name__ == "__main__":
test_case.main()
| true | true |
f7147c381b59f0fc0e28d456483711cc73e3d0d7 | 10,454 | py | Python | stylegan2/run_training.py | arita37/pic-recon | 703f80eb6d191f68441ce71bc0f388556cb3e1bc | [
"MIT"
] | 8 | 2021-06-16T20:04:27.000Z | 2021-12-17T18:57:37.000Z | stylegan2/run_training.py | comp-imaging-sci/pic-recon | 703f80eb6d191f68441ce71bc0f388556cb3e1bc | [
"MIT"
] | null | null | null | stylegan2/run_training.py | comp-imaging-sci/pic-recon | 703f80eb6d191f68441ce71bc0f388556cb3e1bc | [
"MIT"
] | 2 | 2021-06-19T18:00:58.000Z | 2021-07-14T05:08:16.000Z | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
#
# Modified my Varun A. Kelkar - vak2@illinois.edu
import argparse
import copy
import os
import sys
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
#----------------------------------------------------------------------------
# Exact strings accepted by --config. run() parses the 'rgb' / '-nonoise'
# decorations off the id before dispatching on the base config letter.
_valid_configs = [
    # Table 1
    'config-a', # Baseline StyleGAN
    'config-b', # + Weight demodulation
    'config-c', # + Lazy regularization
    'config-d', # + Path length regularization
    'config-e', # + No growing, new G & D arch.
    'config-f', # + Large networks (default)
    'config-g', # + zero sectioning in dlatent
    'config-h', # f + selu nonlinearity
    'config-i', # + zero sectioning in dlatent in a different way
    'config-j', # h + mix all styles
    # Table 2
    'config-e-Gorig-Dorig',   'config-e-Gorig-Dresnet',   'config-e-Gorig-Dskip',
    'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
    'config-e-Gskip-Dorig',   'config-e-Gskip-Dresnet',   'config-e-Gskip-Dskip',
    'config-frgb', 'config-hrgb', 'config-jrgb',
    # No latent noise series
    'config-frgb-nonoise',
]
#----------------------------------------------------------------------------
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics, resume, resume_path, stall):
    """Configure and launch a StyleGAN2 training run via dnnlib.submit_run.

    Builds the EasyDict option bundles (networks, optimizers, losses,
    schedule), mutates them according to `config_id` -- including the
    'rgb' / '-nonoise' decorations, which are removed from the id before
    the base config is dispatched -- and submits a local run. `config_id`
    is expected to be one of `_valid_configs` (main() validates this
    before calling).
    """
    train = EasyDict(run_func_name='training.training_loop.training_loop') # Options for training loop.
    G = EasyDict(func_name='training.networks_stylegan2.G_main')       # Options for generator network.
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')  # Options for discriminator network.
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)              # Options for generator optimizer.
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)              # Options for discriminator optimizer.
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg') # Options for generator loss.
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')         # Options for discriminator loss.
    sched = EasyDict()                                                 # Options for TrainingSchedule.
    grid = EasyDict(size='8k', layout='random')                        # Options for setup_snapshot_image_grid().
    sc = dnnlib.SubmitConfig()                                         # Options for dnnlib.submit_run().
    tf_config = {'rnd.np_random_seed': 1000}                           # Options for tflib.init_tf().

    if resume:
        train.resume_pkl = resume_path
        train.resume_kimg = resume
    train.stall = stall
    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.001
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = f'stylegan2-lr-{sched.D_lrate_base}'
    desc += '-' + dataset

    # nonoise series
    if '-nonoise' in config_id:
        desc += '-nonoise'
        G.if_latent_noise = False
        # Bugfix: str.strip() removes a *character set* from both ends, not a
        # suffix (e.g. '...b'.strip('-nonoise') eats any trailing -/n/o/i/s/e
        # characters); replace the literal marker instead. Results are
        # identical for every id in _valid_configs.
        config_id = config_id.replace('-nonoise', '')

    # for rgb images
    if 'rgb' in config_id:
        dataset_args = EasyDict(tfrecord_dir=dataset, dtype='uint8', dynamic_range=[0,255])
        config_id = config_id.replace('rgb', '')  # Bugfix: was .strip('rgb'); see note above.
    else:
        dataset_args = EasyDict(tfrecord_dir=dataset, dtype='float32', dynamic_range=[0,1])

    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus

    assert config_id in _valid_configs
    desc += '-' + config_id

    # Configs A-E: Shrink networks to match original StyleGAN.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10

    # Config E: Set gamma to 100 and override G & D architecture.
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
        if 'Gorig'   in config_id: G.architecture = 'orig'
        if 'Gskip'   in config_id: G.architecture = 'skip' # (default)
        if 'Gresnet' in config_id: G.architecture = 'resnet'
        if 'Dorig'   in config_id: D.architecture = 'orig'
        if 'Dskip'   in config_id: D.architecture = 'skip'
        if 'Dresnet' in config_id: D.architecture = 'resnet' # (default)

    # Configs A-D: Enable progressive growing and switch to networks that support it.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32 # (default)
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4 # (default)
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'

    # Configs A-C: Disable path length regularization.
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')

    # Configs A-B: Disable lazy regularization.
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False

    # Config A: Switch to original StyleGAN networks.
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')

    # Config G: Zero sectioning in dlatent
    if config_id == 'config-g':
        G.zero_section = 2
        G.nonlinearity = 'lsoftplus'
        G.latent_size = 2048
        G.dlatent_size = 2048

    # Config H: Use lsoftplus nonlinearity, and no zero sectioning
    if config_id == 'config-h':
        G.nonlinearity = 'lsoftplus'

    # Config I: Zero sectioning in dlatent in a different way
    if config_id == 'config-i':
        G.zero_section = [0.1, 0.2, 0.3, 0.45, 0.6, 0.8, 0.9, 1.]
        G.nonlinearity = 'lsoftplus'
        G.latent_size = 2048
        G.dlatent_size = 2048

    if config_id == 'config-j':
        G.mix_all = 1
        # G.nonlinearity = 'lsoftplus'

    # Explicit --gamma overrides any config-derived R1 weight.
    if gamma is not None:
        D_loss.gamma = gamma

    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
#----------------------------------------------------------------------------
_examples = '''examples:
# Train StyleGAN2 using the FFHQ dataset
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --mirror-augment=true
valid configs:
''' + ', '.join(_valid_configs) + '''
valid metrics:
''' + ', '.join(sorted([x for x in metric_defaults.keys()])) + '''
'''
def main():
    """Parse CLI arguments, validate them, and dispatch to run()."""
    parser = argparse.ArgumentParser(
        description='Train StyleGAN2.',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
    parser.add_argument('--data-dir', help='Dataset root directory', required=True)
    parser.add_argument('--dataset', help='Training dataset', required=True)
    parser.add_argument('--config', help='Training config (default: %(default)s)', default='config-f', required=True, dest='config_id', metavar='CONFIG')
    parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)', default=1, type=int, metavar='N')
    parser.add_argument('--total-kimg', help='Training length in thousands of images (default: %(default)s)', metavar='KIMG', default=25000, type=int)
    parser.add_argument('--gamma', help='R1 regularization weight (default is config dependent)', default=None, type=float)
    parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
    # Bugfix: default was None, which crashed both the validation loop below
    # and run()'s metrics comprehension when --metrics was omitted. argparse
    # applies `type` to string defaults, so 'none' is converted by
    # _parse_comma_sep into [] (no metrics).
    parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default='none', type=_parse_comma_sep)
    parser.add_argument('--resume', help='Resume training from. (default: %(default)s)', default=0, type=float, metavar='N')
    parser.add_argument('--resume_path', help='Resume training from pkl. (default: %(default)s)', default='', type=str, metavar='N')
    parser.add_argument('--stall', help='Pause training (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)

    args = parser.parse_args()

    # Fail fast on bad inputs before any TensorFlow setup happens.
    if not os.path.exists(args.data_dir):
        print ('Error: dataset root directory does not exist.')
        sys.exit(1)

    if args.config_id not in _valid_configs:
        print ('Error: --config value must be one of: ', ', '.join(_valid_configs))
        sys.exit(1)

    for metric in args.metrics:
        if metric not in metric_defaults:
            print ('Error: unknown metric \'%s\'' % metric)
            sys.exit(1)

    run(**vars(args))
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 42.323887 | 153 | 0.621772 |
import argparse
import copy
import os
import sys
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
_valid_configs = [
'config-a',
'config-b',
'config-c',
'config-d',
'config-e',
'config-f',
'config-g',
'config-h',
'config-i',
'config-j',
'config-e-Gorig-Dorig', 'config-e-Gorig-Dresnet', 'config-e-Gorig-Dskip',
'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
'config-e-Gskip-Dorig', 'config-e-Gskip-Dresnet', 'config-e-Gskip-Dskip',
'config-frgb', 'config-hrgb', 'config-jrgb',
'config-frgb-nonoise',
]
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics, resume, resume_path, stall):
    """Build the complete training configuration for `config_id` and submit the run via dnnlib.

    The statement order below matters: base settings are established first
    and later config-specific branches override them in place.
    """
    # Base settings shared by every config.
    train = EasyDict(run_func_name='training.training_loop.training_loop')
    G = EasyDict(func_name='training.networks_stylegan2.G_main')
    D = EasyDict(func_name='training.networks_stylegan2.D_stylegan2')
    G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
    G_loss = EasyDict(func_name='training.loss.G_logistic_ns_pathreg')
    D_loss = EasyDict(func_name='training.loss.D_logistic_r1')
    sched = EasyDict()
    grid = EasyDict(size='8k', layout='random')
    sc = dnnlib.SubmitConfig()
    tf_config = {'rnd.np_random_seed': 1000}
    # resume is a kimg count; non-zero means "continue from resume_path".
    if resume:
        train.resume_pkl = resume_path
        train.resume_kimg = resume
    train.stall = stall
    train.data_dir = data_dir
    train.total_kimg = total_kimg
    train.mirror_augment = mirror_augment
    train.image_snapshot_ticks = train.network_snapshot_ticks = 10
    sched.G_lrate_base = sched.D_lrate_base = 0.001
    sched.minibatch_size_base = 32
    sched.minibatch_gpu_base = 4
    D_loss.gamma = 10
    metrics = [metric_defaults[x] for x in metrics]
    desc = f'stylegan2-lr-{sched.D_lrate_base}'
    desc += '-' + dataset
    # '-nonoise' variant: disable latent noise in the generator.
    # NOTE(review): str.strip() removes a *character set*, not a suffix.
    # It happens to yield the intended ids for every name currently in
    # _valid_configs, but is fragile — confirm before adding new configs.
    if '-nonoise' in config_id:
        desc += '-nonoise'
        G.if_latent_noise = False
        config_id = config_id.strip('-nonoise')
    # 'rgb' variants read uint8 [0,255] data; all others float32 [0,1].
    if 'rgb' in config_id:
        dataset_args = EasyDict(tfrecord_dir=dataset, dtype='uint8', dynamic_range=[0,255])
        config_id = config_id.strip('rgb')
    else:
        dataset_args = EasyDict(tfrecord_dir=dataset, dtype='float32', dynamic_range=[0,1])
    assert num_gpus in [1, 2, 4, 8]
    sc.num_gpus = num_gpus
    desc += '-%dgpu' % num_gpus
    assert config_id in _valid_configs
    desc += '-' + config_id
    # Config-specific overrides of the base settings above.
    if config_id != 'config-f':
        G.fmap_base = D.fmap_base = 8 << 10
    if config_id.startswith('config-e'):
        D_loss.gamma = 100
    if 'Gorig' in config_id: G.architecture = 'orig'
    if 'Gskip' in config_id: G.architecture = 'skip'
    if 'Gresnet' in config_id: G.architecture = 'resnet'
    if 'Dorig' in config_id: D.architecture = 'orig'
    if 'Dskip' in config_id: D.architecture = 'skip'
    if 'Dresnet' in config_id: D.architecture = 'resnet'
    # Configs a-d use the older progressive-growing style schedule/networks.
    if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
        sched.lod_initial_resolution = 8
        sched.G_lrate_base = sched.D_lrate_base = 0.001
        sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        sched.minibatch_size_base = 32
        sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
        sched.minibatch_gpu_base = 4
        sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
        G.synthesis_func = 'G_synthesis_stylegan_revised'
        D.func_name = 'training.networks_stylegan2.D_stylegan'
    if config_id in ['config-a', 'config-b', 'config-c']:
        G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
    if config_id in ['config-a', 'config-b']:
        train.lazy_regularization = False
    if config_id == 'config-a':
        G = EasyDict(func_name='training.networks_stylegan.G_style')
        D = EasyDict(func_name='training.networks_stylegan.D_basic')
    if config_id == 'config-g':
        G.zero_section = 2
        G.nonlinearity = 'lsoftplus'
        G.latent_size = 2048
        G.dlatent_size = 2048
    if config_id == 'config-h':
        G.nonlinearity = 'lsoftplus'
    if config_id == 'config-i':
        G.zero_section = [0.1, 0.2, 0.3, 0.45, 0.6, 0.8, 0.9, 1.]
        G.nonlinearity = 'lsoftplus'
        G.latent_size = 2048
        G.dlatent_size = 2048
    if config_id == 'config-j':
        G.mix_all = 1
    # An explicit --gamma always wins over the config-dependent default.
    if gamma is not None:
        D_loss.gamma = gamma
    sc.submit_target = dnnlib.SubmitTarget.LOCAL
    sc.local.do_not_copy_source_files = True
    # Bundle everything into the kwargs dnnlib expects and launch.
    kwargs = EasyDict(train)
    kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
    kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
    kwargs.submit_config = copy.deepcopy(sc)
    kwargs.submit_config.run_dir_root = result_dir
    kwargs.submit_config.run_desc = desc
    dnnlib.submit_run(**kwargs)
def _str_to_bool(v):
    """argparse `type` callable: parse a flag value into a bool.

    Accepts actual bools unchanged; otherwise matches a small set of
    case-insensitive truthy/falsy spellings and raises
    argparse.ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
    """argparse `type` callable: split a comma-separated string into a list.

    None, the empty string, and the literal (case-insensitive) 'none'
    all yield an empty list.
    """
    if s is None:
        return []
    if s == '' or s.lower() == 'none':
        return []
    return s.split(',')
_examples = '''examples:
# Train StyleGAN2 using the FFHQ dataset
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --mirror-augment=true
valid configs:
''' + ', '.join(_valid_configs) + '''
valid metrics:
''' + ', '.join(sorted([x for x in metric_defaults.keys()])) + '''
'''
def main():
    """Parse command-line arguments, validate them, and launch training.

    Exits with status 1 when the dataset root directory does not exist,
    the config id is unknown, or any requested metric is unknown.
    """
    parser = argparse.ArgumentParser(
        description='Train StyleGAN2.',
        epilog=_examples,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
    parser.add_argument('--data-dir', help='Dataset root directory', required=True)
    parser.add_argument('--dataset', help='Training dataset', required=True)
    parser.add_argument('--config', help='Training config (default: %(default)s)', default='config-f', required=True, dest='config_id', metavar='CONFIG')
    parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)', default=1, type=int, metavar='N')
    parser.add_argument('--total-kimg', help='Training length in thousands of images (default: %(default)s)', metavar='KIMG', default=25000, type=int)
    parser.add_argument('--gamma', help='R1 regularization weight (default is config dependent)', default=None, type=float)
    parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
    parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default=None, type=_parse_comma_sep)
    parser.add_argument('--resume', help='Resume training from. (default: %(default)s)', default=0, type=float, metavar='N')
    parser.add_argument('--resume_path', help='Resume training from pkl. (default: %(default)s)', default='', type=str, metavar='N')
    parser.add_argument('--stall', help='Pause training (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
    args = parser.parse_args()
    # Bug fix: argparse applies `type` only to values supplied on the command
    # line, so omitting --metrics left args.metrics as None and the metric
    # loop below (and run()) raised TypeError. Normalize to an empty list.
    if args.metrics is None:
        args.metrics = []
    if not os.path.exists(args.data_dir):
        print ('Error: dataset root directory does not exist.')
        sys.exit(1)
    if args.config_id not in _valid_configs:
        print ('Error: --config value must be one of: ', ', '.join(_valid_configs))
        sys.exit(1)
    for metric in args.metrics:
        if metric not in metric_defaults:
            print ('Error: unknown metric \'%s\'' % metric)
            sys.exit(1)
    run(**vars(args))
# CLI entry point.
if __name__ == "__main__":
    main()
| true | true |
f7147c4f2008f9ebc6684280d9bd9c43be116fb6 | 1,759 | py | Python | application/commonApp/markdown_it_extensions.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | application/commonApp/markdown_it_extensions.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | application/commonApp/markdown_it_extensions.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | #
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <marcelotsvaz@gmail.com>
from functools import partial
import re
from django.template import loader
def linkAttributes( self, tokens, index, options, env ):
	'''
	Markdown-it render rule: add rel="noopener" to link tokens, then
	delegate to the default token renderer. Bound as a renderer rule,
	hence the explicit `self` (the renderer instance).
	
	Note: despite the previous docstring, only `rel` is set here — no
	`target` attribute.
	'''
	tokens[index].attrSet( 'rel', 'noopener' )
	return self.renderToken( tokens, index, options, env )
def imageGalleryPlugin( md, markdownImages ):
	'''
	Plugin for rendering image galleries using Django UserImage.
	
	Registers the imageGallery block rule ahead of the paragraph rule.
	Syntax: #[cssClass1 cssClass2](identifier1, identifier2, identifier3)
	'''
	galleryRule = partial( imageGallery, markdownImages = markdownImages )
	ruleOptions = { 'alt': [ 'paragraph', 'reference', 'blockquote', 'list' ] }
	md.block.ruler.before( 'paragraph', 'imageGallery', galleryRule, ruleOptions )
def imageGallery( state, startLine, endLine, silent, markdownImages ):
	'''
	Block rule for the image gallery syntax.
	
	Matches a single line of the form #[cssClasses](identifiers), renders
	the referenced images through the commonApp/image_gallery.html
	template and pushes the result as an html_block token. Returns True
	when the line was consumed, False otherwise (markdown-it rule
	protocol).
	'''
	lineContent = state.getLines( startLine, startLine + 1, 0, False ).strip()
	# Only run the regex if the first two characters match.
	if not lineContent.startswith( '#[' ):
		return False
	match = re.match( r'^#\[(.*)\]\((.*)\)$', lineContent )
	if not match:
		return False
	cssClasses = match[1]
	identifiers = match[2]
	# In silent (validation) mode only report the match; emit no tokens.
	if not silent:
		state.line = startLine + 1
		# '*' selects every available image.
		if identifiers.strip() == '*':
			images = markdownImages
		else:
			identifiers = { identifier.strip() for identifier in identifiers.split( ',' ) }
			images = [ image for image in markdownImages if image.identifier in identifiers ]
		renderedTemplate = loader.render_to_string(
			'commonApp/image_gallery.html',
			{ 'images': images, 'cssClasses': cssClasses },
		)
		token = state.push( 'html_block', '', 0 )
		token.content = renderedTemplate
		token.map = [startLine, state.line]
	return True
from functools import partial
import re
from django.template import loader
def linkAttributes( self, tokens, index, options, env ):
	'''Render rule: tag links with rel="noopener", then use the default renderer.'''
	tokens[index].attrSet( 'rel', 'noopener' )
	return self.renderToken( tokens, index, options, env )
def imageGalleryPlugin( md, markdownImages ):
	'''Register the imageGallery block rule ahead of the paragraph rule.'''
	md.block.ruler.before(
		'paragraph',
		'imageGallery',
		partial( imageGallery, markdownImages = markdownImages ),
		{ 'alt': [ 'paragraph', 'reference', 'blockquote', 'list' ] }
	)
def imageGallery( state, startLine, endLine, silent, markdownImages ):
	'''
	Block rule matching #[cssClasses](identifiers) on a single line and
	pushing the rendered gallery template as an html_block token.
	Returns True when the line was consumed (markdown-it rule protocol).
	'''
	lineContent = state.getLines( startLine, startLine + 1, 0, False ).strip()
	# Cheap prefix check before running the regex.
	if not lineContent.startswith( '#[' ):
		return False
	match = re.match( r'^#\[(.*)\]\((.*)\)$', lineContent )
	if not match:
		return False
	cssClasses = match[1]
	identifiers = match[2]
	# In silent (validation) mode only report the match; emit no tokens.
	if not silent:
		state.line = startLine + 1
		# '*' selects every available image.
		if identifiers.strip() == '*':
			images = markdownImages
		else:
			identifiers = { identifier.strip() for identifier in identifiers.split( ',' ) }
			images = [ image for image in markdownImages if image.identifier in identifiers ]
		renderedTemplate = loader.render_to_string(
			'commonApp/image_gallery.html',
			{ 'images': images, 'cssClasses': cssClasses },
		)
		token = state.push( 'html_block', '', 0 )
		token.content = renderedTemplate
		token.map = [startLine, state.line]
	return True
f7147f6d980b3546daeecc96d4d8fba4b04a0b48 | 14,109 | py | Python | src/src/create_tf_record.py | zhaodi-Wen/Child_skin_disease_detect | e95045341e8c27161eebb2c9c3b68026a4ea247b | [
"Apache-2.0"
] | null | null | null | src/src/create_tf_record.py | zhaodi-Wen/Child_skin_disease_detect | e95045341e8c27161eebb2c9c3b68026a4ea247b | [
"Apache-2.0"
] | null | null | null | src/src/create_tf_record.py | zhaodi-Wen/Child_skin_disease_detect | e95045341e8c27161eebb2c9c3b68026a4ea247b | [
"Apache-2.0"
] | null | null | null | # -*-coding: utf-8 -*-
"""
@Project: create_tfrecord
@File : create_tfrecord.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2018-07-27 17:19:54
@desc : 将图片数据保存为单个tfrecord文件
"""
##########################################################################
import tensorflow as tf
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import random
from PIL import Image
# Restrict TensorFlow to the second GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

train_path = './train_new/img'
test_path = './test_new/img'

# Bug fix: this set was previously bound to the name `list`, shadowing the
# builtin for the rest of the module.
# NOTE(review): class names are derived from the *test* directory listing —
# confirm train and test directories contain the same class sub-directories.
_class_names = set(os.listdir(test_path))
classes = sorted(_class_names, key=str.lower)
print(classes)
##########################################################################
def _int64_feature(value):
    """Wrap a single integer in a tf.train.Feature (int64 list of length 1)."""
    wrapped = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=wrapped)
# Build a bytes-valued attribute.
def _bytes_feature(value):
    """Wrap a single bytes object in a tf.train.Feature (bytes list of length 1)."""
    wrapped = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=wrapped)
# Build a float-valued attribute.
def float_list_feature(value):
    """Wrap an iterable of floats in a tf.train.Feature (float list)."""
    wrapped = tf.train.FloatList(value=value)
    return tf.train.Feature(float_list=wrapped)
def get_example_nums(tf_records_filenames):
    '''
    Count the number of serialized examples in a TFRecord file.
    :param tf_records_filenames: path of the TFRecord file
    :return: number of examples
    '''
    return sum(1 for _ in tf.python_io.tf_record_iterator(tf_records_filenames))
def show_image(title,image):
    '''
    Display an image with matplotlib.
    :param title: figure title
    :param image: image data (array-like accepted by plt.imshow)
    :return:
    '''
    plt.imshow(image)
    plt.axis('on') # pass 'off' instead to hide the axes
    plt.title(title) # figure title
    plt.show()
# def load_labels_file(filename,labels_num=1,shuffle=False):
# '''
# 载图txt文件,文件中每行为一个图片信息,且以空格隔开:图像路径 标签1 标签2,如:test_image/1.jpg 0 2
# :param filename:
# :param labels_num :labels个数
# :param shuffle :是否打乱顺序
# :return:images type->list
# :return:labels type->list
# '''
# images=[]
# labels=[]
# with open(filename) as f:
# lines_list=f.readlines()
# if shuffle:
# random.shuffle(lines_list)
#
# for lines in lines_list:
# line=lines.rstrip().split(' ')
# label=[]
# for i in range(labels_num):
# label.append(int(line[i+1]))
# images.append(line[0])
# labels.append(label)
# return images,labels
def load_labels_file(filename,num=1,shuffle=False):
    '''
    Walk a dataset directory and collect (image path, label index) pairs.

    The directory is expected to contain one sub-directory per class name
    in the module-level `classes` list; the label of an image is the index
    of its class in `classes`.

    :param filename: dataset root directory (one sub-directory per class)
    :param num: unused; kept for interface compatibility
    :param shuffle: when True, shuffle images and labels with the same
                    permutation so each image stays paired with its label
    :return: (images, labels) parallel lists
    '''
    images=[]
    labels=[]
    for index,name in enumerate(classes):
        class_path = filename+'/'+name+'/'
        for img_name in os.listdir(class_path):
            img_path = class_path+img_name
            images.append(img_path)
            labels.append(index)
    # Bug fix: the original shuffled unconditionally, ignoring the `shuffle`
    # parameter. Honor the flag (the in-file caller passes it explicitly).
    if shuffle:
        # Re-seeding with the same value applies the same permutation to
        # both lists, preserving the image/label pairing.
        randnum = random.randint(0, 100)
        random.seed(randnum)
        random.shuffle(images)
        random.seed(randnum)
        random.shuffle(labels)
    return images,labels
def read_image(filename, resize_height, resize_width,normalization=False):
    '''
    Read image data; by default returns uint8 RGB in [0,255].
    :param filename: image file path
    :param resize_height:
    :param resize_width:
    :param normalization: whether to normalize to [0.,1.0]
    :return: the image data
    '''
    # NOTE(review): cv2.imread returns None for unreadable paths; the caller
    # create_records() checks os.path.exists first — confirm other callers do too.
    bgr_image = cv2.imread(filename)
    if len(bgr_image.shape)==2:# grayscale image: convert to three channels
        print("Warning:gray image",filename)
        bgr_image = cv2.cvtColor(bgr_image, cv2.COLOR_GRAY2BGR)
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)# convert BGR to RGB
    if resize_height>0 and resize_width>0:
        rgb_image=cv2.resize(rgb_image,(resize_width,resize_height))
    rgb_image=np.asanyarray(rgb_image)
    if normalization:
        # must divide by 255.0 (float), not 255
        rgb_image=rgb_image/255.0
    return rgb_image
def get_batch_images(images,labels,batch_size,labels_nums,one_hot=False,shuffle=False,num_threads=64):
    '''
    :param images: image tensor
    :param labels: label tensor
    :param batch_size:
    :param labels_nums: number of label classes
    :param one_hot: whether to convert labels to one-hot form
    :param shuffle: whether to shuffle the order; usually True for training, False for validation
    :return: one batch of images and labels
    '''
    min_after_dequeue = 200
    capacity = min_after_dequeue + 3 * batch_size  # capacity must exceed min_after_dequeue
    if shuffle:
        images_batch, labels_batch = tf.train.shuffle_batch([images,labels],
                                                                    batch_size=batch_size,
                                                                    capacity=capacity,
                                                                    min_after_dequeue=min_after_dequeue,
                                                                    num_threads=num_threads)
    else:
        images_batch, labels_batch = tf.train.batch([images,labels],
                                                        batch_size=batch_size,
                                                        capacity=capacity,
                                                        num_threads=num_threads)
    if one_hot:
        labels_batch = tf.one_hot(labels_batch, labels_nums, 1, 0)
    return images_batch,labels_batch
def read_records(filename,resize_height, resize_width,type=None):
    '''
    Parse a TFRecord file. The stored image data is RGB uint8 in [0,255];
    for training it is usually normalized to [0,1].
    :param filename: TFRecord file path
    :param resize_height:
    :param resize_width:
    :param type: how to convert the decoded image
           None: cast uint8 [0,255] to float32 [0,255]
           'normalization': float32 normalized to [0,1]
           'centralization': float32 normalized to [0,1], then centered by subtracting 0.5
    :return: (image tensor, label tensor)
    '''
    # Create a file queue; no limit on the number of reads.
    filename_queue = tf.train.string_input_producer([filename])
    reader = tf.TFRecordReader()
    # Read one serialized example from the queue.
    _, serialized_example = reader.read(filename_queue)
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'depth': tf.FixedLenFeature([], tf.int64),
            # Bug fix: create_records() writes the label under the key
            # 'labels'; parsing 'label' failed with a missing-key error.
            'labels': tf.FixedLenFeature([], tf.int64)
        }
    )
    tf_image = tf.decode_raw(features['image_raw'], tf.uint8)# raw image bytes
    tf_height = features['height']
    tf_width = features['width']
    tf_depth = features['depth']
    tf_label = tf.cast(features['labels'], tf.int32)
    # The reshape must match the shape the image was serialized with.
    tf_image=tf.reshape(tf_image, [resize_height, resize_width, 3])
    # Stored images are uint8; TensorFlow training needs tf.float32.
    if type is None:
        tf_image = tf.cast(tf_image, tf.float32)
    elif type=='normalization':
        # Only meaningful when the input data is uint8 in [0,255].
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255.0)
    elif type=='centralization':
        # Normalize and center around an assumed mean of 0.5.
        tf_image = tf.cast(tf_image, tf.float32) * (1. / 255) - 0.5
    # Only the image and label are returned.
    return tf_image,tf_label
def create_records(image_dir, output_record_dir, resize_height, resize_width,shuffle,log=5):
    '''
    Save raw image data, label, height, width etc. to a TFRecord file.
    Note: images are read as uint8 and stored as a tf BytesList string;
    convert the type as needed when parsing.
    :param image_dir: root directory of the source images
    :param output_record_dir: path of the TFRecord file to write
    :param resize_height:
    :param resize_width:
    PS: when resize_height or resize_width == 0, no resize is performed
    :param shuffle: whether to shuffle the order
    :param log: interval (in images) between progress messages
    '''
    # Load the file list; only a single label per image is used.
    images_list, labels_list=load_labels_file(image_dir,1,shuffle)
    writer = tf.python_io.TFRecordWriter(output_record_dir)
    for i, [image_name, labels] in enumerate(zip(images_list, labels_list)):
        image_path=image_name
        if not os.path.exists(image_path):
            print('Err:no image',image_path)
            continue
        image = read_image(image_path, resize_height, resize_width)
        image_raw = image.tostring()
        if i%log==0 or i==len(images_list)-1:
            print('------------processing:%d-th------------' % (i))
            print('current image_path=%s' % (image_path),'shape:{}'.format(image.shape),'labels:{}'.format(labels))
        # Only one label is stored; for multi-label data add further
        # int64 feature entries as appropriate.
        label=labels
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': _bytes_feature(image_raw),
            'height': _int64_feature(image.shape[0]),
            'width': _int64_feature(image.shape[1]),
            'depth': _int64_feature(image.shape[2]),
            'labels': _int64_feature(label)
        }))
        writer.write(example.SerializeToString())
    writer.close()
def disp_records(record_file,resize_height, resize_width,show_nums=4):
    '''
    Parse a TFRecord file and display show_nums images; mainly used to
    verify that the record file was generated successfully.
    :param record_file: record file path
    :return:
    '''
    # Build the record-reading ops.
    tf_image, tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    # Display the first show_nums images.
    init_op = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(show_nums):
            image,label = sess.run([tf_image,tf_label]) # fetch image and label in the session
            show_image("image:%d"%(label),image)
        coord.request_stop()
        coord.join(threads)
def batch_test(record_file,resize_height, resize_width):
    '''
    :param record_file: record file path
    :param resize_height:
    :param resize_width:
    :return:
    :PS: image_batch and label_batch are typically used as network inputs
    '''
    # Build the record-reading ops.
    tf_image,tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    image_batch, label_batch= get_batch_images(tf_image,tf_label,batch_size=4,labels_nums=5,one_hot=False,shuffle=False)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:  # start a session
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(4):
            # Fetch images and labels in the session.
            images, labels = sess.run([image_batch, label_batch])
            # Only the first image of each batch is displayed.
            show_image("image", images[0, :, :, :])
            print('shape:{},tpye:{},labels:{}'.format(images.shape,images.dtype,labels))
        # Stop all threads.
        coord.request_stop()
        coord.join(threads)
# if __name__ == '__main__':
# # 参数设置
#
# resize_height = 224 # 指定存储图片高度
# resize_width = 224 # 指定存储图片宽度
# shuffle=True
# log=5
# # 产生train.record文件
# image_dir='dataset/train'
# train_labels = 'dataset/train.txt' # 图片路径
# train_record_output = 'dataset/record/train.tfrecords'
# create_records(image_dir,train_labels, train_record_output, resize_height, resize_width,shuffle,log)
# train_nums=get_example_nums(train_record_output)
# print("save train example nums={}".format(train_nums))
#
# # 产生val.record文件
# image_dir='dataset/val'
# val_labels = 'dataset/val.txt' # 图片路径
# val_record_output = 'dataset/record/val.tfrecords'
# create_records(image_dir,val_labels, val_record_output, resize_height, resize_width,shuffle,log)
# val_nums=get_example_nums(val_record_output)
# print("save val example nums={}".format(val_nums))
#
# # 测试显示函数
# # disp_records(train_record_output,resize_height, resize_width)
# batch_test(train_record_output,resize_height, resize_width)
if __name__ == '__main__':
    # Parameter settings
    resize_height = 224  # stored image height
    resize_width = 224  # stored image width
    shuffle=True
    log=5
    # Produce the train.tfrecord file
    image_dir='./train_new/img'
    train_record_output = 'train.tfrecord'
    create_records(image_dir, train_record_output, resize_height, resize_width,shuffle,log)
    train_nums=get_example_nums(train_record_output)
    print("save train example nums={}".format(train_nums))
    # Produce the val.tfrecord file
    image_dir='./test_new/img'
    val_record_output = 'val.tfrecord'
    create_records(image_dir, val_record_output, resize_height, resize_width,shuffle,log)
    val_nums=get_example_nums(val_record_output)
    print("save val example nums={}".format(val_nums))
    # Visual verification helpers (disabled)
    # disp_records(train_record_output,resize_height, resize_width)
    # batch_test(train_record_output,resize_height, resize_width)
---------' % (i))
print('current image_path=%s' % (image_path),'shape:{}'.format(image.shape),'labels:{}'.format(labels))
label=labels
example = tf.train.Example(features=tf.train.Features(feature={
'image_raw': _bytes_feature(image_raw),
'height': _int64_feature(image.shape[0]),
'width': _int64_feature(image.shape[1]),
'depth': _int64_feature(image.shape[2]),
'labels': _int64_feature(label)
}))
writer.write(example.SerializeToString())
writer.close()
def disp_records(record_file,resize_height, resize_width,show_nums=4):
    '''Parse a TFRecord file and display show_nums images for verification.'''
    tf_image, tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    init_op = tf.initialize_all_variables()
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        for i in range(show_nums):
            image,label = sess.run([tf_image,tf_label])
            show_image("image:%d"%(label),image)
        coord.request_stop()
        coord.join(threads)
def batch_test(record_file,resize_height, resize_width):
    '''Read batches from a TFRecord file and display the first image of each batch.'''
    tf_image,tf_label = read_records(record_file,resize_height,resize_width,type='normalization')
    image_batch, label_batch= get_batch_images(tf_image,tf_label,batch_size=4,labels_nums=5,one_hot=False,shuffle=False)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        for i in range(4):
            images, labels = sess.run([image_batch, label_batch])
            show_image("image", images[0, :, :, :])
            print('shape:{},tpye:{},labels:{}'.format(images.shape,images.dtype,labels))
        coord.request_stop()
        coord.join(threads)
image_dir='./train_new/img'
rain_record_output = 'train.tfrecord'
create_records(image_dir, train_record_output, resize_height, resize_width,shuffle,log)
train_nums=get_example_nums(train_record_output)
print("save train example nums={}".format(train_nums))
image_dir='./test_new/img'
al_record_output = 'val.tfrecord'
create_records(image_dir, val_record_output, resize_height, resize_width,shuffle,log)
val_nums=get_example_nums(val_record_output)
print("save val example nums={}".format(val_nums))
| true | true |
f714815478b554b66febb336cf04f3a3d3a923e6 | 10,162 | py | Python | melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | 1 | 2021-07-08T10:26:06.000Z | 2021-07-08T10:31:11.000Z | melodic/lib/python2.7/dist-packages/geographic_msgs/msg/_RouteSegment.py | Dieptranivsr/Ros_Diep | d790e75e6f5da916701b11a2fdf3e03b6a47086b | [
"MIT"
] | null | null | null | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from geographic_msgs/RouteSegment.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import uuid_msgs.msg
class RouteSegment(genpy.Message):
  """Auto-generated ROS message class for geographic_msgs/RouteSegment.

  One directed edge of a RouteNetwork graph (fields: id, start, end,
  props). Generated by genpy from the .msg definition stored in
  _full_text; provides plain and numpy-flavored (de)serialization.
  """
  _md5sum = "8583d1e2ddf1891c3934a5d2ed9a799c"
  _type = "geographic_msgs/RouteSegment"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """# Route network segment.
#
# This is one directed edge of a RouteNetwork graph. It represents a
# known path from one way point to another. If the path is two-way,
# there will be another RouteSegment with "start" and "end" reversed.
uuid_msgs/UniqueID id # Unique identifier for this segment
uuid_msgs/UniqueID start # beginning way point of segment
uuid_msgs/UniqueID end # ending way point of segment
KeyValue[] props # segment properties
================================================================================
MSG: uuid_msgs/UniqueID
# A universally unique identifier (UUID).
#
# http://en.wikipedia.org/wiki/Universally_unique_identifier
# http://tools.ietf.org/html/rfc4122.html
uint8[16] uuid
================================================================================
MSG: geographic_msgs/KeyValue
# Geographic map tag (key, value) pair
#
# This is equivalent to diagnostic_msgs/KeyValue, repeated here to
# avoid introducing a trivial stack dependency.
string key # tag label
string value # corresponding value
"""
  __slots__ = ['id','start','end','props']
  _slot_types = ['uuid_msgs/UniqueID','uuid_msgs/UniqueID','uuid_msgs/UniqueID','geographic_msgs/KeyValue[]']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       id,start,end,props
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RouteSegment, self).__init__(*args, **kwds)
      # message fields cannot be None, assign default values for those that are
      if self.id is None:
        self.id = uuid_msgs.msg.UniqueID()
      if self.start is None:
        self.start = uuid_msgs.msg.UniqueID()
      if self.end is None:
        self.end = uuid_msgs.msg.UniqueID()
      if self.props is None:
        self.props = []
    else:
      self.id = uuid_msgs.msg.UniqueID()
      self.start = uuid_msgs.msg.UniqueID()
      self.end = uuid_msgs.msg.UniqueID()
      self.props = []

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.id.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      _x = self.start.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      _x = self.end.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      # Variable-length array: uint32 count, then length-prefixed strings.
      length = len(self.props)
      buff.write(_struct_I.pack(length))
      for val1 in self.props:
        _x = val1.key
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = val1.value
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # Record this message type on the 'rosmsg' decode error handler.
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.id is None:
        self.id = uuid_msgs.msg.UniqueID()
      if self.start is None:
        self.start = uuid_msgs.msg.UniqueID()
      if self.end is None:
        self.end = uuid_msgs.msg.UniqueID()
      if self.props is None:
        self.props = None
      end = 0
      start = end
      end += 16
      self.id.uuid = str[start:end]
      start = end
      end += 16
      self.start.uuid = str[start:end]
      start = end
      end += 16
      self.end.uuid = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.props = []
      for i in range(0, length):
        val1 = geographic_msgs.msg.KeyValue()
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.key = str[start:end].decode('utf-8', 'rosmsg')
        else:
          val1.key = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.value = str[start:end].decode('utf-8', 'rosmsg')
        else:
          val1.value = str[start:end]
        self.props.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.id.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      _x = self.start.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      _x = self.end.uuid
      # - if encoded as a list instead, serialize as bytes instead of string
      if type(_x) in [list, tuple]:
        buff.write(_get_struct_16B().pack(*_x))
      else:
        buff.write(_get_struct_16s().pack(_x))
      length = len(self.props)
      buff.write(_struct_I.pack(length))
      for val1 in self.props:
        _x = val1.key
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
        _x = val1.value
        length = len(_x)
        if python3 or type(_x) == unicode:
          _x = _x.encode('utf-8')
          length = len(_x)
        buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # Record this message type on the 'rosmsg' decode error handler.
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      if self.id is None:
        self.id = uuid_msgs.msg.UniqueID()
      if self.start is None:
        self.start = uuid_msgs.msg.UniqueID()
      if self.end is None:
        self.end = uuid_msgs.msg.UniqueID()
      if self.props is None:
        self.props = None
      end = 0
      start = end
      end += 16
      self.id.uuid = str[start:end]
      start = end
      end += 16
      self.start.uuid = str[start:end]
      start = end
      end += 16
      self.end.uuid = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.props = []
      for i in range(0, length):
        val1 = geographic_msgs.msg.KeyValue()
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.key = str[start:end].decode('utf-8', 'rosmsg')
        else:
          val1.key = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
          val1.value = str[start:end].decode('utf-8', 'rosmsg')
        else:
          val1.value = str[start:end]
        self.props.append(val1)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_16B = None
def _get_struct_16B():
global _struct_16B
if _struct_16B is None:
_struct_16B = struct.Struct("<16B")
return _struct_16B
_struct_16s = None
def _get_struct_16s():
global _struct_16s
if _struct_16s is None:
_struct_16s = struct.Struct("<16s")
return _struct_16s
| 33.986622 | 145 | 0.605688 |
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import uuid_msgs.msg
class RouteSegment(genpy.Message):
_md5sum = "8583d1e2ddf1891c3934a5d2ed9a799c"
_type = "geographic_msgs/RouteSegment"
_has_header = False
_full_text = """# Route network segment.
#
# This is one directed edge of a RouteNetwork graph. It represents a
# known path from one way point to another. If the path is two-way,
# there will be another RouteSegment with "start" and "end" reversed.
uuid_msgs/UniqueID id # Unique identifier for this segment
uuid_msgs/UniqueID start # beginning way point of segment
uuid_msgs/UniqueID end # ending way point of segment
KeyValue[] props # segment properties
================================================================================
MSG: uuid_msgs/UniqueID
# A universally unique identifier (UUID).
#
# http://en.wikipedia.org/wiki/Universally_unique_identifier
# http://tools.ietf.org/html/rfc4122.html
uint8[16] uuid
================================================================================
MSG: geographic_msgs/KeyValue
# Geographic map tag (key, value) pair
#
# This is equivalent to diagnostic_msgs/KeyValue, repeated here to
# avoid introducing a trivial stack dependency.
string key # tag label
string value # corresponding value
"""
__slots__ = ['id','start','end','props']
_slot_types = ['uuid_msgs/UniqueID','uuid_msgs/UniqueID','uuid_msgs/UniqueID','geographic_msgs/KeyValue[]']
def __init__(self, *args, **kwds):
if args or kwds:
super(RouteSegment, self).__init__(*args, **kwds)
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = []
else:
self.id = uuid_msgs.msg.UniqueID()
self.start = uuid_msgs.msg.UniqueID()
self.end = uuid_msgs.msg.UniqueID()
self.props = []
def _get_types(self):
return self._slot_types
def serialize(self, buff):
try:
_x = self.id.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.start.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.end.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
length = len(self.props)
buff.write(_struct_I.pack(length))
for val1 in self.props:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = None
end = 0
start = end
end += 16
self.id.uuid = str[start:end]
start = end
end += 16
self.start.uuid = str[start:end]
start = end
end += 16
self.end.uuid = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.props = []
for i in range(0, length):
val1 = geographic_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.props.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
def serialize_numpy(self, buff, numpy):
try:
_x = self.id.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.start.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
_x = self.end.uuid
if type(_x) in [list, tuple]:
buff.write(_get_struct_16B().pack(*_x))
else:
buff.write(_get_struct_16s().pack(_x))
length = len(self.props)
buff.write(_struct_I.pack(length))
for val1 in self.props:
_x = val1.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = val1.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.id is None:
self.id = uuid_msgs.msg.UniqueID()
if self.start is None:
self.start = uuid_msgs.msg.UniqueID()
if self.end is None:
self.end = uuid_msgs.msg.UniqueID()
if self.props is None:
self.props = None
end = 0
start = end
end += 16
self.id.uuid = str[start:end]
start = end
end += 16
self.start.uuid = str[start:end]
start = end
end += 16
self.end.uuid = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.props = []
for i in range(0, length):
val1 = geographic_msgs.msg.KeyValue()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.key = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.value = str[start:end].decode('utf-8', 'rosmsg')
else:
val1.value = str[start:end]
self.props.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e)
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_16B = None
def _get_struct_16B():
global _struct_16B
if _struct_16B is None:
_struct_16B = struct.Struct("<16B")
return _struct_16B
_struct_16s = None
def _get_struct_16s():
global _struct_16s
if _struct_16s is None:
_struct_16s = struct.Struct("<16s")
return _struct_16s
| true | true |
f7148181728209c0820ba3d6bb65094941aa250a | 4,393 | py | Python | fairseq/tasks/__init__.py | HYUNMIN-HWANG/fairseq | 8094376456f586f119ffe5b83d7af5979066197d | [
"MIT"
] | null | null | null | fairseq/tasks/__init__.py | HYUNMIN-HWANG/fairseq | 8094376456f586f119ffe5b83d7af5979066197d | [
"MIT"
] | null | null | null | fairseq/tasks/__init__.py | HYUNMIN-HWANG/fairseq | 8094376456f586f119ffe5b83d7af5979066197d | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask # noqa
# register dataclass
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
TASK_DATACLASS_REGISTRY
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = dc.from_namespace(cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
def get_task(name):
return TASK_REGISTRY[name]
def import_tasks(tasks_dir, namespace):
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
import_tasks(tasks_dir, "fairseq.tasks")
| 31.833333 | 153 | 0.619622 |
import argparse
import importlib
import os
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import merge_with_parent
from hydra.core.config_store import ConfigStore
from .fairseq_task import FairseqTask, LegacyFairseqTask
TASK_DATACLASS_REGISTRY = {}
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(cfg: FairseqDataclass, **kwargs):
TASK_DATACLASS_REGISTRY
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
task = TASK_REGISTRY[task_name]
if task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = dc.from_namespace(cfg)
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert (
task is not None
), f"Could not infer task type from {cfg}. Available argparse tasks: {TASK_REGISTRY.keys()}. Available hydra tasks: {TASK_DATACLASS_REGISTRY.keys()}"
return task.setup_task(cfg, **kwargs)
def register_task(name, dataclass=None):
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError("Cannot register duplicate task ({})".format(name))
if not issubclass(cls, FairseqTask):
raise ValueError(
"Task ({}: {}) must extend FairseqTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError(
"Cannot register task with duplicate class name ({})".format(
cls.__name__
)
)
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
if dataclass is not None:
TASK_DATACLASS_REGISTRY[name] = dataclass
cs = ConfigStore.instance()
node = dataclass()
node._name = name
cs.store(name=name, group="task", node=node, provider="fairseq")
return cls
return register_task_cls
def get_task(name):
return TASK_REGISTRY[name]
def import_tasks(tasks_dir, namespace):
for file in os.listdir(tasks_dir):
path = os.path.join(tasks_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
task_name = file[: file.find(".py")] if file.endswith(".py") else file
importlib.import_module(namespace + "." + task_name)
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group("Task name")
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
group_args = parser.add_argument_group(
"Additional command-line arguments"
)
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + "_parser"] = parser
tasks_dir = os.path.dirname(__file__)
import_tasks(tasks_dir, "fairseq.tasks")
| true | true |
f71482183f909628cf759af5837725bdda0f8c43 | 2,686 | py | Python | py/phl/phlsys_workingdircommand__t.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 150 | 2015-01-21T15:52:22.000Z | 2021-11-09T05:53:36.000Z | py/phl/phlsys_workingdircommand__t.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 72 | 2015-05-08T04:33:08.000Z | 2017-01-27T09:37:36.000Z | py/phl/phlsys_workingdircommand__t.py | aevri/phabricator-tools | ef7501bcaee83e98d168d16f64b3f73e744d3336 | [
"Apache-2.0"
] | 38 | 2015-01-30T10:33:47.000Z | 2021-11-09T05:53:30.000Z | """Test suite for phlsys_workingdircommand."""
# =============================================================================
# TEST PLAN
# -----------------------------------------------------------------------------
# Here we detail the things we are concerned to test and specify which tests
# cover those concerns.
#
# Concerns:
# [ A] command is executed correctly
# [ A] working directory is restored after command execution
# -----------------------------------------------------------------------------
# Tests:
# [ A] test_A_command_with_working_directory
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
import unittest
import phlsys_fs
import phlsys_workingdircommand
_PYCAT_COMMAND = """
#! /bin/sh
echo "Hello $1!"
""" .lstrip()
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_A_command_with_working_directory(self):
working_dir = tempfile.mkdtemp()
with phlsys_fs.chtmpdir_context():
tmp_dir = os.getcwd()
pycat_script_path = os.path.join(tmp_dir, 'pycat.sh')
phlsys_fs.write_text_file(pycat_script_path, _PYCAT_COMMAND)
mode = os.stat(pycat_script_path).st_mode
os.chmod(pycat_script_path, mode | stat.S_IEXEC)
self.assertEqual(os.getcwd(), tmp_dir)
command = phlsys_workingdircommand.CommandWithWorkingDirectory(
pycat_script_path, working_dir)
result = command('Alice')
# [ A] command is executed correctly
self.assertEqual('Hello Alice!\n', result)
# [ A] working directory is restored after command execution
self.assertEqual(os.getcwd(), tmp_dir)
# -----------------------------------------------------------------------------
# Copyright (C) 2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| 34.883117 | 79 | 0.57446 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import stat
import tempfile
import unittest
import phlsys_fs
import phlsys_workingdircommand
_PYCAT_COMMAND = """
#! /bin/sh
echo "Hello $1!"
""" .lstrip()
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_A_command_with_working_directory(self):
working_dir = tempfile.mkdtemp()
with phlsys_fs.chtmpdir_context():
tmp_dir = os.getcwd()
pycat_script_path = os.path.join(tmp_dir, 'pycat.sh')
phlsys_fs.write_text_file(pycat_script_path, _PYCAT_COMMAND)
mode = os.stat(pycat_script_path).st_mode
os.chmod(pycat_script_path, mode | stat.S_IEXEC)
self.assertEqual(os.getcwd(), tmp_dir)
command = phlsys_workingdircommand.CommandWithWorkingDirectory(
pycat_script_path, working_dir)
result = command('Alice')
self.assertEqual('Hello Alice!\n', result)
self.assertEqual(os.getcwd(), tmp_dir)
| true | true |
f7148301c9b51fcf16d83d7b786a76b3a79b2e1f | 31,201 | py | Python | wifiphisher/tests/test_deauth.py | burakbozdag/wifiphisher-docker | e5f373f63786c1a6cfeed4e9f5b00d0f986ade58 | [
"Apache-2.0"
] | 3 | 2020-09-04T14:54:30.000Z | 2022-03-24T19:09:48.000Z | wifiphisher/tests/test_deauth.py | burakbozdag/wifiphisher-docker | e5f373f63786c1a6cfeed4e9f5b00d0f986ade58 | [
"Apache-2.0"
] | null | null | null | wifiphisher/tests/test_deauth.py | burakbozdag/wifiphisher-docker | e5f373f63786c1a6cfeed4e9f5b00d0f986ade58 | [
"Apache-2.0"
] | 1 | 2020-12-26T20:11:22.000Z | 2020-12-26T20:11:22.000Z | # pylint: skip-file
""" This module tests the deauth module in extensions """
import collections
import unittest
from collections import defaultdict
import mock
import scapy.layers.dot11 as dot11
import wifiphisher.common.constants as constants
import wifiphisher.extensions.deauth as deauth
class TestDeauth(unittest.TestCase):
""" Tests Deauth class """
def setUp(self):
""" Set up the tests """
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
self.packet = dot11.RadioTap() / dot11.Dot11() / essid / rates / dsset
custom_tuple = collections.namedtuple("test",
("target_ap_bssid target_ap_channel rogue_ap_mac args "
"target_ap_essid is_freq_hop_allowed"))
self.target_channel = "6"
self.target_bssid = "BB:BB:BB:BB:BB:BB"
self.rogue_mac = "CC:CC:CC:CC:CC:CC"
self.target_essid = "Evil"
self.args = mock.Mock()
self.args.deauth_essid = False
self.args.channel_monitor = False
self.args.deauth_channels = []
data0 = custom_tuple(self.target_bssid, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
data1 = custom_tuple(None, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
self.deauth_obj0 = deauth.Deauth(data0)
self.deauth_obj1 = deauth.Deauth(data1)
# test for --deauth-essid
self.deauth_obj0._deauth_bssids = dict()
self.deauth_obj1._deauth_bssids = dict()
def test_craft_packet_normal_expected(self):
"""
Test _craft_packet method when given all the normal arguments and
expecting normal results
"""
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "00:00:00:00:00:00"
result = self.deauth_obj0._craft_packet(sender, receiver, bssid)
message0 = "Failed to craft a packet for disassociation"
message1 = "Failed to craft a packet for deauthentication"
# check the disassociation packet
self.assertEqual(result[0].addr1, receiver, message0)
self.assertEqual(result[0].addr2, sender, message0)
self.assertEqual(result[0].addr3, bssid, message0)
# check the deauthentication packet
self.assertEqual(result[1].addr1, receiver, message1)
self.assertEqual(result[1].addr2, sender, message1)
self.assertEqual(result[1].addr3, bssid, message1)
def test_get_packet_broadcast(self):
"""
Test get_packet method for crafting the broadcast frame
"""
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid / rates / dsset
packet.addr1 = receiver
packet.addr2 = sender
packet.addr3 = self.target_bssid
packet.FCfield = 0x0
# run the method
pkts_to_send = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel: target channel should be one key of
# the result
self.assertEqual(self.target_channel in pkts_to_send, True,
message0)
# check the packets
# check the disassoction packet
result = pkts_to_send[self.target_channel]
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[0].addr2, self.target_bssid, message1)
self.assertEqual(result[0].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[1].addr2, self.target_bssid, message1)
self.assertEqual(result[1].addr3, self.target_bssid, message1)
def test_get_packet_second_run_non_releavent_client_empty(self):
"""
Test get_packet method for the second time when given a packet which
is not related to the target access point and --essid is not used.
The expected result are an channel list containing target channel and
an empty packet list
"""
# setup the packets
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = "55:55:55:55:55:55"
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method twice
self.deauth_obj0.get_packet(self.packet)
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
# if the bssid is not in self._deauth_bssids, return empty channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_second_run_our_ap_empty(self):
"""
Test get_packet method for the second time when given a packet which
is from our own rouge ap to the target access point and --essid is
not used. The expected result are an channel list containing target
channel and an empty packet list
"""
# setup the packets
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = self.rogue_mac
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method twice
self.deauth_obj0.get_packet(self.packet)
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
# return empty channel if the frame is invalid
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_multiple_clients_multiple_packets(self):
"""
Test get_packet method when run multiple times with valid cleints.
--essid is not used. The expected result are the channel of the
target AP followed by the broadcast packet for the target AP and
all the client packets
"""
# setup the packet
sender0 = self.target_bssid
receiver0 = "11:11:11:11:11:11"
bssid0 = self.target_bssid
sender1 = "33:33:33:33:33:33"
receiver1 = self.target_bssid
bssid1 = self.target_bssid
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# add target_bssid in the self._deauth_bssids
self.deauth_obj0._deauth_bssids[self.target_bssid] = self.target_channel
# run the method
pkts_to_send0 = self.deauth_obj0.get_packet(self.packet)
result0 = pkts_to_send0[self.target_channel]
# change the values for the next run
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
# result1 will accumulate the result from result 0
pkts_to_send1 = self.deauth_obj0.get_packet(self.packet)
result1 = pkts_to_send1[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(self.target_channel in pkts_to_send0, True,
message0)
# check the packets for the first client
# check the disassociation packet
self.assertEqual(result0[0].subtype, 10, message1)
self.assertEqual(result0[0].addr1, self.target_bssid, message1)
self.assertEqual(result0[0].addr2, receiver0, message1)
self.assertEqual(result0[0].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result0[1].subtype, 12, message1)
self.assertEqual(result0[1].addr1, self.target_bssid, message1)
self.assertEqual(result0[1].addr2, receiver0, message1)
self.assertEqual(result0[1].addr3, self.target_bssid, message1)
# check the disassociation packet
self.assertEqual(result0[2].subtype, 10, message1)
self.assertEqual(result0[2].addr1, receiver0, message1)
self.assertEqual(result0[2].addr2, self.target_bssid, message1)
self.assertEqual(result0[2].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result0[3].subtype, 12, message1)
self.assertEqual(result0[3].addr1, receiver0, message1)
self.assertEqual(result0[3].addr2, self.target_bssid, message1)
self.assertEqual(result0[3].addr3, self.target_bssid, message1)
# check the packets for the second client
# check the disassociation packet
self.assertEqual(result1[4].subtype, 10, message1)
self.assertEqual(result1[4].addr1, sender1, message1)
self.assertEqual(result1[4].addr2, self.target_bssid, message1)
self.assertEqual(result1[4].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result1[5].subtype, 12, message1)
self.assertEqual(result1[5].addr1, sender1, message1)
self.assertEqual(result1[5].addr2, self.target_bssid, message1)
self.assertEqual(result1[5].addr3, self.target_bssid, message1)
# check the disassociation packet
self.assertEqual(result1[6].subtype, 10, message1)
self.assertEqual(result1[6].addr1, self.target_bssid, message1)
self.assertEqual(result1[6].addr2, sender1, message1)
self.assertEqual(result1[6].addr3, self.target_bssid, message1)
# check the deauthentication packet
self.assertEqual(result1[7].subtype, 12, message1)
self.assertEqual(result1[7].addr1, self.target_bssid, message1)
self.assertEqual(result1[7].addr2, sender1, message1)
self.assertEqual(result1[7].addr3, self.target_bssid, message1)
def test_get_packet_essid_flag_client_client_packet(self):
"""
Test get_packet method when --essid flag is given. A new
client is given as input and the proper packets and the
clients channel is expected
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# add the bssid to the deauth_bssid set
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
# run the method
pkts_to_send = self.deauth_obj1.get_packet(self.packet)
result = pkts_to_send[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(self.target_channel in pkts_to_send, True, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, sender, message1)
self.assertEqual(result[0].addr2, receiver, message1)
self.assertEqual(result[0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, sender, message1)
self.assertEqual(result[1].addr2, receiver, message1)
self.assertEqual(result[1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[2].subtype, 10, message1)
self.assertEqual(result[2].addr1, receiver, message1)
self.assertEqual(result[2].addr2, sender, message1)
self.assertEqual(result[2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[3].subtype, 12, message1)
self.assertEqual(result[3].addr1, receiver, message1)
self.assertEqual(result[3].addr2, sender, message1)
self.assertEqual(result[3].addr3, bssid, message1)
def test_get_packet_essid_flag_our_own_ap_empty_list(self):
"""
Test get_packet method when --essid flag is given. Our own
client is given as input. An empty list for both channel and
packets
"""
# setup the packet
sender = "00:00:00:00:00:00"
receiver = self.rogue_mac
bssid = self.rogue_mac
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed0_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. An empty
list for both channel and packets. This test the TypeError case
"""
mock_ord.side_effect = TypeError
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed1_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. An empty
list for both channel and packets. This tests the IndexError case
"""
mock_ord.side_effect = IndexError
# setup the packet
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed2_channel_empty_list(self, mock_ord):
"""
Test get_packet method when --essid flag is given. This is the
case when a packet is malformed in the channel section. In this case
the channel reported is out of range and an empty list for both
channel and packets
"""
mock_ord.return_value = 200
# setup the packet
sender = "33:33:33:33:33:33"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1], [], message1)
def test_add_client_invalid_sender_none(self):
"""
Test _add_client when the given sender is in the non_client_address.
The expected output is None
"""
# setup the arguments
sender = constants.WIFI_INVALID
receiver = "11:11:11:11:11:11"
bssid = receiver
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_invalid_receiver_none(self):
"""
Test _add_client when the given receiver is in the non_client_address.
The expected output is None
"""
# setup the arguments
sender = "11:11:11:11:11:11"
receiver = constants.WIFI_INVALID
bssid = sender
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_invalid_sender_receiver_none(self):
"""
Test _add_client when the given sender and receiver are in the
non_client_address. The expected output is None
"""
# setup the arguments
sender = constants.WIFI_INVALID
receiver = constants.WIFI_INVALID
bssid = "22:22:22:22:22:22:22"
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_irrelevent_sender_receiver_none(self):
"""
Test _add_client when neither sender nor receiver is the
BSSID. The expected output is None
"""
# setup the arguments
sender = "11:11:11:11:11:11"
receiver = "33:33:33:33:33:33"
bssid = "22:22:22:22:22:22:22"
# run the method
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
# check the result
self.assertIsNone(result)
def test_add_client_receiver_is_bssid_packets(self):
"""
Test _add_client when the given receiver is the bssid. The
expected output is proper packets for both sender and receiver
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
# run the method
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
# check the client
self.assertEqual(result[0], sender, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_add_client_sender_is_bssid_packets(self):
"""
Test _add_client when the given sender is the bssid. The
expected output is proper packets for both sender and receiver
"""
# setup the packet
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = sender
# run the method
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
# check the client
self.assertEqual(result[0], receiver, message0)
# check the packets
# check the disassociation packet
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
# check the disassociation packet
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
# check the deauthentication packet
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_send_output_no_client_proper(self):
"""
Test send_output method when no client has been detected.
The expected result is an empty message list
"""
message = "Failed to send the proper output"
self.assertEqual(self.deauth_obj1.send_output(), [], message)
def test_send_output_single_client_proper(self):
"""
Test send_output method when a client has been already
detected. The expected result is the proper output
containing that client
"""
# setup the packet
sender = "44:44:44:44:44:44"
receiver = "55:55:55:55:55:55"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
# run the method
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected = "DEAUTH/DISAS - {}".format(sender)
message = "Failed to send the proper output"
self.assertEqual(expected, actual[0], message)
def test_send_output_multiple_client_proper(self):
"""
Test send_output method when multiple client has been already
detected. The expected result is the proper output
containing that clients
"""
# setup the packet
sender0 = "22:22:22:22:22:22"
receiver0 = "11:11:11:11:11:11"
bssid0 = receiver0
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = sender1
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
# run the method
self.deauth_obj1._deauth_bssids[bssid0] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
# change the packet details
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
# run the method again
self.deauth_obj1._deauth_bssids[bssid1] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected0 = "DEAUTH/DISAS - {}".format(sender0)
expected1 = "DEAUTH/DISAS - {}".format(receiver1)
self.assertIn(expected0, actual)
self.assertIn(expected1, actual)
def test_send_channels_non_frenzy_target_channel(self):
"""
Test send_channels method when --essid is not given. The
expected result is the target AP's channel
"""
actual = self.deauth_obj0.send_channels()
message = "Failed to send target AP's channel"
expected = [self.target_channel]
self.assertEqual(expected, actual, message)
def test_send_channels_frenzy_all_channels(self):
"""
Test send_channels method when --essid is given. The expected
result is all channels
"""
actual = self.deauth_obj1.send_channels()
message = "Failed to send all the channels"
expected = [str(ch) for ch in range(1, 14)]
self.assertEqual(expected, actual, message)
    def test_extract_bssid_to_ds_0_from_ds_1_addr2(self):
        """
        Test _extract_bssid when to_ds is 0 and from_ds is 1.
        The case should return packet.addr2
        """
        # bit0 is to_ds and bit1 is from_ds; FCfield = 2 sets from_ds
        # only, so the transmitter address (addr2) carries the BSSID
        self.packet.FCfield = 2
        self.packet.addr1 = "11:11:11:11:11:11"
        self.packet.addr2 = "22:22:22:22:22:22"
        self.packet.addr3 = "33:33:33:33:33:33"
        message = "Fail to get correct BSSID as address 2"
        actual = self.deauth_obj0._extract_bssid(self.packet)
        expected = self.packet.addr2
        self.assertEqual(expected, actual, message)
    def test_extract_bssid_to_ds_1_from_ds_0_addr1(self):
        """
        Test _extract_bssid when to_ds is 1 and from_ds is 0.
        The case should return packet.addr1
        """
        # bit0 is to_ds and bit1 is from_ds; FCfield = 1 sets to_ds
        # only, so the receiver address (addr1) carries the BSSID
        self.packet.FCfield = 1
        self.packet.addr1 = "11:11:11:11:11:11"
        self.packet.addr2 = "22:22:22:22:22:22"
        self.packet.addr3 = "33:33:33:33:33:33"
        message = "Fail to get correct BSSID as address 1"
        actual = self.deauth_obj0._extract_bssid(self.packet)
        expected = self.packet.addr1
        self.assertEqual(expected, actual, message)
    def test_extract_bssid_to_ds_0_from_ds_0_addr3(self):
        """
        Test _extract_bssid when to_ds is 0 and from_ds is 0.
        The case should return packet.addr3
        """
        # bit0 is to_ds and bit1 is from_ds; FCfield = 0 clears both,
        # so addr3 carries the BSSID
        self.packet.FCfield = 0
        self.packet.addr1 = "11:11:11:11:11:11"
        self.packet.addr2 = "22:22:22:22:22:22"
        self.packet.addr3 = "33:33:33:33:33:33"
        message = "Fail to get correct BSSID as address 3"
        actual = self.deauth_obj0._extract_bssid(self.packet)
        expected = self.packet.addr3
        self.assertEqual(expected, actual, message)
def test_get_packet_to_ds_1_from_ds_1_empty(self):
"""
Drop the WDS frame in get_packet
"""
self.packet.FCfield = 3
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_address_malform_empty(self):
"""
Drop the frame if the address is malformed
"""
packet = mock.Mock(spec=[])
result = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
    def test_is_target_target_ap_bssid_true(self):
        """
        Test that _is_target matches a beacon advertising the specific
        ESSID configured via --deauth-essid (here "Evil"), regardless
        of the beacon's BSSID.
        """
        essid = dot11.Dot11Elt(ID='SSID', info="Evil")
        packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
        # BSSID differs from the target AP on purpose
        packet.addr3 = "99:99:99:99:99:99"
        self.deauth_obj0._data.args.deauth_essid = "Evil"
        result = self.deauth_obj0._is_target(packet)
        expected = True
        message = "Fail to check the attacking essid: " + self.target_essid
        self.assertEqual(result, expected, message)
    def test_is_target_essid_non_decodable_error(self):
        """
        _is_target must handle a non-UTF-8-decodable ESSID without
        propagating UnicodeDecodeError and report the beacon as not a
        target.
        """
        # raw bytes that are not valid UTF-8
        essid = dot11.Dot11Elt(ID='SSID', info='\x99\x87\x33')
        packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
        packet.addr3 = "99:99:99:99:99:99"
        result = self.deauth_obj0._is_target(packet)
        expected = False
        message = 'Fail to raise the UnicodeDecodeError for non-printable essid'
        self.assertEqual(result, expected, message)
    def test_channel_deauth(self):
        """
        Test that we are deauthing on the right channels each time.
        """
        # In obj0 we are targeting a specific AP
        # Default behavior (e.g. through AP selection phase)
        result = self.deauth_obj0.send_channels()
        expected = [str(self.deauth_obj0._data.target_ap_channel)]
        message = "Fail to receive right channels"
        self.assertEqual(result, expected, message)
        # In obj1 we set --deauth-channels 1 2 3 4; send_channels is
        # expected to stringify the configured integer channels
        self.deauth_obj1._data.args.deauth_channels = [1, 2, 3, 4]
        result = self.deauth_obj1.send_channels()
        expected = ['1', '2', '3', '4']
        message = "Fail to receive right channels"
        self.assertEqual(result, expected, message)
| 36.154114 | 101 | 0.639659 |
import collections
import unittest
from collections import defaultdict
import mock
import scapy.layers.dot11 as dot11
import wifiphisher.common.constants as constants
import wifiphisher.extensions.deauth as deauth
class TestDeauth(unittest.TestCase):
def setUp(self):
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
self.packet = dot11.RadioTap() / dot11.Dot11() / essid / rates / dsset
custom_tuple = collections.namedtuple("test",
("target_ap_bssid target_ap_channel rogue_ap_mac args "
"target_ap_essid is_freq_hop_allowed"))
self.target_channel = "6"
self.target_bssid = "BB:BB:BB:BB:BB:BB"
self.rogue_mac = "CC:CC:CC:CC:CC:CC"
self.target_essid = "Evil"
self.args = mock.Mock()
self.args.deauth_essid = False
self.args.channel_monitor = False
self.args.deauth_channels = []
data0 = custom_tuple(self.target_bssid, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
data1 = custom_tuple(None, self.target_channel, self.rogue_mac,
self.args, self.target_essid, True)
self.deauth_obj0 = deauth.Deauth(data0)
self.deauth_obj1 = deauth.Deauth(data1)
self.deauth_obj0._deauth_bssids = dict()
self.deauth_obj1._deauth_bssids = dict()
def test_craft_packet_normal_expected(self):
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "00:00:00:00:00:00"
result = self.deauth_obj0._craft_packet(sender, receiver, bssid)
message0 = "Failed to craft a packet for disassociation"
message1 = "Failed to craft a packet for deauthentication"
self.assertEqual(result[0].addr1, receiver, message0)
self.assertEqual(result[0].addr2, sender, message0)
self.assertEqual(result[0].addr3, bssid, message0)
self.assertEqual(result[1].addr1, receiver, message1)
self.assertEqual(result[1].addr2, sender, message1)
self.assertEqual(result[1].addr3, bssid, message1)
def test_get_packet_broadcast(self):
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
essid = dot11.Dot11Elt(ID='SSID', info="")
rates = dot11.Dot11Elt(ID='Rates', info="\x03\x12\x96\x18\x24\x30\x48\x60")
dsset = dot11.Dot11Elt(ID='DSset', info='\x06')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid / rates / dsset
packet.addr1 = receiver
packet.addr2 = sender
packet.addr3 = self.target_bssid
packet.FCfield = 0x0
pkts_to_send = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send, True,
message0)
result = pkts_to_send[self.target_channel]
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[0].addr2, self.target_bssid, message1)
self.assertEqual(result[0].addr3, self.target_bssid, message1)
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, constants.WIFI_BROADCAST, message1)
self.assertEqual(result[1].addr2, self.target_bssid, message1)
self.assertEqual(result[1].addr3, self.target_bssid, message1)
def test_get_packet_second_run_non_releavent_client_empty(self):
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = "55:55:55:55:55:55"
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_get_packet_second_run_our_ap_empty(self):
sender0 = "00:00:00:00:00:00"
receiver0 = "11:11:11:11:11:11"
bssid0 = "22:22:22:22:22:22:22"
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = self.rogue_mac
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_get_packet_multiple_clients_multiple_packets(self):
sender0 = self.target_bssid
receiver0 = "11:11:11:11:11:11"
bssid0 = self.target_bssid
sender1 = "33:33:33:33:33:33"
receiver1 = self.target_bssid
bssid1 = self.target_bssid
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj0._deauth_bssids[self.target_bssid] = self.target_channel
pkts_to_send0 = self.deauth_obj0.get_packet(self.packet)
result0 = pkts_to_send0[self.target_channel]
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
pkts_to_send1 = self.deauth_obj0.get_packet(self.packet)
result1 = pkts_to_send1[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send0, True,
message0)
self.assertEqual(result0[0].subtype, 10, message1)
self.assertEqual(result0[0].addr1, self.target_bssid, message1)
self.assertEqual(result0[0].addr2, receiver0, message1)
self.assertEqual(result0[0].addr3, self.target_bssid, message1)
self.assertEqual(result0[1].subtype, 12, message1)
self.assertEqual(result0[1].addr1, self.target_bssid, message1)
self.assertEqual(result0[1].addr2, receiver0, message1)
self.assertEqual(result0[1].addr3, self.target_bssid, message1)
self.assertEqual(result0[2].subtype, 10, message1)
self.assertEqual(result0[2].addr1, receiver0, message1)
self.assertEqual(result0[2].addr2, self.target_bssid, message1)
self.assertEqual(result0[2].addr3, self.target_bssid, message1)
self.assertEqual(result0[3].subtype, 12, message1)
self.assertEqual(result0[3].addr1, receiver0, message1)
self.assertEqual(result0[3].addr2, self.target_bssid, message1)
self.assertEqual(result0[3].addr3, self.target_bssid, message1)
self.assertEqual(result1[4].subtype, 10, message1)
self.assertEqual(result1[4].addr1, sender1, message1)
self.assertEqual(result1[4].addr2, self.target_bssid, message1)
self.assertEqual(result1[4].addr3, self.target_bssid, message1)
self.assertEqual(result1[5].subtype, 12, message1)
self.assertEqual(result1[5].addr1, sender1, message1)
self.assertEqual(result1[5].addr2, self.target_bssid, message1)
self.assertEqual(result1[5].addr3, self.target_bssid, message1)
self.assertEqual(result1[6].subtype, 10, message1)
self.assertEqual(result1[6].addr1, self.target_bssid, message1)
self.assertEqual(result1[6].addr2, sender1, message1)
self.assertEqual(result1[6].addr3, self.target_bssid, message1)
self.assertEqual(result1[7].subtype, 12, message1)
self.assertEqual(result1[7].addr1, self.target_bssid, message1)
self.assertEqual(result1[7].addr2, sender1, message1)
self.assertEqual(result1[7].addr3, self.target_bssid, message1)
def test_get_packet_essid_flag_client_client_packet(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
pkts_to_send = self.deauth_obj1.get_packet(self.packet)
result = pkts_to_send[self.target_channel]
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(self.target_channel in pkts_to_send, True, message0)
self.assertEqual(result[0].subtype, 10, message1)
self.assertEqual(result[0].addr1, sender, message1)
self.assertEqual(result[0].addr2, receiver, message1)
self.assertEqual(result[0].addr3, bssid, message1)
self.assertEqual(result[1].subtype, 12, message1)
self.assertEqual(result[1].addr1, sender, message1)
self.assertEqual(result[1].addr2, receiver, message1)
self.assertEqual(result[1].addr3, bssid, message1)
self.assertEqual(result[2].subtype, 10, message1)
self.assertEqual(result[2].addr1, receiver, message1)
self.assertEqual(result[2].addr2, sender, message1)
self.assertEqual(result[2].addr3, bssid, message1)
self.assertEqual(result[3].subtype, 12, message1)
self.assertEqual(result[3].addr1, receiver, message1)
self.assertEqual(result[3].addr2, sender, message1)
self.assertEqual(result[3].addr3, bssid, message1)
def test_get_packet_essid_flag_our_own_ap_empty_list(self):
sender = "00:00:00:00:00:00"
receiver = self.rogue_mac
bssid = self.rogue_mac
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed0_channel_empty_list(self, mock_ord):
mock_ord.side_effect = TypeError
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed1_channel_empty_list(self, mock_ord):
mock_ord.side_effect = IndexError
sender = "00:00:00:00:00:00"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
@mock.patch("wifiphisher.extensions.deauth.ord")
def test_get_packet_essid_flag_malformed2_channel_empty_list(self, mock_ord):
mock_ord.return_value = 200
sender = "33:33:33:33:33:33"
receiver = "11:11:11:11:11:11"
bssid = "22:22:22:22:22:22:22"
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
result = self.deauth_obj1.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], [], message0)
self.assertEqual(result[1], [], message1)
def test_add_client_invalid_sender_none(self):
sender = constants.WIFI_INVALID
receiver = "11:11:11:11:11:11"
bssid = receiver
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_invalid_receiver_none(self):
sender = "11:11:11:11:11:11"
receiver = constants.WIFI_INVALID
bssid = sender
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_invalid_sender_receiver_none(self):
sender = constants.WIFI_INVALID
receiver = constants.WIFI_INVALID
bssid = "22:22:22:22:22:22:22"
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_irrelevent_sender_receiver_none(self):
sender = "11:11:11:11:11:11"
receiver = "33:33:33:33:33:33"
bssid = "22:22:22:22:22:22:22"
result = self.deauth_obj0._add_clients(sender, receiver, bssid)
self.assertIsNone(result)
def test_add_client_receiver_is_bssid_packets(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = receiver
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], sender, message0)
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_add_client_sender_is_bssid_packets(self):
sender = "22:22:22:22:22:22"
receiver = "11:11:11:11:11:11"
bssid = sender
result = self.deauth_obj1._add_clients(sender, receiver, bssid)
message0 = "Failed to return the correct client"
message1 = "Failed to return an correct packets"
self.assertEqual(result[0], receiver, message0)
self.assertEqual(result[1][0].subtype, 10, message1)
self.assertEqual(result[1][0].addr1, sender, message1)
self.assertEqual(result[1][0].addr2, receiver, message1)
self.assertEqual(result[1][0].addr3, bssid, message1)
self.assertEqual(result[1][1].subtype, 12, message1)
self.assertEqual(result[1][1].addr1, sender, message1)
self.assertEqual(result[1][1].addr2, receiver, message1)
self.assertEqual(result[1][1].addr3, bssid, message1)
self.assertEqual(result[1][2].subtype, 10, message1)
self.assertEqual(result[1][2].addr1, receiver, message1)
self.assertEqual(result[1][2].addr2, sender, message1)
self.assertEqual(result[1][2].addr3, bssid, message1)
self.assertEqual(result[1][3].subtype, 12, message1)
self.assertEqual(result[1][3].addr1, receiver, message1)
self.assertEqual(result[1][3].addr2, sender, message1)
self.assertEqual(result[1][3].addr3, bssid, message1)
def test_send_output_no_client_proper(self):
message = "Failed to send the proper output"
self.assertEqual(self.deauth_obj1.send_output(), [], message)
def test_send_output_single_client_proper(self):
sender = "44:44:44:44:44:44"
receiver = "55:55:55:55:55:55"
bssid = receiver
self.packet.addr1 = receiver
self.packet.addr2 = sender
self.packet.addr3 = bssid
self.deauth_obj1._deauth_bssids[bssid] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected = "DEAUTH/DISAS - {}".format(sender)
message = "Failed to send the proper output"
self.assertEqual(expected, actual[0], message)
def test_send_output_multiple_client_proper(self):
sender0 = "22:22:22:22:22:22"
receiver0 = "11:11:11:11:11:11"
bssid0 = receiver0
sender1 = "33:33:33:33:33:33"
receiver1 = "44:44:44:44:44:44"
bssid1 = sender1
self.packet.addr1 = receiver0
self.packet.addr2 = sender0
self.packet.addr3 = bssid0
self.deauth_obj1._deauth_bssids[bssid0] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
self.packet.addr1 = receiver1
self.packet.addr2 = sender1
self.packet.addr3 = bssid1
self.deauth_obj1._deauth_bssids[bssid1] = self.target_channel
self.deauth_obj1.get_packet(self.packet)
actual = self.deauth_obj1.send_output()
expected0 = "DEAUTH/DISAS - {}".format(sender0)
expected1 = "DEAUTH/DISAS - {}".format(receiver1)
self.assertIn(expected0, actual)
self.assertIn(expected1, actual)
def test_send_channels_non_frenzy_target_channel(self):
actual = self.deauth_obj0.send_channels()
message = "Failed to send target AP's channel"
expected = [self.target_channel]
self.assertEqual(expected, actual, message)
def test_send_channels_frenzy_all_channels(self):
actual = self.deauth_obj1.send_channels()
message = "Failed to send all the channels"
expected = [str(ch) for ch in range(1, 14)]
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_1_addr2(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 2
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 2"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr2
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_1_from_ds_0_addr1(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 1
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 1"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr1
self.assertEqual(expected, actual, message)
def test_extract_bssid_to_ds_0_from_ds_0_addr3(self):
# bit0 is to_ds and bit1 is from_ds
self.packet.FCfield = 0
self.packet.addr1 = "11:11:11:11:11:11"
self.packet.addr2 = "22:22:22:22:22:22"
self.packet.addr3 = "33:33:33:33:33:33"
message = "Fail to get correct BSSID as address 3"
actual = self.deauth_obj0._extract_bssid(self.packet)
expected = self.packet.addr3
self.assertEqual(expected, actual, message)
def test_get_packet_to_ds_1_from_ds_1_empty(self):
self.packet.FCfield = 3
result = self.deauth_obj0.get_packet(self.packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_get_packet_address_malform_empty(self):
packet = mock.Mock(spec=[])
result = self.deauth_obj0.get_packet(packet)
message0 = "Failed to return an correct channel"
message1 = "Failed to return an correct packets"
# check channel
self.assertEqual(result[0], [], message0)
# check the packets
self.assertEqual(result[1], [], message1)
def test_is_target_target_ap_bssid_true(self):
essid = dot11.Dot11Elt(ID='SSID', info="Evil")
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
self.deauth_obj0._data.args.deauth_essid = "Evil"
result = self.deauth_obj0._is_target(packet)
expected = True
message = "Fail to check the attacking essid: " + self.target_essid
self.assertEqual(result, expected, message)
def test_is_target_essid_non_decodable_error(self):
essid = dot11.Dot11Elt(ID='SSID', info='\x99\x87\x33')
packet = dot11.RadioTap() / dot11.Dot11() / dot11.Dot11Beacon() / essid
packet.addr3 = "99:99:99:99:99:99"
result = self.deauth_obj0._is_target(packet)
expected = False
message = 'Fail to raise the UnicodeDecodeError for non-printable essid'
self.assertEqual(result, expected, message)
def test_channel_deauth(self):
# In obj0 we are targetting a specific AP
# Default behavior (e.g. through AP selection phase)
result = self.deauth_obj0.send_channels()
expected = [str(self.deauth_obj0._data.target_ap_channel)]
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
# In obj1 we set --deauth-channels 1 2 3 4
self.deauth_obj1._data.args.deauth_channels = [1, 2, 3, 4]
result = self.deauth_obj1.send_channels()
expected = ['1', '2', '3', '4']
message = "Fail to receive right channels"
self.assertEqual(result, expected, message)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.