| code | apis | extract_api |
|---|---|---|
import numpy as np
import pyastar
# The start and goal coordinates are in matrix coordinates (i, j).
start = (0, 0)
goal = (4, 4)
# The minimum cost must be 1 for the heuristic to be valid: the distance
# heuristic counts steps at unit cost, so weights below 1 would let it
# overestimate the true remaining cost.
weights = np.array([[1, 3, 3, 3, 3],
                    [2, 1, 3, 3, 3],
                    [2, 2, 1, 3, 3],
                    [2, 2, 2, 1, 3],
                    [2, 2, 2, 2, 1]], dtype=np.float32)
print("Cost matrix:")
print(weights)
path = pyastar.astar_path(weights, start, goal, allow_diagonal=True)
# The path is returned as a numpy array of (i, j) coordinates.
print(f"Shortest path from {start} to {goal} found:")
print(path)
|
[
"pyastar.astar_path",
"numpy.array"
] |
[((203, 321), 'numpy.array', 'np.array', (['[[1, 3, 3, 3, 3], [2, 1, 3, 3, 3], [2, 2, 1, 3, 3], [2, 2, 2, 1, 3], [2, 2,\n 2, 2, 1]]'], {'dtype': 'np.float32'}), '([[1, 3, 3, 3, 3], [2, 1, 3, 3, 3], [2, 2, 1, 3, 3], [2, 2, 2, 1, 3\n ], [2, 2, 2, 2, 1]], dtype=np.float32)\n', (211, 321), True, 'import numpy as np\n'), ((441, 502), 'pyastar.astar_path', 'pyastar.astar_path', (['weights', 'start', 'goal'], {'allow_diagonal': '(True)'}), '(weights, start, goal, allow_diagonal=True)\n', (459, 502), False, 'import pyastar\n')]
|
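A quick hedged check of the result above: the comments state that `astar_path` returns an (N, 2) array of (i, j) coordinates, so the total traversal cost can be recomputed from the weights. The helper below is illustrative, not part of pyastar:

import numpy as np

def path_cost(weights, path):
    # Sum the weight of every cell entered after the start cell;
    # assumes path is an (N, 2) integer array of (i, j) coordinates.
    return float(sum(weights[i, j] for i, j in path[1:]))

# For the cost matrix above, the diagonal path (0, 0) -> (4, 4) should
# cost 4.0: each of the four cells entered has weight 1.
weights = np.array([[1, 3, 3, 3, 3],
                    [2, 1, 3, 3, 3],
                    [2, 2, 1, 3, 3],
                    [2, 2, 2, 1, 3],
                    [2, 2, 2, 2, 1]], dtype=np.float32)
diagonal = np.array([[k, k] for k in range(5)])
assert path_cost(weights, diagonal) == 4.0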
"""
A micro:bit MicroPython implementation of a random string sender for testing's sake.
"""
from microbit import *
import random
import radio
radio.on()
radio.config()
primes = [2, 3, 5, 7, 11, 13, 17, 19]
e = random.choice(primes)
while True:
p = random.choice(primes)
if p % e != -1:
break
while True:
q = random.choice(primes)
if q % e != -1:
break
n = p * q
d = (e ** -1.0) % ((p-1) * (q - 1))
e = str(e)
n = str(n)
d = str(d)
while True:
radio.send(e + " " + n + " " + d)
sleep(500)
|
[
"radio.send",
"radio.config",
"random.choice",
"radio.on"
] |
[((145, 155), 'radio.on', 'radio.on', ([], {}), '()\n', (153, 155), False, 'import radio\n'), ((157, 171), 'radio.config', 'radio.config', ([], {}), '()\n', (169, 171), False, 'import radio\n'), ((217, 238), 'random.choice', 'random.choice', (['primes'], {}), '(primes)\n', (230, 238), False, 'import random\n'), ((259, 280), 'random.choice', 'random.choice', (['primes'], {}), '(primes)\n', (272, 280), False, 'import random\n'), ((340, 361), 'random.choice', 'random.choice', (['primes'], {}), '(primes)\n', (353, 361), False, 'import random\n'), ((495, 528), 'radio.send', 'radio.send', (["(e + ' ' + n + ' ' + d)"], {}), "(e + ' ' + n + ' ' + d)\n", (505, 528), False, 'import radio\n')]
|
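For context, a hedged sketch of a matching receiver (hypothetical, not part of the original): in micro:bit MicroPython, `radio.receive()` returns the next queued message as a string, or `None` when nothing is waiting.

from microbit import *
import radio

radio.on()
radio.config()  # must match the sender's radio settings
while True:
    msg = radio.receive()  # None if no message is queued
    if msg is not None:
        e, n, d = msg.split(" ")
        display.scroll(n)  # show the received modulus
    sleep(200)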
from pathlib import Path
module_dir = Path.home() / "module_results/basin_rivers"
module_dir.mkdir(parents=True, exist_ok=True)  # parents=True also creates "module_results" if missing
|
[
"pathlib.Path.home"
] |
[((39, 50), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (48, 50), False, 'from pathlib import Path\n')]
|
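For illustration, a hedged sketch of the failure mode that `parents=True` avoids (same path as above):

from pathlib import Path

nested = Path.home() / "module_results/basin_rivers"
try:
    nested.mkdir(exist_ok=True)  # raises if "module_results" does not exist yet
except FileNotFoundError:
    nested.mkdir(parents=True, exist_ok=True)  # creates the missing parent too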
"""
Some basic admin tests.
Rather than testing the frontend UI -- that'd be a job for something like
Selenium -- this does a bunch of mocking and just tests the various admin
callbacks.
"""
import mock
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.http import HttpRequest
from django import forms
from fack.admin import QuestionAdmin
from fack.models import Question
class FAQAdminTests(TestCase):
def test_question_admin_save_model(self):
user = get_user_model()
user1 = mock.Mock(spec=user)
user2 = mock.Mock(spec=user)
req = mock.Mock(spec=HttpRequest)
obj = mock.Mock(spec=Question)
form = mock.Mock(spec=forms.Form)
qa = QuestionAdmin(Question, admin.site)
# Test saving a new model.
req.user = user1
qa.save_model(req, obj, form, change=False)
self.assertEqual(obj.save.call_count, 1)
self.assertEqual(obj.created_by, user1, "created_by wasn't set to request.user")
self.assertEqual(obj.updated_by, user1, "updated_by wasn't set to request.user")
# And saving an existing model.
obj.save.reset_mock()
req.user = user2
qa.save_model(req, obj, form, change=True)
self.assertEqual(obj.save.call_count, 1)
self.assertEqual(obj.created_by, user1, "created_by shouldn't have been changed")
self.assertEqual(obj.updated_by, user2, "updated_by wasn't set to request.user")
|
[
"fack.admin.QuestionAdmin",
"django.contrib.auth.get_user_model",
"mock.Mock"
] |
[((543, 559), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (557, 559), False, 'from django.contrib.auth import get_user_model\n'), ((576, 596), 'mock.Mock', 'mock.Mock', ([], {'spec': 'user'}), '(spec=user)\n', (585, 596), False, 'import mock\n'), ((613, 633), 'mock.Mock', 'mock.Mock', ([], {'spec': 'user'}), '(spec=user)\n', (622, 633), False, 'import mock\n'), ((648, 675), 'mock.Mock', 'mock.Mock', ([], {'spec': 'HttpRequest'}), '(spec=HttpRequest)\n', (657, 675), False, 'import mock\n'), ((690, 714), 'mock.Mock', 'mock.Mock', ([], {'spec': 'Question'}), '(spec=Question)\n', (699, 714), False, 'import mock\n'), ((730, 756), 'mock.Mock', 'mock.Mock', ([], {'spec': 'forms.Form'}), '(spec=forms.Form)\n', (739, 756), False, 'import mock\n'), ((771, 806), 'fack.admin.QuestionAdmin', 'QuestionAdmin', (['Question', 'admin.site'], {}), '(Question, admin.site)\n', (784, 806), False, 'from fack.admin import QuestionAdmin\n')]
|
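The test above pins down the contract of `save_model`: `created_by` is set only when `change` is False, while `updated_by` is set on every save. A minimal sketch consistent with those assertions (hypothetical, not the actual `fack.admin` source):

from django.contrib import admin

class QuestionAdminSketch(admin.ModelAdmin):  # hypothetical name
    def save_model(self, request, obj, form, change):
        if not change:
            obj.created_by = request.user  # only on the initial save
        obj.updated_by = request.user  # on every save
        obj.save()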
# engine/cursor.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define cursor-specific result set constructs including
:class:`.BaseCursorResult`, :class:`.CursorResult`."""
import collections
import functools
from .result import Result
from .result import ResultMetaData
from .result import SimpleResultMetaData
from .result import tuplegetter
from .row import LegacyRow
from .. import exc
from .. import util
from ..sql import expression
from ..sql import sqltypes
from ..sql import util as sql_util
from ..sql.base import _generative
from ..sql.compiler import RM_NAME
from ..sql.compiler import RM_OBJECTS
from ..sql.compiler import RM_RENDERED_NAME
from ..sql.compiler import RM_TYPE
_UNPICKLED = util.symbol("unpickled")
# metadata entry tuple indexes.
# using raw tuple is faster than namedtuple.
MD_INDEX = 0 # integer index in cursor.description
MD_OBJECTS = 1 # other string keys and ColumnElement obj that can match
MD_LOOKUP_KEY = 2 # string key we usually expect for key-based lookup
MD_RENDERED_NAME = 3 # name that is usually in cursor.description
MD_PROCESSOR = 4 # callable to process a result value into a row
MD_UNTRANSLATED = 5 # raw name from cursor.description
class CursorResultMetaData(ResultMetaData):
"""Result metadata for DBAPI cursors."""
__slots__ = (
"_keymap",
"case_sensitive",
"_processors",
"_keys",
"_tuplefilter",
"_translated_indexes",
"_safe_for_cache"
# don't need _unique_filters support here for now. Can be added
# if a need arises.
)
returns_rows = True
def _has_key(self, key):
return key in self._keymap
def _for_freeze(self):
return SimpleResultMetaData(
self._keys,
extra=[self._keymap[key][MD_OBJECTS] for key in self._keys],
)
def _reduce(self, keys):
recs = list(self._metadata_for_keys(keys))
indexes = [rec[MD_INDEX] for rec in recs]
new_keys = [rec[MD_LOOKUP_KEY] for rec in recs]
if self._translated_indexes:
indexes = [self._translated_indexes[idx] for idx in indexes]
tup = tuplegetter(*indexes)
new_metadata = self.__class__.__new__(self.__class__)
new_metadata.case_sensitive = self.case_sensitive
new_metadata._processors = self._processors
new_metadata._keys = new_keys
new_metadata._tuplefilter = tup
new_metadata._translated_indexes = indexes
new_recs = [
(index,) + rec[1:]
for index, rec in enumerate(self._metadata_for_keys(keys))
]
new_metadata._keymap = {rec[MD_LOOKUP_KEY]: rec for rec in new_recs}
# TODO: need unit test for:
# result = connection.execute("raw sql, no columns").scalars()
# without the "or ()" it's failing because MD_OBJECTS is None
new_metadata._keymap.update(
{
e: new_rec
for new_rec in new_recs
for e in new_rec[MD_OBJECTS] or ()
}
)
return new_metadata
def _adapt_to_context(self, context):
"""When using a cached Compiled construct that has a _result_map,
for a new statement that used the cached Compiled, we need to ensure
the keymap has the Column objects from our new statement as keys.
So here we rewrite keymap with new entries for the new columns
as matched to those of the cached statement.
"""
if not context.compiled._result_columns:
return self
compiled_statement = context.compiled.statement
invoked_statement = context.invoked_statement
if compiled_statement is invoked_statement:
return self
# make a copy and add the columns from the invoked statement
# to the result map.
md = self.__class__.__new__(self.__class__)
md._keymap = dict(self._keymap)
# match up new columns positionally to the result columns
for existing, new in zip(
context.compiled._result_columns,
invoked_statement._exported_columns_iterator(),
):
if existing[RM_NAME] in md._keymap:
md._keymap[new] = md._keymap[existing[RM_NAME]]
md.case_sensitive = self.case_sensitive
md._processors = self._processors
assert not self._tuplefilter
md._tuplefilter = None
md._translated_indexes = None
md._keys = self._keys
return md
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self._tuplefilter = None
self._translated_indexes = None
self.case_sensitive = dialect.case_sensitive
self._safe_for_cache = False
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = (
cols_are_ordered
) = (
num_ctx_cols
) = loose_column_name_matching = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
)
self._keymap = {}
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [
metadata_entry[MD_PROCESSOR] for metadata_entry in raw
]
# keymap by primary string...
by_key = dict(
[
(metadata_entry[MD_LOOKUP_KEY], metadata_entry)
for metadata_entry in raw
]
)
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
# new in 1.4: get the complete set of all possible keys,
# strings, objects, whatever, that are dupes across two
# different records, first.
index_by_key = {}
dupes = set()
for metadata_entry in raw:
for key in (metadata_entry[MD_RENDERED_NAME],) + (
metadata_entry[MD_OBJECTS] or ()
):
if not self.case_sensitive and isinstance(
key, util.string_types
):
key = key.lower()
idx = metadata_entry[MD_INDEX]
# if this key has been associated with more than one
# positional index, it's a dupe
if index_by_key.setdefault(key, idx) != idx:
dupes.add(key)
# then put everything we have into the keymap excluding only
# those keys that are dupes.
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
if obj_elem not in dupes
]
)
# then for the dupe keys, put the "ambiguous column"
# record into by_key.
by_key.update({key: (None, (), key) for key in dupes})
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[
(
metadata_entry[MD_UNTRANSLATED],
self._keymap[metadata_entry[MD_LOOKUP_KEY]],
)
for metadata_entry in raw
if metadata_entry[MD_UNTRANSLATED]
]
)
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`_expression.TextualSelect` construct.
This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
        SQLAlchemy for all cases up through the 0.9 series.  Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`_expression.TextualSelect` objects in 1.1.
As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self._keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
# this metadata is safe to cache because we are guaranteed
# to have the columns in the same order for new executions
self._safe_for_cache = True
return [
(
idx,
rmap_entry[RM_OBJECTS],
rmap_entry[RM_NAME].lower()
if not case_sensitive
else rmap_entry[RM_NAME],
rmap_entry[RM_RENDERED_NAME],
context.get_result_processor(
rmap_entry[RM_TYPE],
rmap_entry[RM_RENDERED_NAME],
cursor_description[idx][1],
),
None,
)
for idx, rmap_entry in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
self._safe_for_cache = True
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
# the order of columns can change if the query is
# against a "select *", so not safe to cache
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
result_columns,
loose_column_name_matching,
)
else:
# no compiled SQL, just a raw string, order of columns
# can change for "select *"
self._safe_for_cache = False
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
return [
(
idx,
obj,
cursor_colname,
cursor_colname,
context.get_result_processor(
mapped_type, cursor_colname, coltype
),
untranslated,
)
for (
idx,
cursor_colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
def _colnames_from_description(self, context, cursor_description):
"""Extract column names and data types from a cursor.description.
Applies unicode decoding, column translation, "normalization",
and case sensitivity rules to the names based on the dialect.
"""
dialect = context.dialect
case_sensitive = dialect.case_sensitive
translate_colname = context._translate_colname
description_decoder = (
dialect._description_decoder
if dialect.description_encoding
else None
)
normalize_name = (
dialect.normalize_name if dialect.requires_name_normalize else None
)
untranslated = None
self._keys = []
for idx, rec in enumerate(cursor_description):
colname = rec[0]
coltype = rec[1]
if description_decoder:
colname = description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if normalize_name:
colname = normalize_name(colname)
self._keys.append(colname)
if not case_sensitive:
colname = colname.lower()
yield idx, colname, untranslated, coltype
def _merge_textual_cols_by_position(
self, context, cursor_description, result_columns
):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
                % (len(cursor_description), num_ctx_cols)
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[RM_OBJECTS]
mapped_type = ctx_rec[RM_TYPE]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested "
"in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_name(
self,
context,
cursor_description,
result_columns,
loose_column_name_matching,
):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
match_map = self._create_description_match_map(
result_columns, case_sensitive, loose_column_name_matching
)
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = match_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
yield idx, colname, mapped_type, coltype, obj, untranslated
@classmethod
def _create_description_match_map(
cls,
result_columns,
case_sensitive=True,
loose_column_name_matching=False,
):
"""when matching cursor.description to a set of names that are present
in a Compiled object, as is the case with TextualSelect, get all the
names we expect might match those in cursor.description.
"""
d = {}
for elem in result_columns:
key = elem[RM_RENDERED_NAME]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname - just add the column-linked objects
# to the existing record. if there is a duplicate column
# name in the cursor description, this will allow all of those
# objects to raise an ambiguous column error
e_name, e_obj, e_type = d[key]
d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type
else:
d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
if loose_column_name_matching:
# when using a textual statement with an unordered set
# of columns that line up, we are expecting the user
# to be using label names in the SQL that match to the column
# expressions. Enable more liberal matching for this case;
# duplicate keys that are ambiguous will be fixed later.
for r_key in elem[RM_OBJECTS]:
d.setdefault(
r_key, (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
)
return d
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
def _key_fallback(self, key, err, raiseerr=True):
if raiseerr:
util.raise_(
exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% util.string_or_unprintable(key)
),
replace_context=err,
)
else:
return None
def _raise_for_ambiguous_column_name(self, rec):
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % rec[MD_LOOKUP_KEY]
)
def _index_for_key(self, key, raiseerr=True):
# TODO: can consider pre-loading ints and negative ints
# into _keymap - also no coverage here
if isinstance(key, int):
key = self._keys[key]
try:
rec = self._keymap[key]
except KeyError as ke:
rec = self._key_fallback(key, ke, raiseerr)
if rec is None:
return None
index = rec[0]
if index is None:
self._raise_for_ambiguous_column_name(rec)
return index
def _indexes_for_keys(self, keys):
try:
return [self._keymap[key][0] for key in keys]
except KeyError as ke:
# ensure it raises
CursorResultMetaData._key_fallback(self, ke.args[0], ke)
def _metadata_for_keys(self, keys):
for key in keys:
if int in key.__class__.__mro__:
key = self._keys[key]
try:
rec = self._keymap[key]
except KeyError as ke:
# ensure it raises
CursorResultMetaData._key_fallback(self, ke.args[0], ke)
index = rec[0]
if index is None:
self._raise_for_ambiguous_column_name(rec)
yield rec
def __getstate__(self):
return {
"_keymap": {
key: (rec[MD_INDEX], _UNPICKLED, key)
for key, rec in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
},
"_keys": self._keys,
"case_sensitive": self.case_sensitive,
"_translated_indexes": self._translated_indexes,
"_tuplefilter": self._tuplefilter,
}
def __setstate__(self, state):
self._processors = [None for _ in range(len(state["_keys"]))]
self._keymap = state["_keymap"]
self._keys = state["_keys"]
self.case_sensitive = state["case_sensitive"]
if state["_translated_indexes"]:
self._translated_indexes = state["_translated_indexes"]
self._tuplefilter = tuplegetter(*self._translated_indexes)
else:
self._translated_indexes = self._tuplefilter = None
class LegacyCursorResultMetaData(CursorResultMetaData):
__slots__ = ()
def _contains(self, value, row):
key = value
if key in self._keymap:
util.warn_deprecated_20(
"Using the 'in' operator to test for string or column "
"keys, or integer indexes, in a :class:`.Row` object is "
"deprecated and will "
"be removed in a future release. "
"Use the `Row._fields` or `Row._mapping` attribute, i.e. "
"'key in row._fields'",
)
return True
else:
return self._key_fallback(key, None, False) is not None
def _key_fallback(self, key, err, raiseerr=True):
map_ = self._keymap
result = None
if isinstance(key, util.string_types):
result = map_.get(key if self.case_sensitive else key.lower())
elif isinstance(key, expression.ColumnElement):
if (
key._label
and (key._label if self.case_sensitive else key._label.lower())
in map_
):
result = map_[
key._label if self.case_sensitive else key._label.lower()
]
elif (
hasattr(key, "name")
and (key.name if self.case_sensitive else key.name.lower())
in map_
):
# match is only on name.
result = map_[
key.name if self.case_sensitive else key.name.lower()
]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and result[MD_OBJECTS] not in (
None,
_UNPICKLED,
):
for obj in result[MD_OBJECTS]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is not None:
if result[MD_OBJECTS] is _UNPICKLED:
util.warn_deprecated(
"Retrieving row values using Column objects from a "
"row that was unpickled is deprecated; adequate "
"state cannot be pickled for this to be efficient. "
"This usage will raise KeyError in a future release.",
version="1.4",
)
else:
util.warn_deprecated(
"Retrieving row values using Column objects with only "
"matching names as keys is deprecated, and will raise "
"KeyError in a future release; only Column "
"objects that are explicitly part of the statement "
"object should be used.",
version="1.4",
)
if result is None:
if raiseerr:
util.raise_(
exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% util.string_or_unprintable(key)
),
replace_context=err,
)
else:
return None
else:
map_[key] = result
return result
def _warn_for_nonint(self, key):
util.warn_deprecated_20(
"Using non-integer/slice indices on Row is deprecated and will "
"be removed in version 2.0; please use row._mapping[<key>], or "
"the mappings() accessor on the Result object.",
stacklevel=4,
)
def _has_key(self, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, None, False) is not None
class ResultFetchStrategy(object):
"""Define a fetching strategy for a result object.
.. versionadded:: 1.4
"""
__slots__ = ()
alternate_cursor_description = None
def soft_close(self, result, dbapi_cursor):
raise NotImplementedError()
def hard_close(self, result, dbapi_cursor):
raise NotImplementedError()
def yield_per(self, result, dbapi_cursor, num):
return
def fetchone(self, result, dbapi_cursor, hard_close=False):
raise NotImplementedError()
def fetchmany(self, result, dbapi_cursor, size=None):
raise NotImplementedError()
    def fetchall(self, result, dbapi_cursor):
raise NotImplementedError()
def handle_exception(self, result, dbapi_cursor, err):
raise err
class NoCursorFetchStrategy(ResultFetchStrategy):
"""Cursor strategy for a result that has no open cursor.
There are two varieties of this strategy, one for DQL and one for
DML (and also DDL), each of which represent a result that had a cursor
but no longer has one.
"""
__slots__ = ()
def soft_close(self, result, dbapi_cursor):
pass
def hard_close(self, result, dbapi_cursor):
pass
def fetchone(self, result, dbapi_cursor, hard_close=False):
return self._non_result(result, None)
def fetchmany(self, result, dbapi_cursor, size=None):
return self._non_result(result, [])
def fetchall(self, result, dbapi_cursor):
return self._non_result(result, [])
def _non_result(self, result, default, err=None):
raise NotImplementedError()
class NoCursorDQLFetchStrategy(NoCursorFetchStrategy):
"""Cursor strategy for a DQL result that has no open cursor.
This is a result set that can return rows, i.e. for a SELECT, or for an
INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
where the cursor is closed and no rows remain available. The owning result
object may or may not be "hard closed", which determines if the fetch
methods send empty results or raise for closed result.
"""
__slots__ = ()
def _non_result(self, result, default, err=None):
if result.closed:
util.raise_(
exc.ResourceClosedError("This result object is closed."),
replace_context=err,
)
else:
return default
_NO_CURSOR_DQL = NoCursorDQLFetchStrategy()
class NoCursorDMLFetchStrategy(NoCursorFetchStrategy):
"""Cursor strategy for a DML result that has no open cursor.
This is a result set that does not return rows, i.e. for an INSERT,
UPDATE, DELETE that does not include RETURNING.
"""
__slots__ = ()
def _non_result(self, result, default, err=None):
# we only expect to have a _NoResultMetaData() here right now.
assert not result._metadata.returns_rows
result._metadata._we_dont_return_rows(err)
_NO_CURSOR_DML = NoCursorDMLFetchStrategy()
class CursorFetchStrategy(ResultFetchStrategy):
"""Call fetch methods from a DBAPI cursor.
Alternate versions of this class may instead buffer the rows from
cursors or not use cursors at all.
"""
__slots__ = ()
def soft_close(self, result, dbapi_cursor):
result.cursor_strategy = _NO_CURSOR_DQL
def hard_close(self, result, dbapi_cursor):
result.cursor_strategy = _NO_CURSOR_DQL
def handle_exception(self, result, dbapi_cursor, err):
result.connection._handle_dbapi_exception(
err, None, None, dbapi_cursor, result.context
)
def yield_per(self, result, dbapi_cursor, num):
result.cursor_strategy = BufferedRowCursorFetchStrategy(
dbapi_cursor,
{"max_row_buffer": num},
initial_buffer=collections.deque(),
growth_factor=0,
)
def fetchone(self, result, dbapi_cursor, hard_close=False):
try:
row = dbapi_cursor.fetchone()
if row is None:
result._soft_close(hard=hard_close)
return row
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
def fetchmany(self, result, dbapi_cursor, size=None):
try:
if size is None:
l = dbapi_cursor.fetchmany()
else:
l = dbapi_cursor.fetchmany(size)
if not l:
result._soft_close()
return l
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
def fetchall(self, result, dbapi_cursor):
try:
rows = dbapi_cursor.fetchall()
result._soft_close()
return rows
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
_DEFAULT_FETCH = CursorFetchStrategy()
class BufferedRowCursorFetchStrategy(CursorFetchStrategy):
"""A cursor fetch strategy with row buffering behavior.
This strategy buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
    for additional rows up to the ``max_row_buffer`` size, which defaults
to 1000::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute(text("select * from table"))
.. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
.. seealso::
:ref:`psycopg2_execution_options`
"""
__slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize", "_growth_factor")
def __init__(
self,
dbapi_cursor,
execution_options,
growth_factor=5,
initial_buffer=None,
):
self._max_row_buffer = execution_options.get("max_row_buffer", 1000)
if initial_buffer is not None:
self._rowbuffer = initial_buffer
else:
self._rowbuffer = collections.deque(dbapi_cursor.fetchmany(1))
self._growth_factor = growth_factor
if growth_factor:
self._bufsize = min(self._max_row_buffer, self._growth_factor)
else:
self._bufsize = self._max_row_buffer
@classmethod
def create(cls, result):
return BufferedRowCursorFetchStrategy(
result.cursor, result.context.execution_options,
)
def _buffer_rows(self, result, dbapi_cursor):
size = self._bufsize
try:
if size < 1:
new_rows = dbapi_cursor.fetchall()
else:
new_rows = dbapi_cursor.fetchmany(size)
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
if not new_rows:
return
self._rowbuffer = collections.deque(new_rows)
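        # grow the next buffered fetch geometrically (e.g. 5, 25, 125 rows
        # with the default growth_factor of 5), capped at _max_row_buffer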
if self._growth_factor and size < self._max_row_buffer:
self._bufsize = min(
self._max_row_buffer, size * self._growth_factor
)
def yield_per(self, result, dbapi_cursor, num):
self._growth_factor = 0
self._max_row_buffer = self._bufsize = num
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super(BufferedRowCursorFetchStrategy, self).soft_close(
result, dbapi_cursor
)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super(BufferedRowCursorFetchStrategy, self).hard_close(
result, dbapi_cursor
)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if not self._rowbuffer:
self._buffer_rows(result, dbapi_cursor)
if not self._rowbuffer:
try:
result._soft_close(hard=hard_close)
except BaseException as e:
                    self.handle_exception(result, dbapi_cursor, e)
return None
return self._rowbuffer.popleft()
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
lb = len(buf)
if size > lb:
try:
buf.extend(dbapi_cursor.fetchmany(size - lb))
except BaseException as e:
                self.handle_exception(result, dbapi_cursor, e)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
def fetchall(self, result, dbapi_cursor):
try:
ret = list(self._rowbuffer) + list(dbapi_cursor.fetchall())
self._rowbuffer.clear()
result._soft_close()
return ret
except BaseException as e:
self.handle_exception(result, dbapi_cursor, e)
class FullyBufferedCursorFetchStrategy(CursorFetchStrategy):
"""A cursor strategy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
after the database conversation can not be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
__slots__ = ("_rowbuffer", "alternate_cursor_description")
def __init__(
self, dbapi_cursor, alternate_description=None, initial_buffer=None
):
self.alternate_cursor_description = alternate_description
if initial_buffer is not None:
self._rowbuffer = collections.deque(initial_buffer)
else:
self._rowbuffer = collections.deque(dbapi_cursor.fetchall())
def yield_per(self, result, dbapi_cursor, num):
pass
def soft_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super(FullyBufferedCursorFetchStrategy, self).soft_close(
result, dbapi_cursor
)
def hard_close(self, result, dbapi_cursor):
self._rowbuffer.clear()
super(FullyBufferedCursorFetchStrategy, self).hard_close(
result, dbapi_cursor
)
def fetchone(self, result, dbapi_cursor, hard_close=False):
if self._rowbuffer:
return self._rowbuffer.popleft()
else:
result._soft_close(hard=hard_close)
return None
def fetchmany(self, result, dbapi_cursor, size=None):
if size is None:
return self.fetchall(result, dbapi_cursor)
buf = list(self._rowbuffer)
rows = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
if not rows:
result._soft_close()
return rows
def fetchall(self, result, dbapi_cursor):
ret = self._rowbuffer
self._rowbuffer = collections.deque()
result._soft_close()
return ret
class _NoResultMetaData(ResultMetaData):
__slots__ = ()
returns_rows = False
def _we_dont_return_rows(self, err=None):
util.raise_(
exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically."
),
replace_context=err,
)
def _index_for_key(self, keys, raiseerr):
self._we_dont_return_rows()
def _metadata_for_keys(self, key):
self._we_dont_return_rows()
def _reduce(self, keys):
self._we_dont_return_rows()
@property
def _keymap(self):
self._we_dont_return_rows()
@property
def keys(self):
self._we_dont_return_rows()
_NO_RESULT_METADATA = _NoResultMetaData()
class BaseCursorResult(object):
"""Base class for database result objects.
"""
out_parameters = None
_metadata = None
_soft_closed = False
closed = False
def __init__(self, context, cursor_strategy, cursor_description):
self.context = context
self.dialect = context.dialect
self.cursor = context.cursor
self.cursor_strategy = cursor_strategy
self.connection = context.root_connection
self._echo = echo = (
self.connection._echo and context.engine._should_log_debug()
)
if cursor_description is not None:
# inline of Result._row_getter(), set up an initial row
# getter assuming no transformations will be called as this
# is the most common case
if echo:
log = self.context.engine.logger.debug
def log_row(row):
log("Row %r", sql_util._repr_row(row))
return row
self._row_logging_fn = log_row
else:
log_row = None
metadata = self._init_metadata(context, cursor_description)
keymap = metadata._keymap
processors = metadata._processors
process_row = self._process_row
key_style = process_row._default_key_style
_make_row = functools.partial(
process_row, metadata, processors, keymap, key_style
)
if log_row:
def make_row(row):
made_row = _make_row(row)
log_row(made_row)
return made_row
self._row_getter = make_row
else:
make_row = _make_row
self._set_memoized_attribute("_row_getter", make_row)
else:
self._metadata = _NO_RESULT_METADATA
def _init_metadata(self, context, cursor_description):
if context.compiled:
if context.compiled._cached_metadata:
metadata = self.context.compiled._cached_metadata
else:
metadata = self._cursor_metadata(self, cursor_description)
if metadata._safe_for_cache:
context.compiled._cached_metadata = metadata
# result rewrite/ adapt step. this is to suit the case
# when we are invoked against a cached Compiled object, we want
# to rewrite the ResultMetaData to reflect the Column objects
# that are in our current SQL statement object, not the one
# that is associated with the cached Compiled object.
# the Compiled object may also tell us to not
# actually do this step; this is to support the ORM where
# it is to produce a new Result object in any case, and will
# be using the cached Column objects against this database result
# so we don't want to rewrite them.
#
# Basically this step suits the use case where the end user
# is using Core SQL expressions and is accessing columns in the
# result row using row._mapping[table.c.column].
compiled = context.compiled
if (
compiled
and compiled._result_columns
and context.cache_hit is context.dialect.CACHE_HIT
and not compiled._rewrites_selected_columns
and compiled.statement is not context.invoked_statement
):
metadata = metadata._adapt_to_context(context)
self._metadata = metadata
else:
self._metadata = metadata = self._cursor_metadata(
self, cursor_description
)
if self._echo:
context.engine.logger.debug(
"Col %r", tuple(x[0] for x in cursor_description)
)
return metadata
def _soft_close(self, hard=False):
"""Soft close this :class:`_engine.CursorResult`.
This releases all DBAPI cursor resources, but leaves the
CursorResult "open" from a semantic perspective, meaning the
fetchXXX() methods will continue to return empty results.
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
This method is **not public**, but is documented in order to clarify
the "autoclose" process used.
.. versionadded:: 1.0.0
.. seealso::
:meth:`_engine.CursorResult.close`
"""
if (not hard and self._soft_closed) or (hard and self.closed):
return
if hard:
self.closed = True
self.cursor_strategy.hard_close(self, self.cursor)
else:
self.cursor_strategy.soft_close(self, self.cursor)
if not self._soft_closed:
cursor = self.cursor
self.cursor = None
self.connection._safe_close_cursor(cursor)
self._soft_closed = True
@property
def inserted_primary_key_rows(self):
"""Return a list of tuples, each containing the primary key for each row
just inserted.
Usually, this method will return at most a list with a single
entry which is the same row one would get back from
:attr:`_engine.CursorResult.inserted_primary_key`. To support
"executemany with INSERT" mode, multiple rows can be part of the
list returned.
.. versionadded:: 1.4
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used."
)
return self.context.inserted_primary_key_rows
@property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`_expression.insert`
constructs which did not explicitly specify
:meth:`_expression.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`_schema.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if self.context.executemany:
raise exc.InvalidRequestError(
"This statement was an executemany call; if primary key "
"returning is supported, please "
"use .inserted_primary_key_rows."
)
ikp = self.inserted_primary_key_rows
if ikp:
return ikp[0]
else:
return None
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults_rows(self):
"""Return a list of rows each containing the values of default
columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The return value is a list of :class:`.Row` objects.
.. versionadded:: 1.4
"""
return self.context.returned_default_rows
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.Row`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
if self.context.executemany:
raise exc.InvalidRequestError(
"This statement was an executemany call; if return defaults "
"is supported, please use .returned_defaults_rows."
)
rows = self.context.returned_default_rows
if rows:
return rows[0]
else:
return None
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`_engine.CursorResult.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`_engine.CursorResult.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`_engine.CursorResult.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`_engine.CursorResult.rowcount`
is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`_engine.CursorResult.rowcount`
may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`_engine.CursorResult.supports_sane_rowcount` and
:meth:`_engine.CursorResult.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)
@property
def lastrowid(self):
"""Return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~CursorResult.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self.context.get_lastrowid()
except BaseException as e:
            self.cursor_strategy.handle_exception(self, self.cursor, e)
@property
def returns_rows(self):
"""True if this :class:`_engine.CursorResult` returns zero or more rows.
I.e. if it is legal to call the methods
:meth:`_engine.CursorResult.fetchone`,
:meth:`_engine.CursorResult.fetchmany`
:meth:`_engine.CursorResult.fetchall`.
Overall, the value of :attr:`_engine.CursorResult.returns_rows` should
always be synonymous with whether or not the DBAPI cursor had a
``.description`` attribute, indicating the presence of result columns,
noting that a cursor that returns zero rows still has a
``.description`` if a row-returning statement was emitted.
This attribute should be True for all results that are against
SELECT statements, as well as for DML statements INSERT/UPDATE/DELETE
that use RETURNING. For INSERT/UPDATE/DELETE statements that were
not using RETURNING, the value will usually be False, however
there are some dialect-specific exceptions to this, such as when
using the MSSQL / pyodbc dialect a SELECT is emitted inline in
order to retrieve an inserted primary key value.
"""
return self._metadata.returns_rows
@property
def is_insert(self):
"""True if this :class:`_engine.CursorResult` is the result
    of executing an expression language compiled
:func:`_expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
class CursorResult(BaseCursorResult, Result):
"""A Result that is representing state from a DBAPI cursor.
.. versionchanged:: 1.4 The :class:`.CursorResult` and
:class:`.LegacyCursorResult`
classes replace the previous :class:`.ResultProxy` interface.
These classes are based on the :class:`.Result` calling API
which provides an updated usage model and calling facade for
SQLAlchemy Core and SQLAlchemy ORM.
Returns database rows via the :class:`.Row` class, which provides
additional API features and behaviors on top of the raw data returned by
the DBAPI. Through the use of filters such as the :meth:`.Result.scalars`
method, other kinds of objects may also be returned.
Within the scope of the 1.x series of SQLAlchemy, Core SQL results in
version 1.4 return an instance of :class:`._engine.LegacyCursorResult`
which takes the place of the ``CursorResult`` class used for the 1.3 series
and previously. This object returns rows as :class:`.LegacyRow` objects,
which maintains Python mapping (i.e. dictionary) like behaviors upon the
object itself. Going forward, the :attr:`.Row._mapping` attribute should
be used for dictionary behaviors.
.. seealso::
:ref:`coretutorial_selecting` - introductory material for accessing
:class:`_engine.CursorResult` and :class:`.Row` objects.
"""
_cursor_metadata = CursorResultMetaData
_cursor_strategy_cls = CursorFetchStrategy
def _fetchiter_impl(self):
fetchone = self.cursor_strategy.fetchone
while True:
row = fetchone(self, self.cursor)
if row is None:
break
yield row
def _fetchone_impl(self, hard_close=False):
return self.cursor_strategy.fetchone(self, self.cursor, hard_close)
def _fetchall_impl(self):
return self.cursor_strategy.fetchall(self, self.cursor)
def _fetchmany_impl(self, size=None):
return self.cursor_strategy.fetchmany(self, self.cursor, size)
def _raw_row_iterator(self):
return self._fetchiter_impl()
def merge(self, *others):
merged_result = super(CursorResult, self).merge(*others)
setup_rowcounts = not self._metadata.returns_rows
if setup_rowcounts:
merged_result.rowcount = sum(
result.rowcount for result in (self,) + others
)
return merged_result
def close(self):
"""Close this :class:`_engine.CursorResult`.
This closes out the underlying DBAPI cursor corresponding to the
statement execution, if one is still present. Note that the DBAPI
cursor is automatically released when the :class:`_engine.CursorResult`
exhausts all available rows. :meth:`_engine.CursorResult.close` is
generally an optional method except in the case when discarding a
:class:`_engine.CursorResult` that still has additional rows pending
for fetch.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. seealso::
:ref:`connections_toplevel`
"""
self._soft_close(hard=True)
@_generative
def yield_per(self, num):
self._yield_per = num
self.cursor_strategy.yield_per(self, self.cursor, num)
class LegacyCursorResult(CursorResult):
"""Legacy version of :class:`.CursorResult`.
    This class includes the "connection autoclose" behavior for use with
"connectionless" execution, as well as delivers rows using the
:class:`.LegacyRow` row implementation.
.. versionadded:: 1.4
"""
_autoclose_connection = False
_process_row = LegacyRow
_cursor_metadata = LegacyCursorResultMetaData
_cursor_strategy_cls = CursorFetchStrategy
def close(self):
"""Close this :class:`_engine.LegacyCursorResult`.
This method has the same behavior as that of
:meth:`._engine.CursorResult`, but it also may close
the underlying :class:`.Connection` for the case of "connectionless"
execution.
.. deprecated:: 2.0 "connectionless" execution is deprecated and will
be removed in version 2.0. Version 2.0 will feature the
:class:`_future.Result`
object that will no longer affect the status
of the originating connection in any case.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. seealso::
:ref:`connections_toplevel`
:ref:`dbengine_implicit`
"""
self._soft_close(hard=True)
def _soft_close(self, hard=False):
soft_closed = self._soft_closed
super(LegacyCursorResult, self)._soft_close(hard=hard)
if (
not soft_closed
and self._soft_closed
and self._autoclose_connection
):
self.connection.close()
ResultProxy = LegacyCursorResult
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
.. deprecated:: 1.4 this class is now supplied using a strategy object.
See :class:`.BufferedRowCursorFetchStrategy`.
"""
_cursor_strategy_cls = BufferedRowCursorFetchStrategy
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
.. deprecated:: 1.4 this class is now supplied using a strategy object.
See :class:`.FullyBufferedCursorFetchStrategy`.
"""
_cursor_strategy_cls = FullyBufferedCursorFetchStrategy
class BufferedColumnRow(LegacyRow):
"""Row is now BufferedColumn in all cases"""
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
.. versionchanged:: 1.4 This is now the default behavior of the Row
and this class does not change behavior in any way.
"""
_process_row = BufferedColumnRow
|
[
"functools.partial",
"collections.deque"
] |
[((35366, 35393), 'collections.deque', 'collections.deque', (['new_rows'], {}), '(new_rows)\n', (35383, 35393), False, 'import collections\n'), ((36952, 36981), 'collections.deque', 'collections.deque', (['buf[size:]'], {}), '(buf[size:])\n', (36969, 36981), False, 'import collections\n'), ((38951, 38980), 'collections.deque', 'collections.deque', (['buf[size:]'], {}), '(buf[size:])\n', (38968, 38980), False, 'import collections\n'), ((39158, 39177), 'collections.deque', 'collections.deque', ([], {}), '()\n', (39175, 39177), False, 'import collections\n'), ((37931, 37964), 'collections.deque', 'collections.deque', (['initial_buffer'], {}), '(initial_buffer)\n', (37948, 37964), False, 'import collections\n'), ((41375, 41446), 'functools.partial', 'functools.partial', (['process_row', 'metadata', 'processors', 'keymap', 'key_style'], {}), '(process_row, metadata, processors, keymap, key_style)\n', (41392, 41446), False, 'import functools\n'), ((31993, 32012), 'collections.deque', 'collections.deque', ([], {}), '()\n', (32010, 32012), False, 'import collections\n')]
|
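The `BufferedRowCursorFetchStrategy` docstring above shows the execution options that activate row buffering; here is that usage fleshed out as a hedged sketch (connection URL and table name are placeholders):

from sqlalchemy import create_engine, text

engine = create_engine("postgresql+psycopg2://user:pass@localhost/mydb")
with engine.connect() as conn:
    result = conn.execution_options(
        stream_results=True, max_row_buffer=50  # cap the row buffer at 50
    ).execute(text("SELECT * FROM big_table"))
    for row in result:  # rows arrive through the growing buffer
        pass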
from __future__ import absolute_import
# usage example:
#
# ARVADOS_API_TOKEN=abc ARVADOS_API_HOST=arvados.local python -m unittest discover
import unittest
import arvados
import apiclient
from . import run_test_server
class PipelineTemplateTest(run_test_server.TestCaseWithServers):
MAIN_SERVER = {}
KEEP_SERVER = {}
def runTest(self):
run_test_server.authorize_with("admin")
pt_uuid = arvados.api('v1').pipeline_templates().create(
body={'name':__file__}
).execute()['uuid']
self.assertEqual(len(pt_uuid), 27,
'Unexpected format of pipeline template UUID ("%s")'
% pt_uuid)
components = {
'x': 'x',
'-x-': [1,2,{'foo':'bar'}],
'Boggis': {'Bunce': '[\'Bean\']'},
'SpassBox': True,
'spass_box': False,
'spass-box': [True, 'Maybe', False]
}
update_response = arvados.api('v1').pipeline_templates().update(
uuid=pt_uuid,
body={'components':components}
).execute()
self.assertEqual('uuid' in update_response, True,
'update() response did not include a uuid')
self.assertEqual(update_response['uuid'], pt_uuid,
'update() response has a different uuid (%s, not %s)'
% (update_response['uuid'], pt_uuid))
self.assertEqual(update_response['name'], __file__,
'update() response has a different name (%s, not %s)'
% (update_response['name'], __file__))
get_response = arvados.api('v1').pipeline_templates().get(
uuid=pt_uuid
).execute()
self.assertEqual(get_response['components'], components,
'components got munged by server (%s -> %s)'
                         % (components, get_response['components']))
delete_response = arvados.api('v1').pipeline_templates().delete(
uuid=pt_uuid
).execute()
self.assertEqual(delete_response['uuid'], pt_uuid,
'delete() response has wrong uuid (%s, not %s)'
% (delete_response['uuid'], pt_uuid))
with self.assertRaises(apiclient.errors.HttpError):
geterror_response = arvados.api('v1').pipeline_templates().get(
uuid=pt_uuid
).execute()
|
[
"arvados.api"
] |
[((971, 988), 'arvados.api', 'arvados.api', (['"""v1"""'], {}), "('v1')\n", (982, 988), False, 'import arvados\n'), ((1665, 1682), 'arvados.api', 'arvados.api', (['"""v1"""'], {}), "('v1')\n", (1676, 1682), False, 'import arvados\n'), ((1991, 2008), 'arvados.api', 'arvados.api', (['"""v1"""'], {}), "('v1')\n", (2002, 2008), False, 'import arvados\n'), ((418, 435), 'arvados.api', 'arvados.api', (['"""v1"""'], {}), "('v1')\n", (429, 435), False, 'import arvados\n'), ((2374, 2391), 'arvados.api', 'arvados.api', (['"""v1"""'], {}), "('v1')\n", (2385, 2391), False, 'import arvados\n')]
|
#
# File: vrfcode.py
# Copyright: Grimm Project, Ren Pin NGO, all rights reserved.
# License: MIT
# -------------------------------------------------------------------------
# Authors: <NAME>(<EMAIL>)
#
# Description: generate transaction ID and verification code,
# keeping them unique.
#
# To-Dos:
#    1. Make further improvements as needed.
#
# Issues:
# No issue so far.
#
# Revision History (Date, Editor, Description):
# 1. 2019/09/19, Ming, create first revision.
#
import re
import uuid
import time
import random
import string
from flask import url_for
from itsdangerous import URLSafeTimedSerializer
from server import sys_logger
from server.core import grimm
from server.utils.misctools import get_host_ip, is_ipv4_addr
from server.core.const import HOST, PORT
from server.core.const import DEFAULT_SERIAL_NO_BYTES, DEFAULT_VRFCODE_BYTES, DEFAULT_PROTOCOL
VRFCODE_POOL = {}
def new_serial_number(_bytes=DEFAULT_SERIAL_NO_BYTES):
'''generate new serial number'''
return uuid.uuid1().hex[0:_bytes]
def new_vrfcode(_bytes=DEFAULT_VRFCODE_BYTES):
'''generate new verification code'''
global VRFCODE_POOL
code = ''.join(random.choices(string.digits, k=_bytes))
if code in VRFCODE_POOL:
        return new_vrfcode(_bytes)
else:
start = int(time.time())
VRFCODE_POOL[code] = start
return code
def check_vrfcode_expiry(code, limit=600):
'''check code expiry, return True if not expired, otherwise False'''
global VRFCODE_POOL
if isinstance(code, bytes):
code = code.decode('utf8')
if isinstance(code, int):
code = '%06d' % (code)
if isinstance(code, str):
if code in VRFCODE_POOL:
start = VRFCODE_POOL[code]
else:
sys_logger.error('invalid verification code: %s', code)
return False
else:
err = TypeError('invalid type for parameter: code')
        sys_logger.error(str(err))  # TypeError has no .message attribute in Python 3
raise err
duration = time.time() - start
if duration > limit:
del VRFCODE_POOL[code]
return False
return True
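# A minimal usage sketch (illustrative only; uses the two functions above):
#
#     code = new_vrfcode()            # e.g. '483920', unique while pooled
#     ...                             # deliver the code to the user
#     if check_vrfcode_expiry(code):  # True within the 600-second window
#         ...                         # proceed with verification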
def new_vrfurl(email):
'''generate new confirm verification email url'''
serializer = URLSafeTimedSerializer(grimm.config['SECRET_KEY'])
token = serializer.dumps(email, salt=grimm.config['SECURITY_PASSWORD_SALT'])
server_port = PORT
server_host = get_host_ip() if is_ipv4_addr(HOST) or HOST == 'localhost' else HOST
    vrfurl = DEFAULT_PROTOCOL + '://' + server_host + ':' + str(server_port) + '/email?token=' + token
return vrfurl
def parse_vrftoken(token):
'''confirm email token with certain expiration time'''
serializer = URLSafeTimedSerializer(grimm.config['SECRET_KEY'])
try:
addr = serializer.loads(
token,
salt=grimm.config['SECURITY_PASSWORD_SALT'])
except:
return None
return addr
|
[
"itsdangerous.URLSafeTimedSerializer",
"random.choices",
"server.sys_logger.error",
"time.time",
"uuid.uuid1",
"server.utils.misctools.is_ipv4_addr",
"server.utils.misctools.get_host_ip"
] |
[((2180, 2230), 'itsdangerous.URLSafeTimedSerializer', 'URLSafeTimedSerializer', (["grimm.config['SECRET_KEY']"], {}), "(grimm.config['SECRET_KEY'])\n", (2202, 2230), False, 'from itsdangerous import URLSafeTimedSerializer\n'), ((2651, 2701), 'itsdangerous.URLSafeTimedSerializer', 'URLSafeTimedSerializer', (["grimm.config['SECRET_KEY']"], {}), "(grimm.config['SECRET_KEY'])\n", (2673, 2701), False, 'from itsdangerous import URLSafeTimedSerializer\n'), ((1156, 1195), 'random.choices', 'random.choices', (['string.digits'], {'k': '_bytes'}), '(string.digits, k=_bytes)\n', (1170, 1195), False, 'import random\n'), ((1906, 1935), 'server.sys_logger.error', 'sys_logger.error', (['err.message'], {}), '(err.message)\n', (1922, 1935), False, 'from server import sys_logger\n'), ((1970, 1981), 'time.time', 'time.time', ([], {}), '()\n', (1979, 1981), False, 'import time\n'), ((2353, 2366), 'server.utils.misctools.get_host_ip', 'get_host_ip', ([], {}), '()\n', (2364, 2366), False, 'from server.utils.misctools import get_host_ip, is_ipv4_addr\n'), ((996, 1008), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1006, 1008), False, 'import uuid\n'), ((1285, 1296), 'time.time', 'time.time', ([], {}), '()\n', (1294, 1296), False, 'import time\n'), ((1747, 1802), 'server.sys_logger.error', 'sys_logger.error', (['"""invalid verification code: %s"""', 'code'], {}), "('invalid verification code: %s', code)\n", (1763, 1802), False, 'from server import sys_logger\n'), ((2370, 2388), 'server.utils.misctools.is_ipv4_addr', 'is_ipv4_addr', (['HOST'], {}), '(HOST)\n', (2382, 2388), False, 'from server.utils.misctools import get_host_ip, is_ipv4_addr\n')]
|
import os
import pyomo
import pyomo.environ as pe
from pyomo.opt import SolverStatus, TerminationCondition
from blocks.economic_dispatch import EconomicDispatch
from manage_data.import_data import import_data
def run_market_clearing(
edp: EconomicDispatch
):
################################################################################
# Initiate pyomo model
################################################################################
m = pe.ConcreteModel()
# Make duals available
m.dual = pe.Suffix(direction=pe.Suffix.IMPORT)
# Set of all generators
m.generators = pe.Set(initialize=[g for g in edp.generators.values()], dimen=1)
################################################################################
# Variables
################################################################################
# Production of each generator (>= 0)
m.production = pe.Var(m.generators, domain=pe.NonNegativeReals)
################################################################################
# Objective
# Total production cost
################################################################################
def Total_Social_Cost(m):
return (
+ sum(g.c2 * m.production[g] * m.production[g]
+ g.c1 * m.production[g] for g in m.generators)
)
m.objective = pe.Objective(rule=Total_Social_Cost, sense=pe.minimize)
################################################################################
# Constraints
################################################################################
# Production capacity
def Production_Upper_Bound_Rule(m, g):
return m.production[g] <= g.capacity
m.Production_Upper_Bound = pe.Constraint(m.generators, rule=Production_Upper_Bound_Rule)
# Market balance
def Market_Balance_Rule(m):
return sum(m.production[g] for g in m.generators) == edp.load
m.Market_Balance = pe.Constraint(rule=Market_Balance_Rule)
################################################################################
# solve the model
################################################################################
solver = pyomo.opt.SolverFactory('gams')
result = solver.solve(m, tee=False, keepfiles=False) # tee: show solver info or not
if result.solver.termination_condition == TerminationCondition.optimal or \
result.solver.termination_condition == TerminationCondition.locallyOptimal:
# m.display() # display all the results
print("-" * 50)
print(" " * 10 + "Generator| Production |")
print("-" * 50)
for g in m.generators:
g.production = round(m.production[g].value, ndigits=6)
print(f"{str(g):15s} | {g.production:3.4f}|")
market_clearing_price = round(m.dual[m.Market_Balance], ndigits=6)
print("-" * 50)
print(f"The market clearing price is {market_clearing_price}.")
print("Model has been solved.")
else:
print("Something went wrong.")
if __name__ == "__main__":
data_dir = os.path.join(os.path.dirname(__file__), "data")
file_name = "test.xlsx"
file_path = os.path.join(data_dir, file_name)
edp_test = import_data(file_path)
run_market_clearing(edp_test)
|
[
"os.path.join",
"pyomo.environ.Suffix",
"pyomo.environ.Constraint",
"pyomo.environ.Var",
"os.path.dirname",
"pyomo.environ.Objective",
"pyomo.environ.ConcreteModel",
"pyomo.opt.SolverFactory",
"manage_data.import_data.import_data"
] |
[((492, 510), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (508, 510), True, 'import pyomo.environ as pe\n'), ((553, 590), 'pyomo.environ.Suffix', 'pe.Suffix', ([], {'direction': 'pe.Suffix.IMPORT'}), '(direction=pe.Suffix.IMPORT)\n', (562, 590), True, 'import pyomo.environ as pe\n'), ((959, 1007), 'pyomo.environ.Var', 'pe.Var', (['m.generators'], {'domain': 'pe.NonNegativeReals'}), '(m.generators, domain=pe.NonNegativeReals)\n', (965, 1007), True, 'import pyomo.environ as pe\n'), ((1436, 1491), 'pyomo.environ.Objective', 'pe.Objective', ([], {'rule': 'Total_Social_Cost', 'sense': 'pe.minimize'}), '(rule=Total_Social_Cost, sense=pe.minimize)\n', (1448, 1491), True, 'import pyomo.environ as pe\n'), ((1836, 1897), 'pyomo.environ.Constraint', 'pe.Constraint', (['m.generators'], {'rule': 'Production_Upper_Bound_Rule'}), '(m.generators, rule=Production_Upper_Bound_Rule)\n', (1849, 1897), True, 'import pyomo.environ as pe\n'), ((2052, 2091), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'rule': 'Market_Balance_Rule'}), '(rule=Market_Balance_Rule)\n', (2065, 2091), True, 'import pyomo.environ as pe\n'), ((2303, 2334), 'pyomo.opt.SolverFactory', 'pyomo.opt.SolverFactory', (['"""gams"""'], {}), "('gams')\n", (2326, 2334), False, 'import pyomo\n'), ((3325, 3358), 'os.path.join', 'os.path.join', (['data_dir', 'file_name'], {}), '(data_dir, file_name)\n', (3337, 3358), False, 'import os\n'), ((3375, 3397), 'manage_data.import_data.import_data', 'import_data', (['file_path'], {}), '(file_path)\n', (3386, 3397), False, 'from manage_data.import_data import import_data\n'), ((3244, 3269), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3259, 3269), False, 'import os\n')]
|
import math
import random
from os.path import basename
from urllib.request import urlopen
from urllib.parse import unquote
from gi.repository import Gtk, GObject, Pango
from pychess.compat import create_task
from pychess.Players.Human import Human
from pychess.Players.engineNest import discoverer
from pychess.System import uistuff, conf
from pychess.Utils.GameModel import GameModel
from pychess.Utils.IconLoader import load_icon, get_pixbuf
from pychess.Utils.TimeModel import TimeModel
from pychess.Utils.const import (
LOCAL,
ARTIFICIAL,
WHITE,
BLACK,
NORMALCHESS,
LECTURE,
LESSON,
PUZZLE,
ENDGAME,
)
from pychess.Variants import variants
from pychess.ic import ICLogon
from pychess.widgets import newGameDialog
from pychess.widgets.Background import giveBackground
from pychess.widgets.RecentChooser import recent_manager, recent_menu
from pychess.perspectives import perspective_manager
from pychess.perspectives.games import get_open_dialog
from pychess.perspectives.learn.LecturesPanel import LECTURES, start_lecture_from
from pychess.perspectives.learn.EndgamesPanel import ENDGAMES, start_endgame_from
from pychess.perspectives.learn.LessonsPanel import LESSONS, start_lesson_from
from pychess.perspectives.learn.PuzzlesPanel import PUZZLES, start_puzzle_from
class TaskerManager(Gtk.Table):
def __init__(self):
GObject.GObject.__init__(self)
self.border = 20
giveBackground(self)
self.connect("draw", self.expose)
# self.set_homogeneous(True)
def expose(self, widget, ctx):
cairo_win = widget.get_window().cairo_create()
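        # Draw a rounded rectangle (corner radius self.border) behind each
        # tasker widget, fill it with the theme background color, then add
        # a darker strip along its bottom edge.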
for widget in self.widgets:
x_loc = widget.get_allocation().x
y_loc = widget.get_allocation().y
width = widget.get_allocation().width
height = widget.get_allocation().height
cairo_win.move_to(x_loc - self.border, y_loc)
cairo_win.curve_to(
x_loc - self.border,
y_loc - self.border / 2.0,
x_loc - self.border / 2.0,
y_loc - self.border,
x_loc,
y_loc - self.border,
)
cairo_win.line_to(x_loc + width, y_loc - self.border)
cairo_win.curve_to(
x_loc + width + self.border / 2.0,
y_loc - self.border,
x_loc + width + self.border,
y_loc - self.border / 2.0,
x_loc + width + self.border,
y_loc,
)
cairo_win.line_to(x_loc + width + self.border, y_loc + height)
cairo_win.curve_to(
x_loc + width + self.border,
y_loc + height + self.border / 2.0,
x_loc + width + self.border / 2.0,
y_loc + height + self.border,
x_loc + width,
y_loc + height + self.border,
)
cairo_win.line_to(x_loc, y_loc + height + self.border)
cairo_win.curve_to(
x_loc - self.border / 2.0,
y_loc + height + self.border,
x_loc - self.border,
y_loc + height + self.border / 2.0,
x_loc - self.border,
y_loc + height,
)
style_ctxt = self.get_style_context()
bgcolor = style_ctxt.lookup_color("p_bg_color")[1]
darkcolor = style_ctxt.lookup_color("p_dark_color")[1]
cairo_win.set_source_rgba(
bgcolor.red, bgcolor.green, bgcolor.blue, bgcolor.alpha
)
cairo_win.fill()
cairo_win.rectangle(
x_loc - self.border, y_loc + height - 30, width + self.border * 2, 30
)
cairo_win.set_source_rgba(
darkcolor.red, darkcolor.green, darkcolor.blue, darkcolor.alpha
)
cairo_win.fill()
def calcSpacings(self, n):
""" Will yield ranges like
((.50,.50),)
((.66,.33), (.33,.66))
((.75,.25), (.50,.50), (.25,.75))
((.80,.20), (.60,.40), (.40,.60), (.20,.80))
Used to create the centering in the table """
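        # For n == 3, first == 0.75 and the loop yields (0.75, 0.25),
        # (0.50, 0.50), (0.25, 0.75), matching the docstring above.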
first = next = (n) / float(n + 1)
for i in range(n):
yield (next, 1 - next)
next = first - (1 - next)
def on_size_allocate(self, widget, allocation):
window = self.get_window()
if window is not None:
window.invalidate_rect(self.get_allocation(), False)
def packTaskers(self, *widgets):
self.widgets = widgets
for widget in widgets:
widget.connect("size-allocate", self.on_size_allocate)
root = math.sqrt(len(widgets))
# Calculate number of rows
rows = int(math.ceil(root))
# Calculate number of filled out rows
rrows = int(math.floor(root))
# Calculate number of cols in filled out rows
cols = int(math.ceil(len(widgets) / float(rows)))
# Calculate spacings
vspac = [s[0] for s in self.calcSpacings(rows)]
hspac = [s[0] for s in self.calcSpacings(cols)]
# Clear and set up new size
for child in self.get_children():
self.remove(child)
self.props.n_columns = cols
self.props.n_rows = rows
# Add filled out rows
for row in range(rows):
for col in range(cols):
widget = widgets[row * cols + col]
alignment = Gtk.Alignment.new(hspac[col], vspac[row], 0, 0)
alignment.add(widget)
self.attach(alignment, col, col + 1, row, row + 1)
return
# Add last row
if rows > rrows:
lastrow = Gtk.HBox()
# Calculate number of widgets in last row
numw = len(widgets) - cols * rrows
hspac = [s[0] for s in self.calcSpacings(numw)]
for col, widget in enumerate(widgets[-numw:]):
alignment = Gtk.Alignment.new(hspac[col], vspac[-1], 0, 0)
alignment.add(widget)
alignment.set_padding(
self.border, self.border, self.border, self.border
)
lastrow.pack_start(alignment, True, True, 0)
self.attach(lastrow, 0, cols, rrows, rrows + 1)
tasker = TaskerManager()
tasker_widgets = uistuff.GladeWidgets("taskers.glade")
class NewGameTasker(Gtk.Alignment):
def __init__(self):
GObject.GObject.__init__(self)
self.widgets = widgets = tasker_widgets
tasker = widgets["newGameTasker"]
tasker.unparent()
self.add(tasker)
startButton = self.widgets["startButton"]
startButton.set_name("startButton")
combo = Gtk.ComboBox()
uistuff.createCombo(
combo,
[
(get_pixbuf("glade/white.png"), _("White")),
(get_pixbuf("glade/black.png"), _("Black")),
(get_pixbuf("glade/random.png"), _("Random")),
],
)
widgets["colorDock"].add(combo)
if combo.get_active() < 0:
combo.set_active(0)
widgets["yourColorLabel"].set_mnemonic_widget(combo)
# We need to wait until after engines have been discovered, to init the
# playerCombos. We use connect_after to make sure, that newGameDialog
# has also had time to init the constants we share with them.
self.playerCombo = Gtk.ComboBox()
widgets["opponentDock"].add(self.playerCombo)
discoverer.connect_after(
"all_engines_discovered", self.__initPlayerCombo, widgets
)
widgets["opponentLabel"].set_mnemonic_widget(self.playerCombo)
def on_skill_changed(scale):
# Just to make sphinx happy...
try:
pix = newGameDialog.skillToIconLarge[int(scale.get_value())]
widgets["skillImage"].set_from_pixbuf(pix)
except TypeError:
pass
widgets["skillSlider"].connect("value-changed", on_skill_changed)
on_skill_changed(widgets["skillSlider"])
widgets["startButton"].connect("clicked", self.startClicked)
self.widgets["opendialog1"].connect("clicked", self.openDialogClicked)
def __initPlayerCombo(self, discoverer, widgets):
combo = self.playerCombo
uistuff.createCombo(combo, newGameDialog.playerItems[0])
if combo.get_active() < 0:
combo.set_active(1)
uistuff.keep(self.playerCombo, "newgametasker_playercombo")
def on_playerCombobox_changed(widget):
widgets["skillSlider"].props.visible = widget.get_active() > 0
combo.connect("changed", on_playerCombobox_changed)
uistuff.keep(widgets["skillSlider"], "taskerSkillSlider")
widgets["skillSlider"].set_no_show_all(True)
on_playerCombobox_changed(self.playerCombo)
def openDialogClicked(self, button):
newGameDialog.NewGameMode.run()
def startClicked(self, button):
color = self.widgets["colorDock"].get_child().get_active()
if color == 2:
color = random.choice([WHITE, BLACK])
opp = self.widgets["opponentDock"].get_child()
tree_iter = opp.get_active_iter()
if tree_iter is not None:
model = opp.get_model()
engine = model[tree_iter][1]
opponent = self.widgets["opponentDock"].get_child().get_active()
difficulty = int(self.widgets["skillSlider"].get_value())
gamemodel = GameModel(TimeModel(5 * 60, 0))
name = conf.get("firstName")
player0tup = (LOCAL, Human, (color, name), name)
if opponent == 0:
name = conf.get("secondName")
player1tup = (LOCAL, Human, (1 - color, name), name)
else:
engine = discoverer.getEngineByName(engine)
name = discoverer.getName(engine)
player1tup = (
ARTIFICIAL,
discoverer.initPlayerEngine,
(engine, 1 - color, difficulty, variants[NORMALCHESS], 5 * 60, 0),
name,
)
perspective = perspective_manager.get_perspective("games")
if color == WHITE:
create_task(perspective.generalStart(gamemodel, player0tup, player1tup))
else:
create_task(perspective.generalStart(gamemodel, player1tup, player0tup))
big_start = load_icon(48, "stock_init", "gnome-globe", "applications-internet")
class InternetGameTasker(Gtk.Alignment):
def __init__(self):
GObject.GObject.__init__(self)
self.widgets = tasker_widgets
tasker = self.widgets["internetGameTasker"]
tasker.unparent()
self.add(tasker)
if ICLogon.dialog is None:
ICLogon.dialog = ICLogon.ICLogon()
liststore = Gtk.ListStore(str)
liststore.append(["FICS"])
liststore.append(["ICC"])
self.ics_combo = self.widgets["ics_combo"]
self.ics_combo.set_model(liststore)
renderer_text = Gtk.CellRendererText()
self.ics_combo.pack_start(renderer_text, True)
self.ics_combo.add_attribute(renderer_text, "text", 0)
self.ics_combo.connect("changed", ICLogon.dialog.on_ics_combo_changed)
self.ics_combo.set_active(conf.get("ics_combo"))
self.widgets["connectButton"].connect("clicked", self.connectClicked)
self.widgets["opendialog2"].connect("clicked", self.openDialogClicked)
self.widgets["startIcon"].set_from_pixbuf(big_start)
uistuff.keep(self.widgets["ics_combo"], "ics_combo")
uistuff.keep(self.widgets["autoLogin"], "autoLogin")
def openDialogClicked(self, button):
ICLogon.run()
def connectClicked(self, button):
ICLogon.run()
if not ICLogon.dialog.connection:
ICLogon.dialog.widgets["connectButton"].clicked()
class LearnTasker(Gtk.Alignment):
def __init__(self):
GObject.GObject.__init__(self)
self.widgets = tasker_widgets
tasker = self.widgets["learnTasker"]
tasker.unparent()
self.add(tasker)
startButton = self.widgets["learnButton"]
startButton.set_name("learnButton")
categorystore = Gtk.ListStore(int, str)
learn_mapping = {
LECTURE: (_("Lectures"), LECTURES),
LESSON: (_("Lessons"), LESSONS),
PUZZLE: (_("Puzzles"), PUZZLES),
ENDGAME: (_("Endgames"), ENDGAMES),
}
for key, value in learn_mapping.items():
categorystore.append([key, value[0]])
self.category_combo = self.widgets["category_combo"]
self.category_combo.set_model(categorystore)
renderer = Gtk.CellRendererText()
self.category_combo.pack_start(renderer, True)
self.category_combo.add_attribute(renderer, "text", 1)
self.learnstore = Gtk.ListStore(str, str)
self.learn_combo = self.widgets["learn_combo"]
self.learn_combo.set_model(self.learnstore)
renderer_text = Gtk.CellRendererText()
renderer_text.set_property("width-chars", 30)
renderer_text.set_property("ellipsize", Pango.EllipsizeMode.END)
self.learn_combo.pack_start(renderer_text, True)
self.learn_combo.add_attribute(renderer_text, "text", 1)
self.learn_combo.set_active(0)
def on_category_changed(combo):
tree_iter = combo.get_active_iter()
if tree_iter is None:
return
else:
model = combo.get_model()
self.category = model[tree_iter][0]
self.learnstore.clear()
if self.category == LECTURE:
for file_name, title, author in LECTURES:
self.learnstore.append([file_name, title])
elif self.category == LESSON:
for file_name, title, author in LESSONS:
self.learnstore.append([file_name, title])
elif self.category == PUZZLE:
for file_name, title, author in PUZZLES:
self.learnstore.append([file_name, title])
elif self.category == ENDGAME:
for pieces, title in ENDGAMES:
self.learnstore.append([pieces, title])
learn = conf.get("learncombo%s" % self.category)
self.learn_combo.set_active(learn)
def on_learn_changed(combo):
tree_iter = combo.get_active_iter()
if tree_iter is None:
return
else:
model = combo.get_model()
newlearn = model.get_path(tree_iter)[0]
conf.set("learncombo%s" % self.category, newlearn)
self.learn_combo.connect("changed", on_learn_changed)
self.category_combo.connect("changed", on_category_changed)
self.category = conf.get("categorycombo")
self.category_combo.set_active(self.category)
uistuff.keep(self.widgets["category_combo"], "categorycombo")
self.widgets["opendialog4"].connect("clicked", self.openDialogClicked)
self.widgets["learnButton"].connect("clicked", self.learnClicked)
def openDialogClicked(self, button):
perspective = perspective_manager.get_perspective("learn")
perspective.activate()
def learnClicked(self, button):
perspective = perspective_manager.get_perspective("learn")
perspective.activate()
tree_iter = self.learn_combo.get_active_iter()
if tree_iter is None:
return
else:
model = self.learn_combo.get_model()
source = model[tree_iter][0]
if self.category == LECTURE:
start_lecture_from(source)
elif self.category == LESSON:
start_lesson_from(source)
elif self.category == PUZZLE:
start_puzzle_from(source)
elif self.category == ENDGAME:
start_endgame_from(source)
class DatabaseTasker(Gtk.Alignment):
def __init__(self):
GObject.GObject.__init__(self)
self.widgets = tasker_widgets
tasker = self.widgets["databaseTasker"]
tasker.unparent()
self.add(tasker)
startButton = self.widgets["openButton"]
startButton.set_name("openButton")
liststore = Gtk.ListStore(str, str)
self.recent_combo = self.widgets["recent_combo"]
self.recent_combo.set_model(liststore)
renderer_text = Gtk.CellRendererText()
renderer_text.set_property("width-chars", 30)
renderer_text.set_property("ellipsize", Pango.EllipsizeMode.END)
self.recent_combo.pack_start(renderer_text, True)
self.recent_combo.add_attribute(renderer_text, "text", 1)
self.on_recent_menu_changed(recent_manager, liststore)
recent_manager.connect("changed", self.on_recent_menu_changed, liststore)
self.widgets["opendialog3"].connect("clicked", self.openDialogClicked)
self.widgets["openButton"].connect("clicked", self.openClicked)
def on_recent_menu_changed(self, manager, liststore):
liststore.clear()
# Just to make sphinx happy...
try:
for uri in recent_menu.get_uris():
liststore.append((uri, basename(unquote(uri))))
except TypeError:
pass
self.recent_combo.set_active(0)
def openDialogClicked(self, button):
dialog = get_open_dialog()
response = dialog.run()
if response == Gtk.ResponseType.OK:
filenames = dialog.get_filenames()
else:
filenames = None
dialog.destroy()
if filenames is not None:
for filename in filenames:
if filename.lower().endswith(".fen"):
newGameDialog.loadFileAndRun(filename)
else:
perspective = perspective_manager.get_perspective("database")
perspective.open_chessfile(filename)
def openClicked(self, button):
if self.widgets["createNew"].get_active():
perspective = perspective_manager.get_perspective("database")
perspective.create_database()
else:
tree_iter = self.recent_combo.get_active_iter()
if tree_iter is None:
return
else:
model = self.recent_combo.get_model()
uri = model[tree_iter][0]
try:
urlopen(unquote(uri)).close()
perspective = perspective_manager.get_perspective("database")
perspective.open_chessfile(unquote(uri))
recent_manager.add_item(uri)
except (IOError, OSError):
                    # something went wrong with the uri
recent_manager.remove_item(uri)
new_game_tasker, internet_game_tasker, database_tasker, learn_tasker = (
NewGameTasker(),
InternetGameTasker(),
DatabaseTasker(),
LearnTasker(),
)
tasker.packTaskers(new_game_tasker, database_tasker, internet_game_tasker, learn_tasker)
|
[
"urllib.parse.unquote",
"pychess.perspectives.games.get_open_dialog",
"pychess.System.uistuff.createCombo",
"pychess.Players.engineNest.discoverer.connect_after",
"gi.repository.Gtk.ListStore",
"pychess.Utils.TimeModel.TimeModel",
"pychess.Players.engineNest.discoverer.getName",
"pychess.System.conf.get",
"gi.repository.Gtk.ComboBox",
"pychess.System.conf.set",
"gi.repository.Gtk.CellRendererText",
"gi.repository.Gtk.HBox",
"pychess.widgets.RecentChooser.recent_manager.remove_item",
"pychess.widgets.RecentChooser.recent_manager.connect",
"pychess.ic.ICLogon.run",
"pychess.Players.engineNest.discoverer.getEngineByName",
"pychess.widgets.newGameDialog.loadFileAndRun",
"pychess.ic.ICLogon.ICLogon",
"gi.repository.Gtk.Alignment.new",
"pychess.widgets.newGameDialog.NewGameMode.run",
"math.ceil",
"pychess.widgets.RecentChooser.recent_menu.get_uris",
"pychess.perspectives.learn.LessonsPanel.start_lesson_from",
"pychess.widgets.Background.giveBackground",
"pychess.System.uistuff.keep",
"gi.repository.GObject.GObject.__init__",
"pychess.widgets.RecentChooser.recent_manager.add_item",
"pychess.perspectives.learn.PuzzlesPanel.start_puzzle_from",
"math.floor",
"random.choice",
"pychess.perspectives.learn.EndgamesPanel.start_endgame_from",
"pychess.Utils.IconLoader.load_icon",
"pychess.System.uistuff.GladeWidgets",
"pychess.perspectives.learn.LecturesPanel.start_lecture_from",
"pychess.Utils.IconLoader.get_pixbuf",
"pychess.perspectives.perspective_manager.get_perspective"
] |
[((6396, 6433), 'pychess.System.uistuff.GladeWidgets', 'uistuff.GladeWidgets', (['"""taskers.glade"""'], {}), "('taskers.glade')\n", (6416, 6433), False, 'from pychess.System import uistuff, conf\n'), ((10499, 10566), 'pychess.Utils.IconLoader.load_icon', 'load_icon', (['(48)', '"""stock_init"""', '"""gnome-globe"""', '"""applications-internet"""'], {}), "(48, 'stock_init', 'gnome-globe', 'applications-internet')\n", (10508, 10566), False, 'from pychess.Utils.IconLoader import load_icon, get_pixbuf\n'), ((1375, 1405), 'gi.repository.GObject.GObject.__init__', 'GObject.GObject.__init__', (['self'], {}), '(self)\n', (1399, 1405), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((1439, 1459), 'pychess.widgets.Background.giveBackground', 'giveBackground', (['self'], {}), '(self)\n', (1453, 1459), False, 'from pychess.widgets.Background import giveBackground\n'), ((6504, 6534), 'gi.repository.GObject.GObject.__init__', 'GObject.GObject.__init__', (['self'], {}), '(self)\n', (6528, 6534), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((6787, 6801), 'gi.repository.Gtk.ComboBox', 'Gtk.ComboBox', ([], {}), '()\n', (6799, 6801), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((7498, 7512), 'gi.repository.Gtk.ComboBox', 'Gtk.ComboBox', ([], {}), '()\n', (7510, 7512), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((7575, 7662), 'pychess.Players.engineNest.discoverer.connect_after', 'discoverer.connect_after', (['"""all_engines_discovered"""', 'self.__initPlayerCombo', 'widgets'], {}), "('all_engines_discovered', self.__initPlayerCombo,\n widgets)\n", (7599, 7662), False, 'from pychess.Players.engineNest import discoverer\n'), ((8406, 8462), 'pychess.System.uistuff.createCombo', 'uistuff.createCombo', (['combo', 'newGameDialog.playerItems[0]'], {}), '(combo, newGameDialog.playerItems[0])\n', (8425, 8462), False, 'from pychess.System import uistuff, conf\n'), ((9032, 9063), 'pychess.widgets.newGameDialog.NewGameMode.run', 'newGameDialog.NewGameMode.run', ([], {}), '()\n', (9061, 9063), False, 'from pychess.widgets import newGameDialog\n'), ((9659, 9680), 'pychess.System.conf.get', 'conf.get', (['"""firstName"""'], {}), "('firstName')\n", (9667, 9680), False, 'from pychess.System import uistuff, conf\n'), ((10229, 10273), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""games"""'], {}), "('games')\n", (10264, 10273), False, 'from pychess.perspectives import perspective_manager\n'), ((10642, 10672), 'gi.repository.GObject.GObject.__init__', 'GObject.GObject.__init__', (['self'], {}), '(self)\n', (10666, 10672), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((10918, 10936), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['str'], {}), '(str)\n', (10931, 10936), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((11125, 11147), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (11145, 11147), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((11631, 11683), 'pychess.System.uistuff.keep', 'uistuff.keep', (["self.widgets['ics_combo']", '"""ics_combo"""'], {}), "(self.widgets['ics_combo'], 'ics_combo')\n", (11643, 11683), False, 'from pychess.System import uistuff, conf\n'), ((11692, 11744), 'pychess.System.uistuff.keep', 'uistuff.keep', (["self.widgets['autoLogin']", '"""autoLogin"""'], {}), "(self.widgets['autoLogin'], 'autoLogin')\n", (11704, 11744), False, 'from pychess.System import uistuff, conf\n'), ((11795, 11808), 'pychess.ic.ICLogon.run', 'ICLogon.run', ([], {}), '()\n', (11806, 11808), False, 'from pychess.ic import ICLogon\n'), ((11856, 11869), 'pychess.ic.ICLogon.run', 'ICLogon.run', ([], {}), '()\n', (11867, 11869), False, 'from pychess.ic import ICLogon\n'), ((12042, 12072), 'gi.repository.GObject.GObject.__init__', 'GObject.GObject.__init__', (['self'], {}), '(self)\n', (12066, 12072), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((12327, 12350), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['int', 'str'], {}), '(int, str)\n', (12340, 12350), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((12807, 12829), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (12827, 12829), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((12975, 12998), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['str', 'str'], {}), '(str, str)\n', (12988, 12998), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((13130, 13152), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (13150, 13152), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((15095, 15120), 'pychess.System.conf.get', 'conf.get', (['"""categorycombo"""'], {}), "('categorycombo')\n", (15103, 15120), False, 'from pychess.System import uistuff, conf\n'), ((15184, 15245), 'pychess.System.uistuff.keep', 'uistuff.keep', (["self.widgets['category_combo']", '"""categorycombo"""'], {}), "(self.widgets['category_combo'], 'categorycombo')\n", (15196, 15245), False, 'from pychess.System import uistuff, conf\n'), ((15464, 15508), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""learn"""'], {}), "('learn')\n", (15499, 15508), False, 'from pychess.perspectives import perspective_manager\n'), ((15599, 15643), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""learn"""'], {}), "('learn')\n", (15634, 15643), False, 'from pychess.perspectives import perspective_manager\n'), ((16262, 16292), 'gi.repository.GObject.GObject.__init__', 'GObject.GObject.__init__', (['self'], {}), '(self)\n', (16286, 16292), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((16544, 16567), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['str', 'str'], {}), '(str, str)\n', (16557, 16567), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((16697, 16719), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (16717, 16719), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((17043, 17116), 'pychess.widgets.RecentChooser.recent_manager.connect', 'recent_manager.connect', (['"""changed"""', 'self.on_recent_menu_changed', 'liststore'], {}), "('changed', self.on_recent_menu_changed, liststore)\n", (17065, 17116), False, 'from pychess.widgets.RecentChooser import recent_manager, recent_menu\n'), ((17659, 17676), 'pychess.perspectives.games.get_open_dialog', 'get_open_dialog', ([], {}), '()\n', (17674, 17676), False, 'from pychess.perspectives.games import get_open_dialog\n'), ((4802, 4817), 'math.ceil', 'math.ceil', (['root'], {}), '(root)\n', (4811, 4817), False, 'import math\n'), ((4885, 4901), 'math.floor', 'math.floor', (['root'], {}), '(root)\n', (4895, 4901), False, 'import math\n'), ((5758, 5768), 'gi.repository.Gtk.HBox', 'Gtk.HBox', ([], {}), '()\n', (5766, 5768), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((8542, 8601), 'pychess.System.uistuff.keep', 'uistuff.keep', (['self.playerCombo', '"""newgametasker_playercombo"""'], {}), "(self.playerCombo, 'newgametasker_playercombo')\n", (8554, 8601), False, 'from pychess.System import uistuff, conf\n'), ((8811, 8868), 'pychess.System.uistuff.keep', 'uistuff.keep', (["widgets['skillSlider']", '"""taskerSkillSlider"""'], {}), "(widgets['skillSlider'], 'taskerSkillSlider')\n", (8823, 8868), False, 'from pychess.System import uistuff, conf\n'), ((9211, 9240), 'random.choice', 'random.choice', (['[WHITE, BLACK]'], {}), '([WHITE, BLACK])\n', (9224, 9240), False, 'import random\n'), ((9621, 9641), 'pychess.Utils.TimeModel.TimeModel', 'TimeModel', (['(5 * 60)', '(0)'], {}), '(5 * 60, 0)\n', (9630, 9641), False, 'from pychess.Utils.TimeModel import TimeModel\n'), ((9783, 9805), 'pychess.System.conf.get', 'conf.get', (['"""secondName"""'], {}), "('secondName')\n", (9791, 9805), False, 'from pychess.System import uistuff, conf\n'), ((9906, 9940), 'pychess.Players.engineNest.discoverer.getEngineByName', 'discoverer.getEngineByName', (['engine'], {}), '(engine)\n', (9932, 9940), False, 'from pychess.Players.engineNest import discoverer\n'), ((9960, 9986), 'pychess.Players.engineNest.discoverer.getName', 'discoverer.getName', (['engine'], {}), '(engine)\n', (9978, 9986), False, 'from pychess.Players.engineNest import discoverer\n'), ((10879, 10896), 'pychess.ic.ICLogon.ICLogon', 'ICLogon.ICLogon', ([], {}), '()\n', (10894, 10896), False, 'from pychess.ic import ICLogon\n'), ((11379, 11400), 'pychess.System.conf.get', 'conf.get', (['"""ics_combo"""'], {}), "('ics_combo')\n", (11387, 11400), False, 'from pychess.System import uistuff, conf\n'), ((15934, 15960), 'pychess.perspectives.learn.LecturesPanel.start_lecture_from', 'start_lecture_from', (['source'], {}), '(source)\n', (15952, 15960), False, 'from pychess.perspectives.learn.LecturesPanel import LECTURES, start_lecture_from\n'), ((17429, 17451), 'pychess.widgets.RecentChooser.recent_menu.get_uris', 'recent_menu.get_uris', ([], {}), '()\n', (17449, 17451), False, 'from pychess.widgets.RecentChooser import recent_manager, recent_menu\n'), ((18331, 18378), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""database"""'], {}), "('database')\n", (18366, 18378), False, 'from pychess.perspectives import perspective_manager\n'), ((5518, 5565), 'gi.repository.Gtk.Alignment.new', 'Gtk.Alignment.new', (['hspac[col]', 'vspac[row]', '(0)', '(0)'], {}), '(hspac[col], vspac[row], 0, 0)\n', (5535, 5565), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((6017, 6063), 'gi.repository.Gtk.Alignment.new', 'Gtk.Alignment.new', (['hspac[col]', 'vspac[-1]', '(0)', '(0)'], {}), '(hspac[col], vspac[-1], 0, 0)\n', (6034, 6063), False, 'from gi.repository import Gtk, GObject, Pango\n'), ((14449, 14489), 'pychess.System.conf.get', 'conf.get', (["('learncombo%s' % self.category)"], {}), "('learncombo%s' % self.category)\n", (14457, 14489), False, 'from pychess.System import uistuff, conf\n'), ((16011, 16036), 'pychess.perspectives.learn.LessonsPanel.start_lesson_from', 'start_lesson_from', (['source'], {}), '(source)\n', (16028, 16036), False, 'from pychess.perspectives.learn.LessonsPanel import LESSONS, start_lesson_from\n'), ((18761, 18808), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""database"""'], {}), "('database')\n", (18796, 18808), False, 'from pychess.perspectives import perspective_manager\n'), ((18882, 18910), 'pychess.widgets.RecentChooser.recent_manager.add_item', 'recent_manager.add_item', (['uri'], {}), '(uri)\n', (18905, 18910), False, 'from pychess.widgets.RecentChooser import recent_manager, recent_menu\n'), ((6881, 6910), 'pychess.Utils.IconLoader.get_pixbuf', 'get_pixbuf', (['"""glade/white.png"""'], {}), "('glade/white.png')\n", (6891, 6910), False, 'from pychess.Utils.IconLoader import load_icon, get_pixbuf\n'), ((6942, 6971), 'pychess.Utils.IconLoader.get_pixbuf', 'get_pixbuf', (['"""glade/black.png"""'], {}), "('glade/black.png')\n", (6952, 6971), False, 'from pychess.Utils.IconLoader import load_icon, get_pixbuf\n'), ((7003, 7033), 'pychess.Utils.IconLoader.get_pixbuf', 'get_pixbuf', (['"""glade/random.png"""'], {}), "('glade/random.png')\n", (7013, 7033), False, 'from pychess.Utils.IconLoader import load_icon, get_pixbuf\n'), ((16087, 16112), 'pychess.perspectives.learn.PuzzlesPanel.start_puzzle_from', 'start_puzzle_from', (['source'], {}), '(source)\n', (16104, 16112), False, 'from pychess.perspectives.learn.PuzzlesPanel import PUZZLES, start_puzzle_from\n'), ((18018, 18056), 'pychess.widgets.newGameDialog.loadFileAndRun', 'newGameDialog.loadFileAndRun', (['filename'], {}), '(filename)\n', (18046, 18056), False, 'from pychess.widgets import newGameDialog\n'), ((18113, 18160), 'pychess.perspectives.perspective_manager.get_perspective', 'perspective_manager.get_perspective', (['"""database"""'], {}), "('database')\n", (18148, 18160), False, 'from pychess.perspectives import perspective_manager\n'), ((18852, 18864), 'urllib.parse.unquote', 'unquote', (['uri'], {}), '(uri)\n', (18859, 18864), False, 'from urllib.parse import unquote\n'), ((19014, 19045), 'pychess.widgets.RecentChooser.recent_manager.remove_item', 'recent_manager.remove_item', (['uri'], {}), '(uri)\n', (19040, 19045), False, 'from pychess.widgets.RecentChooser import recent_manager, recent_menu\n'), ((14880, 14930), 'pychess.System.conf.set', 'conf.set', (["('learncombo%s' % self.category)", 'newlearn'], {}), "('learncombo%s' % self.category, newlearn)\n", (14888, 14930), False, 'from pychess.System import uistuff, conf\n'), ((16164, 16190), 'pychess.perspectives.learn.EndgamesPanel.start_endgame_from', 'start_endgame_from', (['source'], {}), '(source)\n', (16182, 16190), False, 'from pychess.perspectives.learn.EndgamesPanel import ENDGAMES, start_endgame_from\n'), ((17501, 17513), 'urllib.parse.unquote', 'unquote', (['uri'], {}), '(uri)\n', (17508, 17513), False, 'from urllib.parse import unquote\n'), ((18709, 18721), 'urllib.parse.unquote', 'unquote', (['uri'], {}), '(uri)\n', (18716, 18721), False, 'from urllib.parse import unquote\n')]
|
# Exploring the handwritten digits dataset: unsupervised dimensionality reduction and supervised classification
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_digits
digits = load_digits()
digits.data.shape
digits.data[:10, :10]
digits.images.shape
import matplotlib.pyplot as plt
fig, axes = plt.subplots(10, 10, figsize=(8, 8),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
for i, ax in enumerate(axes.flat):
ax.imshow(digits.images[i], cmap='binary', interpolation='nearest')
ax.text(0.05, 0.05, str(digits.target[i]),
transform=ax.transAxes, color='green')
X = digits.data
y = digits.target
# Unsupervised learning: dimensionality reduction with the manifold
# learning algorithm Isomap
from sklearn.manifold import Isomap
iso = Isomap(n_components=2)
iso.fit(digits.data)
data_projected = iso.transform(digits.data)
data_projected.shape
data_projected[:10]
# The projected data is now two-dimensional. Let's plot it to see if we can
# learn anything from its structure:
plt.scatter(data_projected[:, 0], data_projected[:, 1], c=digits.target,
edgecolor='none', alpha=0.5,
cmap=plt.cm.get_cmap('plasma', 10))
plt.colorbar(label='digit label', ticks=range(10))
plt.clim(-0.5, 9.5);
plt.show()
# Classification on digits
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
model.fit(Xtrain, ytrain)
y_model = model.predict(Xtest)
from sklearn.metrics import accuracy_score
accuracy_score(ytest, y_model)
# A single accuracy score does not show where the model goes wrong,
# so use a confusion matrix:
from sklearn.metrics import confusion_matrix
mat = confusion_matrix(ytest, y_model)
mat
sns.heatmap(mat, square=True, annot=True, cbar=False)
plt.xlabel('predicted value')
plt.ylabel('true value');
# This shows us where the mis-labeled points tend to be: for example, a large number of twos here are mis-classified as either ones or eights. Another way to gain intuition into the characteristics of the model is to plot the inputs again, with their predicted labels. We'll use green for correct labels, and red for incorrect labels:
fig, axes = plt.subplots(10, 10, figsize=(8, 8),
subplot_kw={'xticks':[], 'yticks':[]},
gridspec_kw=dict(hspace=0.1, wspace=0.1))
Xtest.shape
test_images = Xtest.reshape(-1, 8, 8)
test_images.shape
for i, ax in enumerate(axes.flat):
ax.imshow(test_images[i], cmap='binary', interpolation='nearest')
ax.text(0.05, 0.05, str(y_model[i]),
transform=ax.transAxes,
color='green' if (ytest[i] == y_model[i]) else 'red')
plt.show()
|
[
"sklearn.datasets.load_digits",
"sklearn.naive_bayes.GaussianNB",
"matplotlib.pyplot.show",
"seaborn.heatmap",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.ylabel",
"sklearn.manifold.Isomap",
"matplotlib.pyplot.clim",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.cm.get_cmap"
] |
[((168, 181), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (179, 181), False, 'from sklearn.datasets import load_digits\n'), ((967, 989), 'sklearn.manifold.Isomap', 'Isomap', ([], {'n_components': '(2)'}), '(n_components=2)\n', (973, 989), False, 'from sklearn.manifold import Isomap\n'), ((1313, 1332), 'matplotlib.pyplot.clim', 'plt.clim', (['(-0.5)', '(9.5)'], {}), '(-0.5, 9.5)\n', (1321, 1332), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1342, 1344), True, 'import matplotlib.pyplot as plt\n'), ((1407, 1445), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'random_state': '(0)'}), '(X, y, random_state=0)\n', (1423, 1445), False, 'from sklearn.model_selection import train_test_split\n'), ((1498, 1510), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (1508, 1510), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((1612, 1642), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['ytest', 'y_model'], {}), '(ytest, y_model)\n', (1626, 1642), False, 'from sklearn.metrics import accuracy_score\n'), ((1800, 1832), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['ytest', 'y_model'], {}), '(ytest, y_model)\n', (1816, 1832), False, 'from sklearn.metrics import confusion_matrix\n'), ((1838, 1891), 'seaborn.heatmap', 'sns.heatmap', (['mat'], {'square': '(True)', 'annot': '(True)', 'cbar': '(False)'}), '(mat, square=True, annot=True, cbar=False)\n', (1849, 1891), True, 'import seaborn as sns\n'), ((1892, 1921), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""predicted value"""'], {}), "('predicted value')\n", (1902, 1921), True, 'import matplotlib.pyplot as plt\n'), ((1922, 1946), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""true value"""'], {}), "('true value')\n", (1932, 1946), True, 'import matplotlib.pyplot as plt\n'), ((2783, 2793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2791, 2793), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1260), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['"""plasma"""', '(10)'], {}), "('plasma', 10)\n", (1246, 1260), True, 'import matplotlib.pyplot as plt\n')]
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
from __future__ import unicode_literals
import frappe
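# Patch: reassign the Maintenance* doctypes to the "Maintenance" module.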
def execute():
frappe.db.set_value("DocType", "Maintenance Schedule", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Schedule Detail", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Schedule Item", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Visit", "module", "Maintenance")
frappe.db.set_value("DocType", "Maintenance Visit Purpose", "module", "Maintenance")
|
[
"frappe.db.set_value"
] |
[((141, 220), 'frappe.db.set_value', 'frappe.db.set_value', (['"""DocType"""', '"""Maintenance Schedule"""', '"""module"""', '"""Maintenance"""'], {}), "('DocType', 'Maintenance Schedule', 'module', 'Maintenance')\n", (160, 220), False, 'import frappe\n'), ((222, 312), 'frappe.db.set_value', 'frappe.db.set_value', (['"""DocType"""', '"""Maintenance Schedule Detail"""', '"""module"""', '"""Maintenance"""'], {}), "('DocType', 'Maintenance Schedule Detail', 'module',\n 'Maintenance')\n", (241, 312), False, 'import frappe\n'), ((310, 398), 'frappe.db.set_value', 'frappe.db.set_value', (['"""DocType"""', '"""Maintenance Schedule Item"""', '"""module"""', '"""Maintenance"""'], {}), "('DocType', 'Maintenance Schedule Item', 'module',\n 'Maintenance')\n", (329, 398), False, 'import frappe\n'), ((396, 472), 'frappe.db.set_value', 'frappe.db.set_value', (['"""DocType"""', '"""Maintenance Visit"""', '"""module"""', '"""Maintenance"""'], {}), "('DocType', 'Maintenance Visit', 'module', 'Maintenance')\n", (415, 472), False, 'import frappe\n'), ((474, 562), 'frappe.db.set_value', 'frappe.db.set_value', (['"""DocType"""', '"""Maintenance Visit Purpose"""', '"""module"""', '"""Maintenance"""'], {}), "('DocType', 'Maintenance Visit Purpose', 'module',\n 'Maintenance')\n", (493, 562), False, 'import frappe\n')]
|
#!/usr/bin/env python
# Set True to force compile native C-coded extension providing direct access
# to inotify's syscalls. If set to False this extension will only be compiled
# if no inotify interface from ctypes is found.
compile_ext_mod = False
# import statements
import os
import sys
import distutils.extension
from distutils.util import get_platform
try:
    # First try to load the more capable setuptools setup.
from setuptools import setup
except:
# Fall back if setuptools is not installed.
from distutils.core import setup
platform = get_platform()
# check Python's version
if sys.version_info < (3, 5):
sys.stderr.write('This module requires at least Python 3.5\n')
sys.exit(1)
classif = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Filesystems',
'Topic :: System :: Monitoring',
]
def should_compile_ext_mod():
try:
import ctypes
import ctypes.util
except:
return True
try_libc_name = 'c'
if platform.startswith('freebsd'):
try_libc_name = 'inotify'
libc_name = None
try:
libc_name = ctypes.util.find_library(try_libc_name)
except:
        pass  # Will attempt to load it with None anyway.
libc = ctypes.CDLL(libc_name)
    # Check that libc provides the needed inotify bindings.
if (not hasattr(libc, 'inotify_init') or
not hasattr(libc, 'inotify_add_watch') or
not hasattr(libc, 'inotify_rm_watch')):
return True
return False
ext_mod = []
if compile_ext_mod or should_compile_ext_mod():
# add -fpic if x86_64 arch
if platform in ["linux-x86_64"]:
os.environ["CFLAGS"] = "-fpic"
# sources for ext module
ext_mod_src = ['common/inotify_syscalls.c']
# dst for ext module
ext_mod.append(distutils.extension.Extension('inotify_syscalls',
ext_mod_src))
setup(
name='pyinotify-smarkets',
version='1.0.0',
description='Linux filesystem events monitoring',
author='<NAME>',
author_email='<EMAIL>',
maintainer='Smarkets Limited',
maintainer_email='<EMAIL>',
license='MIT License',
platforms='Linux',
classifiers=classif,
url='http://github.com/smarkets/pyinotify-smarkets',
download_url='http://pypi.python.org/pypi/pyinotify-smarkets',
ext_modules=ext_mod,
py_modules=['pyinotify'],
)
|
[
"ctypes.util.find_library",
"distutils.core.setup",
"distutils.util.get_platform",
"sys.stderr.write",
"ctypes.CDLL",
"sys.exit"
] |
[((558, 572), 'distutils.util.get_platform', 'get_platform', ([], {}), '()\n', (570, 572), False, 'from distutils.util import get_platform\n'), ((2373, 2826), 'distutils.core.setup', 'setup', ([], {'name': '"""pyinotify-smarkets"""', 'version': '"""1.0.0"""', 'description': '"""Linux filesystem events monitoring"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""Smarkets Limited"""', 'maintainer_email': '"""<EMAIL>"""', 'license': '"""MIT License"""', 'platforms': '"""Linux"""', 'classifiers': 'classif', 'url': '"""http://github.com/smarkets/pyinotify-smarkets"""', 'download_url': '"""http://pypi.python.org/pypi/pyinotify-smarkets"""', 'ext_modules': 'ext_mod', 'py_modules': "['pyinotify']"}), "(name='pyinotify-smarkets', version='1.0.0', description=\n 'Linux filesystem events monitoring', author='<NAME>', author_email=\n '<EMAIL>', maintainer='Smarkets Limited', maintainer_email='<EMAIL>',\n license='MIT License', platforms='Linux', classifiers=classif, url=\n 'http://github.com/smarkets/pyinotify-smarkets', download_url=\n 'http://pypi.python.org/pypi/pyinotify-smarkets', ext_modules=ext_mod,\n py_modules=['pyinotify'])\n", (2378, 2826), False, 'from distutils.core import setup\n'), ((633, 695), 'sys.stderr.write', 'sys.stderr.write', (['"""This module requires at least Python 3.5\n"""'], {}), "('This module requires at least Python 3.5\\n')\n", (649, 695), False, 'import sys\n'), ((700, 711), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (708, 711), False, 'import sys\n'), ((1702, 1724), 'ctypes.CDLL', 'ctypes.CDLL', (['libc_name'], {}), '(libc_name)\n', (1713, 1724), False, 'import ctypes\n'), ((1581, 1620), 'ctypes.util.find_library', 'ctypes.util.find_library', (['try_libc_name'], {}), '(try_libc_name)\n', (1605, 1620), False, 'import ctypes\n')]
|
import logging
import re
import sys
from datetime import datetime
from pathlib import Path
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
log = logging.getLogger('filesystem')
class Filesystem:
"""
Uses local path content as an input. Can use recursion if configured.
Recursion is False by default. Can be configured to true or get integer that will specify max depth in relation to
base folder.
All files/dir/symlinks are retrieved by default. Can be changed by using the 'retrieve' property.
Example 1:: Single path
filesystem: /storage/movies/
Example 2:: List of paths
filesystem:
- /storage/movies/
- /storage/tv/
Example 3:: Object with list of paths
filesystem:
path:
- /storage/movies/
- /storage/tv/
mask: '*.mkv'
Example 4::
filesystem:
path:
- /storage/movies/
- /storage/tv/
recursive: 4 # 4 levels deep from each base folder
retrieve: files # Only files will be retrieved
Example 5::
filesystem:
path:
- /storage/movies/
- /storage/tv/
recursive: yes # No limit to depth, all sub dirs will be accessed
retrieve: # Only files and dirs will be retrieved
- files
- dirs
"""
retrieval_options = ['files', 'dirs', 'symlinks']
paths = one_or_more({'type': 'string', 'format': 'path'}, unique_items=True)
schema = {
'oneOf': [
paths,
{
'type': 'object',
'properties': {
'path': paths,
'mask': {'type': 'string'},
'regexp': {'type': 'string', 'format': 'regex'},
'recursive': {
'oneOf': [{'type': 'integer', 'minimum': 2}, {'type': 'boolean'}]
},
'retrieve': one_or_more(
{'type': 'string', 'enum': retrieval_options}, unique_items=True
),
},
'required': ['path'],
'additionalProperties': False,
},
]
}
def prepare_config(self, config):
from fnmatch import translate
# Converts config to a dict with a list of paths
if not isinstance(config, dict):
config = {'path': config}
if not isinstance(config['path'], list):
config['path'] = [config['path']]
config.setdefault('recursive', False)
# If mask was specified, turn it in to a regexp
if config.get('mask'):
config['regexp'] = translate(config['mask'])
# If no mask or regexp specified, accept all files
config.setdefault('regexp', '.')
# Sets the default retrieval option to files
config.setdefault('retrieve', self.retrieval_options)
return config
def create_entry(self, filepath: Path, test_mode):
"""
Creates a single entry using a filepath and a type (file/dir)
"""
filepath = filepath.absolute()
entry = Entry()
entry['location'] = str(filepath)
entry['url'] = Path(filepath).absolute().as_uri()
entry['filename'] = filepath.name
if filepath.is_file():
entry['title'] = filepath.stem
else:
entry['title'] = filepath.name
file_stat = filepath.stat()
try:
entry['timestamp'] = datetime.fromtimestamp(file_stat.st_mtime)
except Exception as e:
log.warning('Error setting timestamp for %s: %s' % (filepath, e))
entry['timestamp'] = None
entry['accessed'] = datetime.fromtimestamp(file_stat.st_atime)
entry['modified'] = datetime.fromtimestamp(file_stat.st_mtime)
entry['created'] = datetime.fromtimestamp(file_stat.st_ctime)
if entry.isvalid():
if test_mode:
log.info("Test mode. Entry includes:")
log.info(" Title: %s" % entry["title"])
log.info(" URL: %s" % entry["url"])
log.info(" Filename: %s" % entry["filename"])
log.info(" Location: %s" % entry["location"])
log.info(" Timestamp: %s" % entry["timestamp"])
return entry
else:
log.error('Non valid entry created: %s ' % entry)
return
def get_max_depth(self, recursion, base_depth):
if recursion is False:
return base_depth + 1
elif recursion is True:
return float('inf')
else:
return base_depth + recursion
@staticmethod
def get_folder_objects(folder: Path, recursion: bool):
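        # rglob('*') walks the whole tree recursively, while iterdir() only
        # lists the folder's immediate children.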
return folder.rglob('*') if recursion else folder.iterdir()
def get_entries_from_path(
self, path_list, match, recursion, test_mode, get_files, get_dirs, get_symlinks
):
entries = []
for folder in path_list:
log.verbose('Scanning folder %s. Recursion is set to %s.', folder, recursion)
folder = Path(folder).expanduser()
log.debug('Scanning %s', folder)
base_depth = len(folder.parts)
max_depth = self.get_max_depth(recursion, base_depth)
folder_objects = self.get_folder_objects(folder, recursion)
for path_object in folder_objects:
log.debug('Checking if %s qualifies to be added as an entry.', path_object)
try:
path_object.exists()
except UnicodeError:
log.error(
'File %s not decodable with filesystem encoding: %s',
path_object,
sys.getfilesystemencoding(),
)
continue
entry = None
object_depth = len(path_object.parts)
if object_depth <= max_depth:
if match(str(path_object)):
if (
(path_object.is_dir() and get_dirs)
or (path_object.is_symlink() and get_symlinks)
or (
path_object.is_file()
and not path_object.is_symlink()
and get_files
)
):
entry = self.create_entry(path_object, test_mode)
else:
log.debug(
"Path object's %s type doesn't match requested object types.",
path_object,
)
if entry and entry not in entries:
entries.append(entry)
return entries
def on_task_input(self, task, config):
config = self.prepare_config(config)
path_list = config['path']
test_mode = task.options.test
match = re.compile(config['regexp'], re.IGNORECASE).match
recursive = config['recursive']
get_files = 'files' in config['retrieve']
get_dirs = 'dirs' in config['retrieve']
get_symlinks = 'symlinks' in config['retrieve']
log.verbose('Starting to scan folders.')
return self.get_entries_from_path(
path_list, match, recursive, test_mode, get_files, get_dirs, get_symlinks
)
@event('plugin.register')
def register_plugin():
plugin.register(Filesystem, 'filesystem', api_ver=2)
|
[
"flexget.entry.Entry",
"flexget.event.event",
"sys.getfilesystemencoding",
"pathlib.Path",
"flexget.plugin.register",
"flexget.config_schema.one_or_more",
"datetime.datetime.fromtimestamp",
"fnmatch.translate",
"logging.getLogger",
"re.compile"
] |
[((236, 267), 'logging.getLogger', 'logging.getLogger', (['"""filesystem"""'], {}), "('filesystem')\n", (253, 267), False, 'import logging\n'), ((7650, 7674), 'flexget.event.event', 'event', (['"""plugin.register"""'], {}), "('plugin.register')\n", (7655, 7674), False, 'from flexget.event import event\n'), ((1500, 1568), 'flexget.config_schema.one_or_more', 'one_or_more', (["{'type': 'string', 'format': 'path'}"], {'unique_items': '(True)'}), "({'type': 'string', 'format': 'path'}, unique_items=True)\n", (1511, 1568), False, 'from flexget.config_schema import one_or_more\n'), ((7702, 7754), 'flexget.plugin.register', 'plugin.register', (['Filesystem', '"""filesystem"""'], {'api_ver': '(2)'}), "(Filesystem, 'filesystem', api_ver=2)\n", (7717, 7754), False, 'from flexget import plugin\n'), ((3263, 3270), 'flexget.entry.Entry', 'Entry', ([], {}), '()\n', (3268, 3270), False, 'from flexget.entry import Entry\n'), ((3844, 3886), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['file_stat.st_atime'], {}), '(file_stat.st_atime)\n', (3866, 3886), False, 'from datetime import datetime\n'), ((3915, 3957), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['file_stat.st_mtime'], {}), '(file_stat.st_mtime)\n', (3937, 3957), False, 'from datetime import datetime\n'), ((3985, 4027), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['file_stat.st_ctime'], {}), '(file_stat.st_ctime)\n', (4007, 4027), False, 'from datetime import datetime\n'), ((2794, 2819), 'fnmatch.translate', 'translate', (["config['mask']"], {}), "(config['mask'])\n", (2803, 2819), False, 'from fnmatch import translate\n'), ((3626, 3668), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['file_stat.st_mtime'], {}), '(file_stat.st_mtime)\n', (3648, 3668), False, 'from datetime import datetime\n'), ((7214, 7257), 're.compile', 're.compile', (["config['regexp']", 're.IGNORECASE'], {}), "(config['regexp'], re.IGNORECASE)\n", (7224, 7257), False, 'import re\n'), ((2035, 2112), 'flexget.config_schema.one_or_more', 'one_or_more', (["{'type': 'string', 'enum': retrieval_options}"], {'unique_items': '(True)'}), "({'type': 'string', 'enum': retrieval_options}, unique_items=True)\n", (2046, 2112), False, 'from flexget.config_schema import one_or_more\n'), ((5245, 5257), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (5249, 5257), False, 'from pathlib import Path\n'), ((3336, 3350), 'pathlib.Path', 'Path', (['filepath'], {}), '(filepath)\n', (3340, 3350), False, 'from pathlib import Path\n'), ((5905, 5932), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (5930, 5932), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import random as _random
import tarfile as _tarfile
import turicreate as _tc
from turicreate import extensions as _extensions
import turicreate.toolkits._internal_utils as _tkutl
from turicreate.toolkits.one_shot_object_detector.util._error_handling import check_one_shot_input
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from turicreate.toolkits import _data_zoo
def preview_synthetic_training_data(data,
target,
backgrounds=None,
verbose=True,
**kwargs):
"""
A utility function to visualize the synthetically generated data.
Parameters
----------
data : SFrame | tc.Image
A single starter image or an SFrame that contains the starter images
along with their corresponding labels. These image(s) can be in either
RGB or RGBA format. They should not be padded.
target : string
Name of the target (when data is a single image) or the target column
name (when data is an SFrame of images).
backgrounds : optional SArray
        A list of backgrounds used for synthetic data generation. When set to
        None, a set of default backgrounds is downloaded and used.
Returns
-------
out : SFrame
        An SFrame of synthetically generated annotated training data.
"""
dataset_to_augment, image_column_name, target_column_name = check_one_shot_input(data, target, backgrounds)
_tkutl._handle_missing_values(dataset_to_augment, image_column_name, 'dataset')
one_shot_model = _extensions.one_shot_object_detector()
seed = kwargs["seed"] if "seed" in kwargs else _random.randint(0, 2**32 - 1)
if backgrounds is None:
backgrounds_downloader = _data_zoo.OneShotObjectDetectorBackgroundData()
backgrounds_tar_path = backgrounds_downloader.get_backgrounds_path()
backgrounds_tar = _tarfile.open(backgrounds_tar_path)
backgrounds_tar.extractall()
backgrounds = _tc.SArray("one_shot_backgrounds.sarray")
# We resize the background dimensions by half along each axis to reduce
# the disk footprint during augmentation, and also reduce the time
# taken to synthesize data.
backgrounds = backgrounds.apply(lambda im: _tc.image_analysis.resize(
im,
int(im.width/2),
int(im.height/2),
im.channels
))
    # Optional arguments to pass to the C++ Object Detector, if we use it:
    # {'mlmodel_path':'darknet.mlmodel', 'max_iterations' : 25}
options_for_augmentation = {
"seed": seed,
"verbose": verbose
}
augmented_data = one_shot_model.augment(dataset_to_augment,
image_column_name,
target_column_name,
backgrounds,
options_for_augmentation)
return augmented_data
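# A minimal usage sketch (hypothetical file name and target label, not part of
# the original toolkit code):
#
#   data = _tc.SFrame('starter_images.sframe')
#   preview = preview_synthetic_training_data(data, target='label')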
|
[
"turicreate.extensions.one_shot_object_detector",
"random.randint",
"turicreate.toolkits.one_shot_object_detector.util._error_handling.check_one_shot_input",
"turicreate.SArray",
"turicreate.toolkits._data_zoo.OneShotObjectDetectorBackgroundData",
"tarfile.open",
"turicreate.toolkits._internal_utils._handle_missing_values"
] |
[((1726, 1773), 'turicreate.toolkits.one_shot_object_detector.util._error_handling.check_one_shot_input', 'check_one_shot_input', (['data', 'target', 'backgrounds'], {}), '(data, target, backgrounds)\n', (1746, 1773), False, 'from turicreate.toolkits.one_shot_object_detector.util._error_handling import check_one_shot_input\n'), ((1778, 1857), 'turicreate.toolkits._internal_utils._handle_missing_values', '_tkutl._handle_missing_values', (['dataset_to_augment', 'image_column_name', '"""dataset"""'], {}), "(dataset_to_augment, image_column_name, 'dataset')\n", (1807, 1857), True, 'import turicreate.toolkits._internal_utils as _tkutl\n'), ((1879, 1917), 'turicreate.extensions.one_shot_object_detector', '_extensions.one_shot_object_detector', ([], {}), '()\n', (1915, 1917), True, 'from turicreate import extensions as _extensions\n'), ((1969, 2000), 'random.randint', '_random.randint', (['(0)', '(2 ** 32 - 1)'], {}), '(0, 2 ** 32 - 1)\n', (1984, 2000), True, 'import random as _random\n'), ((2060, 2107), 'turicreate.toolkits._data_zoo.OneShotObjectDetectorBackgroundData', '_data_zoo.OneShotObjectDetectorBackgroundData', ([], {}), '()\n', (2105, 2107), False, 'from turicreate.toolkits import _data_zoo\n'), ((2211, 2246), 'tarfile.open', '_tarfile.open', (['backgrounds_tar_path'], {}), '(backgrounds_tar_path)\n', (2224, 2246), True, 'import tarfile as _tarfile\n'), ((2306, 2347), 'turicreate.SArray', '_tc.SArray', (['"""one_shot_backgrounds.sarray"""'], {}), "('one_shot_backgrounds.sarray')\n", (2316, 2347), True, 'import turicreate as _tc\n')]
|
from shutil import rmtree
from argparse import Namespace
from _pytest.tmpdir import TempPathFactory
from _pytest.capture import CaptureFixture
from pytest_mock import MockerFixture
from grizzly_cli.init import tree, init
from .helpers import onerror
def test_tree(tmp_path_factory: TempPathFactory) -> None:
test_context = tmp_path_factory.mktemp('test_context')
(test_context / 'a' / 'b' / 'c').mkdir(parents=True)
(test_context / 'a' / 'file-a1.txt').touch()
(test_context / 'a' / 'file-a2.txt').touch()
(test_context / 'a' / 'b' / 'file-b1.txt').touch()
(test_context / 'a' / 'b' / 'c' / 'file-c1.txt').touch()
(test_context / 'a' / 'b' / 'c' / 'file-c2.txt').touch()
(test_context / 'root.yaml').touch()
try:
assert '\n'.join([line for line in tree(test_context)]) == '''├── a
│ ├── b
│ │ ├── c
│ │ │ ├── file-c1.txt
│ │ │ └── file-c2.txt
│ │ └── file-b1.txt
│ ├── file-a1.txt
│ └── file-a2.txt
└── root.yaml'''
finally:
rmtree(test_context, onerror=onerror)
def test_init(tmp_path_factory: TempPathFactory, capsys: CaptureFixture, mocker: MockerFixture) -> None:
test_context = tmp_path_factory.mktemp('test_context')
test_existing = test_context / 'foobar'
test_existing.mkdir()
mocker.patch('grizzly_cli.init.EXECUTION_CONTEXT', str(test_context))
try:
arguments = Namespace(project='foobar', with_mq=False, grizzly_version=None)
assert init(arguments) == 1
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == f'"foobar" already exists in {test_context}\n'
(test_existing / 'environments').mkdir()
(test_existing / 'features').mkdir()
(test_existing / 'requirements.txt').touch()
mocker.patch('grizzly_cli.init.EXECUTION_CONTEXT', str(test_existing))
assert init(arguments) == 1
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == f'''oops, looks like you are already in a grizzly project directory
{test_existing}
├── environments
├── features
└── requirements.txt
'''
rmtree(test_existing, onerror=onerror)
question_mock = mocker.patch('grizzly_cli.init.ask_yes_no', side_effect=[None] * 4)
mocker.patch('grizzly_cli.init.EXECUTION_CONTEXT', str(test_context))
assert init(arguments) == 0
assert question_mock.call_count == 1
args, _ = question_mock.call_args_list[-1]
assert args[0] == '''the following structure will be created:
foobar
├── environments
│ └── foobar.yaml
├── features
│ ├── environment.py
│ ├── steps
│ │ └── steps.py
│ ├── foobar.feature
│ └── requests
└── requirements.txt
do you want to create grizzly project "foobar"?'''
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == '''successfully created project "foobar", with the following options:
• without IBM MQ support
• latest grizzly version
'''
template_root = test_context / 'foobar'
assert template_root.is_dir()
assert (template_root / 'environments').is_dir()
environments_file = template_root / 'environments' / 'foobar.yaml'
assert environments_file.is_file()
assert environments_file.read_text() == '''configuration:
template:
host: https://localhost
'''
assert (template_root / 'features').is_dir()
feature_file = template_root / 'features' / 'foobar.feature'
assert feature_file.is_file()
assert feature_file.read_text() == '''Feature: Template feature file
Scenario: Template scenario
Given a user of type "RestApi" with weight "1" load testing "$conf::template.host"
'''
environment_file = template_root / 'features' / 'environment.py'
assert environment_file.is_file()
assert environment_file.read_text() == 'from grizzly.environment import *\n\n'
assert (template_root / 'features' / 'requests').is_dir()
assert (template_root / 'features' / 'steps').is_dir()
steps_file = template_root / 'features' / 'steps' / 'steps.py'
assert steps_file.is_file()
assert steps_file.read_text() == 'from grizzly.steps import *\n\n'
requirements_file = template_root / 'requirements.txt'
assert requirements_file.is_file()
assert requirements_file.read_text() == 'grizzly-loadtester\n'
created_structure = '\n'.join([line for line in tree(template_root)])
print(created_structure)
assert created_structure == '''├── environments
│ └── foobar.yaml
├── features
│ ├── environment.py
│ ├── foobar.feature
│ ├── requests
│ └── steps
│ └── steps.py
└── requirements.txt'''
rmtree(template_root, onerror=onerror)
capsys.readouterr()
arguments.with_mq = True
assert init(arguments) == 0
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == '''successfully created project "foobar", with the following options:
• with IBM MQ support
• latest grizzly version
'''
requirements_file = template_root / 'requirements.txt'
assert requirements_file.is_file()
assert requirements_file.read_text() == 'grizzly-loadtester[mq]\n'
rmtree(template_root, onerror=onerror)
arguments.with_mq = False
arguments.grizzly_version = '1.2.4'
assert init(arguments) == 0
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == '''successfully created project "foobar", with the following options:
• without IBM MQ support
• pinned to grizzly version 1.2.4
'''
requirements_file = template_root / 'requirements.txt'
assert requirements_file.is_file()
assert requirements_file.read_text() == 'grizzly-loadtester==1.2.4\n'
rmtree(template_root, onerror=onerror)
arguments.with_mq = True
arguments.grizzly_version = '1.5.0'
assert init(arguments) == 0
capture = capsys.readouterr()
assert capture.err == ''
assert capture.out == '''successfully created project "foobar", with the following options:
• with IBM MQ support
• pinned to grizzly version 1.5.0
'''
requirements_file = template_root / 'requirements.txt'
assert requirements_file.is_file()
assert requirements_file.read_text() == 'grizzly-loadtester[mq]==1.5.0\n'
finally:
rmtree(test_context, onerror=onerror)
|
[
"argparse.Namespace",
"shutil.rmtree",
"grizzly_cli.init.init",
"grizzly_cli.init.tree"
] |
[((1018, 1055), 'shutil.rmtree', 'rmtree', (['test_context'], {'onerror': 'onerror'}), '(test_context, onerror=onerror)\n', (1024, 1055), False, 'from shutil import rmtree\n'), ((1398, 1462), 'argparse.Namespace', 'Namespace', ([], {'project': '"""foobar"""', 'with_mq': '(False)', 'grizzly_version': 'None'}), "(project='foobar', with_mq=False, grizzly_version=None)\n", (1407, 1462), False, 'from argparse import Namespace\n'), ((2165, 2203), 'shutil.rmtree', 'rmtree', (['test_existing'], {'onerror': 'onerror'}), '(test_existing, onerror=onerror)\n', (2171, 2203), False, 'from shutil import rmtree\n'), ((4836, 4874), 'shutil.rmtree', 'rmtree', (['template_root'], {'onerror': 'onerror'}), '(template_root, onerror=onerror)\n', (4842, 4874), False, 'from shutil import rmtree\n'), ((5392, 5430), 'shutil.rmtree', 'rmtree', (['template_root'], {'onerror': 'onerror'}), '(template_root, onerror=onerror)\n', (5398, 5430), False, 'from shutil import rmtree\n'), ((5978, 6016), 'shutil.rmtree', 'rmtree', (['template_root'], {'onerror': 'onerror'}), '(template_root, onerror=onerror)\n', (5984, 6016), False, 'from shutil import rmtree\n'), ((6578, 6615), 'shutil.rmtree', 'rmtree', (['test_context'], {'onerror': 'onerror'}), '(test_context, onerror=onerror)\n', (6584, 6615), False, 'from shutil import rmtree\n'), ((1479, 1494), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (1483, 1494), False, 'from grizzly_cli.init import tree, init\n'), ((1893, 1908), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (1897, 1908), False, 'from grizzly_cli.init import tree, init\n'), ((2391, 2406), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (2395, 2406), False, 'from grizzly_cli.init import tree, init\n'), ((4954, 4969), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (4958, 4969), False, 'from grizzly_cli.init import tree, init\n'), ((5525, 5540), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (5529, 5540), False, 'from grizzly_cli.init import tree, init\n'), ((6111, 6126), 'grizzly_cli.init.init', 'init', (['arguments'], {}), '(arguments)\n', (6115, 6126), False, 'from grizzly_cli.init import tree, init\n'), ((4561, 4580), 'grizzly_cli.init.tree', 'tree', (['template_root'], {}), '(template_root)\n', (4565, 4580), False, 'from grizzly_cli.init import tree, init\n'), ((803, 821), 'grizzly_cli.init.tree', 'tree', (['test_context'], {}), '(test_context)\n', (807, 821), False, 'from grizzly_cli.init import tree, init\n')]
|
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelItem,\
TabbedPanelHeader, TabbedPanelContent
from kivy.properties import ObjectProperty, StringProperty,\
BooleanProperty, NumericProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.bubble import Bubble, BubbleButton
from kivy.lang import Builder
from kivy.metrics import dp
from kivy.uix.scrollview import ScrollView
from kivy.clock import Clock
from kivy.uix.actionbar import ActionItem, ActionView
class DesignerActionView(ActionView):
'''Custom ActionView to support custom action group
'''
def _layout_random(self):
'''Handle custom action group
'''
self.overflow_group.show_group = self.show_group
super(DesignerActionView, self)._layout_random()
def show_group(self, *l):
'''Show custom groups
'''
over = self.overflow_group
over.clear_widgets()
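        # Re-parent each overflow/action item into the overflow dropdown,
        # skipping items whose group is disabled and any ContextSubMenu.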
for item in over._list_overflow_items + over.list_action_item:
item.inside_group = True
if item.parent is not None:
item.parent.remove_widget(item)
group = self.get_group(item)
if group is not None and group.disabled:
continue
if not isinstance(item, ContextSubMenu):
over._dropdown.add_widget(item)
def get_group(self, item):
'''Get the ActionGroup of an item
'''
for group in self._list_action_group:
if item in group.list_action_item:
return group
return None
class MenuBubble(Bubble):
'''
'''
pass
class MenuHeader(TabbedPanelHeader):
'''MenuHeader class. To be used as default TabbedHeader.
'''
show_arrow = BooleanProperty(False)
    '''Specifies whether to show the arrow or not.
    :data:`show_arrow` is a :class:`~kivy.properties.BooleanProperty`,
    defaults to False
'''
class ContextMenuException(Exception):
'''ContextMenuException class
'''
pass
class MenuButton(Button):
    '''MenuButton class. Used as a default menu button. It automatically
    provides the look and feel for a menu button.
'''
cont_menu = ObjectProperty(None)
'''Reference to :class:`~designer.uix.contextual.ContextMenu`.
'''
def on_release(self, *args):
'''Default Event Handler for 'on_release'
'''
self.cont_menu.dismiss()
super(MenuButton, self).on_release(*args)
class ContextMenu(TabbedPanel):
'''ContextMenu class. See module documentation for more information.
:Events:
`on_select`: data
Fired when a selection is done, with the data of the selection as
first argument. Data is what you pass in the :meth:`select` method
as first argument.
`on_dismiss`:
.. versionadded:: 1.8.0
Fired when the ContextMenu is dismissed either on selection or on
touching outside the widget.
'''
container = ObjectProperty(None)
'''(internal) The container which will be used to contain Widgets of
main menu.
:data:`container` is a :class:`~kivy.properties.ObjectProperty`, default
to :class:`~kivy.uix.boxlayout.BoxLayout`.
'''
main_tab = ObjectProperty(None)
'''Main Menu Tab of ContextMenu.
:data:`main_tab` is a :class:`~kivy.properties.ObjectProperty`, default
to None.
'''
bubble_cls = ObjectProperty(MenuBubble)
'''Bubble Class, whose instance will be used to create
container of ContextMenu.
:data:`bubble_cls` is a :class:`~kivy.properties.ObjectProperty`,
default to :class:`MenuBubble`.
'''
header_cls = ObjectProperty(MenuHeader)
'''Header Class used to create Tab Header.
:data:`header_cls` is a :class:`~kivy.properties.ObjectProperty`,
default to :class:`MenuHeader`.
'''
attach_to = ObjectProperty(allownone=True)
'''(internal) Property that will be set to the widget on which the
drop down list is attached to.
The method :meth:`open` will automatically set that property, while
:meth:`dismiss` will set back to None.
'''
auto_width = BooleanProperty(True)
'''By default, the width of the ContextMenu will be the same
as the width of the attached widget. Set to False if you want
to provide your own width.
'''
dismiss_on_select = BooleanProperty(True)
'''By default, the ContextMenu will be automatically dismissed
when a selection have been done. Set to False to prevent the dismiss.
:data:`dismiss_on_select` is a :class:`~kivy.properties.BooleanProperty`,
default to True.
'''
max_height = NumericProperty(None, allownone=True)
'''Indicate the maximum height that the dropdown can take. If None, it will
take the maximum height available, until the top or bottom of the screen
will be reached.
:data:`max_height` is a :class:`~kivy.properties.NumericProperty`, default
to None.
'''
__events__ = ('on_select', 'on_dismiss')
def __init__(self, **kwargs):
self._win = None
self.add_tab = super(ContextMenu, self).add_widget
self.bubble = self.bubble_cls(size_hint=(None, None))
self.container = None
self.main_tab = self.header_cls(text='Main')
self.main_tab.content = ScrollView(size_hint=(1, 1))
self.main_tab.content.bind(height=self.on_scroll_height)
super(ContextMenu, self).__init__(**kwargs)
self.bubble.add_widget(self)
self.bind(size=self._reposition)
self.bubble.bind(on_height=self._bubble_height)
def _bubble_height(self, *args):
'''Handler for bubble's 'on_height' event.
'''
self.height = self.bubble.height
def open(self, widget):
'''Open the dropdown list, and attach to a specific widget.
Depending the position of the widget on the window and
the height of the dropdown, the placement might be
lower or higher off that widget.
'''
# if trying to open a non-visible widget
if widget.parent is None:
return
# ensure we are not already attached
if self.attach_to is not None:
self.dismiss()
        # we will attach ourselves to the main window, so ensure the widget we
        # are looking for has a window
self._win = widget.get_parent_window()
if self._win is None:
raise ContextMenuException(
'Cannot open a dropdown list on a hidden widget')
self.attach_to = widget
widget.bind(pos=self._reposition, size=self._reposition)
self.add_tab(self.main_tab)
self.switch_to(self.main_tab)
self.main_tab.show_arrow = False
self._reposition()
# attach ourself to the main window
self._win.add_widget(self.bubble)
self.main_tab.color = (0, 0, 0, 0)
def on_select(self, data):
'''Default handler for 'on_select' event.
'''
pass
def dismiss(self, *largs):
'''Remove the dropdown widget from the window, and detach itself from
the attached widget.
'''
if self.bubble.parent:
self.bubble.parent.remove_widget(self.bubble)
if self.attach_to:
self.attach_to.unbind(pos=self._reposition, size=self._reposition)
self.attach_to = None
self.switch_to(self.main_tab)
for child in self.tab_list[:]:
self.remove_widget(child)
self.dispatch('on_dismiss')
def select(self, data):
'''Call this method to trigger the `on_select` event, with the `data`
selection. The `data` can be anything you want.
'''
self.dispatch('on_select', data)
if self.dismiss_on_select:
self.dismiss()
def on_dismiss(self):
'''Default event handler for 'on_dismiss' event.
'''
pass
def _set_width_to_bubble(self, *args):
'''To set self.width and bubble's width equal.
'''
self.width = self.bubble.width
def _reposition(self, *largs):
        # calculate the coordinate of the attached widget in the window
        # coordinate system
win = self._win
widget = self.attach_to
if not widget or not win:
return
wx, wy = widget.to_window(*widget.pos)
wright, wtop = widget.to_window(widget.right, widget.top)
# set width and x
if self.auto_width:
# Calculate minimum required width
if len(self.container.children) == 1:
self.bubble.width = max(self.main_tab.parent.parent.width,
self.container.children[0].width)
else:
self.bubble.width = max(self.main_tab.parent.parent.width,
self.bubble.width,
*([i.width
for i in self.container.children]))
Clock.schedule_once(self._set_width_to_bubble, 0.01)
        # ensure the dropdown list doesn't go off-screen on the X axis, with a
        # preference for 0 in case the list is too wide.
        # try to center the bubble on the parent position
x = wx - self.bubble.width / 4
if x + self.bubble.width > win.width:
x = win.width - self.bubble.width
if x < 0:
x = 0
self.bubble.x = x
        # bubble position relative to the parent center
x_relative = x - (wx - self.bubble.width / 4)
x_range = self.bubble.width / 4 # consider 25% as the range
# determine if we display the dropdown upper or lower to the widget
h_bottom = wy - self.bubble.height
h_top = win.height - (wtop + self.bubble.height)
def _get_hpos():
'''Compare the position of the widget with the parent
to display the arrow in the correct position
'''
_pos = 'mid'
if x_relative == 0:
_pos = 'mid'
elif x_relative < -x_range:
_pos = 'right'
elif x_relative > x_range:
_pos = 'left'
return _pos
if h_bottom > 0:
self.bubble.top = wy
self.bubble.arrow_pos = 'top_' + _get_hpos()
elif h_top > 0:
self.bubble.y = wtop
self.bubble.arrow_pos = 'bottom_' + _get_hpos()
else:
            # neither top nor bottom has enough space to display the widget
            # at the current size. Take the best side, and fit to it.
height = max(h_bottom, h_top)
if height == h_bottom:
self.bubble.top = wy
self.bubble.height = wy
self.bubble.arrow_pos = 'top_' + _get_hpos()
else:
self.bubble.y = wtop
self.bubble.height = win.height - wtop
self.bubble.arrow_pos = 'bottom_' + _get_hpos()
def on_touch_down(self, touch):
'''Default Handler for 'on_touch_down'
'''
if super(ContextMenu, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos):
return True
self.dismiss()
def on_touch_up(self, touch):
'''Default Handler for 'on_touch_up'
'''
if super(ContextMenu, self).on_touch_up(touch):
return True
self.dismiss()
def add_widget(self, widget, index=0):
'''Add a widget.
'''
if self.content is None:
return
if widget.parent is not None:
widget.parent.remove_widget(widget)
if self.tab_list and widget == self.tab_list[0].content or\
widget == self._current_tab.content or \
self.content == widget or\
self._tab_layout == widget or\
isinstance(widget, TabbedPanelContent) or\
isinstance(widget, TabbedPanelHeader):
super(ContextMenu, self).add_widget(widget, index)
return
if not self.container:
self.container = GridLayout(orientation='vertical',
size_hint_y=None,
cols=1)
self.main_tab.content.add_widget(self.container)
self.container.bind(height=self.on_main_box_height)
self.container.add_widget(widget, index)
if hasattr(widget, 'cont_menu'):
widget.cont_menu = self
widget.bind(height=self.on_child_height)
widget.size_hint_y = None
def remove_widget(self, widget):
'''Remove a widget
'''
if self.container and widget in self.container.children:
self.container.remove_widget(widget)
else:
super(ContextMenu, self).remove_widget(widget)
def on_scroll_height(self, *args):
        '''Event Handler for scrollview's height.
'''
if not self.container:
return
self.container.height = max(self.container.height,
self.main_tab.content.height)
def on_main_box_height(self, *args):
'''Event Handler for main_box's height.
'''
if not self.container:
return
self.container.height = max(self.container.height,
self.main_tab.content.height)
if self.max_height:
self.bubble.height = min(self.container.height +
self.tab_height + dp(16),
self.max_height)
else:
self.bubble.height = self.container.height + \
self.tab_height + dp(16)
def on_child_height(self, *args):
'''Event Handler for children's height.
'''
height = 0
for i in self.container.children:
height += i.height
self.main_tab.content.height = height
self.container.height = height
def add_tab(self, widget, index=0):
'''To add a Widget as a new Tab.
'''
super(ContextMenu, self).add_widget(widget, index)
class ContextSubMenu(MenuButton):
'''ContextSubMenu class. To be used to add a sub menu.
'''
attached_menu = ObjectProperty(None)
'''(internal) Menu attached to this sub menu.
:data:`attached_menu` is a :class:`~kivy.properties.ObjectProperty`,
default to None.
'''
cont_menu = ObjectProperty(None)
'''(internal) Reference to the main ContextMenu.
:data:`cont_menu` is a :class:`~kivy.properties.ObjectProperty`,
default to None.
'''
container = ObjectProperty(None)
'''(internal) The container which will be used to contain Widgets of
main menu.
:data:`container` is a :class:`~kivy.properties.ObjectProperty`, default
to :class:`~kivy.uix.boxlayout.BoxLayout`.
'''
show_arrow = BooleanProperty(False)
'''(internal) To specify whether ">" arrow image should be shown in the
    header or not. If a child menu exists, the arrow image will be shown;
    otherwise it will not.
:data:`show_arrow` is a
:class:`~kivy.properties.BooleanProperty`, default to False
'''
def __init__(self, **kwargs):
super(ContextSubMenu, self).__init__(**kwargs)
self._list_children = []
def on_text(self, *args):
'''Default handler for text.
'''
if self.attached_menu:
self.attached_menu.text = self.text
def on_attached_menu(self, *args):
'''Default handler for attached_menu.
'''
self.attached_menu.text = self.text
def add_widget(self, widget, index=0):
'''Add a widget.
'''
if isinstance(widget, Image):
Button.add_widget(self, widget, index)
return
self._list_children.append((widget, index))
if hasattr(widget, 'cont_menu'):
widget.cont_menu = self.cont_menu
def remove_children(self):
'''Clear _list_children[]
'''
for child, index in self._list_children:
self.container.remove_widget(child)
self._list_children = []
def on_cont_menu(self, *args):
'''Default handler for cont_menu.
'''
self._add_widget()
def _add_widget(self, *args):
if not self.cont_menu:
return
if not self.attached_menu:
self.attached_menu = self.cont_menu.header_cls(text=self.text)
self.attached_menu.content = ScrollView(size_hint=(1, 1))
self.attached_menu.content.bind(height=self.on_scroll_height)
self.container = GridLayout(orientation='vertical',
size_hint_y=None, cols=1)
self.attached_menu.content.add_widget(self.container)
self.container.bind(height=self.on_container_height)
for widget, index in self._list_children:
self.container.add_widget(widget, index)
widget.cont_menu = self.cont_menu
widget.bind(height=self.on_child_height)
def on_scroll_height(self, *args):
'''Handler for scrollview's height.
'''
self.container.height = max(self.container.minimum_height,
self.attached_menu.content.height)
def on_container_height(self, *args):
'''Handler for container's height.
'''
self.container.height = max(self.container.minimum_height,
self.attached_menu.content.height)
def on_child_height(self, *args):
'''Handler for children's height.
'''
height = 0
for i in self.container.children:
height += i.height
self.container.height = height
def on_release(self, *args):
'''Default handler for 'on_release' event.
'''
if not self.attached_menu or not self._list_children:
return
try:
index = self.cont_menu.tab_list.index(self.attached_menu)
self.cont_menu.switch_to(self.cont_menu.tab_list[index])
tab = self.cont_menu.tab_list[index]
if hasattr(tab, 'show_arrow') and index != 0:
tab.show_arrow = True
else:
tab.show_arrow = False
        except ValueError:
            if self.cont_menu.current_tab not in self.cont_menu.tab_list:
return
curr_index = self.cont_menu.tab_list.index(
self.cont_menu.current_tab)
for i in range(curr_index - 1, -1, -1):
self.cont_menu.remove_widget(self.cont_menu.tab_list[i])
self.cont_menu.add_tab(self.attached_menu)
self.cont_menu.switch_to(self.cont_menu.tab_list[0])
if hasattr(self.cont_menu.tab_list[1], 'show_arrow'):
self.cont_menu.tab_list[1].show_arrow = True
else:
self.cont_menu.tab_list[1].show_arrow = False
from kivy.clock import Clock
Clock.schedule_once(self._scroll, 0.1)
def _scroll(self, dt):
        '''To scroll ContextMenu's strip to the appropriate place.
'''
from kivy.animation import Animation
self.cont_menu._reposition()
total_tabs = len(self.cont_menu.tab_list)
tab_list = self.cont_menu.tab_list
curr_index = total_tabs - tab_list.index(self.cont_menu.current_tab)
to_scroll = len(tab_list) / curr_index
anim = Animation(scroll_x=to_scroll, d=0.75)
anim.cancel_all(self.cont_menu.current_tab.parent.parent)
anim.start(self.cont_menu.current_tab.parent.parent)
if __name__ == '__main__':
from kivy.app import App
class ActionContext(ContextSubMenu, ActionItem):
pass
Builder.load_string('''
#:import ContextMenu contextual.ContextMenu
<ContextMenu>:
<DesignerActionView>:
<Test>:
ActionBar:
pos_hint: {'top':1}
DesignerActionView:
use_separator: True
ActionPrevious:
title: 'Action Bar'
with_previous: False
ActionOverflow:
ActionButton:
text: 'Btn0'
icon: 'atlas://data/images/defaulttheme/audio-volume-high'
ActionButton:
text: 'Btn1'
ActionButton:
text: 'Btn2'
ActionButton:
text: 'Btn3'
ActionButton:
text: 'Btn4'
ActionGroup:
mode: 'spinner'
text: 'Group1'
dropdown_cls: ContextMenu
ActionButton:
text: 'Btn5'
height: 30
size_hint_y: None
ActionButton:
text: 'Btnddddddd6'
height: 30
size_hint_y: None
ActionButton:
text: 'Btn7'
height: 30
size_hint_y: None
ActionContext:
text: 'Item2'
size_hint_y: None
height: 30
ActionButton:
text: '2->1'
size_hint_y: None
height: 30
ActionButton:
text: '2->2'
size_hint_y: None
height: 30
ActionButton:
text: '2->2'
size_hint_y: None
height: 30
''')
class CMenu(ContextMenu):
pass
class Test(FloatLayout):
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
self.context_menu = CMenu()
def add_menu(self, obj, *l):
self.context_menu = CMenu()
self.context_menu.open(self.children[0])
class MyApp(App):
def build(self):
return Test()
MyApp().run()
|
[
"kivy.uix.gridlayout.GridLayout",
"kivy.lang.Builder.load_string",
"kivy.uix.button.Button.add_widget",
"kivy.properties.BooleanProperty",
"kivy.clock.Clock.schedule_once",
"kivy.animation.Animation",
"kivy.metrics.dp",
"kivy.properties.ObjectProperty",
"kivy.uix.scrollview.ScrollView",
"kivy.properties.NumericProperty"
] |
[((1927, 1949), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (1942, 1949), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((2359, 2379), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (2373, 2379), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((3172, 3192), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (3186, 3192), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((3438, 3458), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (3452, 3458), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((3617, 3643), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['MenuBubble'], {}), '(MenuBubble)\n', (3631, 3643), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((3874, 3900), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['MenuHeader'], {}), '(MenuHeader)\n', (3888, 3900), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((4085, 4115), 'kivy.properties.ObjectProperty', 'ObjectProperty', ([], {'allownone': '(True)'}), '(allownone=True)\n', (4099, 4115), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((4373, 4394), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(True)'], {}), '(True)\n', (4388, 4394), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((4596, 4617), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(True)'], {}), '(True)\n', (4611, 4617), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((4885, 4922), 'kivy.properties.NumericProperty', 'NumericProperty', (['None'], {'allownone': '(True)'}), '(None, allownone=True)\n', (4900, 4922), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((14577, 14597), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (14591, 14597), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((14767, 14787), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (14781, 14787), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((14956, 14976), 'kivy.properties.ObjectProperty', 'ObjectProperty', (['None'], {}), '(None)\n', (14970, 14976), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((15224, 15246), 'kivy.properties.BooleanProperty', 'BooleanProperty', (['(False)'], {}), '(False)\n', (15239, 15246), False, 'from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, NumericProperty\n'), ((20107, 21929), 'kivy.lang.Builder.load_string', 'Builder.load_string', (['"""\n#:import ContextMenu contextual.ContextMenu\n\n<ContextMenu>:\n<DesignerActionView>:\n<Test>:\n ActionBar:\n pos_hint: {\'top\':1}\n DesignerActionView:\n use_separator: True\n ActionPrevious:\n title: \'Action Bar\'\n with_previous: False\n ActionOverflow:\n ActionButton:\n text: \'Btn0\'\n icon: 
\'atlas://data/images/defaulttheme/audio-volume-high\'\n ActionButton:\n text: \'Btn1\'\n ActionButton:\n text: \'Btn2\'\n ActionButton:\n text: \'Btn3\'\n ActionButton:\n text: \'Btn4\'\n ActionGroup:\n mode: \'spinner\'\n text: \'Group1\'\n dropdown_cls: ContextMenu\n ActionButton:\n text: \'Btn5\'\n height: 30\n size_hint_y: None\n ActionButton:\n text: \'Btnddddddd6\'\n height: 30\n size_hint_y: None\n ActionButton:\n text: \'Btn7\'\n height: 30\n size_hint_y: None\n\n ActionContext:\n text: \'Item2\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->1\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->2\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->2\'\n size_hint_y: None\n height: 30\n"""'], {}), '(\n """\n#:import ContextMenu contextual.ContextMenu\n\n<ContextMenu>:\n<DesignerActionView>:\n<Test>:\n ActionBar:\n pos_hint: {\'top\':1}\n DesignerActionView:\n use_separator: True\n ActionPrevious:\n title: \'Action Bar\'\n with_previous: False\n ActionOverflow:\n ActionButton:\n text: \'Btn0\'\n icon: \'atlas://data/images/defaulttheme/audio-volume-high\'\n ActionButton:\n text: \'Btn1\'\n ActionButton:\n text: \'Btn2\'\n ActionButton:\n text: \'Btn3\'\n ActionButton:\n text: \'Btn4\'\n ActionGroup:\n mode: \'spinner\'\n text: \'Group1\'\n dropdown_cls: ContextMenu\n ActionButton:\n text: \'Btn5\'\n height: 30\n size_hint_y: None\n ActionButton:\n text: \'Btnddddddd6\'\n height: 30\n size_hint_y: None\n ActionButton:\n text: \'Btn7\'\n height: 30\n size_hint_y: None\n\n ActionContext:\n text: \'Item2\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->1\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->2\'\n size_hint_y: None\n height: 30\n ActionButton:\n text: \'2->2\'\n size_hint_y: None\n height: 30\n"""\n )\n', (20126, 21929), False, 'from kivy.lang import Builder\n'), ((5544, 5572), 'kivy.uix.scrollview.ScrollView', 'ScrollView', ([], {'size_hint': '(1, 1)'}), '(size_hint=(1, 1))\n', (5554, 5572), False, 'from kivy.uix.scrollview import ScrollView\n'), ((9263, 9315), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self._set_width_to_bubble', '(0.01)'], {}), '(self._set_width_to_bubble, 0.01)\n', (9282, 9315), False, 'from kivy.clock import Clock\n'), ((19357, 19395), 'kivy.clock.Clock.schedule_once', 'Clock.schedule_once', (['self._scroll', '(0.1)'], {}), '(self._scroll, 0.1)\n', (19376, 19395), False, 'from kivy.clock import Clock\n'), ((19813, 19850), 'kivy.animation.Animation', 'Animation', ([], {'scroll_x': 'to_scroll', 'd': '(0.75)'}), '(scroll_x=to_scroll, d=0.75)\n', (19822, 19850), False, 'from kivy.animation import Animation\n'), ((12404, 12464), 'kivy.uix.gridlayout.GridLayout', 'GridLayout', ([], {'orientation': '"""vertical"""', 'size_hint_y': 'None', 'cols': '(1)'}), "(orientation='vertical', size_hint_y=None, cols=1)\n", (12414, 12464), False, 'from kivy.uix.gridlayout import GridLayout\n'), ((16088, 16126), 'kivy.uix.button.Button.add_widget', 'Button.add_widget', (['self', 'widget', 'index'], {}), '(self, widget, index)\n', (16105, 16126), False, 'from kivy.uix.button import Button\n'), ((16848, 16876), 'kivy.uix.scrollview.ScrollView', 'ScrollView', ([], {'size_hint': '(1, 1)'}), '(size_hint=(1, 1))\n', (16858, 16876), False, 'from kivy.uix.scrollview import ScrollView\n'), ((16980, 17040), 'kivy.uix.gridlayout.GridLayout', 'GridLayout', ([], {'orientation': '"""vertical"""', 'size_hint_y': 'None', 'cols': '(1)'}), "(orientation='vertical', size_hint_y=None, cols=1)\n", (16990, 
17040), False, 'from kivy.uix.gridlayout import GridLayout\n'), ((14016, 14022), 'kivy.metrics.dp', 'dp', (['(16)'], {}), '(16)\n', (14018, 14022), False, 'from kivy.metrics import dp\n'), ((13847, 13853), 'kivy.metrics.dp', 'dp', (['(16)'], {}), '(16)\n', (13849, 13853), False, 'from kivy.metrics import dp\n')]
|
import asyncio
import inspect
import sys
import time
from contextvars import ContextVar, Token
from types import TracebackType
from typing import Any, Callable, Coroutine, Dict, List, Optional, Type, Union
def current_task(loop: Optional[asyncio.AbstractEventLoop] = None) -> "Optional[asyncio.Task[Any]]":
"""return current task"""
if sys.version_info >= (3, 7):
return asyncio.current_task(loop=loop)
else:
return asyncio.Task.current_task(loop=loop)
def get_event_loop() -> asyncio.AbstractEventLoop:
"""get event loop in runtime"""
if sys.version_info >= (3, 7):
return asyncio.get_running_loop()
loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
if not loop.is_running():
raise RuntimeError("no running event loop")
return loop
def done_future(loop: Optional[asyncio.AbstractEventLoop] = None) -> asyncio.Future:
"""create init future, use in obj.__inti__ method"""
future: asyncio.Future = asyncio.Future(loop=loop)
future.set_result(True)
return future
async def can_cancel_sleep(delay: float, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
"""Sleep method that can be cancelled"""
await asyncio.wait_for(asyncio.Future(), delay, loop=loop)
def gen_new_param_coro(coro: Coroutine, new_param_dict: Dict[str, Any]) -> Coroutine:
"""Return a new coro according to the parameters
>>> async def demo(a: int, b: int) -> int:
... return a + b
>>> value1: int = asyncio.run(demo(1, 3))
>>> value2: int = asyncio.run(gen_new_param_coro(demo(1, 5), {"b": 3}))
>>> assert value1 == value2
"""
if not asyncio.iscoroutine(coro):
        raise TypeError("coro must be a coroutine object")
qualname: str = coro.__qualname__.split(".<locals>", 1)[0].rsplit(".", 1)[0]
func: Callable = getattr(inspect.getmodule(coro.cr_frame), qualname)
old_param_dict: Dict[str, Any] = coro.cr_frame.f_locals
for key, value in new_param_dict.items():
if key not in old_param_dict:
raise KeyError(f"Not found {key} in {old_param_dict.keys()}")
old_param_dict[key] = value
return func(**old_param_dict)
async def as_first_completed(
future_list: List[Union[Coroutine, asyncio.Future]],
not_cancel_future_list: Optional[List[Union[Coroutine, asyncio.Future]]] = None,
) -> Any:
"""Wait for multiple coroutines to process data, until one of the coroutines returns data,
and the remaining coroutines are cancelled
"""
not_cancel_future_list = not_cancel_future_list if not_cancel_future_list else []
future_list.extend(not_cancel_future_list)
(done, pending) = await asyncio.wait(future_list, return_when=asyncio.FIRST_COMPLETED)
for task in pending:
if task not in not_cancel_future_list:
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
for task in done:
return task.result()
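# Usage sketch (fetch_a/fetch_b are hypothetical coroutines): the first
# completed result is returned and the remaining futures are cancelled:
#
#   result = await as_first_completed([fetch_a(), fetch_b()])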
def del_future(future: asyncio.Future) -> None:
"""Cancel the running future and read the result"""
if not future.cancelled():
future.cancel()
if future.done():
future.result()
def safe_del_future(future: asyncio.Future) -> None:
"""Cancel the running future, read the result and not raise exc"""
try:
del_future(future)
except Exception:
pass
class Semaphore(asyncio.Semaphore):
"""Compared with the original version, an additional method `inflight` is used to obtain the current usage"""
def __init__(self, value: int = 1, *, loop: Optional[asyncio.AbstractEventLoop] = None):
self.raw_value: int = value
super(Semaphore, self).__init__(value, loop=loop)
@property
def inflight(self) -> int:
value: int = self.raw_value - self._value # type: ignore
if value < 0:
value = 0
if value > self.raw_value:
value = self.raw_value
return value
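# Usage sketch for the Semaphore above (assumes it is used from a coroutine):
#
#   sem = Semaphore(10)
#   async with sem:
#       ...  # while held, sem.inflight reports how many slots are in use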
class IgnoreDeadlineTimeoutExc(Exception):
pass
deadline_context: ContextVar[Optional["Deadline"]] = ContextVar("deadline_context", default=None)
class Deadline(object):
"""
cancel and timeout for human
The design is inspired by https://vorpus.org/blog/timeouts-and-cancellation-for-humans/
"""
def __init__(
self,
delay: Optional[float],
loop: Optional[asyncio.AbstractEventLoop] = None,
timeout_exc: Optional[Exception] = None,
):
"""
        :param delay: How many seconds before the deadline; if delay is None, the Deadline never expires
:param loop: Event loop
        :param timeout_exc: The exception raised when the task is not completed by the deadline
            None: raise asyncio.TimeoutError
            IgnoreDeadlineTimeoutExc: do not raise an exception
"""
self._delay: Optional[float] = delay
self._loop = loop or get_event_loop()
self._timeout_exc: Exception = timeout_exc or asyncio.TimeoutError()
self._parent: Optional["Deadline"] = None
self._child: Optional["Deadline"] = None
self._deadline_future: asyncio.Future = asyncio.Future()
self._with_scope_future: Optional[asyncio.Future] = None
if self._delay is not None:
self._end_timestamp: Optional[float] = time.time() + self._delay
self._end_loop_time: Optional[float] = self._loop.time() + self._delay
self._loop.call_at(self._end_loop_time, self._set_deadline_future_result)
else:
self._end_timestamp = None
self._end_loop_time = None
self._context_token: Optional[Token] = None
def _set_context(self) -> None:
"""reset parent context and set self context"""
if self._parent and self._parent._context_token:
deadline_context.reset(self._parent._context_token)
self._parent._context_token = None
self._context_token = deadline_context.set(self)
def _reset_context(self) -> None:
"""reset self context and set parent (if active) context"""
if self._context_token:
deadline_context.reset(self._context_token)
self._context_token = None
if self._parent and self._parent.is_active:
self._parent._context_token = deadline_context.set(self._parent)
def _set_deadline_future_result(self) -> None:
"""set deadline finish"""
self._deadline_future.set_result(True)
if self._with_scope_future and not self._with_scope_future.cancelled():
self._with_scope_future.cancel()
def __await__(self) -> Any:
"""wait deadline"""
return self._deadline_future.__await__()
#######################
# support `async with`#
#######################
async def __aenter__(self) -> "Deadline":
return self.__enter__()
async def __aexit__(
self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], exc_tb: Optional[TracebackType]
) -> Optional[bool]:
return self.__exit__(exc_type, exc_val, exc_tb)
def __enter__(self) -> "Deadline":
if self._with_scope_future:
raise RuntimeError("`with` can only be called once")
self._set_context()
if self._delay is not None:
main_task: Optional[asyncio.Task] = current_task(self._loop)
if not main_task:
raise RuntimeError("Can not found current task")
self._with_scope_future = main_task
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
try:
if self._with_scope_future:
self._with_scope_future = None
else:
return None
if self._deadline_future.done():
if exc_type:
if isinstance(self._timeout_exc, IgnoreDeadlineTimeoutExc):
return True
raise self._timeout_exc
else:
return None
finally:
self._reset_context()
def inherit(self, timeout_exc: Optional[Exception] = None) -> "Deadline":
"""gen child Deadline"""
if not timeout_exc:
timeout_exc = self._timeout_exc
if self._end_loop_time is None:
delay: Optional[float] = None
else:
delay = self._end_loop_time - self._loop.time()
deadline: "Deadline" = self.__class__(delay=delay, loop=self._loop, timeout_exc=timeout_exc)
self._child = deadline
deadline._parent = self
return deadline
@property
def is_active(self) -> bool:
return self._with_scope_future is not None
@property
def surplus(self) -> float:
if self._end_loop_time is None:
return 0.0
return self._end_loop_time - self._loop.time()
@property
def end_timestamp(self) -> Optional[float]:
return self._end_timestamp
@property
def end_loop_time(self) -> Optional[float]:
return self._end_loop_time
async def wait_for(self, future: Union[asyncio.Future, Coroutine]) -> Any:
"""wait future completed or deadline"""
try:
if self._delay is None:
return await future
else:
return await asyncio.wait_for(future, self.surplus)
except asyncio.TimeoutError:
if isinstance(self._timeout_exc, IgnoreDeadlineTimeoutExc):
return
else:
raise self._timeout_exc
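# A minimal usage sketch for Deadline (assumes a running event loop; the
# awaited coroutine is illustrative):
#
#   async def main() -> None:
#       async with Deadline(delay=1.0) as deadline:
#           # raises asyncio.TimeoutError if the sleep outlives the deadline
#           await deadline.wait_for(asyncio.sleep(10))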
|
[
"asyncio.get_event_loop",
"asyncio.TimeoutError",
"asyncio.Task.current_task",
"asyncio.current_task",
"asyncio.get_running_loop",
"time.time",
"inspect.getmodule",
"asyncio.iscoroutine",
"contextvars.ContextVar",
"asyncio.wait_for",
"asyncio.wait",
"asyncio.Future"
] |
[((4069, 4113), 'contextvars.ContextVar', 'ContextVar', (['"""deadline_context"""'], {'default': 'None'}), "('deadline_context', default=None)\n", (4079, 4113), False, 'from contextvars import ContextVar, Token\n'), ((688, 712), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (710, 712), False, 'import asyncio\n'), ((984, 1009), 'asyncio.Future', 'asyncio.Future', ([], {'loop': 'loop'}), '(loop=loop)\n', (998, 1009), False, 'import asyncio\n'), ((389, 420), 'asyncio.current_task', 'asyncio.current_task', ([], {'loop': 'loop'}), '(loop=loop)\n', (409, 420), False, 'import asyncio\n'), ((446, 482), 'asyncio.Task.current_task', 'asyncio.Task.current_task', ([], {'loop': 'loop'}), '(loop=loop)\n', (471, 482), False, 'import asyncio\n'), ((622, 648), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (646, 648), False, 'import asyncio\n'), ((1655, 1680), 'asyncio.iscoroutine', 'asyncio.iscoroutine', (['coro'], {}), '(coro)\n', (1674, 1680), False, 'import asyncio\n'), ((1820, 1852), 'inspect.getmodule', 'inspect.getmodule', (['coro.cr_frame'], {}), '(coro.cr_frame)\n', (1837, 1852), False, 'import inspect\n'), ((2648, 2710), 'asyncio.wait', 'asyncio.wait', (['future_list'], {'return_when': 'asyncio.FIRST_COMPLETED'}), '(future_list, return_when=asyncio.FIRST_COMPLETED)\n', (2660, 2710), False, 'import asyncio\n'), ((5122, 5138), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (5136, 5138), False, 'import asyncio\n'), ((1233, 1249), 'asyncio.Future', 'asyncio.Future', ([], {}), '()\n', (1247, 1249), False, 'import asyncio\n'), ((4951, 4973), 'asyncio.TimeoutError', 'asyncio.TimeoutError', ([], {}), '()\n', (4971, 4973), False, 'import asyncio\n'), ((5291, 5302), 'time.time', 'time.time', ([], {}), '()\n', (5300, 5302), False, 'import time\n'), ((9425, 9463), 'asyncio.wait_for', 'asyncio.wait_for', (['future', 'self.surplus'], {}), '(future, self.surplus)\n', (9441, 9463), False, 'import asyncio\n')]
|
import logging
import re
import shutil
from bs4 import BeautifulSoup
from chibi.file import Chibi_path
from chibi.atlas import Chibi_atlas
from chibi_dl.site.base.site import Site
from chibi_requests import Chibi_url
from .regex import re_image
logger = logging.getLogger( "chibi_dl.sites.ehentai.episode" )
class Image( Site ):
@classmethod
def can_proccess( cls, url ):
if re_image.match( str( url ) ):
return cls( url )
def parse_info( self ):
image = Chibi_url( self.soup.find(
"section", id="image-container" ).img.get( 'src' ) )
return Chibi_atlas( image=image, )
@property
def image( self ):
return self.info.image
def download( self, path ):
if path.is_a_folder:
path += self.image.base_name
logger.info( f"descargnado {path.base_name} en {path.dir_name}" )
self.image.download( path )
return path
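# Usage sketch (hypothetical URL; can_proccess returns an instance only when
# the URL matches re_image):
#
#   image = Image.can_proccess(url)
#   if image:
#       image.download(Chibi_path('/tmp/'))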
|
[
"chibi.atlas.Chibi_atlas",
"logging.getLogger"
] |
[((258, 309), 'logging.getLogger', 'logging.getLogger', (['"""chibi_dl.sites.ehentai.episode"""'], {}), "('chibi_dl.sites.ehentai.episode')\n", (275, 309), False, 'import logging\n'), ((609, 633), 'chibi.atlas.Chibi_atlas', 'Chibi_atlas', ([], {'image': 'image'}), '(image=image)\n', (620, 633), False, 'from chibi.atlas import Chibi_atlas\n')]
|
import RNA
sequence = "GGGGAAAACCCC"
# Set global switch for unique ML decomposition
RNA.cvar.uniq_ML = 1
subopt_data = { 'counter' : 1, 'sequence' : sequence }
# Print a subopt result as FASTA record
def print_subopt_result(structure, energy, data):
    if structure is not None:
print(">subopt {:d}".format(data['counter']))
print("{}\n{} [{:6.2f}]".format(data['sequence'], structure, energy))
# increase structure counter
data['counter'] = data['counter'] + 1
# Create a 'fold_compound' for our sequence
a = RNA.fold_compound(sequence)
# Enumerate all structures within 500 dacal/mol = 5 kcal/mol around
# the MFE and print each structure using the function above
a.subopt_cb(500, print_subopt_result, subopt_data)
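# Each callback invocation prints one FASTA-style record; illustrative output
# (actual structures and energies come from RNAlib):
#
#   >subopt 1
#   GGGGAAAACCCC
#   ((((....)))) [ -5.00]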
|
[
"RNA.fold_compound"
] |
[((549, 576), 'RNA.fold_compound', 'RNA.fold_compound', (['sequence'], {}), '(sequence)\n', (566, 576), False, 'import RNA\n')]
|
from base64 import urlsafe_b64decode
from django import forms
from phraseless.certificates import deserialize_certificate_chain
class CertificateAuth(forms.Form):
certificate_chain = forms.CharField()
challenge_signature = forms.CharField()
def clean_certificate_chain(self):
return deserialize_certificate_chain(
self.cleaned_data['certificate_chain'].encode()
)
def clean_challenge_signature(self):
return urlsafe_b64decode(
self.cleaned_data['challenge_signature'].encode()
)
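# Usage sketch (assuming both fields arrive in a POST request):
#
#   form = CertificateAuth(data=request.POST)
#   if form.is_valid():
#       chain = form.cleaned_data['certificate_chain']
#       signature = form.cleaned_data['challenge_signature']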
|
[
"django.forms.CharField"
] |
[((191, 208), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (206, 208), False, 'from django import forms\n'), ((235, 252), 'django.forms.CharField', 'forms.CharField', ([], {}), '()\n', (250, 252), False, 'from django import forms\n')]
|
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ... import meta as _meta
__all__ = [
'AuditSink',
'AuditSinkSpec',
'Policy',
'ServiceReference',
'Webhook',
'WebhookClientConfig',
'WebhookThrottleConfig',
]
@pulumi.output_type
class AuditSink(dict):
"""
AuditSink represents a cluster level audit sink
"""
def __init__(__self__, *,
api_version: Optional[str] = None,
kind: Optional[str] = None,
metadata: Optional['_meta.v1.outputs.ObjectMeta'] = None,
spec: Optional['outputs.AuditSinkSpec'] = None):
"""
AuditSink represents a cluster level audit sink
:param str api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param str kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param 'AuditSinkSpecArgs' spec: Spec defines the audit configuration spec
"""
if api_version is not None:
pulumi.set(__self__, "api_version", 'auditregistration.k8s.io/v1alpha1')
if kind is not None:
pulumi.set(__self__, "kind", 'AuditSink')
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if spec is not None:
pulumi.set(__self__, "spec", spec)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[str]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def metadata(self) -> Optional['_meta.v1.outputs.ObjectMeta']:
return pulumi.get(self, "metadata")
@property
@pulumi.getter
def spec(self) -> Optional['outputs.AuditSinkSpec']:
"""
Spec defines the audit configuration spec
"""
return pulumi.get(self, "spec")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AuditSinkSpec(dict):
"""
AuditSinkSpec holds the spec for the audit sink
"""
def __init__(__self__, *,
policy: 'outputs.Policy',
webhook: 'outputs.Webhook'):
"""
AuditSinkSpec holds the spec for the audit sink
:param 'PolicyArgs' policy: Policy defines the policy for selecting which events should be sent to the webhook required
:param 'WebhookArgs' webhook: Webhook to send events required
"""
pulumi.set(__self__, "policy", policy)
pulumi.set(__self__, "webhook", webhook)
@property
@pulumi.getter
def policy(self) -> 'outputs.Policy':
"""
Policy defines the policy for selecting which events should be sent to the webhook required
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter
def webhook(self) -> 'outputs.Webhook':
"""
Webhook to send events required
"""
return pulumi.get(self, "webhook")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class Policy(dict):
"""
Policy defines the configuration of how audit events are logged
"""
def __init__(__self__, *,
level: str,
stages: Optional[Sequence[str]] = None):
"""
Policy defines the configuration of how audit events are logged
:param str level: The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required
:param Sequence[str] stages: Stages is a list of stages for which events are created.
"""
pulumi.set(__self__, "level", level)
if stages is not None:
pulumi.set(__self__, "stages", stages)
@property
@pulumi.getter
def level(self) -> str:
"""
The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required
"""
return pulumi.get(self, "level")
@property
@pulumi.getter
def stages(self) -> Optional[Sequence[str]]:
"""
Stages is a list of stages for which events are created.
"""
return pulumi.get(self, "stages")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ServiceReference(dict):
"""
ServiceReference holds a reference to Service.legacy.k8s.io
"""
def __init__(__self__, *,
name: str,
namespace: str,
path: Optional[str] = None,
port: Optional[int] = None):
"""
ServiceReference holds a reference to Service.legacy.k8s.io
:param str name: `name` is the name of the service. Required
:param str namespace: `namespace` is the namespace of the service. Required
:param str path: `path` is an optional URL path which will be sent in any request to this service.
:param int port: If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
if path is not None:
pulumi.set(__self__, "path", path)
if port is not None:
pulumi.set(__self__, "port", port)
@property
@pulumi.getter
def name(self) -> str:
"""
`name` is the name of the service. Required
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def namespace(self) -> str:
"""
`namespace` is the namespace of the service. Required
"""
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
`path` is an optional URL path which will be sent in any request to this service.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).
"""
return pulumi.get(self, "port")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class Webhook(dict):
"""
Webhook holds the configuration of the webhook
"""
def __init__(__self__, *,
client_config: 'outputs.WebhookClientConfig',
throttle: Optional['outputs.WebhookThrottleConfig'] = None):
"""
Webhook holds the configuration of the webhook
:param 'WebhookClientConfigArgs' client_config: ClientConfig holds the connection parameters for the webhook required
:param 'WebhookThrottleConfigArgs' throttle: Throttle holds the options for throttling the webhook
"""
pulumi.set(__self__, "client_config", client_config)
if throttle is not None:
pulumi.set(__self__, "throttle", throttle)
@property
@pulumi.getter(name="clientConfig")
def client_config(self) -> 'outputs.WebhookClientConfig':
"""
ClientConfig holds the connection parameters for the webhook required
"""
return pulumi.get(self, "client_config")
@property
@pulumi.getter
def throttle(self) -> Optional['outputs.WebhookThrottleConfig']:
"""
Throttle holds the options for throttling the webhook
"""
return pulumi.get(self, "throttle")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WebhookClientConfig(dict):
"""
WebhookClientConfig contains the information to make a connection with the webhook
"""
def __init__(__self__, *,
ca_bundle: Optional[str] = None,
service: Optional['outputs.ServiceReference'] = None,
url: Optional[str] = None):
"""
WebhookClientConfig contains the information to make a connection with the webhook
:param str ca_bundle: `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
:param 'ServiceReferenceArgs' service: `service` is a reference to the service for this webhook. Either `service` or `url` must be specified.
If the webhook is running within the cluster, then you should use `service`.
:param str url: `url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.
The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.
Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.
The scheme must be "https"; the URL must begin with "https://".
A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.
Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either.
"""
if ca_bundle is not None:
pulumi.set(__self__, "ca_bundle", ca_bundle)
if service is not None:
pulumi.set(__self__, "service", service)
if url is not None:
pulumi.set(__self__, "url", url)
@property
@pulumi.getter(name="caBundle")
def ca_bundle(self) -> Optional[str]:
"""
`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.
"""
return pulumi.get(self, "ca_bundle")
@property
@pulumi.getter
def service(self) -> Optional['outputs.ServiceReference']:
"""
`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.
If the webhook is running within the cluster, then you should use `service`.
"""
return pulumi.get(self, "service")
@property
@pulumi.getter
def url(self) -> Optional[str]:
"""
`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.
The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.
Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.
The scheme must be "https"; the URL must begin with "https://".
A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.
Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either.
"""
return pulumi.get(self, "url")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WebhookThrottleConfig(dict):
"""
WebhookThrottleConfig holds the configuration for throttling events
"""
def __init__(__self__, *,
burst: Optional[int] = None,
qps: Optional[int] = None):
"""
WebhookThrottleConfig holds the configuration for throttling events
:param int burst: ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS
:param int qps: ThrottleQPS maximum number of batches per second default 10 QPS
"""
if burst is not None:
pulumi.set(__self__, "burst", burst)
if qps is not None:
pulumi.set(__self__, "qps", qps)
@property
@pulumi.getter
def burst(self) -> Optional[int]:
"""
ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS
"""
return pulumi.get(self, "burst")
@property
@pulumi.getter
def qps(self) -> Optional[int]:
"""
ThrottleQPS maximum number of batches per second default 10 QPS
"""
return pulumi.get(self, "qps")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
|
[
"pulumi.get",
"pulumi.getter",
"pulumi.set"
] |
[((2084, 2116), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""apiVersion"""'}), "(name='apiVersion')\n", (2097, 2116), False, 'import pulumi\n'), ((8717, 8751), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""clientConfig"""'}), "(name='clientConfig')\n", (8730, 8751), False, 'import pulumi\n'), ((11791, 11821), 'pulumi.getter', 'pulumi.getter', ([], {'name': '"""caBundle"""'}), "(name='caBundle')\n", (11804, 11821), False, 'import pulumi\n'), ((2498, 2529), 'pulumi.get', 'pulumi.get', (['self', '"""api_version"""'], {}), "(self, 'api_version')\n", (2508, 2529), False, 'import pulumi\n'), ((2939, 2963), 'pulumi.get', 'pulumi.get', (['self', '"""kind"""'], {}), "(self, 'kind')\n", (2949, 2963), False, 'import pulumi\n'), ((3080, 3108), 'pulumi.get', 'pulumi.get', (['self', '"""metadata"""'], {}), "(self, 'metadata')\n", (3090, 3108), False, 'import pulumi\n'), ((3289, 3313), 'pulumi.get', 'pulumi.get', (['self', '"""spec"""'], {}), "(self, 'spec')\n", (3299, 3313), False, 'import pulumi\n'), ((3945, 3983), 'pulumi.set', 'pulumi.set', (['__self__', '"""policy"""', 'policy'], {}), "(__self__, 'policy', policy)\n", (3955, 3983), False, 'import pulumi\n'), ((3992, 4032), 'pulumi.set', 'pulumi.set', (['__self__', '"""webhook"""', 'webhook'], {}), "(__self__, 'webhook', webhook)\n", (4002, 4032), False, 'import pulumi\n'), ((4248, 4274), 'pulumi.get', 'pulumi.get', (['self', '"""policy"""'], {}), "(self, 'policy')\n", (4258, 4274), False, 'import pulumi\n'), ((4432, 4459), 'pulumi.get', 'pulumi.get', (['self', '"""webhook"""'], {}), "(self, 'webhook')\n", (4442, 4459), False, 'import pulumi\n'), ((5150, 5186), 'pulumi.set', 'pulumi.set', (['__self__', '"""level"""', 'level'], {}), "(__self__, 'level', level)\n", (5160, 5186), False, 'import pulumi\n'), ((5492, 5517), 'pulumi.get', 'pulumi.get', (['self', '"""level"""'], {}), "(self, 'level')\n", (5502, 5517), False, 'import pulumi\n'), ((5705, 5731), 'pulumi.get', 'pulumi.get', (['self', '"""stages"""'], {}), "(self, 'stages')\n", (5715, 5731), False, 'import pulumi\n'), ((6702, 6736), 'pulumi.set', 'pulumi.set', (['__self__', '"""name"""', 'name'], {}), "(__self__, 'name', name)\n", (6712, 6736), False, 'import pulumi\n'), ((6745, 6789), 'pulumi.set', 'pulumi.set', (['__self__', '"""namespace"""', 'namespace'], {}), "(__self__, 'namespace', namespace)\n", (6755, 6789), False, 'import pulumi\n'), ((7094, 7118), 'pulumi.get', 'pulumi.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (7104, 7118), False, 'import pulumi\n'), ((7286, 7315), 'pulumi.get', 'pulumi.get', (['self', '"""namespace"""'], {}), "(self, 'namespace')\n", (7296, 7315), False, 'import pulumi\n'), ((7516, 7540), 'pulumi.get', 'pulumi.get', (['self', '"""path"""'], {}), "(self, 'path')\n", (7526, 7540), False, 'import pulumi\n'), ((7821, 7845), 'pulumi.get', 'pulumi.get', (['self', '"""port"""'], {}), "(self, 'port')\n", (7831, 7845), False, 'import pulumi\n'), ((8556, 8608), 'pulumi.set', 'pulumi.set', (['__self__', '"""client_config"""', 'client_config'], {}), "(__self__, 'client_config', client_config)\n", (8566, 8608), False, 'import pulumi\n'), ((8931, 8964), 'pulumi.get', 'pulumi.get', (['self', '"""client_config"""'], {}), "(self, 'client_config')\n", (8941, 8964), False, 'import pulumi\n'), ((9169, 9197), 'pulumi.get', 'pulumi.get', (['self', '"""throttle"""'], {}), "(self, 'throttle')\n", (9179, 9197), False, 'import pulumi\n'), ((12076, 12105), 'pulumi.get', 'pulumi.get', (['self', '"""ca_bundle"""'], {}), "(self, 'ca_bundle')\n", (12086, 12105), False, 'import pulumi\n'), ((12439, 12466), 'pulumi.get', 'pulumi.get', (['self', '"""service"""'], {}), "(self, 'service')\n", (12449, 12466), False, 'import pulumi\n'), ((13743, 13766), 'pulumi.get', 'pulumi.get', (['self', '"""url"""'], {}), "(self, 'url')\n", (13753, 13766), False, 'import pulumi\n'), ((14797, 14822), 'pulumi.get', 'pulumi.get', (['self', '"""burst"""'], {}), "(self, 'burst')\n", (14807, 14822), False, 'import pulumi\n'), ((15004, 15027), 'pulumi.get', 'pulumi.get', (['self', '"""qps"""'], {}), "(self, 'qps')\n", (15014, 15027), False, 'import pulumi\n'), ((1744, 1816), 'pulumi.set', 'pulumi.set', (['__self__', '"""api_version"""', '"""auditregistration.k8s.io/v1alpha1"""'], {}), "(__self__, 'api_version', 'auditregistration.k8s.io/v1alpha1')\n", (1754, 1816), False, 'import pulumi\n'), ((1858, 1899), 'pulumi.set', 'pulumi.set', (['__self__', '"""kind"""', '"""AuditSink"""'], {}), "(__self__, 'kind', 'AuditSink')\n", (1868, 1899), False, 'import pulumi\n'), ((1945, 1987), 'pulumi.set', 'pulumi.set', (['__self__', '"""metadata"""', 'metadata'], {}), "(__self__, 'metadata', metadata)\n", (1955, 1987), False, 'import pulumi\n'), ((2029, 2063), 'pulumi.set', 'pulumi.set', (['__self__', '"""spec"""', 'spec'], {}), "(__self__, 'spec', spec)\n", (2039, 2063), False, 'import pulumi\n'), ((5230, 5268), 'pulumi.set', 'pulumi.set', (['__self__', '"""stages"""', 'stages'], {}), "(__self__, 'stages', stages)\n", (5240, 5268), False, 'import pulumi\n'), ((6831, 6865), 'pulumi.set', 'pulumi.set', (['__self__', '"""path"""', 'path'], {}), "(__self__, 'path', path)\n", (6841, 6865), False, 'import pulumi\n'), ((6907, 6941), 'pulumi.set', 'pulumi.set', (['__self__', '"""port"""', 'port'], {}), "(__self__, 'port', port)\n", (6917, 6941), False, 'import pulumi\n'), ((8654, 8696), 'pulumi.set', 'pulumi.set', (['__self__', '"""throttle"""', 'throttle'], {}), "(__self__, 'throttle', throttle)\n", (8664, 8696), False, 'import pulumi\n'), ((11568, 11612), 'pulumi.set', 'pulumi.set', (['__self__', '"""ca_bundle"""', 'ca_bundle'], {}), "(__self__, 'ca_bundle', ca_bundle)\n", (11578, 11612), False, 'import pulumi\n'), ((11657, 11697), 'pulumi.set', 'pulumi.set', (['__self__', '"""service"""', 'service'], {}), "(__self__, 'service', service)\n", (11667, 11697), False, 'import pulumi\n'), ((11738, 11770), 'pulumi.set', 'pulumi.set', (['__self__', '"""url"""', 'url'], {}), "(__self__, 'url', url)\n", (11748, 11770), False, 'import pulumi\n'), ((14483, 14519), 'pulumi.set', 'pulumi.set', (['__self__', '"""burst"""', 'burst'], {}), "(__self__, 'burst', burst)\n", (14493, 14519), False, 'import pulumi\n'), ((14560, 14592), 'pulumi.set', 'pulumi.set', (['__self__', '"""qps"""', 'qps'], {}), "(__self__, 'qps', qps)\n", (14570, 14592), False, 'import pulumi\n')]
|
# Simulate the movement of a rogue AP and the received RSSI values at the stationary
# APs based on the log-normal shadowing model.
# Results will be written to a file to be read by the server to calculate the distance to the rogue AP.
# Prx(d) = Prx(d0) - 10*n*log10(d/d0) + X(0, σ)
# The rogue AP moves at a constant speed = 1 m/sec.
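# Worked example with the parameters used below (Prx(d0) = -40 dBm, n = 3, d0 = 1 m):
# at d = 10 m the mean received power is -40 - 10*3*log10(10/1) = -70 dBm.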
from time import sleep
from Crypto.Random import random
import math
from numpy import random as ff
AP1 = (16.6, 16.6)
AP2 = (16.6, 33.3)
AP3 = (33.3, 16.6)
AP4 = (33.3, 33.3)
#Rogue_loc = (random.randrange(1, 49), random.randrange(1, 49)) #initial location of the rogue AP
def distance(a, b):
return round(math.sqrt((b[0] - a[0])**2 +(b[1] - a[1])**2), 2)
def calcRSSI(AP, sigma, Rogue_loc):
if(AP == 1):
d = distance(AP1, Rogue_loc)
elif(AP == 2):
d = distance(AP2, Rogue_loc)
elif(AP == 3):
d = distance(AP3, Rogue_loc)
elif(AP == 4):
d = distance(AP4, Rogue_loc)
else:
print('Hmmm, did someone edit my code?')
return 0
    if sigma == 0:
        return round(-40 - 10*3*math.log10(d/1), 2)
    else:
        return round(-40 - 10*3*math.log10(d/1) + ff.normal(0, sigma, 1)[0], 2)
def calcDistance(RSSI):
    return round(10**(-(RSSI + 40)/30), 2)
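# Sanity check (illustrative): with sigma = 0 and d = 10 m, calcRSSI yields
# -70.0 dBm, and calcDistance(-70.0) = 10**((70 - 40)/30) = 10.0 m recovers d.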
def exec(Rogue_loc, sigma):
f = open('simulation.txt', 'w')
direction = random.choice([0, 1, 2, 3]) #movement direction, 0=up, 1=right, 2=down, 3=left
step = 20 #change direction every x seconds
speed = 1 #m/s
'''stdev = input('please choose environment:\n 1:static\n2:semistatic\n3:somewhat dynamic\n4:highly dynamic\n')
if stdev == '1':
sigma = 0
elif stdev == '2':
sigma = 2
elif stdev =='3':
sigma = 4
elif stdev == '4':
sigma = 6'''
for i in range(0,300): #each second
for x in range(0, 10): #10 beacons/sec
            if direction == 0 and Rogue_loc[1] != 50: # moving up changes the y-coordinate; don't step past the top edge
Rogue_loc = (round(Rogue_loc[0],2), round(Rogue_loc[1]+speed/10,2)) #move up
elif direction == 1:
Rogue_loc = (round(Rogue_loc[0]+speed/10, 2), round(Rogue_loc[1], 2)) #move right
elif direction == 2:
Rogue_loc = (round(Rogue_loc[0], 2), round(Rogue_loc[1]-speed/10,2)) #move down
elif direction == 3:
Rogue_loc = (round(Rogue_loc[0]-speed/10, 2), round(Rogue_loc[1], 2)) #move left
if Rogue_loc[0] == 0 or Rogue_loc[0] == 50 or Rogue_loc[1] == 0 or Rogue_loc[1] == 50: #correct movement direction in case it goes out of 50*50 range
direction = (direction + 2) % 4
f.write(str(Rogue_loc[0])+' '+str(Rogue_loc[1])+' '+str(calcRSSI(1, sigma,Rogue_loc))+' '+str(calcRSSI(2, sigma,Rogue_loc))+' '+str(calcRSSI(3, sigma,Rogue_loc))+' '+ str(calcRSSI(4, sigma,Rogue_loc))+'\n')
'''
print('Distance from AP1 ', calcDistance(calcRSSI(distance(Rogue_loc, AP1))), 'real distance = ', distance(Rogue_loc, AP1))
print('Distance from AP2 ', calcDistance(calcRSSI(distance(Rogue_loc, AP2))), 'real distance = ', distance(Rogue_loc, AP2))
print('Distance from AP3 ', calcDistance(calcRSSI(distance(Rogue_loc, AP3))), 'real distance = ', distance(Rogue_loc, AP3))
print('Distance from AP4 ', calcDistance(calcRSSI(distance(Rogue_loc, AP4))), 'real distance = ', distance(Rogue_loc, AP4))
print(Rogue_loc)
print(direction)'''
if i % random.randrange(10, 25) == 0: #change direction at random intervals
direction = random.choice([0, 1, 2, 3])
f.close()
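# A minimal usage sketch (not part of the original script): start the rogue AP
# at a random point in the 50x50 m area and simulate a moderately dynamic
# environment (sigma = 4, matching option 3 of the commented-out prompt above).
if __name__ == '__main__':
    exec((random.randrange(1, 49), random.randrange(1, 49)), 4)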
|
[
"math.sqrt",
"Crypto.Random.random.randrange",
"Crypto.Random.random.choice",
"math.log10",
"numpy.random.normal"
] |
[((1323, 1350), 'Crypto.Random.random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (1336, 1350), False, 'from Crypto.Random import random\n'), ((628, 678), 'math.sqrt', 'math.sqrt', (['((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2)'], {}), '((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2)\n', (637, 678), False, 'import math\n'), ((3564, 3591), 'Crypto.Random.random.choice', 'random.choice', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (3577, 3591), False, 'from Crypto.Random import random\n'), ((3471, 3495), 'Crypto.Random.random.randrange', 'random.randrange', (['(10)', '(25)'], {}), '(10, 25)\n', (3487, 3495), False, 'from Crypto.Random import random\n'), ((1067, 1084), 'math.log10', 'math.log10', (['(d / 1)'], {}), '(d / 1)\n', (1077, 1084), False, 'import math\n'), ((1145, 1167), 'numpy.random.normal', 'ff.normal', (['(0)', 'sigma', '(1)'], {}), '(0, sigma, 1)\n', (1154, 1167), True, 'from numpy import random as ff\n'), ((1129, 1146), 'math.log10', 'math.log10', (['(d / 1)'], {}), '(d / 1)\n', (1139, 1146), False, 'import math\n')]
|
import networkx as nx
import pandas as pd
# file = open('/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/emb/karate_1.txt')
# i = 0
# for line in file:
# line = line.strip()
# line = line.split(' ')
# line = [float(i) for i in line]
# print(i , line)
# i += 1
df = pd.read_csv('/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/node_pair.csv')
df_subset = df[df['weight'] < 0.0001]
print(df_subset)
# create fully-connected weighted genomic graph
Graphtype = nx.Graph()
g = nx.from_pandas_edgelist(df, edge_attr='weight', create_using=Graphtype)
#g = nx.convert_node_labels_to_integers(g, first_label=0, ordering='default', label_attribute=None)
print(nx.get_edge_attributes(g,'weight'))
nx.write_edgelist(g, "/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_full.edgelist")
# create weight-truncated genomic graph
Graphtype = nx.Graph()
g_subset = nx.from_pandas_edgelist(df_subset, edge_attr='weight', create_using=Graphtype)
#g_subset = nx.convert_node_labels_to_integers(g_subset, first_label=0, ordering='default', label_attribute=None)
nx.write_edgelist(g_subset, "/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_subset.edgelist")
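# A minimal round-trip check (not in the original script): read the truncated
# edgelist back; the path mirrors the write_edgelist call above.
g_check = nx.read_edgelist("/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_subset.edgelist")
print(g_check.number_of_nodes(), g_check.number_of_edges())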
|
[
"pandas.read_csv",
"networkx.get_edge_attributes",
"networkx.Graph",
"networkx.from_pandas_edgelist",
"networkx.write_edgelist"
] |
[((297, 407), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/node_pair.csv"""'], {}), "(\n '/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/node_pair.csv'\n )\n", (308, 407), True, 'import pandas as pd\n'), ((515, 525), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (523, 525), True, 'import networkx as nx\n'), ((530, 601), 'networkx.from_pandas_edgelist', 'nx.from_pandas_edgelist', (['df'], {'edge_attr': '"""weight"""', 'create_using': 'Graphtype'}), "(df, edge_attr='weight', create_using=Graphtype)\n", (553, 601), True, 'import networkx as nx\n'), ((745, 871), 'networkx.write_edgelist', 'nx.write_edgelist', (['g', '"""/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_full.edgelist"""'], {}), "(g,\n '/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_full.edgelist'\n )\n", (762, 871), True, 'import networkx as nx\n'), ((917, 927), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (925, 927), True, 'import networkx as nx\n'), ((939, 1017), 'networkx.from_pandas_edgelist', 'nx.from_pandas_edgelist', (['df_subset'], {'edge_attr': '"""weight"""', 'create_using': 'Graphtype'}), "(df_subset, edge_attr='weight', create_using=Graphtype)\n", (962, 1017), True, 'import networkx as nx\n'), ((1133, 1268), 'networkx.write_edgelist', 'nx.write_edgelist', (['g_subset', '"""/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_subset.edgelist"""'], {}), "(g_subset,\n '/Users/aida/Dropbox/PhD/Internship/RegLab/COVID-Outbreak/node2vec/graph/genomic_subset.edgelist'\n )\n", (1150, 1268), True, 'import networkx as nx\n'), ((708, 743), 'networkx.get_edge_attributes', 'nx.get_edge_attributes', (['g', '"""weight"""'], {}), "(g, 'weight')\n", (730, 743), True, 'import networkx as nx\n')]
|
# Import modules
import re
from json import loads
from urllib.request import urlopen
from subprocess import check_output, DEVNULL, STDOUT
# MAC address regex
macRegex = re.compile('[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$')
# Query the default gateway (router) ip address; note the command's output is currently discarded
cmd = 'chcp 65001 && ipconfig | findstr /i \"Default Gateway\"'
check_output(cmd, shell=True, stderr=DEVNULL, stdin=DEVNULL)
# Get mac by local ip
def GetMacByIP():
a = check_output('arp -a', shell=True, stderr=DEVNULL, stdin=DEVNULL)
b = a.decode(encoding='cp866')
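    # note: find('') always returns 0, so the slice below spans the entire arp output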
c = b.find('')
d = b[c:].split(' ')
for b in d:
if macRegex.match(b):
return b.replace('-', ':')
# Locate by BSSID
def GetLocationByBSSID(BSSID):
try:
result = urlopen(f'http://api.mylnikov.org/geolocation/wifi?bssid={BSSID}').read().decode('utf8')
    except Exception:
return None
else:
result = loads(result)
return result['data']
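# A minimal usage sketch (not in the original script): take the first MAC found
# in the ARP table and geolocate that BSSID via the Mylnikov API.
if __name__ == '__main__':
    bssid = GetMacByIP()
    if bssid:
        print(GetLocationByBSSID(bssid))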
|
[
"subprocess.check_output",
"urllib.request.urlopen",
"json.loads",
"re.compile"
] |
[((173, 236), 're.compile', 're.compile', (['"""[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$"""'], {}), "('[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\\\1[0-9a-f]{2}){4}$')\n", (183, 236), False, 'import re\n'), ((328, 388), 'subprocess.check_output', 'check_output', (['cmd'], {'shell': '(True)', 'stderr': 'DEVNULL', 'stdin': 'DEVNULL'}), '(cmd, shell=True, stderr=DEVNULL, stdin=DEVNULL)\n', (340, 388), False, 'from subprocess import check_output, DEVNULL, STDOUT\n'), ((437, 502), 'subprocess.check_output', 'check_output', (['"""arp -a"""'], {'shell': '(True)', 'stderr': 'DEVNULL', 'stdin': 'DEVNULL'}), "('arp -a', shell=True, stderr=DEVNULL, stdin=DEVNULL)\n", (449, 502), False, 'from subprocess import check_output, DEVNULL, STDOUT\n'), ((839, 852), 'json.loads', 'loads', (['result'], {}), '(result)\n', (844, 852), False, 'from json import loads\n'), ((709, 775), 'urllib.request.urlopen', 'urlopen', (['f"""http://api.mylnikov.org/geolocation/wifi?bssid={BSSID}"""'], {}), "(f'http://api.mylnikov.org/geolocation/wifi?bssid={BSSID}')\n", (716, 775), False, 'from urllib.request import urlopen\n')]
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2020 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
"""Helpers for commandlets for KWS(-1)."""
from __future__ import absolute_import, division, print_function
from nicos.guisupport import typedvalue
from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, \
QObject, QSpinBox, QWidget, pyqtSignal
from nicos.guisupport.utils import DoubleValidator
from nicos.utils import num_sort
class MeasElement(QObject):
"""Represents one setting for a measurement that can be manipulated."""
LABEL = ''
ORDER = 1
changed = pyqtSignal(object)
def __init__(self, eltype, client, value=None, extra=None):
"""Initialize widget contents, if necessary."""
QObject.__init__(self)
self.eltype = eltype
self.value = value
self.extra = extra
self._widget = None
self.clientUpdate(client)
def getLabel(self):
"""Return label for the element."""
if self.LABEL:
return self.LABEL
return self.eltype.capitalize()
def clientUpdate(self, client):
"""Update internal info from daemon."""
def createWidget(self, parent, client):
"""Create and return a Qt widget for editing this element."""
def destroyWidget(self):
"""Destroy the currently created widget."""
if self._widget:
self._widget.deleteLater()
self._widget = None
def getValue(self):
"""Return currently selected value."""
return self.value
def getDispValue(self):
"""Return a form of the value to be displayed."""
return str(self.getValue())
def otherChanged(self, eltype, value):
"""Hook to be called when a sibling element changed."""
class ChoiceElement(MeasElement):
"""Base for elements that allow an arbitrary choice."""
CACHE_KEY = ''
SORT_KEY = lambda self, x: None # keep previous ordering
VALUES = []
def createWidget(self, parent, client):
if self.CACHE_KEY:
values = client.getDeviceParam(*self.CACHE_KEY.split('/'))
values = sorted(values or [], key=self.SORT_KEY)
else:
values = self.VALUES
self._values = values
self._widget = QComboBox(parent)
self._widget.addItems(self._values)
if self.value is not None and self.value in self._values:
self._widget.setCurrentIndex(self._values.index(self.value))
elif self.value is None and self._values:
self.value = self._values[0]
self._widget.currentIndexChanged.connect(self._updateValue)
return self._widget
def _updateValue(self, index):
self.value = self._values[index]
self.changed.emit(self.value)
class CheckElement(MeasElement):
"""Base for elements that allow yes/no choice."""
def createWidget(self, parent, client):
self._widget = QCheckBox(parent)
if self.value is None:
self.value = False
self._widget.setChecked(self.value)
self._widget.toggled.connect(self._updateValue)
return self._widget
def _updateValue(self, checked):
self.value = checked
self.changed.emit(self.value)
def getDispValue(self):
return 'yes' if self.value else 'no'
class FloatElement(MeasElement):
"""Base for elements that are floating point numbers."""
def createWidget(self, parent, client):
if self.value is None:
self.value = 10.0
self._widget = QLineEdit(parent)
self._widget.setValidator(DoubleValidator(parent))
self._widget.setText('%g' % self.value)
self._widget.textChanged.connect(self._updateValue)
return self._widget
def _updateValue(self, text):
self.value = float(text.replace(',', '.'))
self.changed.emit(self.value)
class Detector(MeasElement):
"""Element for selecting detector distance, depending on selector."""
CACHE_KEY = 'detector/presets'
SORT_KEY = lambda self, x: num_sort(x)
LABEL = 'Detector'
_allvalues = None
def clientUpdate(self, client):
self._allvalues = client.getDeviceParam(*self.CACHE_KEY.split('/'))
self._values = []
def createWidget(self, parent, client):
self.clientUpdate(client)
self._widget = QComboBox(parent)
self._updateWidget()
self._widget.currentIndexChanged.connect(self._updateValue)
return self._widget
def _updateWidget(self):
self._widget.clear()
self._widget.addItems(self._values)
if self.value in self._values:
self._widget.setCurrentIndex(self._values.index(self.value))
def otherChanged(self, eltype, value):
if eltype == 'selector' and self._allvalues is not None:
self._values = sorted(self._allvalues[value], key=self.SORT_KEY)
if self.value not in self._values:
if self._values:
self.value = self._values[0]
else:
self.value = None
if self._widget is not None:
self._updateWidget()
def _updateValue(self, index):
self.value = self._values[index]
self.changed.emit(self.value)
class Polarizer(ChoiceElement):
CACHE_KEY = 'polarizer/values'
LABEL = 'Polarizer'
class Collimation(ChoiceElement):
CACHE_KEY = 'collimation/mapping'
SORT_KEY = lambda self, x: num_sort(x)
LABEL = 'Collimation'
class Mode(ChoiceElement):
LABEL = 'Mode'
VALUES = ['TRANS', 'SANS']
class MeasTime(MeasElement):
"""Element for selecting measurement time in different time units."""
LABEL = 'Time'
def createWidget(self, parent, client):
if self.value is None:
self.value = 30 * 60
self._widget = QWidget(parent)
layout = QHBoxLayout()
self._widget.number = QSpinBox(self._widget)
self._widget.number.setValue(30)
self._widget.number.setMinimum(1)
self._widget.number.setMaximum(10000)
self._widget.unit = QComboBox(self._widget)
self._widget.unit.addItems(['sec', 'min', 'hr'])
self._widget.unit.setCurrentIndex(1)
layout.addWidget(self._widget.number)
layout.addWidget(self._widget.unit)
layout.setContentsMargins(0, 0, 0, 0)
self._widget.setLayout(layout)
self._widget.number.valueChanged.connect(self._updateValue)
self._widget.unit.currentIndexChanged.connect(self._updateValue)
self._widget.setMinimumWidth(120)
if self.value is not None:
if self.value % 3600 == 0:
self._widget.number.setValue(self.value // 3600)
self._widget.unit.setCurrentIndex(2)
elif self.value % 60 == 0:
self._widget.number.setValue(self.value // 60)
self._widget.unit.setCurrentIndex(1)
else:
self._widget.number.setValue(self.value)
self._widget.unit.setCurrentIndex(0)
return self._widget
def _updateValue(self, *args):
unit = self._widget.unit.currentIndex()
number = self._widget.number.value()
if unit == 0:
self.value = number
elif unit == 1:
self.value = number * 60
else:
self.value = number * 3600
self.changed.emit(self.value)
def getDispValue(self):
# TODO: better display here
if self.value % 3600 == 0:
return '%d hr' % (self.value // 3600)
elif self.value % 60 == 0:
return '%d min' % (self.value // 60)
else:
return '%d sec' % self.value
class Sample(ChoiceElement):
CACHE_KEY = 'exp/samples'
LABEL = 'Sample'
ORDER = 0
class Device(MeasElement):
"""Element to select the value for a device.
eltype is set to the device name.
"""
ORDER = 2
def getLabel(self):
return self.eltype
def clientUpdate(self, client):
self._valuetype = client.getDeviceValuetype(self.eltype)
if self.value is None:
self.value = self._valuetype()
def createWidget(self, parent, client):
self._widget = typedvalue.create(parent, self._valuetype, self.value,
allow_enter=False)
self._widget.valueModified.connect(self._updateValue)
return self._widget
def _updateValue(self):
try:
self.value = self._widget.getValue()
except Exception:
pass
else:
self.changed.emit(self.value)
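# A minimal sketch (not part of the original module): a new setting is added by
# subclassing one of the element bases above. "Lenses" and its cache key are
# hypothetical illustrations, not real KWS devices.
class Lenses(ChoiceElement):
    CACHE_KEY = 'lenses/values'  # hypothetical device/parameter pair
    LABEL = 'Lenses'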
|
[
"nicos.guisupport.qt.pyqtSignal",
"nicos.guisupport.typedvalue.create",
"nicos.guisupport.qt.QWidget",
"nicos.guisupport.utils.DoubleValidator",
"nicos.guisupport.qt.QLineEdit",
"nicos.guisupport.qt.QCheckBox",
"nicos.guisupport.qt.QHBoxLayout",
"nicos.guisupport.qt.QSpinBox",
"nicos.guisupport.qt.QObject.__init__",
"nicos.guisupport.qt.QComboBox",
"nicos.utils.num_sort"
] |
[((1565, 1583), 'nicos.guisupport.qt.pyqtSignal', 'pyqtSignal', (['object'], {}), '(object)\n', (1575, 1583), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((1713, 1735), 'nicos.guisupport.qt.QObject.__init__', 'QObject.__init__', (['self'], {}), '(self)\n', (1729, 1735), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((3248, 3265), 'nicos.guisupport.qt.QComboBox', 'QComboBox', (['parent'], {}), '(parent)\n', (3257, 3265), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((3908, 3925), 'nicos.guisupport.qt.QCheckBox', 'QCheckBox', (['parent'], {}), '(parent)\n', (3917, 3925), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((4520, 4537), 'nicos.guisupport.qt.QLineEdit', 'QLineEdit', (['parent'], {}), '(parent)\n', (4529, 4537), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((5029, 5040), 'nicos.utils.num_sort', 'num_sort', (['x'], {}), '(x)\n', (5037, 5040), False, 'from nicos.utils import num_sort\n'), ((5328, 5345), 'nicos.guisupport.qt.QComboBox', 'QComboBox', (['parent'], {}), '(parent)\n', (5337, 5345), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((6452, 6463), 'nicos.utils.num_sort', 'num_sort', (['x'], {}), '(x)\n', (6460, 6463), False, 'from nicos.utils import num_sort\n'), ((6826, 6841), 'nicos.guisupport.qt.QWidget', 'QWidget', (['parent'], {}), '(parent)\n', (6833, 6841), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((6859, 6872), 'nicos.guisupport.qt.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (6870, 6872), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((6903, 6925), 'nicos.guisupport.qt.QSpinBox', 'QSpinBox', (['self._widget'], {}), '(self._widget)\n', (6911, 6925), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((7083, 7106), 'nicos.guisupport.qt.QComboBox', 'QComboBox', (['self._widget'], {}), '(self._widget)\n', (7092, 7106), False, 'from nicos.guisupport.qt import QCheckBox, QComboBox, QHBoxLayout, QLineEdit, QObject, QSpinBox, QWidget, pyqtSignal\n'), ((9226, 9299), 'nicos.guisupport.typedvalue.create', 'typedvalue.create', (['parent', 'self._valuetype', 'self.value'], {'allow_enter': '(False)'}), '(parent, self._valuetype, self.value, allow_enter=False)\n', (9243, 9299), False, 'from nicos.guisupport import typedvalue\n'), ((4572, 4595), 'nicos.guisupport.utils.DoubleValidator', 'DoubleValidator', (['parent'], {}), '(parent)\n', (4587, 4595), False, 'from nicos.guisupport.utils import DoubleValidator\n')]
|
#!/usr/bin/python
# encoding: utf-8
import random
import os
import torch
from PIL import Image
import numpy as np
from utils import *
import cv2
def scale_image_channel(im, c, v):
cs = list(im.split())
cs[c] = cs[c].point(lambda i: i * v)
out = Image.merge(im.mode, tuple(cs))
return out
def distort_image(im, hue, sat, val):
im = im.convert('HSV')
cs = list(im.split())
cs[1] = cs[1].point(lambda i: i * sat)
cs[2] = cs[2].point(lambda i: i * val)
def change_hue(x):
x += hue*255
if x > 255:
x -= 255
if x < 0:
x += 255
return x
cs[0] = cs[0].point(change_hue)
im = Image.merge(im.mode, tuple(cs))
im = im.convert('RGB')
#constrain_image(im)
return im
def rand_scale(s):
scale = random.uniform(1, s)
    if random.randint(1, 10000) % 2:
        return scale
    return 1./scale
def random_distort_image(im, dhue, dsat, dexp):
res = distort_image(im, dhue, dsat, dexp)
return res
def data_augmentation(clip, shape, jitter, hue, saturation, exposure):
# Initialize Random Variables
oh = clip[0].height
ow = clip[0].width
dw =int(ow*jitter)
dh =int(oh*jitter)
pleft = random.randint(-dw, dw)
pright = random.randint(-dw, dw)
ptop = random.randint(-dh, dh)
pbot = random.randint(-dh, dh)
swidth = ow - pleft - pright
sheight = oh - ptop - pbot
sx = float(swidth) / ow
sy = float(sheight) / oh
dx = (float(pleft)/ow)/sx
dy = (float(ptop) /oh)/sy
flip = random.randint(1,10000)%2
dhue = random.uniform(-hue, hue)
dsat = rand_scale(saturation)
dexp = rand_scale(exposure)
# Augment
cropped = [img.crop((pleft, ptop, pleft + swidth - 1, ptop + sheight - 1)) for img in clip]
sized = [img.resize(shape) for img in cropped]
if flip:
sized = [img.transpose(Image.FLIP_LEFT_RIGHT) for img in sized]
clip = [random_distort_image(img, dhue, dsat, dexp) for img in sized]
return clip, flip, dx, dy, sx, sy
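# A minimal smoke test (not part of the original pipeline): run the clip
# augmentation on three synthetic 320x240 RGB frames; the target shape and the
# jitter / hue / saturation / exposure values below are illustrative only.
def _augmentation_smoke_test():
    clip = [Image.new('RGB', (320, 240)) for _ in range(3)]
    clip, flip, dx, dy, sx, sy = data_augmentation(clip, (224, 224), 0.2, 0.1, 1.5, 1.5)
    assert all(img.size == (224, 224) for img in clip)
    return flip, dx, dy, sx, sy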
# this function obtains the new labels after data augmentation
def fill_truth_detection(labpath, w, h, flip, dx, dy, sx, sy):
max_boxes = 50
label = np.zeros((max_boxes,5))
if os.path.getsize(labpath):
bs = np.loadtxt(labpath)
if bs is None:
return label
bs = np.reshape(bs, (-1, 5))
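        # convert corner boxes (class, x1, y1, x2, y2) in the native 320x240
        # frame to normalized (class, cx, cy, w, h); class ids are 1-based on disk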
for i in range(bs.shape[0]):
cx = (bs[i][1] + bs[i][3]) / (2 * 320)
cy = (bs[i][2] + bs[i][4]) / (2 * 240)
imgw = (bs[i][3] - bs[i][1]) / 320
imgh = (bs[i][4] - bs[i][2]) / 240
bs[i][0] = bs[i][0] - 1
bs[i][1] = cx
bs[i][2] = cy
bs[i][3] = imgw
bs[i][4] = imgh
cc = 0
for i in range(bs.shape[0]):
x1 = bs[i][1] - bs[i][3]/2
y1 = bs[i][2] - bs[i][4]/2
x2 = bs[i][1] + bs[i][3]/2
y2 = bs[i][2] + bs[i][4]/2
x1 = min(0.999, max(0, x1 * sx - dx))
y1 = min(0.999, max(0, y1 * sy - dy))
x2 = min(0.999, max(0, x2 * sx - dx))
y2 = min(0.999, max(0, y2 * sy - dy))
bs[i][1] = (x1 + x2)/2
bs[i][2] = (y1 + y2)/2
bs[i][3] = (x2 - x1)
bs[i][4] = (y2 - y1)
if flip:
bs[i][1] = 0.999 - bs[i][1]
if bs[i][3] < 0.001 or bs[i][4] < 0.001:
continue
label[cc] = bs[i]
cc += 1
if cc >= 50:
break
label = np.reshape(label, (-1))
return label
def load_data_detection(base_path, imgpath, train, train_dur, shape, dataset_use='ucf101-24', jitter=0.2, hue=0.1, saturation=1.5, exposure=1.5):
# clip loading and data augmentation
# if dataset_use == 'ucf101-24':
# base_path = "/usr/home/sut/datasets/ucf24"
# else:
# base_path = "/usr/home/sut/Tim-Documents/jhmdb/data/jhmdb"
im_split = imgpath.split('/')
num_parts = len(im_split)
im_ind = int(im_split[num_parts-1][0:5])
labpath = os.path.join(base_path, 'labels', im_split[0], im_split[1] ,'{:05d}.txt'.format(im_ind))
img_folder = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1])
if dataset_use == 'ucf101-24':
max_num = len(os.listdir(img_folder))
else:
max_num = len(os.listdir(img_folder)) - 1
clip = []
### We change downsampling rate throughout training as a ###
### temporal augmentation, which brings around 1-2 frame ###
### mAP. During test time it is set to 1. ###
d = 1
if train:
d = random.randint(1, 2)
for i in reversed(range(train_dur)):
# make it as a loop
i_temp = im_ind - i * d
while i_temp < 1:
i_temp = max_num + i_temp
while i_temp > max_num:
i_temp = i_temp - max_num
if dataset_use == 'ucf101-24':
path_tmp = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1] ,'{:05d}.jpg'.format(i_temp))
else:
path_tmp = os.path.join(base_path, 'rgb-images', im_split[0], im_split[1] ,'{:05d}.png'.format(i_temp))
clip.append(Image.open(path_tmp).convert('RGB'))
if train: # Apply augmentation
clip,flip,dx,dy,sx,sy = data_augmentation(clip, shape, jitter, hue, saturation, exposure)
label = fill_truth_detection(labpath, clip[0].width, clip[0].height, flip, dx, dy, 1./sx, 1./sy)
label = torch.from_numpy(label)
else: # No augmentation
label = torch.zeros(50*5)
try:
tmp = torch.from_numpy(read_truths_args(labpath, 8.0/clip[0].width).astype('float32'))
except Exception:
tmp = torch.zeros(1,5)
tmp = tmp.view(-1)
tsz = tmp.numel()
if tsz > 50*5:
label = tmp[0:50*5]
elif tsz > 0:
label[0:tsz] = tmp
if train:
return clip, label
else:
return im_split[0] + '_' +im_split[1] + '_' + im_split[2], clip, label
def load_data_detection_test(root, imgpath, train_dur, num_samples):
clip,label = get_clip(root, imgpath, train_dur, num_samples)
return clip, label
def get_clip(root, imgpath, train_dur, num_samples):
im_split = imgpath.split('/')
num_parts = len(im_split)
im_ind = int(im_split[num_parts - 1][0:5])
# for UCF101 dataset
base_path = "/usr/home/sut/datasets/ucf24"
labpath = os.path.join(base_path, 'labels', im_split[6], im_split[7], '{:05d}.txt'.format(im_ind))
img_folder = os.path.join(base_path, 'rgb-images', im_split[6], im_split[7])
# for arbitrary videos
max_num = len(os.listdir(img_folder))
clip = []
for i in reversed(range(train_dur)):
# the clip is created with the trained sample(image) being placed as the last image and 7 adjacent images before it
i_temp = im_ind - i
if i_temp < 1:
i_temp = 1
if i_temp > max_num:
i_temp = max_num
path_tmp = os.path.join(base_path, 'rgb-images', im_split[6], im_split[7] ,'{:05d}.jpg'.format(i_temp))
clip.append(Image.open(path_tmp).convert('RGB'))
label = torch.zeros(50 * 5)
tmp = torch.zeros(1, 5)
tmp = tmp.view(-1)
tsz = tmp.numel()
if tsz > 50 * 5:
label = tmp[0:50 * 5]
elif tsz > 0:
label[0:tsz] = tmp
return clip, label
|
[
"random.randint",
"random.uniform",
"os.path.getsize",
"numpy.zeros",
"PIL.Image.open",
"numpy.reshape",
"numpy.loadtxt",
"torch.zeros",
"os.path.join",
"os.listdir",
"torch.from_numpy"
] |
[((807, 827), 'random.uniform', 'random.uniform', (['(1)', 's'], {}), '(1, s)\n', (821, 827), False, 'import random\n'), ((1240, 1263), 'random.randint', 'random.randint', (['(-dw)', 'dw'], {}), '(-dw, dw)\n', (1254, 1263), False, 'import random\n'), ((1277, 1300), 'random.randint', 'random.randint', (['(-dw)', 'dw'], {}), '(-dw, dw)\n', (1291, 1300), False, 'import random\n'), ((1314, 1337), 'random.randint', 'random.randint', (['(-dh)', 'dh'], {}), '(-dh, dh)\n', (1328, 1337), False, 'import random\n'), ((1351, 1374), 'random.randint', 'random.randint', (['(-dh)', 'dh'], {}), '(-dh, dh)\n', (1365, 1374), False, 'import random\n'), ((1615, 1640), 'random.uniform', 'random.uniform', (['(-hue)', 'hue'], {}), '(-hue, hue)\n', (1629, 1640), False, 'import random\n'), ((2243, 2267), 'numpy.zeros', 'np.zeros', (['(max_boxes, 5)'], {}), '((max_boxes, 5))\n', (2251, 2267), True, 'import numpy as np\n'), ((2274, 2298), 'os.path.getsize', 'os.path.getsize', (['labpath'], {}), '(labpath)\n', (2289, 2298), False, 'import os\n'), ((3638, 3659), 'numpy.reshape', 'np.reshape', (['label', '(-1)'], {}), '(label, -1)\n', (3648, 3659), True, 'import numpy as np\n'), ((4269, 4332), 'os.path.join', 'os.path.join', (['base_path', '"""rgb-images"""', 'im_split[0]', 'im_split[1]'], {}), "(base_path, 'rgb-images', im_split[0], im_split[1])\n", (4281, 4332), False, 'import os\n'), ((6651, 6714), 'os.path.join', 'os.path.join', (['base_path', '"""rgb-images"""', 'im_split[6]', 'im_split[7]'], {}), "(base_path, 'rgb-images', im_split[6], im_split[7])\n", (6663, 6714), False, 'import os\n'), ((7289, 7308), 'torch.zeros', 'torch.zeros', (['(50 * 5)'], {}), '(50 * 5)\n', (7300, 7308), False, 'import torch\n'), ((7319, 7336), 'torch.zeros', 'torch.zeros', (['(1)', '(5)'], {}), '(1, 5)\n', (7330, 7336), False, 'import torch\n'), ((835, 859), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (849, 859), False, 'import random\n'), ((1577, 1601), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (1591, 1601), False, 'import random\n'), ((2313, 2332), 'numpy.loadtxt', 'np.loadtxt', (['labpath'], {}), '(labpath)\n', (2323, 2332), True, 'import numpy as np\n'), ((2394, 2417), 'numpy.reshape', 'np.reshape', (['bs', '(-1, 5)'], {}), '(bs, (-1, 5))\n', (2404, 2417), True, 'import numpy as np\n'), ((4722, 4742), 'random.randint', 'random.randint', (['(1)', '(2)'], {}), '(1, 2)\n', (4736, 4742), False, 'import random\n'), ((5578, 5601), 'torch.from_numpy', 'torch.from_numpy', (['label'], {}), '(label)\n', (5594, 5601), False, 'import torch\n'), ((5647, 5666), 'torch.zeros', 'torch.zeros', (['(50 * 5)'], {}), '(50 * 5)\n', (5658, 5666), False, 'import torch\n'), ((6761, 6783), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (6771, 6783), False, 'import os\n'), ((4390, 4412), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (4400, 4412), False, 'import os\n'), ((4446, 4468), 'os.listdir', 'os.listdir', (['img_folder'], {}), '(img_folder)\n', (4456, 4468), False, 'import os\n'), ((5821, 5838), 'torch.zeros', 'torch.zeros', (['(1)', '(5)'], {}), '(1, 5)\n', (5832, 5838), False, 'import torch\n'), ((5286, 5306), 'PIL.Image.open', 'Image.open', (['path_tmp'], {}), '(path_tmp)\n', (5296, 5306), False, 'from PIL import Image\n'), ((7239, 7259), 'PIL.Image.open', 'Image.open', (['path_tmp'], {}), '(path_tmp)\n', (7249, 7259), False, 'from PIL import Image\n')]
|
import os
import labelbox2pascal as lb2pa
class TestFromJSON():
def results_output(self):
TEST_OUTPUT_DIR = 'test-results'
if not os.path.isdir(TEST_OUTPUT_DIR):
os.makedirs(TEST_OUTPUT_DIR)
return TEST_OUTPUT_DIR
def test_wkt_1(self):
lb2pa.from_json('test-fixtures/labelbox_1.json', self.results_output(),
self.results_output())
def test_wkt_2(self):
lb2pa.from_json('test-fixtures/labelbox_2.json', self.results_output(),
self.results_output())
def test_xy_1(self):
lb2pa.from_json('test-fixtures/labelbox_xy_1.json',
self.results_output(), self.results_output(),
label_format='XY')
    def test_bad_label_format(self):
        try:
            lb2pa.from_json('test-fixtures/labelbox_xy_1.json',
                            self.results_output(), self.results_output(),
                            label_format='bad format')
        except lb2pa.UnknownFormatError:
            pass
        else:
            raise AssertionError('expected UnknownFormatError for an unknown label_format')
|
[
"os.path.isdir",
"os.makedirs"
] |
[((152, 182), 'os.path.isdir', 'os.path.isdir', (['TEST_OUTPUT_DIR'], {}), '(TEST_OUTPUT_DIR)\n', (165, 182), False, 'import os\n'), ((196, 224), 'os.makedirs', 'os.makedirs', (['TEST_OUTPUT_DIR'], {}), '(TEST_OUTPUT_DIR)\n', (207, 224), False, 'import os\n')]
|
import logging
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
from torch.optim.lr_scheduler import ReduceLROnPlateau
from . import utils
from tqdm import tqdm
from unet3d.utils import unpad_eval
class UNet3DTrainer:
"""3D UNet trainer.
Args:
model (Unet3D): UNet 3D model to be trained
optimizer (nn.optim.Optimizer): optimizer used for training
lr_scheduler (torch.optim.lr_scheduler._LRScheduler): learning rate scheduler
WARN: bear in mind that lr_scheduler.step() is invoked after every validation step
(i.e. validate_after_iters) not after every epoch. So e.g. if one uses StepLR with step_size=30
the learning rate will be adjusted after every 30 * validate_after_iters iterations.
loss_criterion (callable): loss function
eval_criterion (callable): used to compute training/validation metric (such as Dice, IoU, AP or Rand score)
saving the best checkpoint is based on the result of this function on the validation set
device (torch.device): device to train on
loaders (dict): 'train' and 'val' loaders
checkpoint_dir (string): dir for saving checkpoints and tensorboard logs
max_num_epochs (int): maximum number of epochs
max_num_iterations (int): maximum number of iterations
validate_after_iters (int): validate after that many iterations
log_after_iters (int): number of iterations before logging to tensorboard
validate_iters (int): number of validation iterations, if None validate
on the whole validation set
eval_score_higher_is_better (bool): if True higher eval scores are considered better
best_eval_score (float): best validation score so far (higher better)
num_iterations (int): useful when loading the model from the checkpoint
num_epoch (int): useful when loading the model from the checkpoint
"""
def __init__(self, model, optimizer, lr_scheduler, loss_criterion,
eval_criterion, device, loaders, checkpoint_dir,
max_num_epochs=100, max_num_iterations=1e5,
validate_after_iters=100, log_after_iters=100,
validate_iters=None, num_iterations=1, num_epoch=0,
eval_score_higher_is_better=True, best_eval_score=None,
logger=None):
if logger is None:
self.logger = utils.get_logger('UNet3DTrainer', level=logging.DEBUG)
else:
self.logger = logger
self.logger.info(model)
self.model = model
self.optimizer = optimizer
self.scheduler = lr_scheduler
self.loss_criterion = loss_criterion
self.eval_criterion = eval_criterion
self.device = device
self.loaders = loaders
self.checkpoint_dir = checkpoint_dir
self.max_num_epochs = max_num_epochs
self.max_num_iterations = max_num_iterations
self.validate_after_iters = validate_after_iters
self.log_after_iters = log_after_iters
self.validate_iters = validate_iters
self.eval_score_higher_is_better = eval_score_higher_is_better
        self.logger.info(f'eval_score_higher_is_better: {eval_score_higher_is_better}')
if best_eval_score is not None:
self.best_eval_score = best_eval_score
else:
# initialize the best_eval_score
if eval_score_higher_is_better:
self.best_eval_score = float('-inf')
else:
self.best_eval_score = float('+inf')
self.writer = SummaryWriter(logdir=os.path.join(checkpoint_dir, 'logs'))
self.num_iterations = num_iterations
self.num_epoch = num_epoch
@classmethod
def from_checkpoint(cls, checkpoint_path, model, optimizer, lr_scheduler, loss_criterion, eval_criterion, loaders,
logger=None):
logger.info(f"Loading checkpoint '{checkpoint_path}'...")
state = utils.load_checkpoint(checkpoint_path, model, optimizer)
logger.info(
f"Checkpoint loaded. Epoch: {state['epoch']}. Best val score: {state['best_eval_score']}. Num_iterations: {state['num_iterations']}")
checkpoint_dir = os.path.split(checkpoint_path)[0]
return cls(model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
torch.device(state['device']),
loaders, checkpoint_dir,
eval_score_higher_is_better=state['eval_score_higher_is_better'],
best_eval_score=state['best_eval_score'],
num_iterations=state['num_iterations'],
num_epoch=state['epoch'],
max_num_epochs=state['max_num_epochs'],
max_num_iterations=state['max_num_iterations'],
validate_after_iters=state['validate_after_iters'],
log_after_iters=state['log_after_iters'],
validate_iters=state['validate_iters'],
logger=logger)
@classmethod
def from_pretrained(cls, pre_trained, model, optimizer, lr_scheduler, loss_criterion, eval_criterion,
device, loaders,
max_num_epochs=100, max_num_iterations=1e5,
validate_after_iters=100, log_after_iters=100,
validate_iters=None, num_iterations=1, num_epoch=0,
eval_score_higher_is_better=True, best_eval_score=None,
logger=None):
logger.info(f"Logging pre-trained model from '{pre_trained}'...")
utils.load_checkpoint(pre_trained, model, None)
checkpoint_dir = os.path.split(pre_trained)[0]
return cls(model, optimizer, lr_scheduler,
loss_criterion, eval_criterion,
device, loaders, checkpoint_dir,
eval_score_higher_is_better=eval_score_higher_is_better,
best_eval_score=best_eval_score,
num_iterations=num_iterations,
num_epoch=num_epoch,
max_num_epochs=max_num_epochs,
max_num_iterations=max_num_iterations,
validate_after_iters=validate_after_iters,
log_after_iters=log_after_iters,
validate_iters=validate_iters,
logger=logger)
def fit(self):
for epoch in range(self.num_epoch, self.max_num_epochs):
# train for one epoch
self.logger.info('Start Epoch: {}, lr = {}'.format(epoch, self.optimizer.param_groups[0]['lr']))
should_terminate = self.train(self.loaders['train'])
if epoch % 1 == 0:
# evaluate on validation set
eval_score = self.validate(self.loaders['val'])
# adjust learning rate if necessary
if isinstance(self.scheduler, ReduceLROnPlateau):
self.scheduler.step(eval_score)
elif not isinstance(self.scheduler,torch.optim.lr_scheduler.CyclicLR):
self.scheduler.step()
# log current learning rate in tensorboard
self._log_lr()
# remember best validation metric
is_best = self._is_best_eval_score(eval_score)
# save checkpoint
self._save_checkpoint(is_best)
if should_terminate:
break
self.num_epoch += 1
def train(self, train_loader):
"""Trains the model for 1 epoch.
Args:
train_loader (torch.utils.data.DataLoader): training data loader
Returns:
True if the training should be terminated immediately, False otherwise
"""
train_losses = utils.RunningAverage()
# train_eval_scores = utils.RunningAverage()
# sets the model in training mode
self.model.train()
for i, t in enumerate(tqdm(train_loader)):
# self.logger.info(
# f'Training iteration {self.num_iterations}. Batch {i}. Epoch [{self.num_epoch}/{self.max_num_epochs - 1}]')
#input, target, weight, GP = self._split_training_batch(t)
input, target, weight, slices, zyx, GP = self._split_training_batch(t)
#output, loss = self._forward_pass(input, target, weight, GP)
output, loss = self._forward_pass(input, target, zyx, weight = weight,slices = slices,GP = GP)
train_losses.update(loss.item(), self._batch_size(input))
# compute gradients and update parameters
self.optimizer.zero_grad()
loss.backward()
if isinstance(self.scheduler,torch.optim.lr_scheduler.CyclicLR):
self.scheduler.step()
self.optimizer.step()
#print(self.optimizer.param_groups[0]['lr'])
# if self.num_iterations % self.log_after_iters == 0:
# # if model contains final_activation layer for normalizing logits apply it, otherwise both
# # the evaluation metric as well as images in tensorboard will be incorrectly computed
# if hasattr(self.model, 'final_activation'):
# output = self.model.final_activation(output)
#
# # compute eval criterion
# eval_score = self.eval_criterion(output, target)
# train_eval_scores.update(eval_score.item(), self._batch_size(input))
#
# # log stats, params and images
# self.logger.info(
# f'Training stats. Loss: {train_losses.avg}. Evaluation score: {train_eval_scores.avg}')
# self._log_stats('train', train_losses.avg, train_eval_scores.avg)
# self._log_params()
# self._log_images(input, target, output)
if self.max_num_iterations < self.num_iterations:
self.logger.info(
f'Maximum number of iterations {self.max_num_iterations} exceeded. Finishing training...')
return True
self.num_iterations += 1
self.logger.info(f'Train Loss: {train_losses.avg}')
return False
def validate(self, val_loaders):
#self.logger.info('Validating...')
val_losses = utils.RunningAverage()
val_scores = utils.RunningAverage()
try:
# set the model in evaluation mode; final_activation doesn't need to be called explicitly
self.model.eval()
with torch.no_grad():
for val_loader in tqdm(val_loaders):
ds = val_loader.dataset
nD,nH,nW = ds.zz,ds.yy,ds.xx
nC = self.model.out_channels
# initialize the output prediction arrays
prediction_map = torch.zeros(size = (nC,nD,nH,nW),dtype = torch.float32).to(self.device)
# initialize normalization mask in order to average out probabilities of overlapping patches
normalization_mask = torch.zeros(size = (nC,nD,nH,nW),dtype = torch.float32).to(self.device)
gt_label = torch.from_numpy(ds.labels).long().to(self.device)
for i, t in enumerate(val_loader):
#self.logger.info(f'Validation iteration {i}')
#input, target, weight, GP = self._split_training_batch(t)
input, target, weight, slices, zyx, GP = self._split_training_batch(t)
# output, loss = self._forward_pass(input, target, weight, GP)
output, loss = self._forward_pass(input, target, zyx, weight = weight,slices = slices,GP = GP)
val_losses.update(loss.item(), self._batch_size(input))
output = self.model.final_activation(output)
for nB in range(output.size(0)):
slice_pred = (slice(0, nC,),) + slices[nB][1:] # remove channel slice and add class slice at beginning
prob = output[nB]
if self.eval_criterion.pad_width is not None:
prob, slice_pred = unpad_eval(prob, slice_pred, shape = (nD,nH,nW),pad_width= self.eval_criterion.pad_width)
prediction_map[slice_pred] += prob
normalization_mask[slice_pred] += 1.0
if self.validate_iters is not None and self.validate_iters <= i:
# stop validation
break
                    # merge this case's patch predictions by averaging the overlapping probabilities
prediction_map = prediction_map / normalization_mask
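                    # note: voxels never covered by a patch keep a zero count in
                    # normalization_mask and would become NaN here; the sliding
                    # window is assumed to tile the whole volume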
eval_score = self.eval_criterion(prediction_map[None,:], gt_label[None,:])
val_scores.update(eval_score.item())
self._log_stats('val', val_losses.avg, val_scores.avg)
self.logger.info(f'Validation Loss: {val_losses.avg}. Evaluation score: {val_scores.avg}')
return val_scores.avg
finally:
# set back in training mode
self.model.train()
def _split_training_batch(self, t):
# def _move_to_device(input):
# if isinstance(input, tuple) or isinstance(input, list):
# return tuple([_move_to_device(x) for x in input])
# elif input is None:
# return None
# else:
# return input.to(self.device)
#
# t = _move_to_device(t)
# weight, GP = None,None
#
#
# if len(t) == 2:
# input, target = t
# return input, target, weight, None
# elif len(t)==3:
# input, target, weight = t
# return input, target, weight, None
# else:
# input, target, weight, GP = t
#
# return input, target, weight,GP
if len(t) == 5:
input, target,weight, slices,zyx = t
input = input.to(self.device)
target = target.to(self.device)
if weight is not None:
weight = weight.to(self.device)
return input, target, weight, slices, zyx,None
elif len(t)==6:
input, target,weight, slices,zyx,GP = t
input = input.to(self.device)
target = target.to(self.device)
if weight is not None:
weight = weight.to(self.device)
if GP is not None:
GP = GP.to(self.device)
return input, target, weight, slices, zyx,GP
    def _forward_pass(self, input, target, zyx, weight=None, slices=None, GP=None):
        # forward pass
        if GP is None:
            output = self.model(input)
        else:
            output = self.model(input, GP=GP)
        if input.dim() == target.dim() + 1:
            # expand_dims=False: convert the uint8 target to long
            target = target.long()
        if weight is not None:
            weight = weight.float()
        if isinstance(self.loss_criterion, list):
            loss = 0.0
            for crit in self.loss_criterion:
                loss += crit(output, target, shape=zyx, weight=weight, slices=slices)
        else:
            # compute the loss with the single criterion
            loss = self.loss_criterion(output, target, shape=zyx, weight=weight, slices=slices)
return output, loss
def _is_best_eval_score(self, eval_score):
if self.eval_score_higher_is_better:
is_best = eval_score > self.best_eval_score
else:
is_best = eval_score < self.best_eval_score
if is_best:
self.logger.info(f'Saving new best evaluation metric: {eval_score}')
self.best_eval_score = eval_score
return is_best
def _save_checkpoint(self, is_best):
utils.save_checkpoint({
'epoch': self.num_epoch + 1,
'num_iterations': self.num_iterations,
'model_state_dict': self.model.state_dict(),
'best_eval_score': self.best_eval_score,
'eval_score_higher_is_better': self.eval_score_higher_is_better,
'optimizer_state_dict': self.optimizer.state_dict(),
'device': str(self.device),
'max_num_epochs': self.max_num_epochs,
'max_num_iterations': self.max_num_iterations,
'validate_after_iters': self.validate_after_iters,
'log_after_iters': self.log_after_iters,
'validate_iters': self.validate_iters
}, is_best, checkpoint_dir=self.checkpoint_dir,
logger=self.logger)
def _log_lr(self):
lr = self.optimizer.param_groups[0]['lr']
self.writer.add_scalar('learning_rate', lr, self.num_iterations)
def _log_stats(self, phase, loss_avg, eval_score_avg):
tag_value = {
f'{phase}_loss_avg': loss_avg,
f'{phase}_eval_score_avg': eval_score_avg
}
for tag, value in tag_value.items():
self.writer.add_scalar(tag, value, self.num_iterations)
def _log_params(self):
self.logger.info('Logging model parameters and gradients')
for name, value in self.model.named_parameters():
self.writer.add_histogram(name, value.data.cpu().numpy(), self.num_iterations)
self.writer.add_histogram(name + '/grad', value.grad.data.cpu().numpy(), self.num_iterations)
def _log_images(self, input, target, prediction):
inputs_map = {
'inputs': input,
'targets': target,
'predictions': prediction
}
img_sources = {}
for name, batch in inputs_map.items():
if isinstance(batch, list) or isinstance(batch, tuple):
for i, b in enumerate(batch):
img_sources[f'{name}{i}'] = b.data.cpu().numpy()
else:
img_sources[name] = batch.data.cpu().numpy()
for name, batch in img_sources.items():
for tag, image in self._images_from_batch(name, batch):
self.writer.add_image(tag, image, self.num_iterations, dataformats='HW')
def _images_from_batch(self, name, batch):
tag_template = '{}/batch_{}/channel_{}/slice_{}'
tagged_images = []
if batch.ndim == 5:
# NCDHW
slice_idx = batch.shape[2] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
for channel_idx in range(batch.shape[1]):
tag = tag_template.format(name, batch_idx, channel_idx, slice_idx)
img = batch[batch_idx, channel_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
else:
# batch has no channel dim: NDHW
slice_idx = batch.shape[1] // 2 # get the middle slice
for batch_idx in range(batch.shape[0]):
tag = tag_template.format(name, batch_idx, 0, slice_idx)
img = batch[batch_idx, slice_idx, ...]
tagged_images.append((tag, self._normalize_img(img)))
return tagged_images
@staticmethod
def _normalize_img(img):
return (img - np.min(img)) / np.ptp(img)
@staticmethod
def _batch_size(input):
if isinstance(input, list) or isinstance(input, tuple):
return input[0].size(0)
else:
return input.size(0)
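# A tiny numeric sketch of the overlapping-patch averaging performed in the
# validation loop above (values are made up for illustration):
#   two patches both cover voxel v with probabilities 0.8 and 0.6, so
#   prediction_map[v] = 0.8 + 0.6 = 1.4 and normalization_mask[v] = 2.0,
#   giving a merged probability of 1.4 / 2.0 = 0.7 before the eval criterion runs.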
|
[
"tqdm.tqdm",
"torch.no_grad",
"unet3d.utils.unpad_eval",
"numpy.ptp",
"numpy.min",
"torch.device",
"torch.zeros",
"os.path.split",
"os.path.join",
"torch.from_numpy"
] |
[((4275, 4305), 'os.path.split', 'os.path.split', (['checkpoint_path'], {}), '(checkpoint_path)\n', (4288, 4305), False, 'import os\n'), ((4430, 4459), 'torch.device', 'torch.device', (["state['device']"], {}), "(state['device'])\n", (4442, 4459), False, 'import torch\n'), ((5759, 5785), 'os.path.split', 'os.path.split', (['pre_trained'], {}), '(pre_trained)\n', (5772, 5785), False, 'import os\n'), ((8143, 8161), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (8147, 8161), False, 'from tqdm import tqdm\n'), ((21475, 21486), 'numpy.ptp', 'np.ptp', (['img'], {}), '(img)\n', (21481, 21486), True, 'import numpy as np\n'), ((3650, 3686), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""logs"""'], {}), "(checkpoint_dir, 'logs')\n", (3662, 3686), False, 'import os\n'), ((10778, 10793), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10791, 10793), False, 'import torch\n'), ((10829, 10846), 'tqdm.tqdm', 'tqdm', (['val_loaders'], {}), '(val_loaders)\n', (10833, 10846), False, 'from tqdm import tqdm\n'), ((21460, 21471), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (21466, 21471), True, 'import numpy as np\n'), ((11111, 11166), 'torch.zeros', 'torch.zeros', ([], {'size': '(nC, nD, nH, nW)', 'dtype': 'torch.float32'}), '(size=(nC, nD, nH, nW), dtype=torch.float32)\n', (11122, 11166), False, 'import torch\n'), ((11337, 11392), 'torch.zeros', 'torch.zeros', ([], {'size': '(nC, nD, nH, nW)', 'dtype': 'torch.float32'}), '(size=(nC, nD, nH, nW), dtype=torch.float32)\n', (11348, 11392), False, 'import torch\n'), ((12643, 12737), 'unet3d.utils.unpad_eval', 'unpad_eval', (['prob', 'slice_pred'], {'shape': '(nD, nH, nW)', 'pad_width': 'self.eval_criterion.pad_width'}), '(prob, slice_pred, shape=(nD, nH, nW), pad_width=self.\n eval_criterion.pad_width)\n', (12653, 12737), False, 'from unet3d.utils import unpad_eval\n'), ((11461, 11488), 'torch.from_numpy', 'torch.from_numpy', (['ds.labels'], {}), '(ds.labels)\n', (11477, 11488), False, 'import torch\n')]
|
from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter
from arcpy import SpatialReference, SearchCursor
from parseGeometry import getParseFunc
from json import dump
# really the only global
wgs84="GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984',SPHEROID['WGS_1984',6378137.0,298.257223563]],PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]];-400 -400 1000000000;-100000 10000;-100000 10000;8.98315284119522E-09;0.001;0.001;IsHighPrecision"
class parse:
def __init__(self,outFile,featureClass,fileType,includeGeometry, first=True):
self.outFile = outFile
self.fileType = fileType
        # first we set up the local variables we'll need
[self.shp,self.shpType]=getShp(featureClass)
self.fields=listFields(featureClass)
self.oid=getOID(self.fields)
sr=SpatialReference()
sr.loadFromString(wgs84)
#the search cursor
self.rows=SearchCursor(featureClass,"",sr)
#don't want the shape field showing up as a property
del self.fields[self.shp]
self.first=first
self.status = statusMessage(featureClass)
#define the correct geometry function if we're exporting geometry
self.parseGeo = getParseFunc(self.shpType,includeGeometry)
self.i=0
if fileType=="geojson":
self.parse = self.parseGeoJSON
elif fileType=="csv":
self.parse = self.parseCSV
elif fileType=="json":
self.parse = self.parseJSON
elif fileType=="sqlite":
self.parse = self.parseSqlite
def cleanUp(self,row):
del row
del self.rows
return True
def parseCSV(self,row):
#more messages
self.status.update()
fc=parseProp(row,self.fields, self.shp)
if self.parseGeo:
try:
fc["geometry"]=self.parseGeo(row.getValue(self.shp))
except:
return
self.outFile[0].writerow(fc)
def parseGeoJSON(self,row):
#more messages
self.status.update()
fc={"type": "Feature"}
if self.parseGeo:
try:
fc["geometry"]=self.parseGeo(row.getValue(self.shp))
except:
return
else:
raise NameError("we need geometry for geojson")
fc["id"]=row.getValue(self.oid)
fc["properties"]=parseProp(row,self.fields, self.shp)
if fc["geometry"]=={}:
return
if self.first:
self.first=False
dump(fc,self.outFile)
else:
#if it isn't the first feature, add a comma
self.outFile.write(",")
dump(fc,self.outFile)
def parseJSON(self,row):
#more messages
self.status.update()
fc=parseProp(row,self.fields, self.shp)
if self.parseGeo:
try:
fc["geometry"]=self.parseGeo(row.getValue(self.shp))
except:
return
if self.first:
self.first=False
dump(fc,self.outFile)
else:
self.outFile.write(",")
dump(fc,self.outFile)
def parseSqlite(self,row):
#more messages
self.status.update()
fc=parseProp(row,self.fields, self.shp)
self.i=self.i+1
fc["OGC_FID"]=self.i
if self.parseGeo:
try:
fc["GEOMETRY"]=self.parseGeo(row.getValue(self.shp))
except:
return
keys = fc.keys()
values = fc.values()
[name,c,conn]=self.outFile
c.execute("""insert into {0}({1})
values({2})
""".format(name,", ".join(keys),makeInter(len(values))),values)
conn.commit()
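# A hypothetical usage sketch (variable names are illustrative, not part of this
# module); arcpy's SearchCursor is iterable, so each row can be fed to parse():
#   p = parse(out_file, feature_class, "geojson", True)
#   for row in p.rows:
#       p.parse(row)
#   p.cleanUp(row)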
|
[
"utilities.listFields",
"json.dump",
"parseGeometry.getParseFunc",
"utilities.parseProp",
"arcpy.SearchCursor",
"utilities.getShp",
"utilities.getOID",
"utilities.statusMessage",
"arcpy.SpatialReference"
] |
[((724, 744), 'utilities.getShp', 'getShp', (['featureClass'], {}), '(featureClass)\n', (730, 744), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((765, 789), 'utilities.listFields', 'listFields', (['featureClass'], {}), '(featureClass)\n', (775, 789), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((807, 826), 'utilities.getOID', 'getOID', (['self.fields'], {}), '(self.fields)\n', (813, 826), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((838, 856), 'arcpy.SpatialReference', 'SpatialReference', ([], {}), '()\n', (854, 856), False, 'from arcpy import SpatialReference, SearchCursor\n'), ((935, 969), 'arcpy.SearchCursor', 'SearchCursor', (['featureClass', '""""""', 'sr'], {}), "(featureClass, '', sr)\n", (947, 969), False, 'from arcpy import SpatialReference, SearchCursor\n'), ((1110, 1137), 'utilities.statusMessage', 'statusMessage', (['featureClass'], {}), '(featureClass)\n', (1123, 1137), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((1236, 1279), 'parseGeometry.getParseFunc', 'getParseFunc', (['self.shpType', 'includeGeometry'], {}), '(self.shpType, includeGeometry)\n', (1248, 1279), False, 'from parseGeometry import getParseFunc\n'), ((1780, 1817), 'utilities.parseProp', 'parseProp', (['row', 'self.fields', 'self.shp'], {}), '(row, self.fields, self.shp)\n', (1789, 1817), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((2419, 2456), 'utilities.parseProp', 'parseProp', (['row', 'self.fields', 'self.shp'], {}), '(row, self.fields, self.shp)\n', (2428, 2456), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((2825, 2862), 'utilities.parseProp', 'parseProp', (['row', 'self.fields', 'self.shp'], {}), '(row, self.fields, self.shp)\n', (2834, 2862), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((3282, 3319), 'utilities.parseProp', 'parseProp', (['row', 'self.fields', 'self.shp'], {}), '(row, self.fields, self.shp)\n', (3291, 3319), False, 'from utilities import listFields, getShp, getOID, statusMessage, parseProp, makeInter\n'), ((2570, 2592), 'json.dump', 'dump', (['fc', 'self.outFile'], {}), '(fc, self.outFile)\n', (2574, 2592), False, 'from json import dump\n'), ((2710, 2732), 'json.dump', 'dump', (['fc', 'self.outFile'], {}), '(fc, self.outFile)\n', (2714, 2732), False, 'from json import dump\n'), ((3081, 3103), 'json.dump', 'dump', (['fc', 'self.outFile'], {}), '(fc, self.outFile)\n', (3085, 3103), False, 'from json import dump\n'), ((3165, 3187), 'json.dump', 'dump', (['fc', 'self.outFile'], {}), '(fc, self.outFile)\n', (3169, 3187), False, 'from json import dump\n')]
|
import logging
import pytest
import kopf
# We assume that the handler filtering is tested in details elsewhere (for all handlers).
# Here, we only test if it is applied or not applied.
async def test_daemon_filtration_satisfied(
registry, settings, resource, dummy,
caplog, assert_logs, k8s_mocked, simulate_cycle):
caplog.set_level(logging.DEBUG)
@kopf.daemon(resource.group, resource.version, resource.plural, registry=registry, id='fn',
labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT},
annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})
async def fn(**kwargs):
dummy.kwargs = kwargs
dummy.steps['called'].set()
finalizer = settings.persistence.finalizer
event_body = {'metadata': {'labels': {'a': 'value', 'b': '...'},
'annotations': {'x': 'value', 'y': '...'},
'finalizers': [finalizer]}}
await simulate_cycle(event_body)
await dummy.steps['called'].wait()
await dummy.wait_for_daemon_done()
@pytest.mark.parametrize('labels, annotations', [
# Annotations mismatching (but labels are matching):
    ({'a': 'value', 'b': '...'}, {'x': 'mismatching-value', 'y': '...'}),  # x must be "value".
({'a': 'value', 'b': '...'}, {'x': 'value', 'y': '...', 'z': '...'}), # z must be absent
({'a': 'value', 'b': '...'}, {'x': 'value'}), # y must be present
# labels mismatching (but annotations are matching):
({'a': 'mismatching-value', 'b': '...'}, {'x': 'value', 'y': '...'}),
({'a': 'value', 'b': '...', 'c': '...'}, {'x': 'value', 'y': '...'}),
({'a': 'value'}, {'x': 'value', 'y': '...'}),
])
async def test_daemon_filtration_mismatched(
registry, settings, resource, mocker, labels, annotations,
caplog, assert_logs, k8s_mocked, simulate_cycle):
caplog.set_level(logging.DEBUG)
spawn_resource_daemons = mocker.patch('kopf.reactor.daemons.spawn_resource_daemons')
@kopf.daemon(resource.group, resource.version, resource.plural, registry=registry, id='fn',
labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT},
annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})
async def fn(**kwargs):
pass
finalizer = settings.persistence.finalizer
event_body = {'metadata': {'labels': labels,
'annotations': annotations,
'finalizers': [finalizer]}}
await simulate_cycle(event_body)
assert spawn_resource_daemons.called
assert spawn_resource_daemons.call_args_list[0][1]['handlers'] == []
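# In short, the filter semantics exercised by the tests above:
#   'a': 'value'   -> the label/annotation "a" must equal "value"
#   kopf.PRESENT   -> the key must exist, with any value
#   kopf.ABSENT    -> the key must not exist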
|
[
"pytest.mark.parametrize",
"kopf.daemon"
] |
[((1092, 1534), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""labels, annotations"""', "[({'a': 'value', 'b': '...'}, {'x': 'mismatching-value', 'b': '...'}), ({\n 'a': 'value', 'b': '...'}, {'x': 'value', 'y': '...', 'z': '...'}), ({\n 'a': 'value', 'b': '...'}, {'x': 'value'}), ({'a': 'mismatching-value',\n 'b': '...'}, {'x': 'value', 'y': '...'}), ({'a': 'value', 'b': '...',\n 'c': '...'}, {'x': 'value', 'y': '...'}), ({'a': 'value'}, {'x':\n 'value', 'y': '...'})]"], {}), "('labels, annotations', [({'a': 'value', 'b': '...'},\n {'x': 'mismatching-value', 'b': '...'}), ({'a': 'value', 'b': '...'}, {\n 'x': 'value', 'y': '...', 'z': '...'}), ({'a': 'value', 'b': '...'}, {\n 'x': 'value'}), ({'a': 'mismatching-value', 'b': '...'}, {'x': 'value',\n 'y': '...'}), ({'a': 'value', 'b': '...', 'c': '...'}, {'x': 'value',\n 'y': '...'}), ({'a': 'value'}, {'x': 'value', 'y': '...'})])\n", (1115, 1534), False, 'import pytest\n'), ((379, 604), 'kopf.daemon', 'kopf.daemon', (['resource.group', 'resource.version', 'resource.plural'], {'registry': 'registry', 'id': '"""fn"""', 'labels': "{'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT}", 'annotations': "{'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT}"}), "(resource.group, resource.version, resource.plural, registry=\n registry, id='fn', labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.\n ABSENT}, annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})\n", (390, 604), False, 'import kopf\n'), ((2020, 2245), 'kopf.daemon', 'kopf.daemon', (['resource.group', 'resource.version', 'resource.plural'], {'registry': 'registry', 'id': '"""fn"""', 'labels': "{'a': 'value', 'b': kopf.PRESENT, 'c': kopf.ABSENT}", 'annotations': "{'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT}"}), "(resource.group, resource.version, resource.plural, registry=\n registry, id='fn', labels={'a': 'value', 'b': kopf.PRESENT, 'c': kopf.\n ABSENT}, annotations={'x': 'value', 'y': kopf.PRESENT, 'z': kopf.ABSENT})\n", (2031, 2245), False, 'import kopf\n')]
|
# -*- coding: utf-8 -*-
import h5py
import yaml
from collections import UserDict
from datetime import datetime
from numpy import string_
from contextlib import contextmanager
TYPEID = '_type_'
@contextmanager
def hdf_file(hdf, lazy=True, *args, **kwargs):
"""Context manager yields h5 file if hdf is str,
otherwise just yield hdf as is."""
if isinstance(hdf, str):
if not lazy:
with h5py.File(hdf, *args, **kwargs) as hdf:
yield hdf
else:
yield h5py.File(hdf, *args, **kwargs)
else:
yield hdf
def unpack_dataset(item):
"""Reconstruct a hdfdict dataset.
Only some special unpacking for yaml and datetime types.
Parameters
----------
item : h5py.Dataset
Returns
-------
value : Unpacked Data
"""
value = item[()]
if TYPEID in item.attrs:
if item.attrs[TYPEID].astype(str) == 'datetime':
if hasattr(value, '__iter__'):
value = [datetime.fromtimestamp(
ts) for ts in value]
else:
value = datetime.fromtimestamp(value)
if item.attrs[TYPEID].astype(str) == 'yaml':
value = yaml.safe_load(value.decode())
return value
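# For example (illustrative): a value written from datetime.now() is stored as a
# float timestamp with attrs[TYPEID] == 'datetime' and is turned back into a
# datetime object here; yaml-serialized values are restored via yaml.safe_load.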
class LazyHdfDict(UserDict):
"""Helps loading data only if values from the dict are requested.
This is done by reimplementing the __getitem__ method.
"""
def __init__(self, _h5file=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self._h5file = _h5file # used to close the file on deletion.
def __getitem__(self, key):
"""Returns item and loads dataset if needed."""
item = super().__getitem__(key)
if isinstance(item, h5py.Dataset):
item = unpack_dataset(item)
self.__setitem__(key, item)
return item
def unlazy(self):
"""Unpacks all datasets.
You can call dict(this_instance) then to get a real dict.
"""
load(self, lazy=False)
def close(self):
"""Closes the h5file if provided at initialization."""
if self._h5file and hasattr(self._h5file, 'close'):
self._h5file.close()
def __del__(self):
self.close()
def _ipython_key_completions_(self):
"""Returns a tuple of keys.
Special Method for ipython to get key completion
"""
return tuple(self.keys())
def load(hdf, lazy=True, unpacker=unpack_dataset, *args, **kwargs):
"""Returns a dictionary containing the
groups as keys and the datasets as values
from given hdf file.
Parameters
----------
hdf : string (path to file) or `h5py.File()` or `h5py.Group()`
lazy : bool
If True, the datasets are lazy loaded at the moment an item is requested.
    unpacker : callable
Unpack function gets `value` of type h5py.Dataset.
Must return the data you would like to have it in the returned dict.
Returns
-------
d : dict
The dictionary containing all groupnames as keys and
datasets as values.
"""
def _recurse(hdfobject, datadict):
for key, value in hdfobject.items():
if type(value) == h5py.Group or isinstance(value, LazyHdfDict):
if lazy:
datadict[key] = LazyHdfDict()
else:
datadict[key] = {}
datadict[key] = _recurse(value, datadict[key])
elif isinstance(value, h5py.Dataset):
if not lazy:
value = unpacker(value)
datadict[key] = value
return datadict
with hdf_file(hdf, lazy=lazy, *args, **kwargs) as hdf:
if lazy:
data = LazyHdfDict(_h5file=hdf)
else:
data = {}
return _recurse(hdf, data)
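# Hypothetical example (the file name is illustrative):
#   lazy = load('measurements.h5')               # LazyHdfDict, datasets load on access
#   plain = load('measurements.h5', lazy=False)  # ordinary nested dict, loaded eagerly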
def pack_dataset(hdfobject, key, value):
"""Packs a given key value pair into a dataset in the given hdfobject."""
isdt = None
if isinstance(value, datetime):
value = value.timestamp()
isdt = True
if hasattr(value, '__iter__'):
if all(isinstance(i, datetime) for i in value):
value = [item.timestamp() for item in value]
isdt = True
try:
ds = hdfobject.create_dataset(name=key, data=value)
if isdt:
ds.attrs.create(
name=TYPEID,
data=string_("datetime"))
except TypeError:
# Obviously the data was not serializable. To give it
# a last try; serialize it to yaml
# and save it to the hdf file:
ds = hdfobject.create_dataset(
name=key,
data=string_(yaml.safe_dump(value))
)
ds.attrs.create(
name=TYPEID,
data=string_("yaml"))
# if this fails again, restructure your data!
def dump(data, hdf, packer=pack_dataset, *args, **kwargs):
"""Adds keys of given dict as groups and values as datasets
to the given hdf-file (by string or object) or group object.
Parameters
----------
data : dict
The dictionary containing only string keys and
data values or dicts again.
hdf : string (path to file) or `h5py.File()` or `h5py.Group()`
packer : callable
Callable gets `hdfobject, key, value` as input.
`hdfobject` is considered to be either a h5py.File or a h5py.Group.
`key` is the name of the dataset.
`value` is the dataset to be packed and accepted by h5py.
Returns
-------
hdf : obj
`h5py.Group()` or `h5py.File()` instance
"""
def _recurse(datadict, hdfobject):
for key, value in datadict.items():
if isinstance(key, tuple):
key = '_'.join((str(i) for i in key))
if isinstance(value, (dict, LazyHdfDict)):
hdfgroup = hdfobject.create_group(key)
_recurse(value, hdfgroup)
else:
packer(hdfobject, key, value)
with hdf_file(hdf, *args, **kwargs) as hdf:
_recurse(data, hdf)
return hdf
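# Minimal round-trip sketch (file name is illustrative; extra kwargs such as
# mode='w' are passed through to h5py.File):
#   dump({'run1': {'when': datetime.now(), 'values': [1, 2, 3]}}, 'out.h5', mode='w')
#   restored = load('out.h5', lazy=False, mode='r')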
|
[
"yaml.safe_dump",
"h5py.File",
"numpy.string_",
"datetime.datetime.fromtimestamp"
] |
[((419, 450), 'h5py.File', 'h5py.File', (['hdf', '*args'], {}), '(hdf, *args, **kwargs)\n', (428, 450), False, 'import h5py\n'), ((517, 548), 'h5py.File', 'h5py.File', (['hdf', '*args'], {}), '(hdf, *args, **kwargs)\n', (526, 548), False, 'import h5py\n'), ((1105, 1134), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['value'], {}), '(value)\n', (1127, 1134), False, 'from datetime import datetime\n'), ((998, 1024), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1020, 1024), False, 'from datetime import datetime\n'), ((4432, 4451), 'numpy.string_', 'string_', (['"""datetime"""'], {}), "('datetime')\n", (4439, 4451), False, 'from numpy import string_\n'), ((4805, 4820), 'numpy.string_', 'string_', (['"""yaml"""'], {}), "('yaml')\n", (4812, 4820), False, 'from numpy import string_\n'), ((4705, 4726), 'yaml.safe_dump', 'yaml.safe_dump', (['value'], {}), '(value)\n', (4719, 4726), False, 'import yaml\n')]
|
import sys
import os
from PIL import Image
# grab the first and second arguments
try:
in_folder = sys.argv[1]
out_folder = sys.argv[2]
os.makedirs(out_folder, exist_ok=True)
#loop through input folder
    for filename in os.listdir(in_folder):
        if filename.endswith('.jpg'):
            img = Image.open(os.path.join(in_folder, filename))
            filebase = os.path.splitext(filename)[0]
            # convert each image to PNG and save it to the new folder
            img.save(os.path.join(out_folder, filebase) + '.png', 'PNG')
            print(f'Converting {in_folder} {filename} ---> {out_folder} {filebase}.png image.')
except IndexError as err:
print(f'Please enter folder name! Error reason: {err} ')
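# Example invocation (script and folder names are illustrative):
#   python convert.py ./jpg_input ./png_output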
|
[
"os.listdir",
"os.path.splitext",
"os.path.join",
"os.makedirs"
] |
[((175, 213), 'os.makedirs', 'os.makedirs', (['out_folder'], {'exist_ok': '(True)'}), '(out_folder, exist_ok=True)\n', (186, 213), False, 'import os\n'), ((262, 283), 'os.listdir', 'os.listdir', (['in_folder'], {}), '(in_folder)\n', (272, 283), False, 'import os\n'), ((349, 382), 'os.path.join', 'os.path.join', (['in_folder', 'filename'], {}), '(in_folder, filename)\n', (361, 382), False, 'import os\n'), ((405, 431), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (421, 431), False, 'import os\n'), ((506, 540), 'os.path.join', 'os.path.join', (['out_folder', 'filebase'], {}), '(out_folder, filebase)\n', (518, 540), False, 'import os\n')]
|
"""Unit tests for reviewboard.diffviewer.parser.DiffXParser."""
from djblets.testing.decorators import add_fixtures
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.diffviewer.parser import DiffXParser
from reviewboard.scmtools.core import HEAD, PRE_CREATION, UNKNOWN
from reviewboard.testing import TestCase
class DiffXParserTests(TestCase):
"""Unit tests for reviewboard.diffviewer.parser.DiffXParser."""
def test_parse_diff_with_basic_diff(self):
"""Testing DiffXParser.parse_diff with a basic DiffX file"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=156\n'
b'{\n'
b' "path": {\n'
b' "new": "message2.py",\n'
b' "old": "message.py"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=693, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'message.py',
'new': 'message2.py',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message2.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
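    # The DiffX container format parsed above nests sections as
    # "#diffx:" -> "#.change:" -> "#..file:", with "#...meta:" and "#...diff:"
    # leaf sections whose headers carry options such as encoding, format,
    # length and line_endings.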
def test_parse_diff_with_complex_diff(self):
"""Testing DiffXParser.parse_diff with a complex DiffX file"""
parser = DiffXParser(
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 2)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
# Inspect change #1.
parsed_change = parsed_diff.changes[0]
self.assertEqual(parsed_change.commit_id,
b'a25e7b28af5e3184946068f432122c68c1a30b23')
        self.assertEqual(parsed_change.parent_commit_id,
                         b'b892d5f833474c59d7851ff46a4b0bd919017e97')
self.assertEqual(parsed_change.extra_data, {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
})
self.assertEqual(len(parsed_change.files), 1)
# Inspect change #1, file #1
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n')
self.assertEqual(parsed_file.orig_filename, b'file1')
self.assertEqual(parsed_file.orig_file_details,
b'c8839177d1a5605aa60abe69db95c84183f0eebe')
self.assertEqual(parsed_file.modified_filename, b'file1')
self.assertEqual(parsed_file.modified_file_details,
b'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
# Inspect change #2.
parsed_change = parsed_diff.changes[1]
self.assertEqual(parsed_change.commit_id,
b'91127b687f583184144161f432222748c1a30b23')
        self.assertEqual(parsed_change.parent_commit_id,
                         b'a25e7b28af5e3184946068f432122c68c1a30b23')
self.assertEqual(parsed_change.extra_data, {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
})
self.assertEqual(len(parsed_change.files), 2)
# Inspect change #2, file #1
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
})
self.assertEqual(
parsed_file.data,
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00')
self.assertEqual(parsed_file.orig_filename, b'file2')
self.assertEqual(parsed_file.orig_file_details,
b'281bac2b704617e807850e07e54bae3469f6a2e7')
self.assertEqual(parsed_file.modified_filename, b'file2')
self.assertEqual(parsed_file.modified_file_details,
b'389cc6b7ae5a659383eab5dfc253764eccf84732')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
# Inspect change #2, file #2
parsed_file = parsed_change.files[1]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')
self.assertEqual(parsed_file.orig_filename, b'file3')
self.assertEqual(parsed_file.orig_file_details,
b'be089b7197974703c83682088a068bef3422c6c2')
self.assertEqual(parsed_file.modified_filename, b'file3')
self.assertEqual(parsed_file.modified_file_details,
b'0d4a0fb8d62b762a26e13591d06d93d79d61102f')
self.assertEqual(parsed_file.insert_count, 2)
self.assertEqual(parsed_file.delete_count, 1)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_path_string(self):
"""Testing DiffXParser.parse_diff with file's meta.path as single
string
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=103\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_old_only(self):
"""Testing DiffXParser.parse_diff with file's revision.old and no
revision.new
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=78\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'old': 'abc123',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, HEAD)
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_new_only(self):
"""Testing DiffXParser.parse_diff with file's revision.new and no
revision.old
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=78\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, UNKNOWN)
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_new_only_op_create(self):
"""Testing DiffXParser.parse_diff with file's revision.new and no
revision.old and op=create
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=98\n'
b'{\n'
b' "op": "create",\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'create',
'path': 'message.py',
'revision': {
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, PRE_CREATION)
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_binary_file(self):
"""Testing DiffXParser.parse_diff with binary file"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=104\n'
b'{\n'
b' "path": "message.bin",\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=23, type=binary, line_endings=unix\n'
b'This is a binary file.\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
'type': 'binary',
},
'metadata': {
'path': 'message.bin',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'This is a binary file.\n')
self.assertEqual(parsed_file.orig_filename, b'message.bin')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.bin')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertTrue(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_file_op_delete(self):
"""Testing DiffXParser.parse_diff with file op=delete"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=123\n'
b'{\n'
b' "op": "delete",\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=29, line_endings=unix\n'
b'@@ -1 +0,0 @@\n'
b'-Goodbye, file\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'delete',
'path': 'message.py',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'@@ -1 +0,0 @@\n'
b'-Goodbye, file\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 1)
self.assertTrue(parsed_file.deleted)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_op_move(self):
"""Testing DiffXParser.parse_diff with file op=move"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=169\n'
b'{\n'
b' "op": "move",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'metadata': {
'op': 'move',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(parsed_file.data, b'')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertTrue(parsed_file.moved)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_op_move_modify(self):
"""Testing DiffXParser.parse_diff with file op=move-modify"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=176\n'
b'{\n'
b' "op": "move-modify",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'move-modify',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
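        # move-modify keeps the moved flag and also parses the
        # accompanying hunk.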
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
self.assertEqual(parsed_file.orig_file_details, 'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
self.assertTrue(parsed_file.moved)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)

    def test_parse_diff_with_op_copy(self):
"""Testing DiffXParser.parse_diff with file op=copy"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=169\n'
b'{\n'
b' "op": "copy",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'metadata': {
'op': 'copy',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
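        # Like a pure move, a pure copy has no diff section and no
        # patch data.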
self.assertEqual(parsed_file.data, b'')
self.assertEqual(parsed_file.orig_filename, b'old-name')
self.assertEqual(parsed_file.orig_file_details, 'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertTrue(parsed_file.copied)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)

    def test_parse_diff_with_op_copy_modify(self):
"""Testing DiffXParser.parse_diff with file op=copy-modify"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=176\n'
b'{\n'
b' "op": "copy-modify",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'copy-modify',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
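        # copy-modify sets the copied flag while still counting the
        # hunk's changed lines.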
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
self.assertEqual(parsed_file.orig_file_details, 'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
self.assertTrue(parsed_file.copied)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)

    def test_parse_diff_with_existing_stats(self):
"""Testing DiffXParser.parse_diff with existing file stats"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=225\n'
b'{\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' },\n'
b' "stats": {\n'
b' "deletions": 100,\n'
b' "insertions": 200\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
'stats': {
'deletions': 100,
'insertions': 200,
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
self.assertEqual(parsed_file.orig_file_details, 'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
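        # The counts come from the "stats" metadata rather than from
        # scanning the one-line diff.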
self.assertEqual(parsed_file.insert_count, 200)
self.assertEqual(parsed_file.delete_count, 100)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)

    def test_parse_diff_with_type_symlink(self):
"""Testing DiffXParser.parse_diff with file type=symlink"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=212\n'
b'{\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' },\n'
b' "type": "symlink",\n'
b' "symlink target": "target/path/"\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
'type': 'symlink',
'symlink target': 'target/path/',
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
self.assertEqual(parsed_file.orig_file_details, 'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
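        # "type": "symlink" in the metadata flags the parsed file as a
        # symlink.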
self.assertTrue(parsed_file.is_symlink)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)

    def test_parse_diff_with_invalid_diffx(self):
"""Testing DiffXParser.parse_diff with invalid DiffX file contents"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'BLARGH\n'
)
message = (
"Error on line 2: Unexpected or improperly formatted header: %r"
% b'BLARGH'
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()

    def test_parse_diff_with_path_invalid_type(self):
"""Testing DiffXParser.parse_diff with invalid file path type"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=94\n'
b'{\n'
b' "path": 123,\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = (
'Unexpected type %s for "path" key in change 1, file 1'
% int
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()

    def test_parse_diff_with_path_dict_missing_old(self):
"""Testing DiffXParser.parse_diff with file path as dictionary with
missing "old" key
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=120\n'
b'{\n'
b' "path": {\n'
b' "new": "file"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = 'Missing the "path.old" key in change 1, file 1'
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()

    def test_parse_diff_with_path_dict_missing_new(self):
"""Testing DiffXParser.parse_diff with file path as dictionary with
missing "new" key
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=120\n'
b'{\n'
b' "path": {\n'
b' "old": "file"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = 'Missing the "path.new" key in change 1, file 1'
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()

    def test_parse_diff_with_revision_invalid_type(self):
"""Testing DiffXParser.parse_diff with invalid file revision type"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=44\n'
b'{\n'
b' "path": "file",\n'
b' "revision": 123\n'
b'}\n'
)
message = (
'Unexpected type %s for "revision" key in change 1, file 1'
% int
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()

    @add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_simple(self):
"""Testing DiffXParser.raw_diff with DiffSet and simple diff"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
}
diffset.save(update_fields=('extra_data',))
diffcommit = self.create_diffcommit(diffset=diffset,
with_diff=False)
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
save=False,
diff=(
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict '
b'does not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'message.py',
'new': 'message2.py',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
}
filediff.save()
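        # raw_diff() rebuilds the DiffX payload from the stored
        # extra_data, so the parser can be constructed with empty content.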
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=156\n'
b'{\n'
b' "path": {\n'
b' "new": "message2.py",\n'
b' "old": "message.py"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=693, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')

    @add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_complex(self):
"""Testing DiffXParser.raw_diff with DiffSet and complex diff"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
}
diffset.save(update_fields=('extra_data',))
# Create DiffCommit #1.
diffcommit = self.create_diffcommit(
diffset=diffset,
commit_id='a25e7b28af5e3184946068f432122c68c1a30b23',
with_diff=False)
diffcommit.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
}
diffcommit.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
# Create DiffCommit #2.
diffcommit = self.create_diffcommit(
diffset=diffset,
commit_id='91127b687f583184144161f432222748c1a30b23',
with_diff=False)
diffcommit.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
}
diffcommit.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file3',
source_revision='be089b7197974703c83682088a068bef3422c6c2',
dest_file='file3',
dest_detail='0d4a0fb8d62b762a26e13591d06d93d79d61102f',
save=False,
diff=(
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
}
filediff.save()
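        # raw_diff() must reserialize every section with its stored
        # per-section encoding (ascii, utf-8, utf-16, utf-32, latin1).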
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')

    @add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_no_diffcommits(self):
"""Testing DiffXParser.raw_diff with DiffSet and no DiffCommits"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
'change_extra_data': {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id':
'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
},
}
diffset.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
                        'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
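        # Without DiffCommits, the single change section is built from
        # diffset.extra_data['change_extra_data'].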
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00')

    @add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffcommit(self):
"""Testing DiffXParser.raw_diff with DiffCommit"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
}
diffset.save(update_fields=('extra_data',))
# Create DiffCommit #1.
diffcommit1 = self.create_diffcommit(
diffset=diffset,
commit_id='a25e7b28af5e3184946068f432122c68c1a30b23',
with_diff=False)
diffcommit1.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
}
diffcommit1.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit1,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
        # Create DiffCommit #2. It must not appear in diffcommit1's
        # raw_diff() output.
diffcommit2 = self.create_diffcommit(
diffset=diffset,
commit_id='91127b687f583184144161f432222748c1a30b23',
with_diff=False)
diffcommit2.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <<EMAIL>>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <<EMAIL>>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
}
diffcommit2.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit2,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit2,
source_file='file3',
source_revision='be089b7197974703c83682088a068bef3422c6c2',
dest_file='file3',
dest_detail='0d4a0fb8d62b762a26e13591d06d93d79d61102f',
save=False,
diff=(
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
}
filediff.save()
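        # Each DiffCommit serializes independently; passing one to
        # raw_diff() yields only that commit's change section.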
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffcommit1),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n')
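        # diffcommit2's payload carries its own preamble plus both of
        # its files (file2 and file3).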
self.assertEqual(
parser.raw_diff(diffcommit2),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <<EMAIL>>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <<EMAIL>>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')
|
[
"djblets.testing.decorators.add_fixtures",
"reviewboard.diffviewer.parser.DiffXParser"
]
'DiffXParser', (["b'#diffx: encoding=utf-8, version=1.0\\nBLARGH\\n'"], {}), "(b'#diffx: encoding=utf-8, version=1.0\\nBLARGH\\n')\n", (62131, 62181), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((62595, 62814), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (['b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=94\\n{\\n "path": 123,\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\''], {}), '(\n b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=94\\n{\\n "path": 123,\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\'\n )\n', (62606, 62814), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((63392, 63640), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (['b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=120\\n{\\n "path": {\\n "new": "file"\\n },\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\''], {}), '(\n b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=120\\n{\\n "path": {\\n "new": "file"\\n },\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\'\n )\n', (63403, 63640), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((64201, 64449), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (['b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=120\\n{\\n "path": {\\n "old": "file"\\n },\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\''], {}), '(\n b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=120\\n{\\n "path": {\\n "old": "file"\\n },\\n "revision": {\\n "new": "def456",\\n "old": "abc123"\\n }\\n}\\n\'\n )\n', (64212, 64449), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((64973, 65139), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (['b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=44\\n{\\n "path": "file",\\n "revision": 123\\n}\\n\''], {}), '(\n b\'#diffx: encoding=utf-8, version=1.0\\n#.change:\\n#..file:\\n#...meta: format=json, length=44\\n{\\n "path": "file",\\n "revision": 123\\n}\\n\'\n )\n', (64984, 65139), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((68044, 68060), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (["b''"], {}), "(b'')\n", (68055, 68060), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((76617, 76633), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (["b''"], {}), "(b'')\n", (76628, 76633), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((87355, 87371), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (["b''"], {}), "(b'')\n", (87366, 87371), False, 'from reviewboard.diffviewer.parser import DiffXParser\n'), ((99441, 99457), 'reviewboard.diffviewer.parser.DiffXParser', 'DiffXParser', (["b''"], {}), "(b'')\n", (99452, 99457), False, 'from reviewboard.diffviewer.parser import DiffXParser\n')]
|
# coding: utf-8
from __future__ import absolute_import
"""
Copyright 2020 Jackpine Technologies Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
cons3rt - Copyright Jackpine Technologies Corp.
NOTE: This file is auto-generated. Do not edit the file manually.
"""
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from cons3rt.api_client import ApiClient
from cons3rt.exceptions import (
ApiTypeError,
ApiValueError
)
__author__ = 'Jackpine Technologies Corporation'
__copyright__ = 'Copyright 2020, Jackpine Technologies Corporation'
__license__ = 'Apache 2.0'
__version__ = '1.0.0'
__maintainer__ = 'API Support'
__email__ = '<EMAIL>'
class StorageApi(object):
"""NOTE: This class is auto-generated. Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_bucket(self, bucket, **kwargs): # noqa: E501
"""Create Storage Bucket # noqa: E501
Creates a storage bucket as defined by the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_bucket(bucket, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Bucket bucket: The bucket creation information (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.create_bucket_with_http_info(bucket, **kwargs) # noqa: E501
def create_bucket_with_http_info(self, bucket, **kwargs): # noqa: E501
"""Create Storage Bucket # noqa: E501
Creates a storage bucket as defined by the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_bucket_with_http_info(bucket, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Bucket bucket: The bucket creation information (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['bucket'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method create_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'bucket' is set
if self.api_client.client_side_validation and ('bucket' not in local_var_params or # noqa: E501
local_var_params['bucket'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `bucket` when calling `create_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'bucket' in local_var_params:
body_params = local_var_params['bucket']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
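    # Usage sketch (illustrative, not part of the generated client). Assumes a
    # default-configured ApiClient and a Bucket model imported from this
    # package; the Bucket constructor arguments below are assumptions:
    #
    #   api = StorageApi(ApiClient())
    #   new_bucket_id = api.create_bucket(Bucket(name='logs'))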
def delete_bucket(self, id, **kwargs): # noqa: E501
"""Delete Storage Buckets # noqa: E501
Deletes an existing storage bucket by identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_bucket(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.delete_bucket_with_http_info(id, **kwargs) # noqa: E501
def delete_bucket_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete Storage Buckets # noqa: E501
Deletes an existing storage bucket by identifier # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_bucket_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(bool, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `delete_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
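    # Usage sketch (illustrative, continuing the earlier sketch): deletion by
    # identifier returns a bool per the declared response_type; the ID value
    # is a placeholder:
    #
    #   deleted = api.delete_bucket('1234')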
def download_file_from_bucket(self, id, file_name, **kwargs): # noqa: E501
"""Download File From Bucket # noqa: E501
        Downloads the Asset in the form of a zip file. Download is only available for importable Asset types (i.e. Software, Test, and Container).<br> <br> Based on the background flag, the download will be done in the foreground (false), background (true), or in a location as determined by Asset size (default).<br> <br> If the background flag is set to true (or if no value for the background flag is provided), and the Asset is larger than the site threshold, the Asset will be prepared for download in the background. In that case, an email with a link to retrieve the Asset will be sent. If the Asset is larger than the download threshold, it will be prepared for download in the background, and an email with a download link will be sent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_file_from_bucket(id, file_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param str file_name: The filename within the bucket to download (required)
:param bool background: Force the download to happen in the background
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.download_file_from_bucket_with_http_info(id, file_name, **kwargs) # noqa: E501
def download_file_from_bucket_with_http_info(self, id, file_name, **kwargs): # noqa: E501
"""Download File From Bucket # noqa: E501
        Downloads the Asset in the form of a zip file. Download is only available for importable Asset types (i.e. Software, Test, and Container).<br> <br> Based on the background flag, the download will be done in the foreground (false), background (true), or in a location as determined by Asset size (default).<br> <br> If the background flag is set to true (or if no value for the background flag is provided), and the Asset is larger than the site threshold, the Asset will be prepared for download in the background. In that case, an email with a link to retrieve the Asset will be sent. If the Asset is larger than the download threshold, it will be prepared for download in the background, and an email with a download link will be sent. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_file_from_bucket_with_http_info(id, file_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param str file_name: The filename within the bucket to download (required)
:param bool background: Force the download to happen in the background
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(bool, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'file_name', 'background'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method download_file_from_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `download_file_from_bucket`") # noqa: E501
# verify the required parameter 'file_name' is set
if self.api_client.client_side_validation and ('file_name' not in local_var_params or # noqa: E501
local_var_params['file_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `file_name` when calling `download_file_from_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
if 'file_name' in local_var_params and local_var_params['file_name'] is not None: # noqa: E501
query_params.append(('fileName', local_var_params['file_name'])) # noqa: E501
if 'background' in local_var_params and local_var_params['background'] is not None: # noqa: E501
query_params.append(('background', local_var_params['background'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}/download', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
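    # Usage sketch (illustrative): the bucket ID and file name are
    # placeholders, and background=True forces the background-download path
    # described in the docstring above:
    #
    #   api.download_file_from_bucket('1234', 'backup.tar.gz', background=True)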
def get_bucket(self, id, **kwargs): # noqa: E501
"""Retrieve Storage Buckets # noqa: E501
returns a storage bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_bucket(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_bucket_with_http_info(id, **kwargs) # noqa: E501
def get_bucket_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieve Storage Buckets # noqa: E501
returns a storage bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_bucket_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Bucket, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
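    # Usage sketch (illustrative): fetches a single Bucket model by a
    # placeholder identifier:
    #
    #   bucket = api.get_bucket('1234')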
def get_bucket_listing(self, id, **kwargs): # noqa: E501
"""List Bucket Contents # noqa: E501
lists all files contents for an existing bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_bucket_listing(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_bucket_listing_with_http_info(id, **kwargs) # noqa: E501
def get_bucket_listing_with_http_info(self, id, **kwargs): # noqa: E501
"""List Bucket Contents # noqa: E501
lists all files contents for an existing bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_bucket_listing_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_bucket_listing" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_bucket_listing`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}/listing', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
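    # Usage sketch (illustrative): the contents listing comes back as a plain
    # string per the declared response_type:
    #
    #   listing = api.get_bucket_listing('1234')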
def list_available_clouds_for_buckets(self, **kwargs): # noqa: E501
"""List Clouds Available For Bucket Creation # noqa: E501
returns a collection of clouds accessible to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_available_clouds_for_buckets(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int maxresults: Maximum number of results to return
:param int page: Requested page number
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[MinimalCloud]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_available_clouds_for_buckets_with_http_info(**kwargs) # noqa: E501
def list_available_clouds_for_buckets_with_http_info(self, **kwargs): # noqa: E501
"""List Clouds Available For Bucket Creation # noqa: E501
returns a collection of clouds accessible to the user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_available_clouds_for_buckets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param int maxresults: Maximum number of results to return
:param int page: Requested page number
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[MinimalCloud], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['maxresults', 'page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_available_clouds_for_buckets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'maxresults' in local_var_params and local_var_params['maxresults'] is not None: # noqa: E501
query_params.append(('maxresults', local_var_params['maxresults'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/clouds', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[MinimalCloud]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
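    # Usage sketch (illustrative): paging parameters are optional keyword
    # arguments; the values below are placeholders:
    #
    #   clouds = api.list_available_clouds_for_buckets(maxresults=50, page=0)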
def list_buckets(self, **kwargs): # noqa: E501
"""List Storage Buckets # noqa: E501
returns a collection of storage buckets # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_buckets(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str cloud: ID of the cloud
:param bool project: Include project buckets
:param int maxresults: Maximum number of results to return
:param int page: Requested page number
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: list[Bucket]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.list_buckets_with_http_info(**kwargs) # noqa: E501
def list_buckets_with_http_info(self, **kwargs): # noqa: E501
"""List Storage Buckets # noqa: E501
returns a collection of storage buckets # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_buckets_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str cloud: ID of the cloud
:param bool project: Include project buckets
:param int maxresults: Maximum number of results to return
:param int page: Requested page number
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(list[Bucket], status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['cloud', 'project', 'maxresults', 'page'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method list_buckets" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'cloud' in local_var_params and local_var_params['cloud'] is not None: # noqa: E501
query_params.append(('cloud', local_var_params['cloud'])) # noqa: E501
if 'project' in local_var_params and local_var_params['project'] is not None: # noqa: E501
query_params.append(('project', local_var_params['project'])) # noqa: E501
if 'maxresults' in local_var_params and local_var_params['maxresults'] is not None: # noqa: E501
query_params.append(('maxresults', local_var_params['maxresults'])) # noqa: E501
if 'page' in local_var_params and local_var_params['page'] is not None: # noqa: E501
query_params.append(('page', local_var_params['page'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Bucket]', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
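    # Usage sketch (illustrative): filter by a placeholder cloud ID and
    # include project-level buckets in the result:
    #
    #   buckets = api.list_buckets(cloud='42', project=True)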
def submit_bucket_resource_to_submission_service(self, id, submission_service_id, file_name, **kwargs): # noqa: E501
"""Submit Bucket Resource to the Project's Submission Service # noqa: E501
Publishes a resource in the specified bucket to the requested Submission Service.<br> <br> The requested Project Submission Service will act as a template. Credentials provided when submitting to the Service will override the Project Submission Service's credentials. However, neither the Host nor Port of the Service can be overridden.<br> <br> If the Service's endpoint is an SFTP Host, the Submission will only be able to override the remote path (i.e. if one has not already been defined in this default Submission Service).<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_bucket_resource_to_submission_service(id, submission_service_id, file_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of the bucket to publish a resource from (required)
:param str submission_service_id: ID of project submission service (required)
:param str file_name: The filename within the bucket to download (required)
:param InputSubmissionServiceForAssetSubmission input_submission_service_for_asset_submission: Submission service override values
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: bool
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.submit_bucket_resource_to_submission_service_with_http_info(id, submission_service_id, file_name, **kwargs) # noqa: E501
def submit_bucket_resource_to_submission_service_with_http_info(self, id, submission_service_id, file_name, **kwargs): # noqa: E501
"""Submit Bucket Resource to the Project's Submission Service # noqa: E501
Publishes a resource in the specified bucket to the requested Submission Service.<br> <br> The requested Project Submission Service will act as a template. Credentials provided when submitting to the Service will override the Project Submission Service's credentials. However, neither the Host nor Port of the Service can be overridden.<br> <br> If the Service's endpoint is an SFTP Host, the Submission will only be able to override the remote path (i.e. if one has not already been defined in this default Submission Service).<br> # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_bucket_resource_to_submission_service_with_http_info(id, submission_service_id, file_name, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of the bucket to publish a resource from (required)
:param str submission_service_id: ID of project submission service (required)
:param str file_name: The filename within the bucket to download (required)
:param InputSubmissionServiceForAssetSubmission input_submission_service_for_asset_submission: Submission service override values
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(bool, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'submission_service_id', 'file_name', 'input_submission_service_for_asset_submission'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method submit_bucket_resource_to_submission_service" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `submit_bucket_resource_to_submission_service`") # noqa: E501
# verify the required parameter 'submission_service_id' is set
if self.api_client.client_side_validation and ('submission_service_id' not in local_var_params or # noqa: E501
local_var_params['submission_service_id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `submission_service_id` when calling `submit_bucket_resource_to_submission_service`") # noqa: E501
# verify the required parameter 'file_name' is set
if self.api_client.client_side_validation and ('file_name' not in local_var_params or # noqa: E501
local_var_params['file_name'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `file_name` when calling `submit_bucket_resource_to_submission_service`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
if 'submission_service_id' in local_var_params:
path_params['submission_service_id'] = local_var_params['submission_service_id'] # noqa: E501
query_params = []
if 'file_name' in local_var_params and local_var_params['file_name'] is not None: # noqa: E501
query_params.append(('fileName', local_var_params['file_name'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'input_submission_service_for_asset_submission' in local_var_params:
body_params = local_var_params['input_submission_service_for_asset_submission']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}/submit/{submission_service_id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='bool', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
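    # Usage sketch (illustrative): the bucket ID, submission service ID, and
    # file name are placeholders; the optional override model is omitted so
    # the service's own credentials apply:
    #
    #   submitted = api.submit_bucket_resource_to_submission_service(
    #       '1234', '99', 'asset.zip')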
def update_bucket(self, id, bucket, **kwargs): # noqa: E501
"""Update Storage Buckets # noqa: E501
updates the configuration information for an existing bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_bucket(id, bucket, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param Bucket bucket: The bucket creation information (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Bucket
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_bucket_with_http_info(id, bucket, **kwargs) # noqa: E501
def update_bucket_with_http_info(self, id, bucket, **kwargs): # noqa: E501
"""Update Storage Buckets # noqa: E501
updates the configuration information for an existing bucket # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_bucket_with_http_info(id, bucket, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param Bucket bucket: The bucket creation information (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(Bucket, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'bucket'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method update_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `update_bucket`") # noqa: E501
# verify the required parameter 'bucket' is set
if self.api_client.client_side_validation and ('bucket' not in local_var_params or # noqa: E501
local_var_params['bucket'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `bucket` when calling `update_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'bucket' in local_var_params:
body_params = local_var_params['bucket']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Bucket', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
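    # Usage sketch (illustrative): re-submit a Bucket model fetched earlier
    # with get_bucket after modifying it locally:
    #
    #   bucket = api.get_bucket('1234')
    #   updated = api.update_bucket('1234', bucket)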
def upload_file_to_bucket(self, id, **kwargs): # noqa: E501
"""Upload File to Bucket # noqa: E501
        Uploads a file to a bucket.<br> <br> File must be submitted as multipart-form data, with a file element named \"file\" and a filename field.<br> <br> A \"Connection: Keep-Alive\" configuration may be needed for larger sized files, due to the time it takes to copy to the server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file_to_bucket(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param list[file] file:
:param str filename:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: int
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.upload_file_to_bucket_with_http_info(id, **kwargs) # noqa: E501
def upload_file_to_bucket_with_http_info(self, id, **kwargs): # noqa: E501
"""Upload File to Bucket # noqa: E501
        Uploads a file to a bucket.<br> <br> File must be submitted as multipart-form data, with a file element named \"file\" and a filename field.<br> <br> A \"Connection: Keep-Alive\" configuration may be needed for larger sized files, due to the time it takes to copy to the server. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file_to_bucket_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: ID of bucket (required)
:param list[file] file:
:param str filename:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(int, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id', 'file', 'filename'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method upload_file_to_bucket" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `upload_file_to_bucket`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'file' in local_var_params:
local_var_files['file'] = local_var_params['file'] # noqa: E501
collection_formats['file'] = 'csv' # noqa: E501
if 'filename' in local_var_params:
form_params.append(('filename', local_var_params['filename'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['APIKeyHeader', 'Username'] # noqa: E501
return self.api_client.call_api(
'/api/buckets/{id}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='int', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
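    # --- Illustrative usage sketch (not part of the generated client) ---
    # The enclosing API class name is not visible in this excerpt, so
    # `BucketsApi` below is an assumed name; `ApiClient` is the client class
    # this module imports from cons3rt.api_client.
    #
    #   api = BucketsApi(ApiClient(configuration))
    #   new_file_id = api.upload_file_to_bucket(
    #       bucket_id, file=[open('data.bin', 'rb')], filename='data.bin')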
|
[
"cons3rt.api_client.ApiClient",
"cons3rt.exceptions.ApiValueError",
"six.iteritems",
"cons3rt.exceptions.ApiTypeError"
] |
[((4473, 4514), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (4486, 4514), False, 'import six\n'), ((9567, 9608), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (9580, 9608), False, 'import six\n'), ((16302, 16343), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (16315, 16343), False, 'import six\n'), ((21977, 22018), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (21990, 22018), False, 'import six\n'), ((26882, 26923), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (26895, 26923), False, 'import six\n'), ((32097, 32138), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (32110, 32138), False, 'import six\n'), ((37254, 37295), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (37267, 37295), False, 'import six\n'), ((44546, 44587), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (44559, 44587), False, 'import six\n'), ((51395, 51436), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (51408, 51436), False, 'import six\n'), ((57610, 57651), 'six.iteritems', 'six.iteritems', (["local_var_params['kwargs']"], {}), "(local_var_params['kwargs'])\n", (57623, 57651), False, 'import six\n'), ((1420, 1431), 'cons3rt.api_client.ApiClient', 'ApiClient', ([], {}), '()\n', (1429, 1431), False, 'from cons3rt.api_client import ApiClient\n'), ((5088, 5178), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `bucket` when calling `create_bucket`"""'], {}), "(\n 'Missing the required parameter `bucket` when calling `create_bucket`')\n", (5101, 5178), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((10170, 10256), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `delete_bucket`"""'], {}), "(\n 'Missing the required parameter `id` when calling `delete_bucket`')\n", (10183, 10256), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((16917, 17020), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `download_file_from_bucket`"""'], {}), "(\n 'Missing the required parameter `id` when calling `download_file_from_bucket`'\n )\n", (16930, 17020), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((17320, 17430), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `file_name` when calling `download_file_from_bucket`"""'], {}), "(\n 'Missing the required parameter `file_name` when calling `download_file_from_bucket`'\n )\n", (17333, 17430), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((22577, 22655), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `get_bucket`"""'], {}), "('Missing the required parameter `id` when calling `get_bucket`')\n", (22590, 22655), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((27490, 27581), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `get_bucket_listing`"""'], {}), "(\n 'Missing the required parameter `id` when calling `get_bucket_listing`')\n", (27503, 27581), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((45180, 45302), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `submit_bucket_resource_to_submission_service`"""'], {}), "(\n 'Missing the required parameter `id` when calling `submit_bucket_resource_to_submission_service`'\n )\n", (45193, 45302), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((45638, 45779), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `submission_service_id` when calling `submit_bucket_resource_to_submission_service`"""'], {}), "(\n 'Missing the required parameter `submission_service_id` when calling `submit_bucket_resource_to_submission_service`'\n )\n", (45651, 45779), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((46079, 46208), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `file_name` when calling `submit_bucket_resource_to_submission_service`"""'], {}), "(\n 'Missing the required parameter `file_name` when calling `submit_bucket_resource_to_submission_service`'\n )\n", (46092, 46208), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((51998, 52084), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `update_bucket`"""'], {}), "(\n 'Missing the required parameter `id` when calling `update_bucket`')\n", (52011, 52084), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((52380, 52470), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `bucket` when calling `update_bucket`"""'], {}), "(\n 'Missing the required parameter `bucket` when calling `update_bucket`')\n", (52393, 52470), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((58221, 58315), 'cons3rt.exceptions.ApiValueError', 'ApiValueError', (['"""Missing the required parameter `id` when calling `upload_file_to_bucket`"""'], {}), "(\n 'Missing the required parameter `id` when calling `upload_file_to_bucket`')\n", (58234, 58315), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((4576, 4666), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method create_bucket" % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method create_bucket" % key)\n', (4588, 4666), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((9670, 9760), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method delete_bucket" % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method delete_bucket" % key)\n', (9682, 9760), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((16405, 16512), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method download_file_from_bucket"\n % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method download_file_from_bucket"\n % key)\n', (16417, 16512), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((22080, 22166), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method get_bucket" % key)'], {}), '("Got an unexpected keyword argument \'%s\' to method get_bucket" %\n key)\n', (22092, 22166), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((26985, 27084), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method get_bucket_listing" % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method get_bucket_listing" %\n key)\n', (26997, 27084), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((32200, 32315), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method list_available_clouds_for_buckets"\n % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method list_available_clouds_for_buckets"\n % key)\n', (32212, 32315), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((37357, 37446), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method list_buckets" % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method list_buckets" % key)\n', (37369, 37446), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((44649, 44775), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method submit_bucket_resource_to_submission_service"\n % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method submit_bucket_resource_to_submission_service"\n % key)\n', (44661, 44775), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((51498, 51588), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method update_bucket" % key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method update_bucket" % key)\n', (51510, 51588), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n'), ((57713, 57815), 'cons3rt.exceptions.ApiTypeError', 'ApiTypeError', (['("Got an unexpected keyword argument \'%s\' to method upload_file_to_bucket" %\n key)'], {}), '(\n "Got an unexpected keyword argument \'%s\' to method upload_file_to_bucket" %\n key)\n', (57725, 57815), False, 'from cons3rt.exceptions import ApiTypeError, ApiValueError\n')]
|
def pooled_cohen_kappa(samples_a, samples_b, weight_type=None, questions=None):
"""
Compute the pooled Cohen's Kappa for the given samples.
From:
<NAME>., <NAME>., <NAME>., & <NAME>. (2008).
Using pooled kappa to summarize interrater agreement across many items.
Field methods, 20(3), 272-282.
With pooled kappa:
k_p = (average_accuracy - average_expected_random_agreement) / (1 - average_expected_random_agreement)
Where:
    average_accuracy = np.mean(column_wise_agreements)
    average_expected_random_agreement = np.mean(expected_random_agreements)
    column_wise_agreements = [agreement(samples_a[:, col], samples_b[:, col]) for col in range(n_cols)]
    expected_random_agreements = [expected_random_agreement(samples_a[:, col], samples_b[:, col]) for col in range(n_cols)]
A weighted version of the pooled Cohen's Kappa is also available in which the contingency table is weighted using
either quadratic or linear weights. If weight_type is None, then the weight matrix is the identity matrix.
To compute the weighted Cohen's Kappa, the questions parameter must be provided.
:param samples_a: list of samples from the first rater
:param samples_b: list of samples from the second rater
:param weight_type: Union[None, "linear", "quadratic"] weights type to use for the agreement calculation
:param questions: List[Question] if weights is not None, this is the list of questions and their values
:return: pooled Cohen's Kappa
"""
n = len(samples_a)
ncols = len(samples_a[0])
if n == 0 or ncols == 0:
return 0
if n != len(samples_b) or ncols != len(samples_b[0]):
raise Exception("samples_a and samples_b must have the same length")
if weight_type is not None and (weight_type not in ["linear", "quadratic"] or questions is None):
raise Exception("weights must be None, 'linear' or 'quadratic'")
import numpy as np
# Convert to numpy arrays
samples_a = np.array(samples_a)
samples_b = np.array(samples_b)
def weight(i, j, c):
"""
Compute the weight for a pair of values.
"""
if weight_type == "linear":
return 1 - (abs(i - j) / (c - 1))
elif weight_type == "quadratic":
return 1 - (abs(i - j) / (c - 1)) ** 2
else:
return 1 if i == j else 0
def agreement(colum_a, colum_b, values=None):
"""
Compute the agreement between two columns.
"""
if weight_type is not None:
# Build the contingency table
c = len(values)
contingency_table = np.zeros((c, c))
for i, value_a in enumerate(values):
for j, value_b in enumerate(values):
contingency_table[i, j] = np.mean(
weight(i, j, c) * (colum_a == value_a) * (colum_b == value_b))
# Compute the agreement
return np.sum(contingency_table)
else:
return np.mean(colum_a == colum_b)
def expected_random_agreement(colum_a, colum_b, values=None):
"""
Compute the expected random agreement between two columns.
"""
if weight_type is not None:
# Build the contingency table
c = len(values)
contingency_table = np.zeros((c, c))
for i, value_a in enumerate(values):
for j, value_b in enumerate(values):
contingency_table[i, j] = np.sum((colum_a == value_a) * (colum_b == value_b))
# Compute row and column sums
row_sums = np.sum(contingency_table, axis=1)
col_sums = np.sum(contingency_table, axis=0)
# Build the expected contingency table if independent
expected_contingency_table = np.zeros((c, c))
for i in range(c):
for j in range(c):
expected_contingency_table[i, j] = weight(i,j,c) * (row_sums[i] * col_sums[j]) / n**2
# Compute the expected random agreement
return np.sum(expected_contingency_table)
else:
            # For each observed value, compute the marginal probability for each rater
            unique_values = np.unique(np.concatenate((colum_a, colum_b)))
            expected_independent_agreement = []
            for value in unique_values:
                marg_probabilities_a = np.mean(colum_a == value)
                marg_probabilities_b = np.mean(colum_b == value)
expected_independent_agreement.append(marg_probabilities_a * marg_probabilities_b)
# Compute the expected random agreement
return np.sum(expected_independent_agreement)
    # Compute per-column agreement (joint probability of agreement) and expected chance agreement between samples_a and samples_b
accuracies = np.zeros(ncols)
marg_probabilities = np.zeros(ncols)
for col in range(ncols):
values = None if weight_type is None or questions is None else questions[col].values
accuracies[col] = agreement(samples_a[:, col], samples_b[:, col], values=values)
marg_probabilities[col] = expected_random_agreement(samples_a[:, col], samples_b[:, col], values=values)
# Compute pooled accuracy
average_accuracy = np.mean(accuracies)
# Compute pooled expected random agreement
average_expected_random_agreement = np.mean(marg_probabilities)
# Compute pooled Cohen's Kappa
pooled_cohen_kappa = (average_accuracy - average_expected_random_agreement) / (
1 - average_expected_random_agreement)
return pooled_cohen_kappa
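# --- Illustrative usage sketch (toy data, not part of the original module) ---
# Two raters answer three questions for four items; identical columns drive
# the pooled kappa toward 1, chance-level agreement drives it toward 0.
#
#   samples_a = [[1, 0, 2], [1, 1, 2], [0, 0, 1], [2, 1, 0]]
#   samples_b = [[1, 0, 2], [1, 0, 2], [0, 0, 1], [2, 1, 1]]
#   kappa = pooled_cohen_kappa(samples_a, samples_b)  # unweighted variant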
|
[
"numpy.sum",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] |
[((2022, 2041), 'numpy.array', 'np.array', (['samples_a'], {}), '(samples_a)\n', (2030, 2041), True, 'import numpy as np\n'), ((2058, 2077), 'numpy.array', 'np.array', (['samples_b'], {}), '(samples_b)\n', (2066, 2077), True, 'import numpy as np\n'), ((4942, 4957), 'numpy.zeros', 'np.zeros', (['ncols'], {}), '(ncols)\n', (4950, 4957), True, 'import numpy as np\n'), ((4983, 4998), 'numpy.zeros', 'np.zeros', (['ncols'], {}), '(ncols)\n', (4991, 4998), True, 'import numpy as np\n'), ((5377, 5396), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (5384, 5396), True, 'import numpy as np\n'), ((5484, 5511), 'numpy.mean', 'np.mean', (['marg_probabilities'], {}), '(marg_probabilities)\n', (5491, 5511), True, 'import numpy as np\n'), ((2667, 2683), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (2675, 2683), True, 'import numpy as np\n'), ((2985, 3010), 'numpy.sum', 'np.sum', (['contingency_table'], {}), '(contingency_table)\n', (2991, 3010), True, 'import numpy as np\n'), ((3044, 3071), 'numpy.mean', 'np.mean', (['(colum_a == colum_b)'], {}), '(colum_a == colum_b)\n', (3051, 3071), True, 'import numpy as np\n'), ((3368, 3384), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (3376, 3384), True, 'import numpy as np\n'), ((3652, 3685), 'numpy.sum', 'np.sum', (['contingency_table'], {'axis': '(1)'}), '(contingency_table, axis=1)\n', (3658, 3685), True, 'import numpy as np\n'), ((3709, 3742), 'numpy.sum', 'np.sum', (['contingency_table'], {'axis': '(0)'}), '(contingency_table, axis=0)\n', (3715, 3742), True, 'import numpy as np\n'), ((3851, 3867), 'numpy.zeros', 'np.zeros', (['(c, c)'], {}), '((c, c))\n', (3859, 3867), True, 'import numpy as np\n'), ((4112, 4146), 'numpy.sum', 'np.sum', (['expected_contingency_table'], {}), '(expected_contingency_table)\n', (4118, 4146), True, 'import numpy as np\n'), ((4744, 4782), 'numpy.sum', 'np.sum', (['expected_independent_agreement'], {}), '(expected_independent_agreement)\n', (4750, 4782), True, 'import numpy as np\n'), ((4300, 4334), 'numpy.concatenate', 'np.concatenate', (['(colum_a, colum_b)'], {}), '((colum_a, colum_b))\n', (4314, 4334), True, 'import numpy as np\n'), ((4463, 4498), 'numpy.mean', 'np.mean', (['(samples_a[:, col] == value)'], {}), '(samples_a[:, col] == value)\n', (4470, 4498), True, 'import numpy as np\n'), ((4538, 4573), 'numpy.mean', 'np.mean', (['(samples_b[:, col] == value)'], {}), '(samples_b[:, col] == value)\n', (4545, 4573), True, 'import numpy as np\n'), ((3534, 3585), 'numpy.sum', 'np.sum', (['((colum_a == value_a) * (colum_b == value_b))'], {}), '((colum_a == value_a) * (colum_b == value_b))\n', (3540, 3585), True, 'import numpy as np\n')]
|
# Unit tests related to 'Pickups' (https://www.easypost.com/docs/api#pickups).
import time
import datetime
import easypost
import pytest
import pytz
ONE_DAY = datetime.timedelta(days=1)
@pytest.fixture
def noon_on_next_monday():
today = datetime.date.today()
next_monday = today + datetime.timedelta(days=(7 - today.weekday()))
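    # NOTE: attaching a pytz zone directly via tzinfo uses the zone's historical
    # LMT offset rather than EST/EDT; pytz recommends localize(). The small
    # offset difference is harmless for this pickup-window fixture.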
noon_est = datetime.time(12, 0, tzinfo=pytz.timezone('America/New_York'))
return datetime.datetime.combine(next_monday, noon_est)
@pytest.mark.vcr()
def test_pickup_batch(noon_on_next_monday, vcr):
    # Create a Batch containing multiple Shipments, then buy a Pickup for it and assert that the purchase succeeded.
pickup_address = easypost.Address.create(
verify=['delivery'],
name='<NAME>',
company='EasyPost',
street1='2889 W ASHTON BLVD',
street2='SUITE 325',
city='Lehi',
state='UT',
zip='84042',
country='US',
phone='415-456-7890'
)
shipments = [
{
'to_address': {
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
'from_address': pickup_address,
'parcel': {
'weight': 10.2
},
'carrier': 'USPS',
'service': 'Priority'
}, {
'to_address': {
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
'from_address': {
'name': 'Saw<NAME>',
'company': 'EasyPost',
'street1': '164 Townsend St',
'city': 'San Francisco',
'state': 'CA',
'zip': '94107',
'phone': '415-456-7890'
},
'parcel': {
'weight': 10.2
},
'carrier': 'USPS',
'service': 'Priority'
}
]
batch = easypost.Batch.create_and_buy(shipments=shipments)
while batch.state in ('creating', 'queued_for_purchase', 'purchasing'):
if vcr.record_mode != 'none':
time.sleep(0.1)
batch.refresh()
# Insure the shipments after purchase
if batch.state == 'purchased':
for shipment in batch.shipments:
shipment.insure(amount=100)
pickup = easypost.Pickup.create(
address=pickup_address,
batch=batch,
reference='internal_id_1234',
min_datetime=noon_on_next_monday.isoformat(),
max_datetime=(noon_on_next_monday + ONE_DAY).isoformat(),
is_account_address=True,
instructions='Special pickup instructions'
)
assert pickup.pickup_rates != [], pickup.messages
pickup.buy(
carrier=pickup.pickup_rates[0].carrier,
service=pickup.pickup_rates[0].service
)
@pytest.mark.vcr()
def test_single_pickup(noon_on_next_monday):
"""Create a Shipment, buy it, and then buy a pickup for it"""
pickup_address = easypost.Address.create(
verify=['delivery'],
name='<NAME>',
company='EasyPost',
street1='2889 W ASHTON BLVD',
street2='SUITE 325',
city='Lehi',
state='UT',
zip='84042',
country='US',
phone='415-456-7890'
)
shipment = easypost.Shipment.create(
to_address={
'name': 'Customer',
'street1': '8308 Fenway Rd',
'city': 'Bethesda',
'state': 'MD',
'zip': '20817',
'country': 'US'
},
from_address=pickup_address,
parcel={
'weight': 21.2
},
)
shipment.buy(rate=shipment.lowest_rate('USPS', 'Priority'), insurance=100.00)
pickup = easypost.Pickup.create(
address=pickup_address,
shipment=shipment,
reference='internal_id_1234',
min_datetime=noon_on_next_monday.isoformat(),
max_datetime=(noon_on_next_monday + ONE_DAY).isoformat(),
is_account_address=True,
instructions='Special pickup instructions'
)
assert pickup.pickup_rates != [], pickup.messages
pickup.buy(
carrier=pickup.pickup_rates[0].carrier,
service=pickup.pickup_rates[0].service
)
|
[
"pytest.mark.vcr",
"easypost.Shipment.create",
"easypost.Batch.create_and_buy",
"datetime.date.today",
"easypost.Address.create",
"time.sleep",
"datetime.timedelta",
"pytz.timezone",
"datetime.datetime.combine"
] |
[((161, 187), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (179, 187), False, 'import datetime\n'), ((481, 498), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (496, 498), False, 'import pytest\n'), ((3067, 3084), 'pytest.mark.vcr', 'pytest.mark.vcr', ([], {}), '()\n', (3082, 3084), False, 'import pytest\n'), ((245, 266), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (264, 266), False, 'import datetime\n'), ((429, 477), 'datetime.datetime.combine', 'datetime.datetime.combine', (['next_monday', 'noon_est'], {}), '(next_monday, noon_est)\n', (454, 477), False, 'import datetime\n'), ((679, 893), 'easypost.Address.create', 'easypost.Address.create', ([], {'verify': "['delivery']", 'name': '"""<NAME>"""', 'company': '"""EasyPost"""', 'street1': '"""2889 W ASHTON BLVD"""', 'street2': '"""SUITE 325"""', 'city': '"""Lehi"""', 'state': '"""UT"""', 'zip': '"""84042"""', 'country': '"""US"""', 'phone': '"""415-456-7890"""'}), "(verify=['delivery'], name='<NAME>', company=\n 'EasyPost', street1='2889 W ASHTON BLVD', street2='SUITE 325', city=\n 'Lehi', state='UT', zip='84042', country='US', phone='415-456-7890')\n", (702, 893), False, 'import easypost\n'), ((2176, 2226), 'easypost.Batch.create_and_buy', 'easypost.Batch.create_and_buy', ([], {'shipments': 'shipments'}), '(shipments=shipments)\n', (2205, 2226), False, 'import easypost\n'), ((3218, 3432), 'easypost.Address.create', 'easypost.Address.create', ([], {'verify': "['delivery']", 'name': '"""<NAME>"""', 'company': '"""EasyPost"""', 'street1': '"""2889 W ASHTON BLVD"""', 'street2': '"""SUITE 325"""', 'city': '"""Lehi"""', 'state': '"""UT"""', 'zip': '"""84042"""', 'country': '"""US"""', 'phone': '"""415-456-7890"""'}), "(verify=['delivery'], name='<NAME>', company=\n 'EasyPost', street1='2889 W ASHTON BLVD', street2='SUITE 325', city=\n 'Lehi', state='UT', zip='84042', country='US', phone='415-456-7890')\n", (3241, 3432), False, 'import easypost\n'), ((3525, 3741), 'easypost.Shipment.create', 'easypost.Shipment.create', ([], {'to_address': "{'name': 'Customer', 'street1': '8308 Fenway Rd', 'city': 'Bethesda',\n 'state': 'MD', 'zip': '20817', 'country': 'US'}", 'from_address': 'pickup_address', 'parcel': "{'weight': 21.2}"}), "(to_address={'name': 'Customer', 'street1':\n '8308 Fenway Rd', 'city': 'Bethesda', 'state': 'MD', 'zip': '20817',\n 'country': 'US'}, from_address=pickup_address, parcel={'weight': 21.2})\n", (3549, 3741), False, 'import easypost\n'), ((383, 416), 'pytz.timezone', 'pytz.timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (396, 416), False, 'import pytz\n'), ((2353, 2368), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2363, 2368), False, 'import time\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from .occ_targets_template import OccTargetsTemplate
from ....utils import coords_utils, point_box_utils
class OccTargets3D(OccTargetsTemplate):
def __init__(
self,
model_cfg,
voxel_size,
point_cloud_range,
data_cfg,
grid_size,
num_class,
voxel_centers,
):
super().__init__(
model_cfg,
voxel_size,
point_cloud_range,
data_cfg,
grid_size,
num_class,
voxel_centers,
)
        self.reg = model_cfg.PARAMS.get("REG", False)  # falls back to False if REG is absent; configs typically enable it
def create_predict_area(
self, voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict
):
return self.create_predict_area2d(
voxel_bnysynxsxnzsz, voxel_num_points_float, batch_size, batch_dict
)
def forward(self, batch_dict, **kwargs):
# voxels: [M, max_points, ndim] float tensor. only contain points.
# voxel_coords: [M, 3] int32 tensor. zyx format.
# voxel_num_points: [M] int32 tensor.
voxel_features, voxel_num_points, coords = (
batch_dict["voxels"],
batch_dict["voxel_num_points"],
batch_dict["voxel_coords"],
)
# print("voxel_features", voxel_features.shape)
voxel_count = voxel_features.shape[1]
# print("voxel_count", voxel_features.shape[0])
mask = self.get_paddings_indicator(voxel_num_points, voxel_count, axis=0)
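        # mask flags the real (non-padded) point slots within each voxel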
batch_dict["voxel_point_mask"] = mask
batch_dict = (
self.create_voxel_res_label(batch_dict, mask)
if self.reg
else self.create_voxel_label(batch_dict, mask)
)
# if test inference speed
# if batch_dict["is_train"]:
# batch_dict = self.create_voxel_res_label(batch_dict, mask)
# else:
# batch_dict["point_dist_mask"] = torch.zeros((batch_dict["gt_boxes"].shape[0], self.ny, self.nx, self.nz * self.sz * self.sy * self.sx), device="cuda")
if "point_drop_inds" in batch_dict.keys():
inds = batch_dict["point_drop_inds"]
mask[inds[:, 0], inds[:, 1]] = torch.zeros_like(
inds[:, 0], dtype=torch.bool
)
batch_dict["final_point_mask"] = mask
return batch_dict
def create_voxel_res_label(self, batch_dict, valid_mask):
occ_pnts = torch.cat(
[
coords_utils.uvd2absxyz(
batch_dict["voxels"][..., 0],
batch_dict["voxels"][..., 1],
batch_dict["voxels"][..., 2],
self.data_cfg.OCC.COORD_TYPE,
),
batch_dict["voxels"][..., 3:],
],
dim=-1,
)
if self.point_coding == "absxyz" or self.point_coding == True:
batch_dict["voxels"] = occ_pnts
elif self.point_coding == "both":
batch_dict["voxels"] = torch.cat(
[occ_pnts[..., :3], batch_dict["voxels"]], dim=-1
)
voxel_features, voxel_coords, gt_boxes_num, gt_boxes, bs = (
occ_pnts,
batch_dict["voxel_coords"],
batch_dict["gt_boxes_num"],
batch_dict["gt_boxes"],
batch_dict["gt_boxes"].shape[0],
)
if self.num_class == 1:
gt_label = (gt_boxes[..., -1:] > 1e-2).to(torch.float32)
gt_boxes = torch.cat([gt_boxes[..., :-1], gt_label], dim=-1)
valid_coords_bnznynx, valid_voxel_features = self.get_valid(
valid_mask, voxel_coords, voxel_features
)
voxelwise_mask = self.get_voxelwise_mask(valid_coords_bnznynx, bs)
vcc_mask = self.create_predict_area3d(bs, valid_coords_bnznynx)
occ_voxelwise_mask = self.filter_occ(
self.occ_from_ocp(
vcc_mask,
batch_dict,
bs,
voxelwise_mask,
valid_voxel_features[..., :3],
valid_coords_bnznynx[..., 0],
empty_sur_thresh=self.data_cfg.OCC.EMPT_SUR_THRESH,
type=self.data_cfg.OCC.COORD_TYPE,
),
occ_pnts,
voxelwise_mask,
)
(
fore_voxelwise_mask,
fore_res_mtrx,
mirr_fore_voxelwise_mask,
mirr_res_mtrx,
) = self.get_fore_mirr_voxelwise_mask_res(
batch_dict,
bs,
valid_coords_bnznynx,
valid_voxel_features,
gt_boxes_num,
gt_boxes,
)
mirr_fore_voxelwise_mask = mirr_fore_voxelwise_mask * (
1 - voxelwise_mask
) # exclude original occupied
mirr_res_mtrx = mirr_res_mtrx * (1 - voxelwise_mask).unsqueeze(1)
if self.model_cfg.TARGETS.TMPLT: # default = True
bm_voxelwise_mask, bm_res_mtrx = self.get_bm_voxelwise_mask_res(
batch_dict, bs, gt_boxes_num, gt_boxes
)
bm_voxelwise_mask = (
bm_voxelwise_mask
* (1 - voxelwise_mask)
* (1 - mirr_fore_voxelwise_mask)
)
bm_res_mtrx = (
bm_res_mtrx
* (1 - voxelwise_mask).unsqueeze(1)
* (1 - mirr_fore_voxelwise_mask).unsqueeze(1)
)
else:
bm_voxelwise_mask = torch.zeros_like(
voxelwise_mask, dtype=voxelwise_mask.dtype, device=voxelwise_mask.device
)
##### forebox_label #####
forebox_label = None
if self.data_cfg.OCC.BOX_WEIGHT != 1.0:
bs, max_num_box, box_c = list(gt_boxes.shape)
forebox_label = torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.int8, device="cuda"
)
shift = torch.tensor(
np.asarray([[0.0, 0.0, 0.0]]), device="cuda", dtype=torch.float32
)
for i in range(bs):
cur_gt_boxes = gt_boxes[i, : gt_boxes_num[i]]
all_voxel_centers_2d = (
point_box_utils.rotatez(
self.all_voxel_centers_2d, batch_dict["rot_z"][i]
)
if "rot_z" in batch_dict
else self.all_voxel_centers_2d
)
voxel_box_label2d = (
point_box_utils.torch_points_in_box_2d_mask(
all_voxel_centers_2d, cur_gt_boxes, shift=shift[..., :2]
)
.view(self.ny, self.nx)
.nonzero()
)
if voxel_box_label2d.shape[0] > 0:
all_voxel_centers_filtered = self.all_voxel_centers[
:, voxel_box_label2d[:, 0], voxel_box_label2d[:, 1], ...
].reshape(-1, 3)
if "rot_z" in batch_dict:
all_voxel_centers_filtered = point_box_utils.rotatez(
all_voxel_centers_filtered, batch_dict["rot_z"][i]
)
voxel_box_label = point_box_utils.torch_points_in_box_3d_label(
all_voxel_centers_filtered,
cur_gt_boxes,
gt_boxes_num[i],
shift=shift,
)[0]
forebox_label[
i, :, voxel_box_label2d[:, 0], voxel_box_label2d[:, 1]
] = voxel_box_label.view(self.nz, -1)
if self.data_cfg.OCC.DROPOUT_RATE > 1e-3 and batch_dict["is_train"]:
batch_dict = self.dropout(batch_dict, fore_voxelwise_mask)
batch_dict = self.prepare_cls_loss_map(
batch_dict,
vcc_mask,
voxelwise_mask,
occ_voxelwise_mask,
fore_voxelwise_mask,
mirr_fore_voxelwise_mask,
bm_voxelwise_mask,
forebox_label=forebox_label,
)
batch_dict = self.prepare_reg_loss_map(
batch_dict, fore_res_mtrx, mirr_res_mtrx, bm_res_mtrx
)
return batch_dict
def get_bm_voxelwise_mask_res(self, batch_dict, bs, gt_boxes_num, gt_boxes):
bm_voxelwise_mask = torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda"
)
# bm_points added during augmentation, see BtcDet/btcdet/datasets/augmentor/multi_best_match_querier.py
if "bm_points" in batch_dict and len(batch_dict["bm_points"]) > 0:
bm_binds, bm_carte_points = (
batch_dict["bm_points"][..., 0:1].to(torch.int64),
batch_dict["bm_points"][..., 1:],
)
label_array = torch.nonzero(
point_box_utils.torch_points_in_box_3d_label_batch(
bm_carte_points, bm_binds, gt_boxes, gt_boxes_num, bs
)
)[..., 0]
bm_binds = bm_binds[..., 0][label_array]
bm_carte_points = bm_carte_points[label_array, :]
occ_coords_bm_points = coords_utils.cartesian_occ_coords(
bm_carte_points, type=self.data_cfg.OCC.COORD_TYPE
)
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][bm_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_bm_points = common_utils.rotate_points_along_z(
occ_coords_bm_points.unsqueeze(1), noise_rotation
).squeeze(1)
else:
occ_coords_bm_points[..., 1] += rot_z
inrange_coords_bm, inrange_inds_bm = self.point2coords_inrange(
occ_coords_bm_points,
self.point_origin_tensor,
self.point_max_tensor,
self.max_grid_tensor,
self.min_grid_tensor,
self.voxel_size,
)
bm_coords = torch.cat(
[
bm_binds[inrange_inds_bm].unsqueeze(-1),
self.xyz2zyx(inrange_coords_bm),
],
dim=-1,
)
bm_res_mtrx = self.get_mean_res(
bm_carte_points[inrange_inds_bm],
bm_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
bm_voxelwise_mask[
bm_coords[..., 0],
bm_coords[..., 1],
bm_coords[..., 2],
bm_coords[..., 3],
] = torch.ones_like(
bm_coords[..., 0], dtype=torch.uint8, device=bm_voxelwise_mask.device
) ##
else:
bm_res_mtrx = torch.zeros(
[bs, 3, self.nz, self.ny, self.nx], dtype=torch.float32, device="cuda"
)
return bm_voxelwise_mask, bm_res_mtrx
def get_mean_res(self, feat, coords, bs, nz, ny, nx, batch_dict, rot=False):
xyz_spatial = torch.zeros(
[bs, 3, nz, ny, nx], dtype=torch.float32, device="cuda"
)
if len(coords) > 0:
uni_coords, inverse_indices, labels_count = torch.unique(
coords, return_inverse=True, return_counts=True, dim=0
)
mean_xyz = (
torch.zeros(
[uni_coords.shape[0], 3], dtype=feat.dtype, device=feat.device
).scatter_add_(
0,
inverse_indices.view(inverse_indices.size(0), 1).expand(-1, 3),
feat[..., :3],
)
/ labels_count.float().unsqueeze(1)
)
# mean_xyz = torch_scatter.scatter_mean(feat[..., :3], inverse_indices, dim=0)
mean_xyz -= self.get_voxel_center_xyz(uni_coords, batch_dict, rot=rot)
xyz_spatial[
uni_coords[..., 0],
:,
uni_coords[..., 1],
uni_coords[..., 2],
uni_coords[..., 3],
] = mean_xyz
return xyz_spatial
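    # The unique/scatter_add_ pattern in get_mean_res is a dependency-free
    # stand-in for torch_scatter.scatter_mean: torch.unique(...,
    # return_inverse=True, return_counts=True) groups duplicate voxel
    # coordinates, scatter_add_ sums the point coordinates per group, and
    # dividing by the counts yields the per-voxel mean offset from the
    # voxel center.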
def get_voxel_center_xyz(self, coords, batch_dict, rot=True):
voxel_centers = (
coords[:, [3, 2, 1]].float() + 0.5
) * self.voxel_size + self.point_origin_tensor
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
noise_rotation = rot_z * np.pi / 180
voxel_centers = common_utils.rotate_points_along_z(
voxel_centers.unsqueeze(1), noise_rotation
).squeeze(1)
else:
if "rot_z" in batch_dict and rot:
rot_z = batch_dict["rot_z"][coords[:, 0]]
voxel_centers[..., 1] -= rot_z
voxel_centers = coords_utils.uvd2absxyz(
voxel_centers[..., 0],
voxel_centers[..., 1],
voxel_centers[..., 2],
self.data_cfg.OCC.COORD_TYPE,
)
return voxel_centers
def get_fore_mirr_voxelwise_mask_res(
self,
batch_dict,
bs,
valid_coords_bnznynx,
valid_voxel_features,
gt_boxes_num,
gt_boxes,
):
fore_voxelwise_mask, mirr_fore_voxelwise_mask = [
torch.zeros(
[bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device="cuda"
)
for i in range(2)
]
(
fore_inds,
mirr_inbox_point,
mirr_binds,
) = point_box_utils.torch_points_and_sym_in_box_3d_batch(
valid_voxel_features[..., :3],
valid_coords_bnznynx,
gt_boxes,
gt_boxes_num,
bs,
batch_dict["box_mirr_flag"],
)
fore_coords = valid_coords_bnznynx[fore_inds] # b zyx
fore_voxelwise_mask[
fore_coords[..., 0],
fore_coords[..., 1],
fore_coords[..., 2],
fore_coords[..., 3],
] = torch.ones_like(
fore_coords[..., 0], dtype=torch.uint8, device=fore_voxelwise_mask.device
)
fore_res_mtrx = self.get_mean_res(
valid_voxel_features[fore_inds],
fore_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
mirr_res_mtrx = torch.zeros(
[bs, 3, self.nz, self.ny, self.nx],
device=fore_voxelwise_mask.device,
dtype=torch.float32,
)
if mirr_inbox_point is not None:
occ_coords_mirr_points = coords_utils.cartesian_occ_coords(
mirr_inbox_point, type=self.data_cfg.OCC.COORD_TYPE
) # sphere x y z
if "rot_z" in batch_dict:
rot_z = batch_dict["rot_z"][mirr_binds]
if self.data_cfg.OCC.COORD_TYPE == "cartesian":
noise_rotation = -rot_z * np.pi / 180
occ_coords_mirr_points = common_utils.rotate_points_along_z(
occ_coords_mirr_points.unsqueeze(1), noise_rotation
).squeeze(1)
else:
occ_coords_mirr_points[..., 1] += rot_z
inrange_coords_mirr, inrange_inds_mirr = self.point2coords_inrange(
occ_coords_mirr_points,
self.point_origin_tensor,
self.point_max_tensor,
self.max_grid_tensor,
self.min_grid_tensor,
self.voxel_size,
)
mirr_coords = torch.cat(
[
mirr_binds[inrange_inds_mirr].unsqueeze(-1),
self.xyz2zyx(inrange_coords_mirr),
],
dim=-1,
) # mirror sphere b z y x
mirr_res_mtrx = self.get_mean_res(
mirr_inbox_point[inrange_inds_mirr],
mirr_coords,
bs,
self.nz,
self.ny,
self.nx,
batch_dict,
rot=True,
)
mirr_fore_voxelwise_mask[
mirr_coords[..., 0],
mirr_coords[..., 1],
mirr_coords[..., 2],
mirr_coords[..., 3],
] = torch.ones_like(
mirr_coords[..., 0],
dtype=torch.uint8,
device=mirr_fore_voxelwise_mask.device,
)
return (
fore_voxelwise_mask,
fore_res_mtrx,
mirr_fore_voxelwise_mask,
mirr_res_mtrx,
)
|
[
"torch.ones_like",
"torch.unique",
"torch.zeros_like",
"numpy.asarray",
"torch.cat",
"torch.zeros"
] |
[((8412, 8490), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.uint8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device='cuda')\n", (8423, 8490), False, 'import torch\n'), ((11282, 11350), 'torch.zeros', 'torch.zeros', (['[bs, 3, nz, ny, nx]'], {'dtype': 'torch.float32', 'device': '"""cuda"""'}), "([bs, 3, nz, ny, nx], dtype=torch.float32, device='cuda')\n", (11293, 11350), False, 'import torch\n'), ((14353, 14448), 'torch.ones_like', 'torch.ones_like', (['fore_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'fore_voxelwise_mask.device'}), '(fore_coords[..., 0], dtype=torch.uint8, device=\n fore_voxelwise_mask.device)\n', (14368, 14448), False, 'import torch\n'), ((14738, 14846), 'torch.zeros', 'torch.zeros', (['[bs, 3, self.nz, self.ny, self.nx]'], {'device': 'fore_voxelwise_mask.device', 'dtype': 'torch.float32'}), '([bs, 3, self.nz, self.ny, self.nx], device=fore_voxelwise_mask.\n device, dtype=torch.float32)\n', (14749, 14846), False, 'import torch\n'), ((2286, 2332), 'torch.zeros_like', 'torch.zeros_like', (['inds[:, 0]'], {'dtype': 'torch.bool'}), '(inds[:, 0], dtype=torch.bool)\n', (2302, 2332), False, 'import torch\n'), ((3564, 3613), 'torch.cat', 'torch.cat', (['[gt_boxes[..., :-1], gt_label]'], {'dim': '(-1)'}), '([gt_boxes[..., :-1], gt_label], dim=-1)\n', (3573, 3613), False, 'import torch\n'), ((5529, 5624), 'torch.zeros_like', 'torch.zeros_like', (['voxelwise_mask'], {'dtype': 'voxelwise_mask.dtype', 'device': 'voxelwise_mask.device'}), '(voxelwise_mask, dtype=voxelwise_mask.dtype, device=\n voxelwise_mask.device)\n', (5545, 5624), False, 'import torch\n'), ((5847, 5924), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.int8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.int8, device='cuda')\n", (5858, 5924), False, 'import torch\n'), ((10856, 10947), 'torch.ones_like', 'torch.ones_like', (['bm_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'bm_voxelwise_mask.device'}), '(bm_coords[..., 0], dtype=torch.uint8, device=\n bm_voxelwise_mask.device)\n', (10871, 10947), False, 'import torch\n'), ((11017, 11105), 'torch.zeros', 'torch.zeros', (['[bs, 3, self.nz, self.ny, self.nx]'], {'dtype': 'torch.float32', 'device': '"""cuda"""'}), "([bs, 3, self.nz, self.ny, self.nx], dtype=torch.float32, device\n ='cuda')\n", (11028, 11105), False, 'import torch\n'), ((11457, 11525), 'torch.unique', 'torch.unique', (['coords'], {'return_inverse': '(True)', 'return_counts': '(True)', 'dim': '(0)'}), '(coords, return_inverse=True, return_counts=True, dim=0)\n', (11469, 11525), False, 'import torch\n'), ((13623, 13701), 'torch.zeros', 'torch.zeros', (['[bs, self.nz, self.ny, self.nx]'], {'dtype': 'torch.uint8', 'device': '"""cuda"""'}), "([bs, self.nz, self.ny, self.nx], dtype=torch.uint8, device='cuda')\n", (13634, 13701), False, 'import torch\n'), ((16665, 16765), 'torch.ones_like', 'torch.ones_like', (['mirr_coords[..., 0]'], {'dtype': 'torch.uint8', 'device': 'mirr_fore_voxelwise_mask.device'}), '(mirr_coords[..., 0], dtype=torch.uint8, device=\n mirr_fore_voxelwise_mask.device)\n', (16680, 16765), False, 'import torch\n'), ((3087, 3147), 'torch.cat', 'torch.cat', (["[occ_pnts[..., :3], batch_dict['voxels']]"], {'dim': '(-1)'}), "([occ_pnts[..., :3], batch_dict['voxels']], dim=-1)\n", (3096, 3147), False, 'import torch\n'), ((6005, 6034), 'numpy.asarray', 'np.asarray', (['[[0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0]])\n', (6015, 6034), True, 'import numpy as np\n'), ((11597, 11672), 'torch.zeros', 'torch.zeros', (['[uni_coords.shape[0], 3]'], {'dtype': 'feat.dtype', 'device': 'feat.device'}), '([uni_coords.shape[0], 3], dtype=feat.dtype, device=feat.device)\n', (11608, 11672), False, 'import torch\n')]
|
import dash
from dash.testing import wait
from dash_table import DataTable
from dash_html_components import Div
from selenium.webdriver.common.keys import Keys
import pandas as pd
url = "https://github.com/plotly/datasets/raw/master/" "26k-consumer-complaints.csv"
rawDf = pd.read_csv(url, nrows=100)
df = rawDf.to_dict("records")
def get_app(props=dict()):
app = dash.Dash(__name__)
baseProps = dict(
columns=[dict(name=i, id=i, selectable=True) for i in rawDf.columns],
data=df,
editable=True,
filter_action="native",
fixed_columns={"headers": True},
fixed_rows={"headers": True},
page_action="native",
row_deletable=True,
row_selectable=True,
sort_action="native",
)
baseProps.update(props)
app.layout = Div([DataTable(**baseProps), DataTable(**baseProps)])
return app
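# NOTE: `props=dict()` is evaluated once at definition time; it is safe here
# because get_app only reads it, but `props=None` plus an in-body fallback is
# the more defensive idiom for dict defaults.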
def test_tbmu001_select_row(test):
test.start_server(get_app())
wait.until(lambda: len(test.get_table_ids()) == 2, 3)
ids = test.get_table_ids()
table1 = test.table(ids[0])
table2 = test.table(ids[1])
table2.row(1).select()
wait.until(lambda: table2.row(1).is_selected(), 3)
table1.row(0).select()
wait.until(lambda: table1.row(0).is_selected(), 3)
wait.until(lambda: table2.row(1).is_selected(), 3)
assert test.get_log_errors() == []
def test_tbmu002_select_column(test):
test.start_server(get_app(dict(column_selectable="single")))
wait.until(lambda: len(test.get_table_ids()) == 2, 3)
ids = test.get_table_ids()
table1 = test.table(ids[0])
table2 = test.table(ids[1])
table1.column("Complaint ID").select()
table2.column("Product").select()
assert table1.column("Complaint ID").is_selected()
assert table2.column("Product").is_selected()
def test_tbmu003_edit_on_enter(test):
test.start_server(get_app())
wait.until(lambda: len(test.get_table_ids()) == 2, 3)
ids = test.get_table_ids()
table1 = test.table(ids[0])
table2 = test.table(ids[1])
initial_text = table2.cell(0, 0).get_text()
table1.cell(0, 0).click()
test.send_keys("abc" + Keys.ENTER)
assert table1.cell(0, 0).get_text() == "abc"
assert table2.cell(0, 0).get_text() == initial_text
def test_tbmu004_edit_click_outside(test):
test.start_server(get_app())
wait.until(lambda: len(test.get_table_ids()) == 2, 3)
ids = test.get_table_ids()
table1 = test.table(ids[0])
table2 = test.table(ids[1])
initial_text = table2.cell(0, 0).get_text()
table1.cell(0, 0).click()
test.send_keys("abc")
table1.cell(1, 0).click()
assert table1.cell(0, 0).get_text() == "abc"
assert table2.cell(0, 0).get_text() == initial_text
|
[
"pandas.read_csv",
"dash.Dash",
"dash_table.DataTable"
] |
[((277, 304), 'pandas.read_csv', 'pd.read_csv', (['url'], {'nrows': '(100)'}), '(url, nrows=100)\n', (288, 304), True, 'import pandas as pd\n'), ((374, 393), 'dash.Dash', 'dash.Dash', (['__name__'], {}), '(__name__)\n', (383, 393), False, 'import dash\n'), ((821, 843), 'dash_table.DataTable', 'DataTable', ([], {}), '(**baseProps)\n', (830, 843), False, 'from dash_table import DataTable\n'), ((845, 867), 'dash_table.DataTable', 'DataTable', ([], {}), '(**baseProps)\n', (854, 867), False, 'from dash_table import DataTable\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate, add_days
test_dependencies = ["Shift Type"]
class TestShiftAssignment(unittest.TestCase):
def setUp(self):
frappe.db.sql("delete from `tabShift Assignment`")
def test_make_shift_assignment(self):
shift_assignment = frappe.get_doc({
"doctype": "Shift Assignment",
"shift_type": "Day Shift",
"company": "_Test Company",
"employee": "_T-Employee-00001",
"start_date": nowdate()
}).insert()
shift_assignment.submit()
self.assertEqual(shift_assignment.docstatus, 1)
def test_overlapping_for_ongoing_shift(self):
		# the shift is ongoing if only start_date is present and status = Active
shift_assignment_1 = frappe.get_doc({
"doctype": "Shift Assignment",
"shift_type": "Day Shift",
"company": "_Test Company",
"employee": "_T-Employee-00001",
"start_date": nowdate(),
"status": 'Active'
}).insert()
shift_assignment_1.submit()
self.assertEqual(shift_assignment_1.docstatus, 1)
shift_assignment = frappe.get_doc({
"doctype": "Shift Assignment",
"shift_type": "Day Shift",
"company": "_Test Company",
"employee": "_T-Employee-00001",
"start_date": add_days(nowdate(), 2)
})
self.assertRaises(frappe.ValidationError, shift_assignment.save)
def test_overlapping_for_fixed_period_shift(self):
		# the shift is for a fixed period if both start_date and end_date are present and status = Active
shift_assignment_1 = frappe.get_doc({
"doctype": "Shift Assignment",
"shift_type": "Day Shift",
"company": "_Test Company",
"employee": "_T-Employee-00001",
"start_date": nowdate(),
"end_date": add_days(nowdate(), 30),
"status": 'Active'
}).insert()
shift_assignment_1.submit()
		# it should not be allowed within the period of any existing shift.
shift_assignment_3 = frappe.get_doc({
"doctype": "Shift Assignment",
"shift_type": "Day Shift",
"company": "_Test Company",
"employee": "_T-Employee-00001",
"start_date":add_days(nowdate(), 10),
"end_date": add_days(nowdate(), 35),
"status": 'Active'
})
self.assertRaises(frappe.ValidationError, shift_assignment_3.save)
|
[
"frappe.db.sql",
"frappe.utils.nowdate"
] |
[((329, 379), 'frappe.db.sql', 'frappe.db.sql', (['"""delete from `tabShift Assignment`"""'], {}), "('delete from `tabShift Assignment`')\n", (342, 379), False, 'import frappe\n'), ((1350, 1359), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (1357, 1359), False, 'from frappe.utils import nowdate, add_days\n'), ((2171, 2180), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (2178, 2180), False, 'from frappe.utils import nowdate, add_days\n'), ((2212, 2221), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (2219, 2221), False, 'from frappe.utils import nowdate, add_days\n'), ((606, 615), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (613, 615), False, 'from frappe.utils import nowdate, add_days\n'), ((1024, 1033), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (1031, 1033), False, 'from frappe.utils import nowdate, add_days\n'), ((1791, 1800), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (1798, 1800), False, 'from frappe.utils import nowdate, add_days\n'), ((1827, 1836), 'frappe.utils.nowdate', 'nowdate', ([], {}), '()\n', (1834, 1836), False, 'from frappe.utils import nowdate, add_days\n')]
|
# critiquebrainz - Repository for Creative Commons licensed reviews
#
# Copyright (C) 2018 <NAME>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from unittest.mock import MagicMock
from flask import url_for
import critiquebrainz.db.users as db_users
import critiquebrainz.frontend.external.musicbrainz_db.exceptions as mb_exceptions
import critiquebrainz.frontend.external.musicbrainz_db.release_group as mb_release_group
import critiquebrainz.frontend.external.spotify as spotify_api
from critiquebrainz.db.user import User
from critiquebrainz.frontend.external import mbspotify
from critiquebrainz.frontend.external.exceptions import ExternalServiceException
from critiquebrainz.frontend.testing import FrontendTestCase
class SpotifyMappingViewsTestCase(FrontendTestCase):
def setUp(self):
super(SpotifyMappingViewsTestCase, self).setUp()
self.user = User(db_users.get_or_create(1, "aef06569-098f-4218-a577-b413944d9493", new_user_data={
"display_name": u"Tester",
}))
self.test_spotify_id = "6IH6co1QUS7uXoyPDv0rIr"
self.test_release_group = {
'id': '6b3cd75d-7453-39f3-86c4-1441f360e121',
'title': 'Test Release Group',
'first-release-year': 1970,
'artist-credit': [{
'name': '<NAME>'
}]
}
self.test_spotify_get_multiple_albums_response = {
'6IH6co1QUS7uXoyPDv0rIr': {
'type': 'album',
'album_type': 'album',
'id': '6IH6co1QUS7uXoyPDv0rIr',
'name': 'Test Album',
'release_date': '1970-01-01',
'external_urls': {
'spotify': 'https://open.spotify.com/album/6IH6co1QUS7uXoyPDv0rIr'
},
'artists': [{'name': '<NAME>'}],
'tracks': {
'items': [{
'artists': [{
'name': '<NAME>'
}]
}]
},
'uri': 'spotify:album:6IH6co1QUS7uXoyPDv0rIr'
}
}
self.test_spotify_search_response = {
'albums': {
'items': [{
'id': "6IH6co1QUS7uXoyPDv0rIr"
}],
'total': 1
}
}
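    # The tests below monkey-patch module-level functions by plain assignment
    # of MagicMock objects (rather than unittest.mock.patch), so each patch
    # persists until another test overwrites it.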
def test_spotify_list(self):
# test for non-existent release group
mbspotify.mappings = MagicMock(return_value=[])
mb_release_group.get_release_group_by_id = MagicMock(side_effect=mb_exceptions.NoDataFoundException)
response = self.client.get("/mapping/6b3cd75d-7453-39f3-86c4-1441f360e121")
self.assert404(response, "Can't find release group with a specified ID.")
# test for release group with no mappings
mb_release_group.get_release_group_by_id = MagicMock(return_value=self.test_release_group)
response = self.client.get("/mapping/6b3cd75d-7453-39f3-86c4-1441f360e121")
self.assert200(response)
self.assertIn("No mappings", str(response.data))
# test release group with mappings
mbspotify.mappings = MagicMock(return_value=['spotify:album:6IH6co1QUS7uXoyPDv0rIr'])
spotify_api.get_multiple_albums = MagicMock(return_value=self.test_spotify_get_multiple_albums_response)
response = self.client.get("/mapping/6b3cd75d-7453-39f3-86c4-1441f360e121")
self.assert200(response)
self.assertIn(self.test_spotify_id, str(response.data))
self.assertIn("Test Album", str(response.data))
self.assertIn("1970-01-01", str(response.data))
# test spotify service unavailable
spotify_api.get_multiple_albums = MagicMock(side_effect=ExternalServiceException)
response = self.client.get("/mapping/6b3cd75d-7453-39f3-86c4-1441f360e121")
self.assertStatus(response, 503)
def test_spotify_add(self):
# test `release_group_id` variable not supplied
response = self.client.get("/mapping/spotify/add")
self.assertRedirects(response, url_for('frontend.index'))
# test for non-existent release group
mb_release_group.get_release_group_by_id = MagicMock(side_effect=mb_exceptions.NoDataFoundException)
response = self.client.get("/mapping/spotify/add",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"},
follow_redirects=True)
self.assert200(response)
self.assertIn("Only existing release groups can be mapped to Spotify!", str(response.data))
# test Spotify service unavailable
mb_release_group.get_release_group_by_id = MagicMock(return_value=self.test_release_group)
spotify_api.search = MagicMock(side_effect=ExternalServiceException)
response = self.client.get("/mapping/spotify/add",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"})
self.assertStatus(response, 503)
# test when response has no albums for given id
spotify_api.search = MagicMock(return_value=self.test_spotify_search_response)
spotify_api.get_multiple_albums = MagicMock(return_value={})
response = self.client.get("/mapping/spotify/add",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"})
self.assert200(response)
self.assertIn("No similar albums found", str(response.data))
# test when response has 1 album
spotify_api.get_multiple_albums = MagicMock(return_value=self.test_spotify_get_multiple_albums_response)
response = self.client.get("/mapping/spotify/add",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"})
self.assert200(response)
self.assertIn("Listen on Spotify", str(response.data))
self.assertIn("1970", str(response.data))
self.assertIn("Test Album", str(response.data))
self.assertIn("Test Artist", str(response.data))
def test_spotify_confirm(self):
self.temporary_login(self.user)
# test `release_group_id` variable not supplied
response = self.client.get("/mapping/spotify/confirm",
follow_redirects=True)
self.assert400(response, "Didn't provide `release_group_id`!")
# test for non-existent release group
mb_release_group.get_release_group_by_id = MagicMock(side_effect=mb_exceptions.NoDataFoundException)
response = self.client.get("/mapping/spotify/confirm",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"},
follow_redirects=True)
self.assert200(response)
self.assertIn("Only existing release groups can be mapped to Spotify!", str(response.data))
# test `spotify_ref` variable not supplied
mb_release_group.get_release_group_by_id = MagicMock(return_value=self.test_release_group)
response = self.client.get("/mapping/spotify/confirm",
query_string={"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"},
follow_redirects=True)
self.assert200(response)
self.assertIn("You need to select an album from Spotify!", str(response.data))
# test when wrong type of `spotify_ref` is supplied
response = self.client.get("/mapping/spotify/confirm",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_ref": "Unsupported Spotify URI"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("You need to specify a correct link to this album on Spotify!", str(response.data))
# test Spotify service unavailable or uri supplied is not available on Spotify
spotify_api.get_album = MagicMock(side_effect=ExternalServiceException)
response = self.client.get("/mapping/spotify/confirm",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_ref": "spotify:album:6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("You need to specify existing album from Spotify!", str(response.data))
# test when uri supplied is available on Spotify
spotify_api.get_album = MagicMock(
return_value=self.test_spotify_get_multiple_albums_response[self.test_spotify_id])
response = self.client.get("/mapping/spotify/confirm",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_ref": "spotify:album:6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Spotify album mapping confirmation", str(response.data))
self.assertIn("Are you sure you want to create this mapping?", str(response.data))
# test POST for spotify_confirm
mbspotify.add_mapping = MagicMock(return_value=(False, None))
# test when failed to add mapping while posting uri
response = self.client.post("/mapping/spotify/confirm",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_ref": "spotify:album:6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Could not add Spotify mapping!", str(response.data))
# test when successfully added mapping
mbspotify.add_mapping = MagicMock(return_value=(True, None))
response = self.client.post("/mapping/spotify/confirm",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_ref": "spotify:album:6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Spotify mapping has been added!", str(response.data))
def test_spotify_report(self):
self.temporary_login(self.user)
# test `release_group_id` variable not supplied
response = self.client.get("/mapping/spotify/report",
follow_redirects=True)
self.assert400(response, "Didn't provide `release_group_id`!")
# test `spotify_id` variable not supplied
response = self.client.get("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121"
},
follow_redirects=True)
self.assert400(response, "Didn't provide `spotify_id`!")
# test for non-existent release group
mb_release_group.get_release_group_by_id = MagicMock(side_effect=mb_exceptions.NoDataFoundException)
response = self.client.get("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert404(response, "Can't find release group with a specified ID.")
# test release group not mapped to supplied spotify uri
mb_release_group.get_release_group_by_id = MagicMock(return_value=self.test_release_group)
mbspotify.mappings = MagicMock(return_value=[])
response = self.client.get("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("This album is not mapped to Spotify yet!", str(response.data))
# test when release group is mapped to supplied spotify uri, but, album for supplied uri doesn't exist
mbspotify.mappings = MagicMock(return_value=["spotify:album:6IH6co1QUS7uXoyPDv0rIr"])
spotify_api.get_album = MagicMock(side_effect=ExternalServiceException)
spotify_api.get_multiple_albums = MagicMock(return_value=self.test_spotify_get_multiple_albums_response)
response = self.client.get("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
# self.assertRedirects(response, url_for('mapping.spotify_list', release_group_id=self.test_release_group['id']))
self.assert200(response)
self.assertIn("You need to specify existing album from Spotify!", str(response.data))
# test confirmation page for reporting
spotify_api.get_album = MagicMock(
return_value=self.test_spotify_get_multiple_albums_response[self.test_spotify_id])
response = self.client.get("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Are you sure you want to report incorrect mapping?", str(response.data))
# test POST for spotify_report
# test when successfully added mapping
mbspotify.vote = MagicMock(return_value=(True, None))
response = self.client.post("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Incorrect Spotify mapping has been reported. Thank you!", str(response.data))
# test when failed to vote for incorrect mapping
mbspotify.vote = MagicMock(return_value=(False, None))
response = self.client.post("/mapping/spotify/report",
query_string={
"release_group_id": "6b3cd75d-7453-39f3-86c4-1441f360e121",
"spotify_id": "6IH6co1QUS7uXoyPDv0rIr"
},
follow_redirects=True)
self.assert200(response)
self.assertIn("Could not report incorrect Spotify mapping!", str(response.data))
|
[
"flask.url_for",
"unittest.mock.MagicMock",
"critiquebrainz.db.users.get_or_create"
] |
[((3122, 3148), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (3131, 3148), False, 'from unittest.mock import MagicMock\n'), ((3200, 3257), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mb_exceptions.NoDataFoundException'}), '(side_effect=mb_exceptions.NoDataFoundException)\n', (3209, 3257), False, 'from unittest.mock import MagicMock\n'), ((3526, 3573), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_release_group'}), '(return_value=self.test_release_group)\n', (3535, 3573), False, 'from unittest.mock import MagicMock\n'), ((3821, 3885), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': "['spotify:album:6IH6co1QUS7uXoyPDv0rIr']"}), "(return_value=['spotify:album:6IH6co1QUS7uXoyPDv0rIr'])\n", (3830, 3885), False, 'from unittest.mock import MagicMock\n'), ((3928, 3998), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_get_multiple_albums_response'}), '(return_value=self.test_spotify_get_multiple_albums_response)\n', (3937, 3998), False, 'from unittest.mock import MagicMock\n'), ((4378, 4425), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'ExternalServiceException'}), '(side_effect=ExternalServiceException)\n', (4387, 4425), False, 'from unittest.mock import MagicMock\n'), ((4863, 4920), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mb_exceptions.NoDataFoundException'}), '(side_effect=mb_exceptions.NoDataFoundException)\n', (4872, 4920), False, 'from unittest.mock import MagicMock\n'), ((5376, 5423), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_release_group'}), '(return_value=self.test_release_group)\n', (5385, 5423), False, 'from unittest.mock import MagicMock\n'), ((5453, 5500), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'ExternalServiceException'}), '(side_effect=ExternalServiceException)\n', (5462, 5500), False, 'from unittest.mock import MagicMock\n'), ((5797, 5854), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_search_response'}), '(return_value=self.test_spotify_search_response)\n', (5806, 5854), False, 'from unittest.mock import MagicMock\n'), ((5898, 5924), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '{}'}), '(return_value={})\n', (5907, 5924), False, 'from unittest.mock import MagicMock\n'), ((6280, 6350), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_get_multiple_albums_response'}), '(return_value=self.test_spotify_get_multiple_albums_response)\n', (6289, 6350), False, 'from unittest.mock import MagicMock\n'), ((7203, 7260), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mb_exceptions.NoDataFoundException'}), '(side_effect=mb_exceptions.NoDataFoundException)\n', (7212, 7260), False, 'from unittest.mock import MagicMock\n'), ((7728, 7775), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_release_group'}), '(return_value=self.test_release_group)\n', (7737, 7775), False, 'from unittest.mock import MagicMock\n'), ((8835, 8882), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'ExternalServiceException'}), '(side_effect=ExternalServiceException)\n', (8844, 8882), False, 'from unittest.mock import MagicMock\n'), ((9501, 9598), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_get_multiple_albums_response[self.test_spotify_id]'}), '(return_value=self.test_spotify_get_multiple_albums_response[self.\ntest_spotify_id])\n', (9510, 9598), False, 'from unittest.mock import MagicMock\n'), ((10285, 10322), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False, None)'}), '(return_value=(False, None))\n', (10294, 10322), False, 'from unittest.mock import MagicMock\n'), ((10980, 11016), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(True, None)'}), '(return_value=(True, None))\n', (10989, 11016), False, 'from unittest.mock import MagicMock\n'), ((12378, 12435), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'mb_exceptions.NoDataFoundException'}), '(side_effect=mb_exceptions.NoDataFoundException)\n', (12387, 12435), False, 'from unittest.mock import MagicMock\n'), ((13019, 13066), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_release_group'}), '(return_value=self.test_release_group)\n', (13028, 13066), False, 'from unittest.mock import MagicMock\n'), ((13096, 13122), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '[]'}), '(return_value=[])\n', (13105, 13122), False, 'from unittest.mock import MagicMock\n'), ((13768, 13832), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': "['spotify:album:6IH6co1QUS7uXoyPDv0rIr']"}), "(return_value=['spotify:album:6IH6co1QUS7uXoyPDv0rIr'])\n", (13777, 13832), False, 'from unittest.mock import MagicMock\n'), ((13865, 13912), 'unittest.mock.MagicMock', 'MagicMock', ([], {'side_effect': 'ExternalServiceException'}), '(side_effect=ExternalServiceException)\n', (13874, 13912), False, 'from unittest.mock import MagicMock\n'), ((13955, 14025), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_get_multiple_albums_response'}), '(return_value=self.test_spotify_get_multiple_albums_response)\n', (13964, 14025), False, 'from unittest.mock import MagicMock\n'), ((14740, 14837), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': 'self.test_spotify_get_multiple_albums_response[self.test_spotify_id]'}), '(return_value=self.test_spotify_get_multiple_albums_response[self.\n    test_spotify_id])\n', (14749, 14837), False, 'from unittest.mock import MagicMock\n'), ((15472, 15508), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(True, None)'}), '(return_value=(True, None))\n', (15481, 15508), False, 'from unittest.mock import MagicMock\n'), ((16117, 16154), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(False, None)'}), '(return_value=(False, None))\n', (16126, 16154), False, 'from unittest.mock import MagicMock\n'), ((1544, 1656), 'critiquebrainz.db.users.get_or_create', 'db_users.get_or_create', (['(1)', '"""aef06569-098f-4218-a577-b413944d9493"""'], {'new_user_data': "{'display_name': u'Tester'}"}), "(1, 'aef06569-098f-4218-a577-b413944d9493',\n    new_user_data={'display_name': u'Tester'})\n", (1566, 1656), True, 'import critiquebrainz.db.users as db_users\n'), ((4738, 4763), 'flask.url_for', 'url_for', (['"""frontend.index"""'], {}), "('frontend.index')\n", (4745, 4763), False, 'from flask import url_for\n')]
|
# -*- coding: utf-8 -*-
# Time : 2021/7/25 13:59
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
import json
import os
from datetime import datetime
from bs4 import BeautifulSoup
from selenium.common.exceptions import (
StaleElementReferenceException,
WebDriverException,
)
from selenium.webdriver import Chrome
from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN
from src.BusinessLogicLayer.cluster.master import ActionMasterGeneral
class SSPanelParser(ActionMasterGeneral):
def __init__(self, url, silence=False, assault=True, anti_slider=True):
super(SSPanelParser, self).__init__(
url,
silence,
assault,
anti_slider=anti_slider,
)
self.obj_parser = {}
self.cache_db_name = "parser_cache"
self.cache_db_path = self.create_cache_db(database_dir=SERVER_DIR_DATABASE)
def create_cache_db(self, database_dir=None):
database_dir = "database" if database_dir is None else database_dir
if not os.path.exists(database_dir):
os.mkdir(database_dir)
cache_db = os.path.join(database_dir, self.cache_db_name)
if not os.path.exists(cache_db):
os.mkdir(cache_db)
return cache_db
def capture_cache(self, signs, flow):
output_path = os.path.join(self.cache_db_path, signs)
with open(output_path, "w", encoding="utf8") as f:
f.write(flow)
def parse(self, **kwargs):
"""
:return:
"""
api: Chrome = kwargs.get("api")
self.obj_parser.update({"parse_url": self.register_url})
# ----------------------------------------
        # Parse available traffic quota and remaining usage time.
        # Called first: wait for the fluid animation to finish loading
        # (a time-consuming task) so that later parsing does not have to wait.
# ----------------------------------------
fluid = set()
fluid_density = []
i = 0
while True:
try:
i += 1
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[:2]
card_body = [tag.text.strip() for tag in card_body]
fluid.update(card_body)
fluid_density.append(len(fluid))
                # Fluid values still being released
if len(fluid_density) < 10 or len(fluid) < 3:
continue
                # Fluid values have stabilized
if max(fluid_density[:10]) == min(fluid_density[:10]):
self.obj_parser.update(
{"time": card_body[0], "flow": card_body[-1]}
)
break
except StaleElementReferenceException:
pass
        # Persist cookies
with open("123.json", "w", encoding="utf8") as f:
f.write(json.dumps(api.get_cookies()))
        # Read cookies
# cookie_json = " ".join([f"{i['name']}={i['value']};" for i in json.loads(f.read())])
# ----------------------------------------
        # Parse the site name
# ----------------------------------------
try:
parse_name = api.find_element_by_xpath(
"//aside//div[@class='sidebar-brand']"
).text.strip()
self.obj_parser.update({"parse_name": parse_name})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site name resolution failed -- {self.register_url}"
)
# ----------------------------------------
        # Parse the site announcement
# ----------------------------------------
reference_links = {}
try:
card_body = api.find_elements_by_xpath("//div[@class='card-body']")[4]
self.obj_parser.update({"desc": card_body.text.strip()})
related_href = card_body.find_elements_by_tag_name("a")
for tag in related_href:
href = tag.get_attribute("href")
if href:
href = href.strip()
if "https" not in href:
href = f"{self.register_url}{href}"
href_desc = tag.text.strip() if tag.text else href
reference_links.update({href: href_desc})
self.obj_parser.update({"reference_links": reference_links})
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site announcement parsing error -- {self.register_url}"
)
# ----------------------------------------
        # Parse the [link import] subscription section
# ----------------------------------------
subscribes = {}
support = []
try:
            # Clean up the subscription links
soup = BeautifulSoup(api.page_source, "html.parser")
for i in soup.find_all("a"):
if i.get("data-clipboard-text"):
subscribes.update({i.get("data-clipboard-text"): i.text.strip()})
            # Identify the supported subscription types
buttons = api.find_elements_by_xpath("//div[@class='card'][2]//a")
for tag in buttons:
support_ = tag.get_attribute("class")
if support_:
support_ = [
i
for i in [i for i in support_.split() if i.startswith("btn-")]
if i
not in [
"btn-icon",
"btn-primary",
"btn-lg",
"btn-round",
"btn-progress",
]
]
if len(support_) == 1:
class_name = support_[0].replace("btn-", "")
support.append(class_name)
            # Backfill subscription types missed above
for tag in subscribes.values():
if "surge" in tag.lower():
support.append("surge")
if "ssr" in tag.lower():
support.append("ssr")
self.obj_parser.update(
{"subscribes": subscribes, "support": list(set(support))}
)
except WebDriverException:
logger.error(
f"<SSPanelParserError> Site subscription resolution failed -- {self.register_url}"
)
self.obj_parser.update(
{
"email": self.email,
"password": self.password,
"recently_login": datetime.now(tz=TIME_ZONE_CN),
}
)
return self.obj_parser
def parse_by_login(self, **kwargs) -> dict:
return self.seep("login", self.parse, **kwargs)
def parse_by_register(self, **kwargs):
return self.seep("register", self.parse, **kwargs)
def refresh_cookie(self, **kwargs):
def get_cookie():
cookies = kwargs.get("api")
return json.dumps(cookies.get_cookies()) if cookies else {}
return self.seep("login", get_cookie, **kwargs)
def seep(self, method, business, **kwargs):
        # Fetch the task settings
api = self.set_spider_option()
        # Run the core business logic
try:
self.get_html_handle(api=api, url=self.register_url, wait_seconds=45)
if method == "login":
self.sign_in(api, **kwargs)
elif method == "register":
self.sign_up(api)
self.wait(api, 40, "//div[@class='card-body']")
kwargs.setdefault("api", api)
return business(**kwargs)
finally:
api.quit()
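# --- Hedged usage sketch (not part of the original module) ---
# Minimal illustration of driving the parser; the URL is a placeholder and a
# working ActionMasterGeneral/Selenium setup is assumed.
if __name__ == "__main__":
    demo_parser = SSPanelParser("https://sspanel.example.com", silence=True)
    profile = demo_parser.parse_by_register()
    print(profile.get("parse_name"), profile.get("subscribes"))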
|
[
"os.mkdir",
"os.path.exists",
"datetime.datetime.now",
"bs4.BeautifulSoup",
"src.BusinessCentralLayer.setting.logger.error",
"os.path.join"
] |
[((1175, 1221), 'os.path.join', 'os.path.join', (['database_dir', 'self.cache_db_name'], {}), '(database_dir, self.cache_db_name)\n', (1187, 1221), False, 'import os\n'), ((1383, 1422), 'os.path.join', 'os.path.join', (['self.cache_db_path', 'signs'], {}), '(self.cache_db_path, signs)\n', (1395, 1422), False, 'import os\n'), ((1090, 1118), 'os.path.exists', 'os.path.exists', (['database_dir'], {}), '(database_dir)\n', (1104, 1118), False, 'import os\n'), ((1132, 1154), 'os.mkdir', 'os.mkdir', (['database_dir'], {}), '(database_dir)\n', (1140, 1154), False, 'import os\n'), ((1237, 1261), 'os.path.exists', 'os.path.exists', (['cache_db'], {}), '(cache_db)\n', (1251, 1261), False, 'import os\n'), ((1275, 1293), 'os.mkdir', 'os.mkdir', (['cache_db'], {}), '(cache_db)\n', (1283, 1293), False, 'import os\n'), ((4646, 4691), 'bs4.BeautifulSoup', 'BeautifulSoup', (['api.page_source', '"""html.parser"""'], {}), "(api.page_source, 'html.parser')\n", (4659, 4691), False, 'from bs4 import BeautifulSoup\n'), ((3294, 3387), 'src.BusinessCentralLayer.setting.logger.error', 'logger.error', (['f"""<SSPanelParserError> Site name resolution failed -- {self.register_url}"""'], {}), "(\n f'<SSPanelParserError> Site name resolution failed -- {self.register_url}')\n", (3306, 3387), False, 'from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN\n'), ((4303, 4405), 'src.BusinessCentralLayer.setting.logger.error', 'logger.error', (['f"""<SSPanelParserError> Site announcement parsing error -- {self.register_url}"""'], {}), "(\n f'<SSPanelParserError> Site announcement parsing error -- {self.register_url}'\n )\n", (4315, 4405), False, 'from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN\n'), ((6115, 6221), 'src.BusinessCentralLayer.setting.logger.error', 'logger.error', (['f"""<SSPanelParserError> Site subscription resolution failed -- {self.register_url}"""'], {}), "(\n f'<SSPanelParserError> Site subscription resolution failed -- {self.register_url}'\n )\n", (6127, 6221), False, 'from src.BusinessCentralLayer.setting import logger, SERVER_DIR_DATABASE, TIME_ZONE_CN\n'), ((6403, 6432), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'TIME_ZONE_CN'}), '(tz=TIME_ZONE_CN)\n', (6415, 6432), False, 'from datetime import datetime\n')]
|
import csv
from collections import defaultdict
import numpy as np
from PySAM.ResourceTools import SAM_CSV_to_solar_data
from hybrid.keys import get_developer_nrel_gov_key
from hybrid.log import hybrid_logger as logger
from hybrid.resource.resource import *
class SolarResource(Resource):
"""
Class to manage Solar Resource data
"""
def __init__(self, lat, lon, year, path_resource="", filepath="", **kwargs):
"""
:param lat: float
:param lon: float
:param year: int
:param path_resource: directory where to save downloaded files
:param filepath: file path of resource file to load
:param kwargs:
"""
super().__init__(lat, lon, year)
if os.path.isdir(path_resource):
self.path_resource = path_resource
self.solar_attributes = 'ghi,dhi,dni,wind_speed,air_temperature,solar_zenith_angle'
self.path_resource = os.path.join(self.path_resource, 'solar')
# Force override any internal definitions if passed in
self.__dict__.update(kwargs)
# resource_files files
if filepath == "":
filepath = os.path.join(self.path_resource,
str(lat) + "_" + str(lon) + "_psmv3_" + str(self.interval) + "_" + str(
year) + ".csv")
self.filename = filepath
self.check_download_dir()
if not os.path.isfile(self.filename):
self.download_resource()
self.format_data()
logger.info("SolarResource: {}".format(self.filename))
def download_resource(self):
url = 'https://developer.nrel.gov/api/nsrdb/v2/solar/psm3-download.csv?wkt=POINT({lon}+{lat})&names={year}&leap_day={leap}&interval={interval}&utc={utc}&full_name={name}&email={email}&affiliation={affiliation}&mailing_list={mailing_list}&reason={reason}&api_key={api}&attributes={attr}'.format(
year=self.year, lat=self.latitude, lon=self.longitude, leap=self.leap_year, interval=self.interval,
utc=self.utc, name=self.name, email=self.email,
mailing_list=self.mailing_list, affiliation=self.affiliation, reason=self.reason, api=get_developer_nrel_gov_key(),
attr=self.solar_attributes)
success = self.call_api(url, filename=self.filename)
return success
def format_data(self):
"""
Format as 'solar_resource_data' dictionary for use in PySAM.
"""
if not os.path.isfile(self.filename):
raise FileNotFoundError(self.filename + " does not exist. Try `download_resource` first.")
self.data = self.filename
@Resource.data.setter
def data(self, data_dict):
"""
Sets the solar resource data
For hourly resource, year, month, day, hour, and minute will be auto-filled if not provided.
:key tz: time zone, not UTC
:key elev: elevation in meters
:key year: array
:key month: array
:key day: array
:key hour: array
:key minute: array
:key dn: array, direct normal irradiance
        :key df: array, diffuse horizontal irradiance
:key wspd: array, wind speed [m/s]
:key tdry: array, dry bulb temp [C]
"""
self._data = SAM_CSV_to_solar_data(data_dict)
def roll_timezone(self, roll_hours, timezone):
"""
:param roll_hours:
:param timezone:
:return:
"""
rollable_keys = ['dn', 'df', 'gh', 'wspd', 'tdry']
for key in rollable_keys:
if any(k == key for k in rollable_keys):
roll_range = range(0, -roll_hours + 1)
weather_array = np.array(self._data[key])
weather_array_rolled = np.delete(weather_array, roll_range)
weather_array_rolled = np.pad(weather_array_rolled, (0, -roll_hours + 1), 'constant')
self._data[key] = weather_array_rolled.tolist()
self._data['tz'] = timezone
logger.info('Rolled solar data by {} hours for timezone {}'.format(roll_hours, timezone))
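# --- Hedged usage sketch (not part of the original module) ---
# Illustrative only: assumes an NREL developer key is configured via
# hybrid.keys, and the coordinates/year/path below are placeholders.
if __name__ == "__main__":
    solar = SolarResource(lat=39.7, lon=-105.2, year=2012,
                          path_resource="data/resources")
    print(solar.filename)  # path of the downloaded/loaded PSM v3 CSV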
|
[
"numpy.pad",
"hybrid.keys.get_developer_nrel_gov_key",
"numpy.array",
"PySAM.ResourceTools.SAM_CSV_to_solar_data",
"numpy.delete"
] |
[((3314, 3346), 'PySAM.ResourceTools.SAM_CSV_to_solar_data', 'SAM_CSV_to_solar_data', (['data_dict'], {}), '(data_dict)\n', (3335, 3346), False, 'from PySAM.ResourceTools import SAM_CSV_to_solar_data\n'), ((2219, 2247), 'hybrid.keys.get_developer_nrel_gov_key', 'get_developer_nrel_gov_key', ([], {}), '()\n', (2245, 2247), False, 'from hybrid.keys import get_developer_nrel_gov_key\n'), ((3727, 3752), 'numpy.array', 'np.array', (['self._data[key]'], {}), '(self._data[key])\n', (3735, 3752), True, 'import numpy as np\n'), ((3793, 3829), 'numpy.delete', 'np.delete', (['weather_array', 'roll_range'], {}), '(weather_array, roll_range)\n', (3802, 3829), True, 'import numpy as np\n'), ((3869, 3931), 'numpy.pad', 'np.pad', (['weather_array_rolled', '(0, -roll_hours + 1)', '"""constant"""'], {}), "(weather_array_rolled, (0, -roll_hours + 1), 'constant')\n", (3875, 3931), True, 'import numpy as np\n')]
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
from spack.version import ver
def get_best_target(microarch, compiler_name, compiler_version):
for compiler_entry in microarch.compilers[compiler_name]:
if compiler_version.satisfies(ver(compiler_entry["versions"])):
return compiler_entry.get("name", microarch.name)
raise InstallError("Could not find a target architecture")
class Julia(MakefilePackage):
"""The Julia Language: A fresh approach to technical computing"""
homepage = "https://julialang.org"
url = "https://github.com/JuliaLang/julia/releases/download/v1.7.0/julia-1.7.0.tar.gz"
git = "https://github.com/JuliaLang/julia.git"
maintainers = ['glennpj', 'vchuravy', 'haampie']
version('master', branch='master')
version('1.7.2', sha256='0847943dd65001f3322b00c7dc4e12f56e70e98c6b798ccbd4f02d27ce161fef')
version('1.7.1', sha256='17d298e50e4e3dd897246ccebd9f40ce5b89077fa36217860efaec4576aa718e')
version('1.7.0', sha256='8e870dbef71bc72469933317a1a18214fd1b4b12f1080784af7b2c56177efcb4')
version('1.6.5', sha256='b70ae299ff6b63a9e9cbf697147a48a31b4639476d1947cb52e4201e444f23cb')
version('1.6.4', sha256='a4aa921030250f58015201e28204bff604a007defc5a379a608723e6bb1808d4')
# We've deprecated these versions, so that we can remove them in Spack 0.18
    # They are still available in Spack 0.17. Julia 1.7.0 is the first version that
    # can be built entirely from Spack packages, without a network connection during
# the build.
for v in [
'1.6.3', '1.6.2', '1.6.1', '1.6.0', '1.5.4', '1.5.3', '1.5.2', '1.5.1', '1.5.0',
'1.4.2', '1.4.1', '1.4.0', '1.3.1', '1.2.0', '1.1.1', '1.0.0', '0.6.2', '0.5.2',
'0.5.1', '0.5.0', '0.4.7', '0.4.6', '0.4.5', '0.4.3'
]:
version(v, deprecated=True)
variant('precompile', default=True, description='Improve julia startup time')
variant('openlibm', default=True, description='Use openlibm instead of libm')
# Note, we just use link_llvm_dylib so that we not only get a libLLVM,
# but also so that llvm-config --libfiles gives only the dylib. Without
# it it also gives static libraries, and breaks Julia's build.
depends_on('llvm targets=amdgpu,bpf,nvptx,webassembly version_suffix=jl +link_llvm_dylib ~internal_unwind')
depends_on('libuv')
with when('@1.7.0:1.7'):
# libssh2.so.1, libpcre2-8.so.0, mbedtls.so.13, mbedcrypto.so.5, mbedx509.so.1
# openlibm.so.3, (todo: complete this list for upperbounds...)
depends_on('llvm@12.0.1')
depends_on('libuv@1.42.0')
depends_on('mbedtls@2.24.0:2.24')
depends_on('openlibm@0.7.0:0.7', when='+openlibm')
depends_on('libblastrampoline@3.0.0:3')
with when('@1.6.0:1.6'):
# libssh2.so.1, libpcre2-8.so.0, mbedtls.so.13, mbedcrypto.so.5, mbedx509.so.1
# openlibm.so.3, (todo: complete this list for upperbounds...)
depends_on('llvm@11.0.1')
depends_on('libuv@1.39.0')
depends_on('mbedtls@2.24.0:2.24')
depends_on('openlibm@0.7.0:0.7', when='+openlibm')
# Patches for llvm
depends_on('llvm', patches='llvm7-symver-jlprefix.patch')
depends_on('llvm', when='^llvm@11.0.1', patches=patch(
'https://raw.githubusercontent.com/spack/patches/0b543955683a903d711a3e95ff29a4ce3951ca13/julia/llvm-11.0.1-julia-1.6.patch',
sha256='8866ee0595272b826b72d173301a2e625855e80680a84af837f1ed6db4657f42'))
depends_on('llvm', when='^llvm@12.0.1', patches=patch(
'https://github.com/JuliaLang/llvm-project/compare/fed41342a82f5a3a9201819a82bf7a48313e296b...980d2f60a8524c5546397db9e8bbb7d6ea56c1b7.patch',
sha256='10cb42f80c2eaad3e9c87cb818b6676f1be26737bdf972c77392d71707386aa4'))
depends_on('llvm', when='^llvm@13.0.0', patches=patch(
'https://github.com/JuliaLang/llvm-project/compare/d7b669b3a30345cfcdb2fde2af6f48aa4b94845d...6ced34d2b63487a88184c3c468ceda166d10abba.patch',
sha256='92f022176ab85ded517a9b7aa04df47e19a5def88f291e0c31100128823166c1'))
# Patches for libuv
depends_on('libuv', when='^libuv@1.39.0', patches=patch(
'https://raw.githubusercontent.com/spack/patches/b59ca193423c4c388254f528afabb906b5373162/julia/libuv-1.39.0.patch',
sha256='f7c1e7341e89dc35dfd85435ba35833beaef575b997c3f978c27d0dbf805149b'))
depends_on('libuv', when='^libuv@1.42.0', patches=patch(
'https://raw.githubusercontent.com/spack/patches/89b6d14eb1f3c3d458a06f1e06f7dda3ab67bd38/julia/libuv-1.42.0.patch',
sha256='d9252fbe67ac8f15e15653f0f6b00dffa07ae1a42f013d4329d17d8b492b7cdb'))
# patchelf 0.13 is required because the rpath patch uses --add-rpath
depends_on('patchelf@0.13:', type='build')
depends_on('perl', type='build')
depends_on('libwhich', type='build')
depends_on('blas') # note: for now openblas is fixed...
depends_on('curl tls=mbedtls +nghttp2 +libssh2')
depends_on('dsfmt@2.2.4:') # apparently 2.2.3->2.2.4 breaks API
depends_on('gmp')
depends_on('lapack') # note: for now openblas is fixed...
depends_on('libblastrampoline', when='@1.7.0:')
depends_on('libgit2')
depends_on('libssh2 crypto=mbedtls')
depends_on('mbedtls libs=shared')
depends_on('mpfr')
depends_on('nghttp2')
depends_on('openblas +ilp64 symbol_suffix=64_')
depends_on('openlibm', when='+openlibm')
depends_on('p7zip')
depends_on('pcre2')
depends_on('suite-sparse +pic')
depends_on('unwind')
depends_on('utf8proc')
depends_on('zlib +shared +pic +optimize')
# Patches for julia
patch('julia-1.6-system-libwhich-and-p7zip-symlink.patch', when='@1.6.0:1.6')
patch('use-add-rpath.patch')
# Fix gfortran abi detection https://github.com/JuliaLang/julia/pull/44026
patch('fix-gfortran.patch', when='@1.7.0:1.7.1')
def patch(self):
# The system-libwhich-libblastrampoline.patch causes a rebuild of docs as it
# touches the main Makefile, so we reset the a/m-time to doc/_build's.
f = os.path.join("doc", "_build", "html", "en", "index.html")
if os.path.exists(f):
time = (os.path.getatime(f), os.path.getmtime(f))
os.utime(os.path.join("base", "Makefile"), time)
def setup_build_environment(self, env):
# this is a bit ridiculous, but we are setting runtime linker paths to
# dependencies so that libwhich can locate them.
if (
self.spec.satisfies('platform=linux') or
self.spec.satisfies('platform=cray')
):
linker_var = 'LD_LIBRARY_PATH'
elif self.spec.satisfies('platform=darwin'):
linker_var = 'DYLD_FALLBACK_LIBRARY_PATH'
else:
return
pkgs = [
'curl', 'dsfmt', 'gmp', 'libgit2', 'libssh2', 'libunwind', 'mbedtls',
'mpfr', 'nghttp2', 'openblas', 'openlibm', 'pcre2', 'suite-sparse',
'utf8proc', 'zlib'
]
if self.spec.satisfies('@1.7.0:'):
pkgs.append('libblastrampoline')
for pkg in pkgs:
for dir in self.spec[pkg].libs.directories:
env.prepend_path(linker_var, dir)
def edit(self, spec, prefix):
# TODO: use a search query for blas / lapack?
libblas = os.path.splitext(spec['blas'].libs.basenames[0])[0]
liblapack = os.path.splitext(spec['lapack'].libs.basenames[0])[0]
# Host compiler target name
march = get_best_target(spec.target, spec.compiler.name, spec.compiler.version)
# LLVM compatible name for the JIT
julia_cpu_target = get_best_target(spec.target, 'clang', spec['llvm'].version)
options = [
'prefix:={0}'.format(prefix),
'MARCH:={0}'.format(march),
'JULIA_CPU_TARGET:={0}'.format(julia_cpu_target),
'USE_BINARYBUILDER:=0',
'VERBOSE:=1',
# Spack managed dependencies
'USE_SYSTEM_BLAS:=1',
'USE_SYSTEM_CSL:=1',
'USE_SYSTEM_CURL:=1',
'USE_SYSTEM_DSFMT:=1',
'USE_SYSTEM_GMP:=1',
'USE_SYSTEM_LAPACK:=1',
'USE_SYSTEM_LIBBLASTRAMPOLINE:=1',
'USE_SYSTEM_LIBGIT2:=1',
'USE_SYSTEM_LIBSSH2:=1',
'USE_SYSTEM_LIBSUITESPARSE:=1', # @1.7:
'USE_SYSTEM_SUITESPARSE:=1', # @:1.6
'USE_SYSTEM_LIBUNWIND:=1',
'USE_SYSTEM_LIBUV:=1',
'USE_SYSTEM_LIBWHICH:=1',
'USE_SYSTEM_LLVM:=1',
'USE_SYSTEM_MBEDTLS:=1',
'USE_SYSTEM_MPFR:=1',
'USE_SYSTEM_P7ZIP:=1',
'USE_SYSTEM_PATCHELF:=1',
'USE_SYSTEM_PCRE:=1',
'USE_SYSTEM_UTF8PROC:=1',
'USE_SYSTEM_ZLIB:=1',
# todo: ilp depends on arch
'USE_BLAS64:=1',
'LIBBLASNAME:={0}'.format(libblas),
'LIBLAPACKNAME:={0}'.format(liblapack),
'override LIBUV:={0}'.format(spec['libuv'].libs.libraries[0]),
'override LIBUV_INC:={0}'.format(spec['libuv'].headers.directories[0]),
'override USE_LLVM_SHLIB:=1',
# make rebuilds a bit faster for now, not sure if this should be kept
'JULIA_PRECOMPILE:={0}'.format(
'1' if spec.variants['precompile'].value else '0'),
]
# libm or openlibm?
if spec.variants['openlibm'].value:
options.append('USE_SYSTEM_LIBM=0')
options.append('USE_SYSTEM_OPENLIBM=1')
else:
options.append('USE_SYSTEM_LIBM=1')
options.append('USE_SYSTEM_OPENLIBM=0')
with open('Make.user', 'w') as f:
f.write('\n'.join(options) + '\n')
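# --- Hedged usage note (not part of the original package) ---
# Typical Spack CLI invocations for this recipe (standard spack commands;
# the version/variant chosen below is just an example):
#   spack install julia@1.7.2 +openlibm
#   spack load julia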
|
[
"os.path.exists",
"spack.version.ver",
"os.path.splitext",
"os.path.getmtime",
"os.path.getatime",
"os.path.join"
] |
[((6242, 6299), 'os.path.join', 'os.path.join', (['"""doc"""', '"""_build"""', '"""html"""', '"""en"""', '"""index.html"""'], {}), "('doc', '_build', 'html', 'en', 'index.html')\n", (6254, 6299), False, 'import os\n'), ((6311, 6328), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (6325, 6328), False, 'import os\n'), ((426, 457), 'spack.version.ver', 'ver', (["compiler_entry['versions']"], {}), "(compiler_entry['versions'])\n", (429, 457), False, 'from spack.version import ver\n'), ((7489, 7537), 'os.path.splitext', 'os.path.splitext', (["spec['blas'].libs.basenames[0]"], {}), "(spec['blas'].libs.basenames[0])\n", (7505, 7537), False, 'import os\n'), ((7561, 7611), 'os.path.splitext', 'os.path.splitext', (["spec['lapack'].libs.basenames[0]"], {}), "(spec['lapack'].libs.basenames[0])\n", (7577, 7611), False, 'import os\n'), ((6350, 6369), 'os.path.getatime', 'os.path.getatime', (['f'], {}), '(f)\n', (6366, 6369), False, 'import os\n'), ((6371, 6390), 'os.path.getmtime', 'os.path.getmtime', (['f'], {}), '(f)\n', (6387, 6390), False, 'import os\n'), ((6413, 6445), 'os.path.join', 'os.path.join', (['"""base"""', '"""Makefile"""'], {}), "('base', 'Makefile')\n", (6425, 6445), False, 'import os\n')]
|
"""
@author: lxy
@email: <EMAIL>
@date: 2021/12/9
@description: This module maintains reproduction results for various models. After running an experiment, users can pull the results and generate the corresponding LaTeX table in one click, instead of copying numbers into the paper by hand.
https://pandas.pydata.org/docs/reference/api/pandas.io.formats.style.Styler.to_latex.html
"""
import os
from pathlib import Path
from typing import List, Union, Dict
import pandas as pd
from toolbox.exp.OutputSchema import OutputPathSchema
def load_excel(filename: str, path: str = os.getcwd(), **kwargs) -> pd.DataFrame:
"""
Load the .xlsx-file via pandas into pd.DataFrame format.
args:
filename (str): name of .xlsx-file
path (str, optional): absolute path to file
**kwargs: see pandas.read_excel documentation for additional arguments
returns:
pd.DataFrame: dataframe with data
"""
if not os.path.isfile(os.path.join(path, filename)):
raise OSError(f"File '{os.path.join(path, filename)}' not found.")
dataframe = pd.read_excel(os.path.join(path, filename), **kwargs)
return dataframe
def generate_header(orientation: list, number_columns: int, complete_document: bool, disable_debug: bool) -> str:
"""
Generate table header.
args:
orientation (list): orientation of individual columns (left | center | right).
If only one format is specified it will be applied to all columns.
        number_columns (int): number of columns
        complete_document (bool): emit a standalone-document preamble
        disable_debug (bool): skip the package-info comment block
returns:
str: table header
"""
ORIENTATION = {'left': 'l', 'center': 'c', 'right': 'r'}
column_orientation = ''
if number_columns != len(orientation):
for index in range(number_columns):
column_orientation += ORIENTATION[orientation[0]]
else:
for index, column in enumerate(orientation):
column_orientation += ORIENTATION[column.lower()]
if not disable_debug and not complete_document:
return '''% Include these packages\n% Figure Orientation\n% \\usepackage{float}\n% Booktabs for nice tables\n% \\usepackage{booktabs}\n% color for row coloring\n% \\usepackage{xcolor, colortbl}\n% \\definecolor{gray}{rgb}{0.85, 0.85, 0.85}\n\n\\begin{table}[H]\n\\centering\n\\begin{tabular}{''' + column_orientation + '''}\n'''
elif complete_document:
return '''\\documentclass[a4paper, 12pt]{article}\n\n\\usepackage[utf8]{inputenc}\n\\usepackage[T1]{fontenc}\n\\usepackage[english]{babel}\n\\usepackage[a4paper, left=2.5cm, right=2.5cm, top=2.5cm, bottom=3cm]{geometry}\n\\usepackage{float}\n\\usepackage{booktabs}\n\\usepackage{xcolor, colortbl}\n\\definecolor{gray}{rgb}{0.85, 0.85, 0.85}\n\n\\begin{document}\n\n\\begin{table}[H]\n\\centering\n\\begin{tabular}{''' + column_orientation + '''}\n'''
else:
return '''\\begin{table}[H]\n\\centering\n\\begin{tabular}{''' + column_orientation + '''}\n'''
def generate_body(dataframe: pd.DataFrame, striped: bool = True, is_numeric: bool = True, decimal_sep: str = '.') -> str:
"""
Generate table body.
args:
dataframe (pd.DataFrame):
striped (bool, optional): True for striped row color
is_numeric (bool, optional): columns contain numeric values, additional math mode signs will be added
decimal_sep (str, optional): convert decimal separator (decimal point as default)
returns:
str: table body
"""
body = '''\\toprule\n'''
column_names = dataframe.columns
for index, column in enumerate(column_names):
body += column
if index == (len(column_names) - 1):
body += '\\\\ \n\\midrule\n'
else:
body += ' & '
for row_index, row in dataframe.iterrows():
        if striped and isinstance(row_index, int) and (row_index % 2) == 0:  # honor the striped flag
body += '\\rowcolor{gray} '
for index, item in enumerate(row):
if is_numeric:
body += "$%s$" % str(item).replace('.', decimal_sep)
else:
body += str(item).replace('.', decimal_sep)
if index == (len(row) - 1):
body += '\\\\\n'
else:
body += ' & '
return body + '\\bottomrule\n'
def generate_footer(caption: str = '', complete_document: bool = False) -> str:
"""
Generate table footer.
args:
caption (str, optional): table caption, blank if not specified
returns:
str: table footer
"""
if not complete_document:
return '''\\end{tabular}\n\\caption{''' + caption + '''}\n\\end{table}'''
else:
return '''\\end{tabular}\n\\caption{''' + caption + '''}\n\\end{table}\n\n\\end{document}'''
def save_dataframe_to_latex_by_path(dataframe: pd.DataFrame,
path: Path,
orientation: list = ['left'],
caption: str = 'Table',
striped: bool = True,
is_numeric: bool = False,
decimal_sep: str = '.',
overwrite: bool = True,
complete_document: bool = False,
disable_debug: bool = False) -> None:
save_dataframe_to_latex(dataframe,
str(path.name),
str(path.parent.absolute()),
orientation, caption, striped, is_numeric, decimal_sep, overwrite, complete_document, disable_debug)
def save_dataframe_to_latex(dataframe: pd.DataFrame,
filename: str,
path: str = os.getcwd(),
orientation: list = ['left'],
caption: str = 'Table',
striped: bool = True,
is_numeric: bool = False,
decimal_sep: str = '.',
overwrite: bool = True,
complete_document: bool = False,
disable_debug: bool = False) -> None:
"""
Parse pandas dataframe to LaTeX table format.
args:
dataframe (pd.DataFrame): dataframe with data
filename (str): filename of output file
path (str, optional): path to output file
        orientation (list, optional): orientation of individual columns
        caption (str, optional): table caption
striped (bool, optional): True if rows with striped color
is_numeric (bool, optional): True if columns contain numeric values
decimal_sep (str, optional): specify decimal separator
overwrite (bool, optional): overwrite output file if already exists
complete_document (bool, optional): if True outputs a minimalist LaTeX document
disable_debug (bool, optional): if True package-info won't be written into the output file
"""
table = generate_header(orientation, len(dataframe.columns), complete_document=complete_document, disable_debug=disable_debug) + \
generate_body(dataframe, striped=striped, is_numeric=is_numeric, decimal_sep=decimal_sep) + \
generate_footer(caption=caption, complete_document=complete_document)
if os.path.isfile(os.path.join(path, filename)) and not overwrite:
raise IOError(f'File {os.path.join(path, filename)} already exists. Specify a different filename or set overwrite=True.')
with open(os.path.join(path, filename), 'w') as file:
file.write(table)
def result_dict_to_dataframe(result_dict: Dict[str, List[Union[str, int, float]]]) -> pd.DataFrame:
header_key = list(result_dict.keys())[0]
header = result_dict[header_key]
new_dict = {}
for key, value in result_dict.items():
if key == header_key:
continue
new_dict[key] = value
return pd.DataFrame.from_dict(new_dict, orient='index', columns=header)
class LaTeXStoreSchema:
"""保存结果为LaTeX"""
def __init__(self, path: OutputPathSchema, scope: str, best_latex_filename="best.tex"):
self.path = path
self.scope = scope
self.best_latex_path: Path = path.latex_path(self.scope + best_latex_filename)
self.last_best_score = 0
def save_best_result(self, result_dict: Dict[str, List[Union[str, int, float]]]):
df = result_dict_to_dataframe(result_dict)
self.save_best(df)
def save_result_by_score(self, result_dict: Dict[str, List[Union[str, int, float]]], score):
df = result_dict_to_dataframe(result_dict)
self.save_by_score(df, score)
def save_best(self, df: pd.DataFrame):
save_dataframe_to_latex_by_path(df, self.best_latex_path)
def save_by_score(self, df: pd.DataFrame, score: float):
save_dataframe_to_latex_by_path(df, self.latex_path_with_score(score), caption=f"Table-score-{score}")
def latex_path_with_score(self, score: float):
return self.path.latex_path(self.scope + "-score-" + str(score) + ".tex")
class EvaluateLaTeXStoreSchema:
"""保存结果为LaTeX"""
def __init__(self, path: OutputPathSchema, best_latex_filename="best.tex"):
self.valid_latex_store = LaTeXStoreSchema(path, "valid", best_latex_filename)
self.test_latex_store = LaTeXStoreSchema(path, "test", best_latex_filename)
def save_best_valid_result(self, result_dict: Dict[str, List[Union[str, int, float]]]):
self.valid_latex_store.save_best_result(result_dict)
def save_valid_result_by_score(self, result_dict: Dict[str, List[Union[str, int, float]]], score):
self.valid_latex_store.save_result_by_score(result_dict, score)
def save_best_test_result(self, result_dict: Dict[str, List[Union[str, int, float]]]):
self.test_latex_store.save_best_result(result_dict)
def save_test_result_by_score(self, result_dict: Dict[str, List[Union[str, int, float]]], score):
self.test_latex_store.save_result_by_score(result_dict, score)
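# --- Hedged usage sketch (not part of the original module) ---
# Minimal end-to-end example: build a tiny DataFrame and emit a LaTeX table
# into the current directory (filename and numbers are placeholders).
if __name__ == "__main__":
    demo = pd.DataFrame({"model": ["A", "B"], "MRR": [0.31, 0.42]})
    save_dataframe_to_latex(demo, "demo_table.tex", caption="Demo results")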
|
[
"os.getcwd",
"os.path.join",
"pandas.DataFrame.from_dict"
] |
[((411, 422), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (420, 422), False, 'import os\n'), ((5605, 5616), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5614, 5616), False, 'import os\n'), ((7827, 7891), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['new_dict'], {'orient': '"""index"""', 'columns': 'header'}), "(new_dict, orient='index', columns=header)\n", (7849, 7891), True, 'import pandas as pd\n'), ((933, 961), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (945, 961), False, 'import os\n'), ((796, 824), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (808, 824), False, 'import os\n'), ((7226, 7254), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7238, 7254), False, 'import os\n'), ((7420, 7448), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7432, 7448), False, 'import os\n'), ((858, 886), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (870, 886), False, 'import os\n'), ((7305, 7333), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7317, 7333), False, 'import os\n')]
|
#Copyright (c) 2018-2020 Analog Devices, Inc. All Rights Reserved.
#This software is proprietary to Analog Devices, Inc. and its licensors.
#
#Author: <NAME>
#requires pythonnet to be installed (pip install pythonnet)
import clr
from time import sleep
import os
#get path to resources folder and dll
topDir = os.path.join(os.getcwd(), r'..\..\..')  # raw string avoids invalid-escape warnings
#Load FX3 API Wrapper DLL
clr.AddReference(topDir + '\\resources\\FX3ApiWrapper.dll')
#Allows wrapper to be treated like standard python library
from FX3ApiWrapper import *
from System import Array
from System import String
#Create FX3 Wrapper and load ADIS1650x regmap
Dut = Wrapper(topDir + '\\resources\\', topDir + '\\src_wrapper\\regmaps\\ADIS1650x_Regmap.csv',0)
print(Dut.FX3.GetFirmwareVersion)
Dut.UserLEDBlink(2.0)
#Create reg list
regs_py = ['DIAG_STAT','DATA_CNTR','X_GYRO_OUT','Y_GYRO_OUT','Z_GYRO_OUT','X_ACCL_OUT','Y_ACCL_OUT','Z_ACCL_OUT']
regs = Array[String](regs_py)
data = []
while True:
data = Dut.ReadSigned(regs)
for i in (data):
print(i, end =" ")
print()
sleep(0.5)
|
[
"os.getcwd",
"clr.AddReference",
"time.sleep"
] |
[((377, 436), 'clr.AddReference', 'clr.AddReference', (["(topDir + '\\\\resources\\\\FX3ApiWrapper.dll')"], {}), "(topDir + '\\\\resources\\\\FX3ApiWrapper.dll')\n", (393, 436), False, 'import clr\n'), ((325, 336), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (334, 336), False, 'import os\n'), ((1063, 1073), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (1068, 1073), False, 'from time import sleep\n')]
|
import time
from torba.server.block_processor import BlockProcessor
from lbry.schema.claim import Claim
from lbry.wallet.server.db.writer import SQLDB
class Timer:
def __init__(self, name):
self.name = name
self.total = 0
self.count = 0
self.sub_timers = {}
self._last_start = None
def add_timer(self, name):
if name not in self.sub_timers:
self.sub_timers[name] = Timer(name)
return self.sub_timers[name]
def run(self, func, *args, forward_timer=False, timer_name=None, **kwargs):
t = self.add_timer(timer_name or func.__name__)
t.start()
try:
if forward_timer:
return func(*args, **kwargs, timer=t)
else:
return func(*args, **kwargs)
finally:
t.stop()
def start(self):
self._last_start = time.time()
return self
def stop(self):
self.total += (time.time() - self._last_start)
self.count += 1
self._last_start = None
return self
def show(self, depth=0, height=None):
if depth == 0:
print('='*100)
if height is not None:
print(f'STATISTICS AT HEIGHT {height}')
print('='*100)
else:
print(
f"{' '*depth} {self.total/60:4.2f}mins {self.name}"
# f"{self.total/self.count:.5f}sec/call, "
)
for sub_timer in self.sub_timers.values():
sub_timer.show(depth+1)
if depth == 0:
print('='*100)
class LBRYBlockProcessor(BlockProcessor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.env.coin.NET == "regtest":
self.prefetcher.polling_delay = 0.5
self.should_validate_signatures = self.env.boolean('VALIDATE_CLAIM_SIGNATURES', False)
self.logger.info(f"LbryumX Block Processor - Validating signatures: {self.should_validate_signatures}")
self.sql: SQLDB = self.db.sql
self.timer = Timer('BlockProcessor')
self.search_cache = {}
def advance_blocks(self, blocks):
self.sql.begin()
try:
self.timer.run(super().advance_blocks, blocks)
except:
self.logger.exception(f'Error while advancing transaction in new block.')
raise
finally:
self.sql.commit()
if self.db.first_sync and self.height == self.daemon.cached_height():
self.timer.run(self.sql.db.executescript, self.sql.SEARCH_INDEXES, timer_name='executing SEARCH_INDEXES')
if self.env.individual_tag_indexes:
self.timer.run(self.sql.db.executescript, self.sql.TAG_INDEXES, timer_name='executing TAG_INDEXES')
for cache in self.search_cache.values():
cache.clear()
def advance_txs(self, height, txs, header):
timer = self.timer.sub_timers['advance_blocks']
undo = timer.run(super().advance_txs, height, txs, header, timer_name='super().advance_txs')
timer.run(self.sql.advance_txs, height, txs, header, self.daemon.cached_height(), forward_timer=True)
if (height % 10000 == 0 or not self.db.first_sync) and self.logger.isEnabledFor(20):
self.timer.show(height=height)
return undo
def _checksig(self, value, address):
try:
claim_dict = Claim.from_bytes(value)
cert_id = claim_dict.signing_channel_hash
if not self.should_validate_signatures:
return cert_id
if cert_id:
cert_claim = self.db.get_claim_info(cert_id)
if cert_claim:
certificate = Claim.from_bytes(cert_claim.value)
claim_dict.validate_signature(address, certificate)
return cert_id
except Exception:
pass
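# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the Timer helper on its own; time.sleep stands in for work.
if __name__ == "__main__":
    root = Timer("demo")
    root.start()
    root.run(time.sleep, 0.01, timer_name="tiny_sleep")
    root.stop()
    root.show()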
|
[
"lbry.schema.claim.Claim.from_bytes",
"time.time"
] |
[((890, 901), 'time.time', 'time.time', ([], {}), '()\n', (899, 901), False, 'import time\n'), ((966, 977), 'time.time', 'time.time', ([], {}), '()\n', (975, 977), False, 'import time\n'), ((3431, 3454), 'lbry.schema.claim.Claim.from_bytes', 'Claim.from_bytes', (['value'], {}), '(value)\n', (3447, 3454), False, 'from lbry.schema.claim import Claim\n'), ((3742, 3776), 'lbry.schema.claim.Claim.from_bytes', 'Claim.from_bytes', (['cert_claim.value'], {}), '(cert_claim.value)\n', (3758, 3776), False, 'from lbry.schema.claim import Claim\n')]
|
from django.urls import path
from .views import emailView, successView
urlpatterns = [
path('contact/', emailView, name='contact'),
path('success/', successView, name='success'),
]
|
[
"django.urls.path"
] |
[((93, 136), 'django.urls.path', 'path', (['"""contact/"""', 'emailView'], {'name': '"""contact"""'}), "('contact/', emailView, name='contact')\n", (97, 136), False, 'from django.urls import path\n'), ((142, 187), 'django.urls.path', 'path', (['"""success/"""', 'successView'], {'name': '"""success"""'}), "('success/', successView, name='success')\n", (146, 187), False, 'from django.urls import path\n')]
|
## Process profiling traces
# extracted with: python -m cProfile -o profile $(which py.test)
# python -m cProfile -o profile c:\Python27\Scripts\py.test-2.7-script.py
import pstats
p = pstats.Stats('profile')
p.sort_stats("tottime")
p.print_stats()
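# A couple of further, commonly useful views (standard pstats API):
p.sort_stats("cumulative").print_stats(20)  # top 20 entries by cumulative time
p.print_callers(10)                         # callers feeding the hottest entries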
|
[
"pstats.Stats"
] |
[((188, 211), 'pstats.Stats', 'pstats.Stats', (['"""profile"""'], {}), "('profile')\n", (200, 211), False, 'import pstats\n')]
|
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tarfile
import tempfile
import shutil
import json
import pydicom
from loguru import logger
from pymedphys.experimental.pinnacle import PinnacleExport
from platipy.backend import app, DataObject, celery
PINNACLE_EXPORT_SETTINGS_DEFAULTS = {
"exportModalities": ["CT", "RTSTRUCT", "RTPLAN", "RTDOSE"],
"exportSeriesUIDs": [],
}
@app.register("Pinnacle Export", default_settings=PINNACLE_EXPORT_SETTINGS_DEFAULTS)
def pinnacle_export_service(data_objects, working_dir, settings):
"""
Implements the platipy framework to provide a pinnacle tar export service
"""
logger.info("Running Pinnacle Export")
logger.info("Using settings: " + str(settings))
return_objects = []
for data_object in data_objects:
logger.info("Running on data object: " + data_object.path)
if not data_object.type == "FILE" or not tarfile.is_tarfile(data_object.path):
logger.error(
f"Can only process TAR file. Skipping file: {data_object.path}"
)
continue
archive_path = tempfile.mkdtemp()
# Extract the tar archive
tar = tarfile.open(data_object.path)
for member in tar.getmembers():
if not ":" in member.name:
tar.extract(member, path=archive_path)
# Read the path to the patient directory from the data object meta data
pat_path = data_object.meta_data["patient_path"]
pinn_extracted = os.path.join(archive_path, pat_path)
pinn = PinnacleExport(pinn_extracted, None)
# Find the plan we want to export in the list of plans
if len(pinn.plans) == 0:
logger.error("No Plans found for patient")
continue
export_plan = None
for plan in pinn.plans:
if (
"plan_name" in data_object.meta_data.keys()
and plan.plan_info["PlanName"] == data_object.meta_data["plan_name"]
):
export_plan = plan
break
if export_plan is None:
export_plan = plan
# If a trial was given, try to find it and set it
for trial in export_plan.trials:
trial_name = trial["Name"]
if (
"trial" in data_object.meta_data.keys()
and trial_name == data_object.meta_data["trial"]
):
export_plan.active_trial = trial_name
output_dir = os.path.join(working_dir, str(data_object.id))
if os.path.exists(output_dir):
# Just in case it was already run for this data object, lets remove all old output
shutil.rmtree(output_dir)
os.makedirs(output_dir)
if "CT" in settings["exportModalities"]:
logger.info("Exporting Primary CT")
pinn.export_image(export_plan.primary_image, export_path=output_dir)
if "RTSTRUCT" in settings["exportModalities"]:
logger.info("Exporting RTSTRUCT")
pinn.export_struct(export_plan, output_dir)
if "RTPLAN" in settings["exportModalities"]:
logger.info("Exporting RTPLAN")
pinn.export_plan(export_plan, output_dir)
if "RTDOSE" in settings["exportModalities"]:
logger.info("Exporting RTDOSE")
pinn.export_dose(export_plan, output_dir)
for image in pinn.images:
if image.image_info[0]["SeriesUID"] in settings["exportSeriesUIDs"]:
pinn.export_image(image, export_path=output_dir)
# Find the output files
output_files = os.listdir(output_dir)
output_files.sort()
output_objects = [os.path.join(output_dir, f) for f in output_files]
# Create the output data objects
for obj in output_objects:
# Write some meta data to patient comments field
file_name = os.path.basename(obj)
if file_name.startswith("R"): # Don't add to the image series
dicom_dataset = pydicom.read_file(obj)
meta_data = {}
meta_data["service"] = {
"tool": "Pinnacel Export Tool",
"trial": export_plan.active_trial["Name"],
"plan_date": export_plan.active_trial["ObjectVersion"][
"WriteTimeStamp"
],
"plan_locked": export_plan.plan_info["PlanIsLocked"],
}
if dicom_dataset.Modality == "RTPLAN":
meta_data["warning"] = (
"WARNING: OUTPUT GENERATED FOR RTPLAN FILE IS "
"UNVERIFIED AND MOST LIKELY INCORRECT!"
)
if "meta" in data_object.meta_data.keys():
meta_data["meta"] = data_object.meta_data["meta"]
if dicom_dataset.Modality == "RTPLAN":
dicom_dataset.RTPlanDescription = (
"Pinnacle Export Meta Data written to "
"SOPAuthorizationComment"
)
dicom_dataset.SOPAuthorizationComment = json.dumps(meta_data)
dicom_dataset.save_as(obj)
output_data_object = DataObject(type="DICOM", path=obj, parent=data_object)
return_objects.append(output_data_object)
# Delete files extracted from TAR
shutil.rmtree(archive_path)
logger.info("Finished Pinnacle Export")
return return_objects
if __name__ == "__main__":
# Run app by calling "python service.py" from the command line
DICOM_LISTENER_PORT = 7777
DICOM_LISTENER_AETITLE = "PINNACLE_EXPORT_SERVICE"
app.run(
debug=True,
host="0.0.0.0",
port=8001,
dicom_listener_port=DICOM_LISTENER_PORT,
dicom_listener_aetitle=DICOM_LISTENER_AETITLE,
)
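# --- Hedged usage note (not part of the original service) ---
# The service expects a "FILE" DataObject wrapping a Pinnacle TAR archive,
# with meta_data supplying at least "patient_path" and optionally
# "plan_name"/"trial" (the keys read above); values here are hypothetical:
#   meta_data = {"patient_path": "Institution_1/Mount_0/Patient_1",
#                "plan_name": "Plan_0", "trial": "Trial_1"}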
|
[
"os.listdir",
"platipy.backend.DataObject",
"os.makedirs",
"pydicom.read_file",
"loguru.logger.error",
"os.path.basename",
"tarfile.is_tarfile",
"os.path.exists",
"json.dumps",
"platipy.backend.app.register",
"loguru.logger.info",
"tempfile.mkdtemp",
"pymedphys.experimental.pinnacle.PinnacleExport",
"tarfile.open",
"shutil.rmtree",
"os.path.join",
"platipy.backend.app.run"
] |
[((989, 1077), 'platipy.backend.app.register', 'app.register', (['"""Pinnacle Export"""'], {'default_settings': 'PINNACLE_EXPORT_SETTINGS_DEFAULTS'}), "('Pinnacle Export', default_settings=\n    PINNACLE_EXPORT_SETTINGS_DEFAULTS)\n", (1001, 1077), False, 'from platipy.backend import app, DataObject, celery\n'), ((1238, 1276), 'loguru.logger.info', 'logger.info', (['"""Running Pinnacle Export"""'], {}), "('Running Pinnacle Export')\n", (1249, 1276), False, 'from loguru import logger\n'), ((6083, 6122), 'loguru.logger.info', 'logger.info', (['"""Finished Pinnacle Export"""'], {}), "('Finished Pinnacle Export')\n", (6094, 6122), False, 'from loguru import logger\n'), ((6339, 6478), 'platipy.backend.app.run', 'app.run', ([], {'debug': '(True)', 'host': '"""0.0.0.0"""', 'port': '(8001)', 'dicom_listener_port': 'DICOM_LISTENER_PORT', 'dicom_listener_aetitle': 'DICOM_LISTENER_AETITLE'}), "(debug=True, host='0.0.0.0', port=8001, dicom_listener_port=\n    DICOM_LISTENER_PORT, dicom_listener_aetitle=DICOM_LISTENER_AETITLE)\n", (6346, 6478), False, 'from platipy.backend import app, DataObject, celery\n'), ((1399, 1457), 'loguru.logger.info', 'logger.info', (["('Running on data object: ' + data_object.path)"], {}), "('Running on data object: ' + data_object.path)\n", (1410, 1457), False, 'from loguru import logger\n'), ((1711, 1729), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1727, 1729), False, 'import tempfile\n'), ((1779, 1809), 'tarfile.open', 'tarfile.open', (['data_object.path'], {}), '(data_object.path)\n', (1791, 1809), False, 'import tarfile\n'), ((2107, 2143), 'os.path.join', 'os.path.join', (['archive_path', 'pat_path'], {}), '(archive_path, pat_path)\n', (2119, 2143), False, 'import os\n'), ((2160, 2196), 'pymedphys.experimental.pinnacle.PinnacleExport', 'PinnacleExport', (['pinn_extracted', 'None'], {}), '(pinn_extracted, None)\n', (2174, 2196), False, 'from pymedphys.experimental.pinnacle import PinnacleExport\n'), ((3162, 3188), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (3176, 3188), False, 'import os\n'), ((3331, 3354), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (3342, 3354), False, 'import os\n'), ((4233, 4255), 'os.listdir', 'os.listdir', (['output_dir'], {}), '(output_dir)\n', (4243, 4255), False, 'import os\n'), ((6050, 6077), 'shutil.rmtree', 'shutil.rmtree', (['archive_path'], {}), '(archive_path)\n', (6063, 6077), False, 'import shutil\n'), ((1558, 1635), 'loguru.logger.error', 'logger.error', (['f"""Can only process TAR file. Skipping file: {data_object.path}"""'], {}), "(f'Can only process TAR file. \nSkipping file: {data_object.path}')\n", (1570, 1635), False, 'from loguru import logger\n'), ((2306, 2348), 'loguru.logger.error', 'logger.error', (['"""No Plans found for patient"""'], {}), "('No Plans found for patient')\n", (2318, 2348), False, 'from loguru import logger\n'), ((3297, 3322), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {}), '(output_dir)\n', (3310, 3322), False, 'import shutil\n'), ((3417, 3452), 'loguru.logger.info', 'logger.info', (['"""Exporting Primary CT"""'], {}), "('Exporting Primary CT')\n", (3428, 3452), False, 'from loguru import logger\n'), ((3602, 3635), 'loguru.logger.info', 'logger.info', (['"""Exporting RTSTRUCT"""'], {}), "('Exporting RTSTRUCT')\n", (3613, 3635), False, 'from loguru import logger\n'), ((3758, 3789), 'loguru.logger.info', 'logger.info', (['"""Exporting RTPLAN"""'], {}), "('Exporting RTPLAN')\n", (3769, 3789), False, 'from loguru import logger\n'), ((3910, 3941), 'loguru.logger.info', 'logger.info', (['"""Exporting RTDOSE"""'], {}), "('Exporting RTDOSE')\n", (3921, 3941), False, 'from loguru import logger\n'), ((4310, 4337), 'os.path.join', 'os.path.join', (['output_dir', 'f'], {}), '(output_dir, f)\n', (4322, 4337), False, 'import os\n'), ((4524, 4545), 'os.path.basename', 'os.path.basename', (['obj'], {}), '(obj)\n', (4540, 4545), False, 'import os\n'), ((5890, 5944), 'platipy.backend.DataObject', 'DataObject', ([], {'type': '"""DICOM"""', 'path': 'obj', 'parent': 'data_object'}), "(type='DICOM', path=obj, parent=data_object)\n", (5900, 5944), False, 'from platipy.backend import app, DataObject, celery\n'), ((1508, 1544), 'tarfile.is_tarfile', 'tarfile.is_tarfile', (['data_object.path'], {}), '(data_object.path)\n', (1526, 1544), False, 'import tarfile\n'), ((4654, 4676), 'pydicom.read_file', 'pydicom.read_file', (['obj'], {}), '(obj)\n', (4671, 4676), False, 'import pydicom\n'), ((5790, 5811), 'json.dumps', 'json.dumps', (['meta_data'], {}), '(meta_data)\n', (5800, 5811), False, 'import json\n')]
|
import datetime
from datetime import datetime as dt
from datetime import timedelta
import pandas as pd
import plotly.express as px
import psycopg2.extras
import streamlit as st
import yfinance as yf
from database import connection, cursor
@st.cache
def query(sql):
cursor.execute(sql)
results = cursor.fetchall()
return results
def app():
# Main
st.title("Trending Stocks on r/wallstreetbets")
st.success("Reddit Comment Volume Analysis")
# Siderbar
d = st.sidebar.date_input("Select a date", datetime.date(2021, 1, 29), min_value=datetime.date(2021, 1, 1),
max_value=datetime.date(2021, 11, 28))
num = st.sidebar.number_input("Most Mentioned Symbols amount", value=10, min_value=1, max_value=100)
st.sidebar.markdown("---")
# Query
try:
results = query("""
SELECT "Symbol" , ("{0}") FROM reddit_comment_volume WHERE NOT "Symbol" = 'comment_volume' order by ("{0}") DESC LIMIT {1};
""".format(
d.strftime("%Y-%m-%d %H:%M:%S"), num))
symbol_list = [x[0] for x in results]
volume_list = [x[1] for x in results]
# Metrics
container_1 = st.container()
with container_1:
col1, col2, col3 = st.columns(3)
col1.metric(label="Most Mentioned Symbol", value=symbol_list[0])
col2.metric(label="No. of Comments", value=volume_list[0])
end = dt(d.year, d.month, d.day)
start = end - timedelta(1)
his_stock_price = yf.download(symbol_list[0], start=start, end=end)
print(his_stock_price)
if len(his_stock_price) > 0:
his_stock_price = "{0:.2f}".format(his_stock_price['Close'][0])
col3.metric("Previous Day Stock Closing Price", value=his_stock_price)
container_2 = st.container()
with container_2:
col1, col2 = st.columns(2)
with col1:
fig = px.bar(x=symbol_list, y=volume_list, labels={'x': 'Symbol', 'y': 'Volume'},
title="Top {0} mentioned stocks on {1}".format(int(num), d.strftime("%Y-%m-%d")))
st.plotly_chart(fig)
results = query("""
SELECT body, "Symbol" FROM reddit_data WHERE created_utc = '{0}' and "Symbol" != '' LIMIT 20;
""".format(d.strftime("%Y-%m-%d %H:%M:%S"), ))
comment_list = [x[0] for x in results]
comment_symbol_list = [x[1] for x in results]
comments = {
"20 Sample Comments of the Day": comment_list,
"Stock": comment_symbol_list
}
df = pd.DataFrame(comments)
st.table(df)
except (Exception, psycopg2.Error) as error:
print("Error while fetching data from PostgreSQL", error)
st.warning("That date does not contain any comments after data processing, please select another date.")
connection.rollback()
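# Running note (a sketch, not from the original repo): with a `database`
# module exposing a live Postgres `connection`/`cursor` and the
# reddit_comment_volume / reddit_data tables queried above, the page runs via
# `streamlit run <this_file>.py` (filename placeholder is hypothetical).
# The @st.cache decorator memoizes query() by its SQL string, so re-renders
# with the same date and limit skip the database round-trip.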
|
[
"pandas.DataFrame",
"streamlit.sidebar.number_input",
"streamlit.columns",
"database.connection.rollback",
"streamlit.plotly_chart",
"streamlit.table",
"yfinance.download",
"datetime.date",
"streamlit.title",
"datetime.datetime",
"streamlit.container",
"streamlit.sidebar.markdown",
"datetime.timedelta",
"streamlit.success",
"database.cursor.execute",
"streamlit.warning",
"database.cursor.fetchall"
] |
[((273, 292), 'database.cursor.execute', 'cursor.execute', (['sql'], {}), '(sql)\n', (287, 292), False, 'from database import connection, cursor\n'), ((307, 324), 'database.cursor.fetchall', 'cursor.fetchall', ([], {}), '()\n', (322, 324), False, 'from database import connection, cursor\n'), ((372, 419), 'streamlit.title', 'st.title', (['"""Trending Stocks on r/wallstreetbets"""'], {}), "('Trending Stocks on r/wallstreetbets')\n", (380, 419), True, 'import streamlit as st\n'), ((424, 468), 'streamlit.success', 'st.success', (['"""Reddit Comment Volume Analysis"""'], {}), "('Reddit Comment Volume Analysis')\n", (434, 468), True, 'import streamlit as st\n'), ((676, 774), 'streamlit.sidebar.number_input', 'st.sidebar.number_input', (['"""Most Mentioned Symbols amount"""'], {'value': '(10)', 'min_value': '(1)', 'max_value': '(100)'}), "('Most Mentioned Symbols amount', value=10,\n min_value=1, max_value=100)\n", (699, 774), True, 'import streamlit as st\n'), ((775, 801), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""---"""'], {}), "('---')\n", (794, 801), True, 'import streamlit as st\n'), ((532, 558), 'datetime.date', 'datetime.date', (['(2021)', '(1)', '(29)'], {}), '(2021, 1, 29)\n', (545, 558), False, 'import datetime\n'), ((1180, 1194), 'streamlit.container', 'st.container', ([], {}), '()\n', (1192, 1194), True, 'import streamlit as st\n'), ((1845, 1859), 'streamlit.container', 'st.container', ([], {}), '()\n', (1857, 1859), True, 'import streamlit as st\n'), ((2625, 2647), 'pandas.DataFrame', 'pd.DataFrame', (['comments'], {}), '(comments)\n', (2637, 2647), True, 'import pandas as pd\n'), ((2656, 2668), 'streamlit.table', 'st.table', (['df'], {}), '(df)\n', (2664, 2668), True, 'import streamlit as st\n'), ((570, 595), 'datetime.date', 'datetime.date', (['(2021)', '(1)', '(1)'], {}), '(2021, 1, 1)\n', (583, 595), False, 'import datetime\n'), ((637, 664), 'datetime.date', 'datetime.date', (['(2021)', '(11)', '(28)'], {}), '(2021, 11, 28)\n', (650, 664), False, 'import datetime\n'), ((1252, 1265), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (1262, 1265), True, 'import streamlit as st\n'), ((1432, 1458), 'datetime.datetime', 'dt', (['d.year', 'd.month', 'd.day'], {}), '(d.year, d.month, d.day)\n', (1434, 1458), True, 'from datetime import datetime as dt\n'), ((1528, 1577), 'yfinance.download', 'yf.download', (['symbol_list[0]'], {'start': 'start', 'end': 'end'}), '(symbol_list[0], start=start, end=end)\n', (1539, 1577), True, 'import yfinance as yf\n'), ((1911, 1924), 'streamlit.columns', 'st.columns', (['(2)'], {}), '(2)\n', (1921, 1924), True, 'import streamlit as st\n'), ((2793, 2907), 'streamlit.warning', 'st.warning', (['"""That date does not contain any comments after data processing, please select another date."""'], {}), "(\n 'That date does not contain any comments after data processing, please select another date.'\n )\n", (2803, 2907), True, 'import streamlit as st\n'), ((2906, 2927), 'database.connection.rollback', 'connection.rollback', ([], {}), '()\n', (2925, 2927), False, 'from database import connection, cursor\n'), ((1485, 1497), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (1494, 1497), False, 'from datetime import timedelta\n'), ((2173, 2193), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {}), '(fig)\n', (2188, 2193), True, 'import streamlit as st\n')]
|
import sys
import os
import time
import datetime
import shutil
def main(path=sys.path[0], days=2,size=4096):
paths = [path+"\\"+"Archive",path+"\\"+"Small",path+"\\"]
if os.path.isdir(path):
flagA=False
flagS=False
if not os.path.isdir(paths[0]):
flagA=True
if not os.path.isdir(paths[1]):
flagS=True
files = os.listdir(path)
data = datetime.datetime.today()
for i in files:
if os.path.isfile(paths[2]+i) and data-datetime.datetime.fromtimestamp(os.path.getmtime(paths[2]+i)) >= datetime.timedelta(days=days):
if flagA:
os.mkdir(paths[0])
flagA=False
shutil.copy(paths[2]+i,paths[0]+"\\"+i)
if os.path.isfile(paths[2]+i) and os.path.getsize(paths[2]+i) <= size:
if flagS:
os.mkdir(paths[1])
flagS=False
shutil.copy(paths[2]+i,paths[1]+"\\"+i)
else:
print("Директории не существует по пути",path)
if __name__ == "__main__":
if len(sys.argv) == 7 and sys.argv[1] == "--source" and sys.argv[3] == "--days" and sys.argv[5]=="--size":
main(sys.argv[2],int(sys.argv[4]),int(sys.argv[6]))
else:
print("Error,Example\nreorganize --source \"C:\\TestDir\" --days 2 --size 4096")
|
[
"os.mkdir",
"datetime.datetime.today",
"os.path.isdir",
"os.path.getsize",
"os.path.isfile",
"datetime.timedelta",
"os.path.getmtime",
"os.listdir",
"shutil.copy"
] |
[((179, 198), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (192, 198), False, 'import os\n'), ((382, 398), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (392, 398), False, 'import os\n'), ((414, 439), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (437, 439), False, 'import datetime\n'), ((255, 278), 'os.path.isdir', 'os.path.isdir', (['paths[0]'], {}), '(paths[0])\n', (268, 278), False, 'import os\n'), ((318, 341), 'os.path.isdir', 'os.path.isdir', (['paths[1]'], {}), '(paths[1])\n', (331, 341), False, 'import os\n'), ((479, 507), 'os.path.isfile', 'os.path.isfile', (['(paths[2] + i)'], {}), '(paths[2] + i)\n', (493, 507), False, 'import os\n'), ((724, 770), 'shutil.copy', 'shutil.copy', (['(paths[2] + i)', "(paths[0] + '\\\\' + i)"], {}), "(paths[2] + i, paths[0] + '\\\\' + i)\n", (735, 770), False, 'import shutil\n'), ((779, 807), 'os.path.isfile', 'os.path.isfile', (['(paths[2] + i)'], {}), '(paths[2] + i)\n', (793, 807), False, 'import os\n'), ((960, 1006), 'shutil.copy', 'shutil.copy', (['(paths[2] + i)', "(paths[1] + '\\\\' + i)"], {}), "(paths[2] + i, paths[1] + '\\\\' + i)\n", (971, 1006), False, 'import shutil\n'), ((580, 609), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'days'}), '(days=days)\n', (598, 609), False, 'import datetime\n'), ((657, 675), 'os.mkdir', 'os.mkdir', (['paths[0]'], {}), '(paths[0])\n', (665, 675), False, 'import os\n'), ((810, 839), 'os.path.getsize', 'os.path.getsize', (['(paths[2] + i)'], {}), '(paths[2] + i)\n', (825, 839), False, 'import os\n'), ((893, 911), 'os.mkdir', 'os.mkdir', (['paths[1]'], {}), '(paths[1])\n', (901, 911), False, 'import os\n'), ((547, 577), 'os.path.getmtime', 'os.path.getmtime', (['(paths[2] + i)'], {}), '(paths[2] + i)\n', (563, 577), False, 'import os\n')]
|
# Generated by Django 2.0.1 on 2018-06-23 11:39
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('unlabel_backend', '0014_auto_20180623_1137'),
]
operations = [
migrations.RenameModel(
old_name='News',
new_name='Article',
),
migrations.AlterModelOptions(
name='article',
options={'verbose_name': 'Article', 'verbose_name_plural': 'Articles'},
),
]
|
[
"django.db.migrations.AlterModelOptions",
"django.db.migrations.RenameModel"
] |
[((235, 294), 'django.db.migrations.RenameModel', 'migrations.RenameModel', ([], {'old_name': '"""News"""', 'new_name': '"""Article"""'}), "(old_name='News', new_name='Article')\n", (257, 294), False, 'from django.db import migrations\n'), ((339, 459), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""article"""', 'options': "{'verbose_name': 'Article', 'verbose_name_plural': 'Articles'}"}), "(name='article', options={'verbose_name':\n 'Article', 'verbose_name_plural': 'Articles'})\n", (367, 459), False, 'from django.db import migrations\n')]
|
import os
import sys
import csv
def chart():
print("\t Reading CSV File and Generating Graph... \n")
#Create lists
labels=[]
values=[]
#Check csv file
    if not os.path.isfile('WifiTest.csv'):
        print("\n The MCP has derezzed the file!\n")
        return
with open('WifiTest.csv') as csvFile:
reader = csv.reader(csvFile, delimiter=',')
for row in reader:
labels.append(row[3]+"-"+row[6])
values.append(float(row[7]))
print(labels)
chart()
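# Note: despite the banner message, chart() only collects labels/values and
# prints the labels. A minimal plotting step, assuming matplotlib is
# available (hypothetical addition; labels/values are local to chart(), so
# this would have to run inside it, or chart() would need to return them):
#
#     import matplotlib.pyplot as plt
#     plt.bar(range(len(values)), values, tick_label=labels)
#     plt.show()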
|
[
"os.path.isfile",
"csv.reader"
] |
[((184, 214), 'os.path.isfile', 'os.path.isfile', (['"""WifiTest.csv"""'], {}), "('WifiTest.csv')\n", (198, 214), False, 'import os\n'), ((331, 365), 'csv.reader', 'csv.reader', (['csvFile'], {'delimiter': '""","""'}), "(csvFile, delimiter=',')\n", (341, 365), False, 'import csv\n')]
|
import os
from os import path
import numpy as np
import pytest
from astropy import cosmology as cosmo
import autofit as af
import autolens as al
from autolens.fit.fit import InterferometerFit
from test_autolens.mock import mock_pipeline
pytestmark = pytest.mark.filterwarnings(
"ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of "
"`arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result "
"either in an error or a different result."
)
directory = path.dirname(path.realpath(__file__))
@pytest.fixture(scope="session", autouse=True)
def do_something():
af.conf.instance = af.conf.Config(
"{}/../test_files/config/phase_interferometer_7".format(directory)
)
def clean_images():
try:
os.remove("{}/source_lens_phase/source_image_0.fits".format(directory))
os.remove("{}/source_lens_phase/lens_image_0.fits".format(directory))
os.remove("{}/source_lens_phase/model_image_0.fits".format(directory))
except FileNotFoundError:
pass
af.conf.instance.dataset_path = directory
class TestPhase:
def test__make_analysis__masks_visibilities_and_noise_map_correctly(
self, phase_interferometer_7, interferometer_7, visibilities_mask_7x2
):
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert (
analysis.masked_interferometer.visibilities == interferometer_7.visibilities
).all()
assert (
analysis.masked_interferometer.noise_map == interferometer_7.noise_map
).all()
def test__make_analysis__phase_info_is_made(
self, phase_interferometer_7, interferometer_7, visibilities_mask_7x2
):
phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
file_phase_info = "{}/{}".format(
phase_interferometer_7.optimizer.paths.phase_output_path, "phase.info"
)
phase_info = open(file_phase_info, "r")
optimizer = phase_info.readline()
sub_size = phase_info.readline()
primary_beam_shape_2d = phase_info.readline()
positions_threshold = phase_info.readline()
cosmology = phase_info.readline()
phase_info.close()
assert optimizer == "Optimizer = MockNLO \n"
assert sub_size == "Sub-grid size = 2 \n"
assert primary_beam_shape_2d == "Primary Beam shape = None \n"
assert positions_threshold == "Positions Threshold = None \n"
assert (
cosmology
== 'Cosmology = FlatLambdaCDM(name="Planck15", H0=67.7 km / (Mpc s), Om0=0.307, Tcmb0=2.725 K, '
"Neff=3.05, m_nu=[0. 0. 0.06] eV, Ob0=0.0486) \n"
)
def test__fit_using_interferometer(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
phase_interferometer_7 = al.PhaseInterferometer(
optimizer_class=mock_pipeline.MockNLO,
galaxies=dict(
lens=al.GalaxyModel(redshift=0.5, light=al.lp.EllipticalSersic),
source=al.GalaxyModel(redshift=1.0, light=al.lp.EllipticalSersic),
),
real_space_mask=mask_7x7,
phase_name="test_phase_test_fit",
)
result = phase_interferometer_7.run(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
assert isinstance(result.instance.galaxies[0], al.Galaxy)
def test_modify_visibilities(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
class MyPhase(al.PhaseInterferometer):
def modify_visibilities(self, visibilities, results):
assert interferometer_7.visibilities.shape_1d == visibilities.shape_1d
visibilities = al.visibilities.full(fill_value=20.0, shape_1d=(7,))
return visibilities
phase_interferometer_7 = MyPhase(
phase_name="phase_interferometer_7", real_space_mask=mask_7x7
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
assert (
analysis.masked_dataset.visibilities == 20.0 * np.ones(shape=(7, 2))
).all()
def test__phase_can_receive_hyper_image_and_noise_maps(self, mask_7x7):
phase_interferometer_7 = al.PhaseInterferometer(
galaxies=dict(
lens=al.GalaxyModel(redshift=al.Redshift),
lens1=al.GalaxyModel(redshift=al.Redshift),
),
real_space_mask=mask_7x7,
hyper_background_noise=al.hyper_data.HyperBackgroundNoise,
optimizer_class=af.MultiNest,
phase_name="test_phase",
)
instance = phase_interferometer_7.model.instance_from_physical_vector(
[0.1, 0.2, 0.3]
)
assert instance.galaxies[0].redshift == 0.1
assert instance.galaxies[1].redshift == 0.2
assert instance.hyper_background_noise.noise_scale == 0.3
def test__extended_with_hyper_and_pixelizations(self, phase_interferometer_7):
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=False, inversion=False
)
assert phase_extended == phase_interferometer_7
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.InversionPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=True, inversion=False
)
assert type(phase_extended.hyper_phases[0]) == al.HyperGalaxyPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=False, inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.InversionPhase
phase_extended = phase_interferometer_7.extend_with_multiple_hyper_phases(
hyper_galaxy=True, inversion=True
)
assert type(phase_extended.hyper_phases[0]) == al.HyperGalaxyPhase
assert type(phase_extended.hyper_phases[1]) == al.InversionPhase
def test__fit_figure_of_merit__matches_correct_fit_given_galaxy_profiles(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_interferometer_7 = al.PhaseInterferometer(
real_space_mask=mask_7x7,
galaxies=[lens_galaxy],
cosmology=cosmo.FLRW,
sub_size=2,
phase_name="test_phase",
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
instance = phase_interferometer_7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.fit(instance=instance)
real_space_mask = phase_interferometer_7.meta_interferometer_fit.mask_with_phase_sub_size_from_mask(
mask=mask_7x7
)
masked_interferometer = al.masked.interferometer(
interferometer=interferometer_7,
visibilities_mask=visibilities_mask_7x2,
real_space_mask=real_space_mask,
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = al.fit(masked_dataset=masked_interferometer, tracer=tracer)
assert fit.likelihood == fit_figure_of_merit
def test__fit_figure_of_merit__includes_hyper_image_and_noise__matches_fit(
self, interferometer_7, mask_7x7, visibilities_mask_7x2
):
hyper_background_noise = al.hyper_data.HyperBackgroundNoise(noise_scale=1.0)
lens_galaxy = al.Galaxy(
redshift=0.5, light=al.lp.EllipticalSersic(intensity=0.1)
)
phase_interferometer_7 = al.PhaseInterferometer(
real_space_mask=mask_7x7,
galaxies=[lens_galaxy],
hyper_background_noise=hyper_background_noise,
cosmology=cosmo.FLRW,
sub_size=4,
phase_name="test_phase",
)
analysis = phase_interferometer_7.make_analysis(
dataset=interferometer_7, mask=visibilities_mask_7x2
)
instance = phase_interferometer_7.model.instance_from_unit_vector([])
fit_figure_of_merit = analysis.fit(instance=instance)
real_space_mask = phase_interferometer_7.meta_interferometer_fit.mask_with_phase_sub_size_from_mask(
mask=mask_7x7
)
assert real_space_mask.sub_size == 4
masked_interferometer = al.masked.interferometer(
interferometer=interferometer_7,
visibilities_mask=visibilities_mask_7x2,
real_space_mask=real_space_mask,
)
tracer = analysis.tracer_for_instance(instance=instance)
fit = InterferometerFit(
masked_interferometer=masked_interferometer,
tracer=tracer,
hyper_background_noise=hyper_background_noise,
)
assert fit.likelihood == fit_figure_of_merit
|
[
"autolens.masked.interferometer",
"autolens.PhaseInterferometer",
"os.path.realpath",
"pytest.fixture",
"numpy.ones",
"autolens.visibilities.full",
"autolens.GalaxyModel",
"autolens.fit",
"autolens.hyper_data.HyperBackgroundNoise",
"autolens.fit.fit.InterferometerFit",
"pytest.mark.filterwarnings",
"autolens.lp.EllipticalSersic"
] |
[((253, 558), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result."""'], {}), "(\n    'ignore:Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an arrays index, `arr[np.arrays(seq)]`, which will result either in an error or a different result.'\n    )\n", (279, 558), False, 'import pytest\n'), ((623, 668), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (637, 668), False, 'import pytest\n'), ((595, 618), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (608, 618), False, 'from os import path\n'), ((6730, 6865), 'autolens.PhaseInterferometer', 'al.PhaseInterferometer', ([], {'real_space_mask': 'mask_7x7', 'galaxies': '[lens_galaxy]', 'cosmology': 'cosmo.FLRW', 'sub_size': '(2)', 'phase_name': '"""test_phase"""'}), "(real_space_mask=mask_7x7, galaxies=[lens_galaxy],\n    cosmology=cosmo.FLRW, sub_size=2, phase_name='test_phase')\n", (6752, 6865), True, 'import autolens as al\n'), ((7384, 7520), 'autolens.masked.interferometer', 'al.masked.interferometer', ([], {'interferometer': 'interferometer_7', 'visibilities_mask': 'visibilities_mask_7x2', 'real_space_mask': 'real_space_mask'}), '(interferometer=interferometer_7, visibilities_mask\n    =visibilities_mask_7x2, real_space_mask=real_space_mask)\n', (7408, 7520), True, 'import autolens as al\n'), ((7643, 7702), 'autolens.fit', 'al.fit', ([], {'masked_dataset': 'masked_interferometer', 'tracer': 'tracer'}), '(masked_dataset=masked_interferometer, tracer=tracer)\n', (7649, 7702), True, 'import autolens as al\n'), ((7942, 7993), 'autolens.hyper_data.HyperBackgroundNoise', 'al.hyper_data.HyperBackgroundNoise', ([], {'noise_scale': '(1.0)'}), '(noise_scale=1.0)\n', (7976, 7993), True, 'import autolens as al\n'), ((8142, 8328), 'autolens.PhaseInterferometer', 'al.PhaseInterferometer', ([], {'real_space_mask': 'mask_7x7', 'galaxies': '[lens_galaxy]', 'hyper_background_noise': 'hyper_background_noise', 'cosmology': 'cosmo.FLRW', 'sub_size': '(4)', 'phase_name': '"""test_phase"""'}), "(real_space_mask=mask_7x7, galaxies=[lens_galaxy],\n    hyper_background_noise=hyper_background_noise, cosmology=cosmo.FLRW,\n    sub_size=4, phase_name='test_phase')\n", (8164, 8328), True, 'import autolens as al\n'), ((8901, 9037), 'autolens.masked.interferometer', 'al.masked.interferometer', ([], {'interferometer': 'interferometer_7', 'visibilities_mask': 'visibilities_mask_7x2', 'real_space_mask': 'real_space_mask'}), '(interferometer=interferometer_7, visibilities_mask\n    =visibilities_mask_7x2, real_space_mask=real_space_mask)\n', (8925, 9037), True, 'import autolens as al\n'), ((9159, 9288), 'autolens.fit.fit.InterferometerFit', 'InterferometerFit', ([], {'masked_interferometer': 'masked_interferometer', 'tracer': 'tracer', 'hyper_background_noise': 'hyper_background_noise'}), '(masked_interferometer=masked_interferometer, tracer=\n    tracer, hyper_background_noise=hyper_background_noise)\n', (9176, 9288), False, 'from autolens.fit.fit import InterferometerFit\n'), ((3995, 4047), 'autolens.visibilities.full', 'al.visibilities.full', ([], {'fill_value': '(20.0)', 'shape_1d': '(7,)'}), '(fill_value=20.0, shape_1d=(7,))\n', (4015, 4047), True, 'import autolens as al\n'), ((6648, 6685), 'autolens.lp.EllipticalSersic', 'al.lp.EllipticalSersic', ([], {'intensity': '(0.1)'}), '(intensity=0.1)\n', (6670, 6685), True, 'import autolens as al\n'), ((8060, 8097), 'autolens.lp.EllipticalSersic', 'al.lp.EllipticalSersic', ([], {'intensity': '(0.1)'}), '(intensity=0.1)\n', (8082, 8097), True, 'import autolens as al\n'), ((3153, 3211), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': '(0.5)', 'light': 'al.lp.EllipticalSersic'}), '(redshift=0.5, light=al.lp.EllipticalSersic)\n', (3167, 3211), True, 'import autolens as al\n'), ((3236, 3294), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': '(1.0)', 'light': 'al.lp.EllipticalSersic'}), '(redshift=1.0, light=al.lp.EllipticalSersic)\n', (3250, 3294), True, 'import autolens as al\n'), ((4420, 4441), 'numpy.ones', 'np.ones', ([], {'shape': '(7, 2)'}), '(shape=(7, 2))\n', (4427, 4441), True, 'import numpy as np\n'), ((4640, 4676), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': 'al.Redshift'}), '(redshift=al.Redshift)\n', (4654, 4676), True, 'import autolens as al\n'), ((4700, 4736), 'autolens.GalaxyModel', 'al.GalaxyModel', ([], {'redshift': 'al.Redshift'}), '(redshift=al.Redshift)\n', (4714, 4736), True, 'import autolens as al\n')]
|
"""
Customize the behavior of a fixture by allowing special code to be
executed before or after each test, and before or after each suite.
"""
from __future__ import absolute_import
import os
import sys
import bson
import pymongo
from . import fixtures
from . import testcases
from .. import errors
from .. import logging
from .. import utils
def make_custom_behavior(class_name, *args, **kwargs):
"""
Factory function for creating CustomBehavior instances.
"""
if class_name not in _CUSTOM_BEHAVIORS:
raise ValueError("Unknown custom behavior class '%s'" % (class_name))
return _CUSTOM_BEHAVIORS[class_name](*args, **kwargs)
class CustomBehavior(object):
"""
The common interface all CustomBehaviors will inherit from.
"""
@staticmethod
def start_dynamic_test(test_case, test_report):
"""
If a CustomBehavior wants to add a test case that will show up
in the test report, it should use this method to add it to the
report, since we will need to count it as a dynamic test to get
the stats in the summary information right.
"""
test_report.startTest(test_case, dynamic=True)
def __init__(self, logger, fixture):
"""
Initializes the CustomBehavior with the specified fixture.
"""
if not isinstance(logger, logging.Logger):
raise TypeError("logger must be a Logger instance")
self.logger = logger
self.fixture = fixture
def before_suite(self, test_report):
"""
The test runner calls this exactly once before they start
running the suite.
"""
pass
def after_suite(self, test_report):
"""
The test runner calls this exactly once after all tests have
finished executing. Be sure to reset the behavior back to its
original state so that it can be run again.
"""
pass
def before_test(self, test_report):
"""
Each test will call this before it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""
pass
def after_test(self, test_report):
"""
Each test will call this after it executes.
Raises a TestFailure if the test should be marked as a failure,
or a ServerFailure if the fixture exits uncleanly or
unexpectedly.
"""
pass
class CleanEveryN(CustomBehavior):
"""
    Restarts the fixture after it has run 'n' tests.
On mongod-related fixtures, this will clear the dbpath.
"""
DEFAULT_N = 20
def __init__(self, logger, fixture, n=DEFAULT_N):
CustomBehavior.__init__(self, logger, fixture)
# Try to isolate what test triggers the leak by restarting the fixture each time.
if "detect_leaks=1" in os.getenv("ASAN_OPTIONS", ""):
self.logger.info("ASAN_OPTIONS environment variable set to detect leaks, so restarting"
" the fixture after each test instead of after every %d.", n)
n = 1
self.n = n
self.tests_run = 0
def after_test(self, test_report):
self.tests_run += 1
if self.tests_run >= self.n:
self.logger.info("%d tests have been run against the fixture, stopping it...",
self.tests_run)
self.tests_run = 0
teardown_success = self.fixture.teardown()
self.logger.info("Starting the fixture back up again...")
self.fixture.setup()
self.fixture.await_ready()
# Raise this after calling setup in case --continueOnFailure was specified.
if not teardown_success:
raise errors.TestFailure("%s did not exit cleanly" % (self.fixture))
class CheckReplDBHash(CustomBehavior):
"""
    Waits for replication after each test, then checks that the dbhashes
of all databases other than "local" match on the primary and all of
the secondaries. If any dbhashes do not match, logs information
about what was different (e.g. Different numbers of collections,
missing documents in a collection, mismatching documents, etc).
Compatible only with ReplFixture subclasses.
"""
def __init__(self, logger, fixture):
if not isinstance(fixture, fixtures.ReplFixture):
raise TypeError("%s does not support replication" % (fixture.__class__.__name__))
CustomBehavior.__init__(self, logger, fixture)
self.test_case = testcases.TestCase(self.logger, "Hook", "#dbhash#")
self.started = False
def after_test(self, test_report):
"""
After each test, check that the dbhash of the test database is
the same on all nodes in the replica set or master/slave
fixture.
"""
try:
if not self.started:
CustomBehavior.start_dynamic_test(self.test_case, test_report)
self.started = True
# Wait until all operations have replicated.
self.fixture.await_repl()
success = True
sb = [] # String builder.
primary = self.fixture.get_primary()
primary_conn = utils.new_mongo_client(port=primary.port)
for secondary in self.fixture.get_secondaries():
read_preference = pymongo.ReadPreference.SECONDARY
secondary_conn = utils.new_mongo_client(port=secondary.port,
read_preference=read_preference)
# Skip arbiters.
if secondary_conn.admin.command("isMaster").get("arbiterOnly", False):
continue
all_matched = CheckReplDBHash._check_all_db_hashes(primary_conn,
secondary_conn,
sb)
if not all_matched:
sb.insert(0,
"One or more databases were different between the primary on port %d"
" and the secondary on port %d:"
% (primary.port, secondary.port))
success = all_matched and success
if not success:
# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
self.test_case.logger.info("\n ".join(sb))
raise self.test_case.failureException("The dbhashes did not match")
except self.test_case.failureException as err:
self.test_case.logger.exception("The dbhashes did not match.")
self.test_case.return_code = 1
test_report.addFailure(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.ServerFailure(err.args[0])
except pymongo.errors.WTimeoutError:
self.test_case.logger.exception("Awaiting replication timed out.")
self.test_case.return_code = 2
test_report.addError(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.StopExecution("Awaiting replication timed out")
def after_suite(self, test_report):
"""
If we get to this point, the #dbhash# test must have been
successful, so add it to the test report.
"""
if self.started:
self.test_case.logger.info("The dbhashes matched for all tests.")
self.test_case.return_code = 0
test_report.addSuccess(self.test_case)
# TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)
self.started = False
@staticmethod
def _check_all_db_hashes(primary_conn, secondary_conn, sb):
"""
Returns true if for each non-local database, the dbhash command
returns the same MD5 hash on the primary as it does on the
secondary. Returns false otherwise.
Logs a message describing the differences if any database's
dbhash did not match.
"""
# Overview of how we'll check that everything replicated correctly between these two nodes:
#
# - Check whether they have the same databases.
# - If not, log which databases are missing where, and dump the contents of any that are
# missing.
#
# - Check whether each database besides "local" gives the same md5 field as the result of
# running the dbhash command.
# - If not, check whether they have the same collections.
# - If not, log which collections are missing where, and dump the contents of any
# that are missing.
# - If so, check that the hash of each non-capped collection matches.
# - If any do not match, log the diff of the collection between the two nodes.
success = True
if not CheckReplDBHash._check_dbs_present(primary_conn, secondary_conn, sb):
return False
for db_name in primary_conn.database_names():
if db_name == "local":
continue # We don't expect this to match across different nodes.
matched = CheckReplDBHash._check_db_hash(primary_conn, secondary_conn, db_name, sb)
success = matched and success
return success
@staticmethod
def _check_dbs_present(primary_conn, secondary_conn, sb):
"""
Returns true if the list of databases on the primary is
identical to the list of databases on the secondary, and false
otherwise.
"""
success = True
primary_dbs = primary_conn.database_names()
# Can't run database_names() on secondary, so instead use the listDatabases command.
# TODO: Use database_names() once PYTHON-921 is resolved.
list_db_output = secondary_conn.admin.command("listDatabases")
secondary_dbs = [db["name"] for db in list_db_output["databases"]]
# There may be a difference in databases which is not considered an error, when
# the database only contains system collections. This difference is only logged
# when others are encountered, i.e., success = False.
missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
set(primary_dbs), set(secondary_dbs), "database")
for missing_db in missing_on_secondary:
db = primary_conn[missing_db]
coll_names = db.collection_names()
non_system_colls = [name for name in coll_names if not name.startswith("system.")]
# It is only an error if there are any non-system collections in the database,
# otherwise it's not well defined whether they should exist or not.
if non_system_colls:
sb.append("Database %s present on primary but not on secondary." % (missing_db))
CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
success = False
for missing_db in missing_on_primary:
db = secondary_conn[missing_db]
# Can't run collection_names() on secondary, so instead use the listCollections command.
# TODO: Always use collection_names() once PYTHON-921 is resolved. Then much of the
# logic that is duplicated here can be consolidated.
list_coll_output = db.command("listCollections")["cursor"]["firstBatch"]
coll_names = [coll["name"] for coll in list_coll_output]
non_system_colls = [name for name in coll_names if not name.startswith("system.")]
# It is only an error if there are any non-system collections in the database,
            # otherwise it's not well defined whether it should exist or not.
if non_system_colls:
sb.append("Database %s present on secondary but not on primary." % (missing_db))
CheckReplDBHash._dump_all_collections(db, non_system_colls, sb)
success = False
return success
@staticmethod
def _check_db_hash(primary_conn, secondary_conn, db_name, sb):
"""
Returns true if the dbhash for 'db_name' matches on the primary
and the secondary, and false otherwise.
Appends a message to 'sb' describing the differences if the
dbhashes do not match.
"""
primary_hash = primary_conn[db_name].command("dbhash")
secondary_hash = secondary_conn[db_name].command("dbhash")
if primary_hash["md5"] == secondary_hash["md5"]:
return True
success = CheckReplDBHash._check_dbs_eq(
primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb)
if not success:
sb.append("Database %s has a different hash on the primary and the secondary"
" ([ %s ] != [ %s ]):"
% (db_name, primary_hash["md5"], secondary_hash["md5"]))
return success
@staticmethod
def _check_dbs_eq(primary_conn, secondary_conn, primary_hash, secondary_hash, db_name, sb):
"""
Returns true if all non-capped collections had the same hash in
the dbhash response, and false otherwise.
Appends information to 'sb' about the differences between the
'db_name' database on the primary and the 'db_name' database on
the secondary, if any.
"""
success = True
primary_db = primary_conn[db_name]
secondary_db = secondary_conn[db_name]
primary_coll_hashes = primary_hash["collections"]
secondary_coll_hashes = secondary_hash["collections"]
primary_coll_names = set(primary_coll_hashes.keys())
secondary_coll_names = set(secondary_coll_hashes.keys())
missing_on_primary, missing_on_secondary = CheckReplDBHash._check_difference(
primary_coll_names, secondary_coll_names, "collection", sb=sb)
if missing_on_primary or missing_on_secondary:
# 'sb' already describes which collections are missing where.
for coll_name in missing_on_primary:
CheckReplDBHash._dump_all_documents(secondary_db, coll_name, sb)
for coll_name in missing_on_secondary:
CheckReplDBHash._dump_all_documents(primary_db, coll_name, sb)
return
for coll_name in primary_coll_names & secondary_coll_names:
primary_coll_hash = primary_coll_hashes[coll_name]
secondary_coll_hash = secondary_coll_hashes[coll_name]
if primary_coll_hash == secondary_coll_hash:
continue
# Ignore capped collections because they are not expected to match on all nodes.
if primary_db.command({"collStats": coll_name})["capped"]:
# Still fail if the collection is not capped on the secondary.
if not secondary_db.command({"collStats": coll_name})["capped"]:
success = False
sb.append("%s.%s collection is capped on primary but not on secondary."
% (primary_db.name, coll_name))
sb.append("%s.%s collection is capped, ignoring." % (primary_db.name, coll_name))
continue
# Still fail if the collection is capped on the secondary, but not on the primary.
elif secondary_db.command({"collStats": coll_name})["capped"]:
success = False
sb.append("%s.%s collection is capped on secondary but not on primary."
% (primary_db.name, coll_name))
continue
success = False
sb.append("Collection %s.%s has a different hash on the primary and the secondary"
" ([ %s ] != [ %s ]):"
% (db_name, coll_name, primary_coll_hash, secondary_coll_hash))
CheckReplDBHash._check_colls_eq(primary_db, secondary_db, coll_name, sb)
if success:
sb.append("All collections that were expected to match did.")
return success
@staticmethod
def _check_colls_eq(primary_db, secondary_db, coll_name, sb):
"""
        Appends information to 'sb' about the differences, if any, between
        the 'coll_name' collection on the primary and the 'coll_name'
        collection on the secondary.
"""
codec_options = bson.CodecOptions(document_class=TypeSensitiveSON)
primary_coll = primary_db.get_collection(coll_name, codec_options=codec_options)
secondary_coll = secondary_db.get_collection(coll_name, codec_options=codec_options)
primary_docs = CheckReplDBHash._extract_documents(primary_coll)
secondary_docs = CheckReplDBHash._extract_documents(secondary_coll)
CheckReplDBHash._get_collection_diff(primary_docs, secondary_docs, sb)
@staticmethod
def _extract_documents(collection):
"""
Returns a list of all documents in the collection, sorted by
their _id.
"""
return [doc for doc in collection.find().sort("_id", pymongo.ASCENDING)]
@staticmethod
def _get_collection_diff(primary_docs, secondary_docs, sb):
"""
Returns true if the documents in 'primary_docs' exactly match
the documents in 'secondary_docs', and false otherwise.
Appends information to 'sb' about what matched or did not match.
"""
matched = True
# These need to be lists instead of sets because documents aren't hashable.
missing_on_primary = []
missing_on_secondary = []
p_idx = 0 # Keep track of our position in 'primary_docs'.
s_idx = 0 # Keep track of our position in 'secondary_docs'.
while p_idx < len(primary_docs) and s_idx < len(secondary_docs):
primary_doc = primary_docs[p_idx]
secondary_doc = secondary_docs[s_idx]
if primary_doc == secondary_doc:
p_idx += 1
s_idx += 1
continue
# We have mismatching documents.
matched = False
if primary_doc["_id"] == secondary_doc["_id"]:
sb.append("Mismatching document:")
sb.append(" primary: %s" % (primary_doc))
sb.append(" secondary: %s" % (secondary_doc))
p_idx += 1
s_idx += 1
# One node was missing a document. Since the documents are sorted by _id, the doc with
# the smaller _id was the one that was skipped.
elif primary_doc["_id"] < secondary_doc["_id"]:
missing_on_secondary.append(primary_doc)
# Only move past the doc that we know was skipped.
p_idx += 1
else: # primary_doc["_id"] > secondary_doc["_id"]
missing_on_primary.append(secondary_doc)
# Only move past the doc that we know was skipped.
s_idx += 1
# Check if there are any unmatched documents left.
while p_idx < len(primary_docs):
matched = False
missing_on_secondary.append(primary_docs[p_idx])
p_idx += 1
while s_idx < len(secondary_docs):
matched = False
missing_on_primary.append(secondary_docs[s_idx])
s_idx += 1
if not matched:
CheckReplDBHash._append_differences(
missing_on_primary, missing_on_secondary, "document", sb)
else:
sb.append("All documents matched.")
@staticmethod
def _check_difference(primary_set, secondary_set, item_type_name, sb=None):
"""
Returns true if the contents of 'primary_set' and
'secondary_set' are identical, and false otherwise. The sets
contain information about the primary and secondary,
respectively, e.g. the database names that exist on each node.
Appends information about anything that differed to 'sb'.
"""
missing_on_primary = set()
missing_on_secondary = set()
for item in primary_set - secondary_set:
missing_on_secondary.add(item)
for item in secondary_set - primary_set:
missing_on_primary.add(item)
if sb is not None:
CheckReplDBHash._append_differences(
missing_on_primary, missing_on_secondary, item_type_name, sb)
return (missing_on_primary, missing_on_secondary)
@staticmethod
def _append_differences(missing_on_primary, missing_on_secondary, item_type_name, sb):
"""
Given two iterables representing items that were missing on the
primary or the secondary respectively, append the information
about which items were missing to 'sb', if any.
"""
if missing_on_primary:
sb.append("The following %ss were present on the secondary, but not on the"
" primary:" % (item_type_name))
for item in missing_on_primary:
sb.append(str(item))
if missing_on_secondary:
sb.append("The following %ss were present on the primary, but not on the"
" secondary:" % (item_type_name))
for item in missing_on_secondary:
sb.append(str(item))
@staticmethod
def _dump_all_collections(database, coll_names, sb):
"""
Appends the contents of each of the collections in 'coll_names'
to 'sb'.
"""
if coll_names:
sb.append("Database %s contains the following collections: %s"
% (database.name, coll_names))
for coll_name in coll_names:
CheckReplDBHash._dump_all_documents(database, coll_name, sb)
else:
sb.append("No collections in database %s." % (database.name))
@staticmethod
def _dump_all_documents(database, coll_name, sb):
"""
Appends the contents of 'coll_name' to 'sb'.
"""
docs = CheckReplDBHash._extract_documents(database[coll_name])
if docs:
sb.append("Documents in %s.%s:" % (database.name, coll_name))
for doc in docs:
sb.append(" %s" % (doc))
else:
sb.append("No documents in %s.%s." % (database.name, coll_name))
class TypeSensitiveSON(bson.SON):
"""
Extends bson.SON to perform additional type-checking of document values
to differentiate BSON types.
"""
def items_with_types(self):
"""
Returns a list of triples. Each triple consists of a field name, a
field value, and a field type for each field in the document.
"""
return [(key, self[key], type(self[key])) for key in self]
def __eq__(self, other):
"""
        Comparison to another TypeSensitiveSON is order-sensitive and
        type-sensitive; comparing against any other type raises a
        TypeError.
"""
if isinstance(other, TypeSensitiveSON):
return (len(self) == len(other) and
self.items_with_types() == other.items_with_types())
raise TypeError("TypeSensitiveSON objects cannot be compared to other types")
class ValidateCollections(CustomBehavior):
"""
Runs full validation (db.collection.validate(true)) on all collections
    in all databases on every standalone or primary mongod. If validation
fails (validate.valid), then the validate return object is logged.
Compatible with all subclasses.
"""
DEFAULT_FULL = True
DEFAULT_SCANDATA = True
def __init__(self, logger, fixture, full=DEFAULT_FULL, scandata=DEFAULT_SCANDATA):
CustomBehavior.__init__(self, logger, fixture)
if not isinstance(full, bool):
raise TypeError("Fixture option full is not specified as type bool")
if not isinstance(scandata, bool):
raise TypeError("Fixture option scandata is not specified as type bool")
self.test_case = testcases.TestCase(self.logger, "Hook", "#validate#")
self.started = False
self.full = full
self.scandata = scandata
def after_test(self, test_report):
"""
After each test, run a full validation on all collections.
"""
try:
if not self.started:
CustomBehavior.start_dynamic_test(self.test_case, test_report)
self.started = True
sb = [] # String builder.
# The self.fixture.port can be used for client connection to a
# standalone mongod, a replica-set primary, or mongos.
# TODO: Run collection validation on all nodes in a replica-set.
port = self.fixture.port
conn = utils.new_mongo_client(port=port)
success = ValidateCollections._check_all_collections(
conn, sb, self.full, self.scandata)
if not success:
# Adding failures to a TestReport requires traceback information, so we raise
# a 'self.test_case.failureException' that we will catch ourselves.
self.test_case.logger.info("\n ".join(sb))
raise self.test_case.failureException("Collection validation failed")
except self.test_case.failureException as err:
self.test_case.logger.exception("Collection validation failed")
self.test_case.return_code = 1
test_report.addFailure(self.test_case, sys.exc_info())
test_report.stopTest(self.test_case)
raise errors.ServerFailure(err.args[0])
def after_suite(self, test_report):
"""
If we get to this point, the #validate# test must have been
successful, so add it to the test report.
"""
if self.started:
self.test_case.logger.info("Collection validation passed for all tests.")
self.test_case.return_code = 0
test_report.addSuccess(self.test_case)
# TestReport.stopTest() has already been called if there was a failure.
test_report.stopTest(self.test_case)
self.started = False
@staticmethod
def _check_all_collections(conn, sb, full, scandata):
"""
Returns true if for all databases and collections validate_collection
succeeds. Returns false otherwise.
Logs a message if any database's collection fails validate_collection.
"""
success = True
for db_name in conn.database_names():
for coll_name in conn[db_name].collection_names():
try:
conn[db_name].validate_collection(coll_name, full=full, scandata=scandata)
except pymongo.errors.CollectionInvalid as err:
sb.append("Database %s, collection %s failed to validate:\n%s"
% (db_name, coll_name, err.args[0]))
success = False
return success
_CUSTOM_BEHAVIORS = {
"CleanEveryN": CleanEveryN,
"CheckReplDBHash": CheckReplDBHash,
"ValidateCollections": ValidateCollections,
}
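# Illustrative lookup through the registry above (the logger/fixture names
# and the n value are examples, not taken from a real suite configuration):
#
#     behavior = make_custom_behavior("CleanEveryN", logger, fixture, n=10)
#     behavior.after_test(test_report)  # restarts the fixture every 10th test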
|
[
"bson.CodecOptions",
"os.getenv",
"sys.exc_info"
] |
[((16763, 16813), 'bson.CodecOptions', 'bson.CodecOptions', ([], {'document_class': 'TypeSensitiveSON'}), '(document_class=TypeSensitiveSON)\n', (16780, 16813), False, 'import bson\n'), ((2930, 2959), 'os.getenv', 'os.getenv', (['"""ASAN_OPTIONS"""', '""""""'], {}), "('ASAN_OPTIONS', '')\n", (2939, 2959), False, 'import os\n'), ((6978, 6992), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6990, 6992), False, 'import sys\n'), ((7311, 7325), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7323, 7325), False, 'import sys\n'), ((25914, 25928), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (25926, 25928), False, 'import sys\n')]
|
import logging
from utils import find_resource
from diana.apis import Orthanc, DcmDir
from diana.dixel import DixelView, ShamDixel
from diana.utils.dicom import DicomLevel
def test_orthanc_ep(setup_orthanc0):
logging.debug("Test Orthanc EP")
O = Orthanc()
print(O)
O.check()
def test_orthanc_upload(setup_orthanc0):
logging.debug("Test Orthanc Upload")
O = Orthanc()
dicom_dir = find_resource("resources/dcm")
D = DcmDir(path=dicom_dir)
d = D.get("IM2263", view=DixelView.TAGS_FILE)
O.put(d)
q = {"PatientID": "AW15119516.678.1392297407"}
result = O.find(q)
if result:
id = result[0]
logging.debug( id )
result = O.exists(id)
logging.debug(result)
assert( result )
O.delete(d)
result = O.exists(id)
assert( not result )
def test_anon(setup_orthanc0):
O = Orthanc()
dicom_dir = find_resource("resources/dcm")
D = DcmDir(path=dicom_dir)
d = D.get("IM2263", view=DixelView.TAGS_FILE)
O.put(d)
d.tags["AccessionNumber"] = "123456"
d.tags["PatientBirthDate"] = "20000101"
d.tags["PatientID"] = "ABC"
d.tags["PatientName"] ="XYZ"
d.level=DicomLevel.STUDIES
e = ShamDixel.from_dixel(d)
rep = e.orthanc_sham_map()
O.anonymize("959e4e9f-e954be4e-11917c87-09d0f98f-7cc39128",
level=DicomLevel.STUDIES,
replacement_map=rep)
def test_psend(setup_orthanc0, setup_orthanc1):
O = Orthanc(peername="peer0")
print(O)
O.check()
O2 = Orthanc(port=8043, peername="peer0")
print(O2)
O2.check()
dicom_dir = find_resource("resources/dcm")
D = DcmDir(path=dicom_dir)
d = D.get("IM2263", view=DixelView.TAGS_FILE)
O2.put(d)
logging.debug( O2.gateway._get("peers") )
O2.psend(d.oid(), O)
e = O.get(d.oid(), level=DicomLevel.INSTANCES)
logging.debug(e)
assert d.oid() == e.oid()
if __name__=="__main__":
logging.basicConfig(level=logging.DEBUG)
from conftest import mk_orthanc
S0 = mk_orthanc()
S1 = mk_orthanc(8043, 4243, 8042, 4242)
test_orthanc_ep(None)
test_orthanc_upload(None)
test_anon(None)
test_psend(None, None)
S0.stop_container()
S1.stop_container()
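    # Note (assumption about conftest): mk_orthanc appears to launch throwaway
    # Orthanc containers -- S0 on the default ports and S1 on 8043/4243 peered
    # back to 8042/4242 -- so executing this module directly exercises upload,
    # anonymization and peer-send end to end without the pytest fixtures.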
|
[
"logging.debug",
"logging.basicConfig",
"conftest.mk_orthanc",
"diana.apis.Orthanc",
"diana.dixel.ShamDixel.from_dixel",
"utils.find_resource",
"diana.apis.DcmDir"
] |
[((216, 248), 'logging.debug', 'logging.debug', (['"""Test Orthanc EP"""'], {}), "('Test Orthanc EP')\n", (229, 248), False, 'import logging\n'), ((258, 267), 'diana.apis.Orthanc', 'Orthanc', ([], {}), '()\n', (265, 267), False, 'from diana.apis import Orthanc, DcmDir\n'), ((343, 379), 'logging.debug', 'logging.debug', (['"""Test Orthanc Upload"""'], {}), "('Test Orthanc Upload')\n", (356, 379), False, 'import logging\n'), ((389, 398), 'diana.apis.Orthanc', 'Orthanc', ([], {}), '()\n', (396, 398), False, 'from diana.apis import Orthanc, DcmDir\n'), ((416, 446), 'utils.find_resource', 'find_resource', (['"""resources/dcm"""'], {}), "('resources/dcm')\n", (429, 446), False, 'from utils import find_resource\n'), ((455, 477), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'dicom_dir'}), '(path=dicom_dir)\n', (461, 477), False, 'from diana.apis import Orthanc, DcmDir\n'), ((661, 678), 'logging.debug', 'logging.debug', (['id'], {}), '(id)\n', (674, 678), False, 'import logging\n'), ((713, 734), 'logging.debug', 'logging.debug', (['result'], {}), '(result)\n', (726, 734), False, 'import logging\n'), ((867, 876), 'diana.apis.Orthanc', 'Orthanc', ([], {}), '()\n', (874, 876), False, 'from diana.apis import Orthanc, DcmDir\n'), ((893, 923), 'utils.find_resource', 'find_resource', (['"""resources/dcm"""'], {}), "('resources/dcm')\n", (906, 923), False, 'from utils import find_resource\n'), ((932, 954), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'dicom_dir'}), '(path=dicom_dir)\n', (938, 954), False, 'from diana.apis import Orthanc, DcmDir\n'), ((1208, 1231), 'diana.dixel.ShamDixel.from_dixel', 'ShamDixel.from_dixel', (['d'], {}), '(d)\n', (1228, 1231), False, 'from diana.dixel import DixelView, ShamDixel\n'), ((1466, 1491), 'diana.apis.Orthanc', 'Orthanc', ([], {'peername': '"""peer0"""'}), "(peername='peer0')\n", (1473, 1491), False, 'from diana.apis import Orthanc, DcmDir\n'), ((1529, 1565), 'diana.apis.Orthanc', 'Orthanc', ([], {'port': '(8043)', 'peername': '"""peer0"""'}), "(port=8043, peername='peer0')\n", (1536, 1565), False, 'from diana.apis import Orthanc, DcmDir\n'), ((1612, 1642), 'utils.find_resource', 'find_resource', (['"""resources/dcm"""'], {}), "('resources/dcm')\n", (1625, 1642), False, 'from utils import find_resource\n'), ((1651, 1673), 'diana.apis.DcmDir', 'DcmDir', ([], {'path': 'dicom_dir'}), '(path=dicom_dir)\n', (1657, 1673), False, 'from diana.apis import Orthanc, DcmDir\n'), ((1869, 1885), 'logging.debug', 'logging.debug', (['e'], {}), '(e)\n', (1882, 1885), False, 'import logging\n'), ((1949, 1989), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1968, 1989), False, 'import logging\n'), ((2036, 2048), 'conftest.mk_orthanc', 'mk_orthanc', ([], {}), '()\n', (2046, 2048), False, 'from conftest import mk_orthanc\n'), ((2058, 2092), 'conftest.mk_orthanc', 'mk_orthanc', (['(8043)', '(4243)', '(8042)', '(4242)'], {}), '(8043, 4243, 8042, 4242)\n', (2068, 2092), False, 'from conftest import mk_orthanc\n')]
|
from copy import copy
import image_loader
import torch
import os
import binary_classifier.model
import binary_classifier.binary_network_pytorch
import transfer_classifier.model
import transfer_classifier.transfer_network_pytorch
from utils import fill_labels
EPOCHS = 50
LEARNING_RATE = 0.001
BATCH_SIZE = 128
BINARY_CLASS_MODEL_PATH = './models/model_BinCNN_bin_covid.ckpt'
MULTI_CLASS_MODEL_PATH = './models/model_MultiCNN_bin_covid.ckpt'
MULTI_CLASS_MODEL_PATH_NIH = './models/model_MultiCNN_bin_nih.ckpt'
TRANSFER_MULTI_CLASS_MODEL_PATH = './models/model_transferMultiCNN_bin_covid.ckpt'
TRANSFER_BINARY_CLASS_MODEL_PATH = './models/model_transferBinCNN_bin_covid.ckpt'
if __name__ == "__main__":
# Creating models
binary_model = binary_classifier.model.Net()
transfer_model = transfer_classifier.model.Net()
# Loading datasets
images_nih, labels_nih = image_loader.get_nih_dataset()
images_covid, labels_covid = image_loader.get_covid_dataset()
_, labels_covid_bin = image_loader.get_covid_dataset(binary=True)
# Binary learning on COVID-19
if not os.path.exists(BINARY_CLASS_MODEL_PATH):
binary_classifier.binary_network_pytorch.train_model(images=images_covid, #
labels=labels_covid_bin, #
epochs=EPOCHS, #
learning_rate=LEARNING_RATE, #
batch_size=BATCH_SIZE, #
path=BINARY_CLASS_MODEL_PATH)
else:
binary_model.load_state_dict(torch.load(BINARY_CLASS_MODEL_PATH))
# Multi-label learning on COVID-19
if not os.path.exists(MULTI_CLASS_MODEL_PATH):
transfer_model = transfer_classifier.transfer_network_pytorch.train_model(images=images_covid, #
labels=labels_covid, #
epochs=EPOCHS, #
learning_rate=LEARNING_RATE, #
batch_size=BATCH_SIZE, #
path=MULTI_CLASS_MODEL_PATH,
stand_alone=True)
else:
transfer_model.load_state_dict(torch.load(MULTI_CLASS_MODEL_PATH))
# Multi-label Learning on NIH
if not os.path.exists(MULTI_CLASS_MODEL_PATH_NIH):
transfer_model = transfer_classifier.transfer_network_pytorch.train_model(images=images_nih, #
labels=labels_nih, #
epochs=EPOCHS, #
learning_rate=LEARNING_RATE, #
batch_size=BATCH_SIZE, #
path=MULTI_CLASS_MODEL_PATH_NIH)
else:
transfer_model.load_state_dict(torch.load(MULTI_CLASS_MODEL_PATH_NIH))
    # The COVID-19 and NIH label sets differ in size, so pad the COVID labels to match
labels_covid = fill_labels(labels_covid_bin)
# Make a copy so we can use it twice
transfer_model2 = copy(transfer_model)
# Transfer learning on COVID-19: multi-label
transfer_classifier.transfer_network_pytorch.train_using_pretrained_model(images=images_covid, #
labels=labels_covid, #
net=transfer_model, #
epochs=EPOCHS, #
learning_rate=LEARNING_RATE, #
batch_size=100, #
path=TRANSFER_MULTI_CLASS_MODEL_PATH)
# Transfer learning on COVID-19: binary
binary_classifier.binary_network_pytorch.train_using_pretrained_model(images=images_covid, #
labels=labels_covid, #
net=transfer_model2, #
epochs=EPOCHS, #
learning_rate=LEARNING_RATE, #
batch_size=100, #
path=TRANSFER_BINARY_CLASS_MODEL_PATH)
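# Note: both transfer-learning phases above pass a hard-coded batch_size=100
# rather than BATCH_SIZE (128); this looks intentional, but it is worth
# flagging when tuning the pipeline.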
|
[
"torch.load",
"copy.copy",
"os.path.exists",
"image_loader.get_nih_dataset",
"utils.fill_labels",
"image_loader.get_covid_dataset"
] |
[((882, 912), 'image_loader.get_nih_dataset', 'image_loader.get_nih_dataset', ([], {}), '()\n', (910, 912), False, 'import image_loader\n'), ((946, 978), 'image_loader.get_covid_dataset', 'image_loader.get_covid_dataset', ([], {}), '()\n', (976, 978), False, 'import image_loader\n'), ((1005, 1048), 'image_loader.get_covid_dataset', 'image_loader.get_covid_dataset', ([], {'binary': '(True)'}), '(binary=True)\n', (1035, 1048), False, 'import image_loader\n'), ((3592, 3621), 'utils.fill_labels', 'fill_labels', (['labels_covid_bin'], {}), '(labels_covid_bin)\n', (3603, 3621), False, 'from utils import fill_labels\n'), ((3686, 3706), 'copy.copy', 'copy', (['transfer_model'], {}), '(transfer_model)\n', (3690, 3706), False, 'from copy import copy\n'), ((1095, 1134), 'os.path.exists', 'os.path.exists', (['BINARY_CLASS_MODEL_PATH'], {}), '(BINARY_CLASS_MODEL_PATH)\n', (1109, 1134), False, 'import os\n'), ((1795, 1833), 'os.path.exists', 'os.path.exists', (['MULTI_CLASS_MODEL_PATH'], {}), '(MULTI_CLASS_MODEL_PATH)\n', (1809, 1833), False, 'import os\n'), ((2711, 2753), 'os.path.exists', 'os.path.exists', (['MULTI_CLASS_MODEL_PATH_NIH'], {}), '(MULTI_CLASS_MODEL_PATH_NIH)\n', (2725, 2753), False, 'import os\n'), ((1707, 1742), 'torch.load', 'torch.load', (['BINARY_CLASS_MODEL_PATH'], {}), '(BINARY_CLASS_MODEL_PATH)\n', (1717, 1742), False, 'import torch\n'), ((2629, 2663), 'torch.load', 'torch.load', (['MULTI_CLASS_MODEL_PATH'], {}), '(MULTI_CLASS_MODEL_PATH)\n', (2639, 2663), False, 'import torch\n'), ((3449, 3487), 'torch.load', 'torch.load', (['MULTI_CLASS_MODEL_PATH_NIH'], {}), '(MULTI_CLASS_MODEL_PATH_NIH)\n', (3459, 3487), False, 'import torch\n')]
|
#!/usr/bin/env python3
# -*- encoding: utf-8
# SPDX-License-Identifier: MIT
# Copyright (c) 2021 - 2021, <EMAIL>
__banner__ = r""" (
_ ______ ____ _____ _______ _ _
| | | ____| / __ \ | __ \ |__ __| | | | |
__| | ___ ___ | |__ | | | | | |__) | | | | |__| |
/ _` | / _ \ / __| | __| | | | | | _ / | | | __ |
| (_| | | (_) | | (__ | | | |__| | | | \ \ | | | | | |
\__,_| \___/ \___| |_| \____/ |_| \_\ |_| |_| |_|
______
|______|
)
As this module is not available in production
""" # __banner__
class DOC:
def __init__(self):
pass
def magic(self):
from .FORTH import Engine
std = self.standard_2012()
v = MagicVistor(std)
e = Engine(vis=v)
import dis
files = {}
for name in v.in_order:
print()
print()
data = std.get(name, {})
if 'en' in data:
print(name, " == ", data["en"])
else:
print(name)
print()
print(v.to_code[name].__doc__)
print()
fname = v.to_code[name].__code__.co_filename
if not fname in files:
files[fname] = ['']
for line in open(fname, 'r').readlines():
files[fname].append(line.rstrip())
print(fname)
fline = v.to_code[name].__code__.co_firstlineno
for i in range(fline, fline + 2):
line = files[fname][i]
print(f'Line: {i} == ', line)
print()
#print(dir(v.to_code[name].__code__))
#e.execute("SEE /")
def standard_2012(self):
import simplejson as json
from bs4 import BeautifulSoup
with open("books\\2012.json", "r") as f:
std = json.loads(f.read())['wordSets']
all_help = {}
for k, v in std.items():
for k1, v1 in v['words'].items():
help = {}
stack_plain = v1['stackEffect']['plain']
help["effects"] = stack_plain.get('NBSP', '')
data = v1['sections'].get('NBSP', {})
html = data.get('html', '')
html = html.replace('<em>', ' ')
html = html.replace('</em>', ' ')
soup = BeautifulSoup(html, 'html.parser')
text = ''
for t in soup.find_all(text=True):
text += '{} '.format(t)
text = text.strip()
for x in range(0, 10):
text = text.replace(" ", " ")
text = text.replace("\n ", "\n")
text = text.replace("\n\n", "~~")
text = text.replace("\n", "")
text = text.replace("~~", "\n\n")
for o in ['n', 'x', 'u']:
for i in ['1', '2', '3', '4']:
text = text.replace(f"{o} {i}", f"{o}{i}")
lines = text.split('\n')
lines = lines[4:]
text = '\n'.join(lines)
help["text"] = text
help["url"] = f"https://forth-standard.org/standard/{k}/{k1}"
help["en"] = v1.get('english', None)
all_help[v1['name'].lower()] = help
return all_help
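# A minimal, self-contained sketch of the HTML-flattening step used in
# standard_2012() above (the sample markup is hypothetical; the real input
# comes from the 2012.json word entries):
#
#   from bs4 import BeautifulSoup
#   html = "<p>Duplicate <em>n1</em>.</p>".replace("<em>", " ").replace("</em>", " ")
#   text = " ".join(t for t in BeautifulSoup(html, "html.parser").find_all(text=True))
#   # text now reads roughly "Duplicate  n1 ."; the loop above then collapses
#   # the doubled spaces and re-joins split operand names like "n 1" -> "n1".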
class MagicVistor:
def __init__(self, std):
self.std = std
self.in_order = []
self.to_code = {}
self.to_fname = {}
self.to_sname = {}
self.to_lname = {} # the mapping to library name
self.is_sigil = {}
self.is_word = {}
def before_imports(self, engine):
pass
def before_import(self, lname, lcode):
self.lname = lname
self.lcode = lcode
def visit_sigil(self, code, fname, sname, tname): # full, short, true
assert tname not in self.is_sigil
self.is_sigil[tname] = True
self.in_order.append(tname)
self.to_code[tname] = code
self.to_fname[tname] = fname
self.to_sname[tname] = sname
self.to_lname[tname] = self.lname
def visit_word(self, code, fname, sname, tname):
assert tname not in self.is_word
self.is_word[tname] = True
self.in_order.append(tname)
self.to_code[tname] = code
self.to_fname[tname] = fname
self.to_sname[tname] = sname
self.to_lname[tname] = self.lname
def after_import(self, name, lib):
pass
def after_imports(self, engine):
return
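        # NOTE: the early return above disables the debug dump below.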
        ordered = copy.copy(self.in_order)  # avoid shadowing the sorted() builtin
        ordered.sort()
        for i in range(0, len(ordered)):
            tname = ordered[i]
url = self.std[tname][2] if tname in self.std else ''
code = self.to_code[tname]
lname = self.to_lname[tname]
print("%03i %10s %10s %30s %s"%(i, lname, tname, self.to_sname[tname], url))
if self.is_word[tname]:
parts = self.to_fname[tname].split("_")
name = []
meta = None
for part in parts:
if meta is None:
if part == "":
meta = []
continue
else:
meta.append(part)
continue
name.append(engine.symbol_map.get(part, part))
name = "".join(name)
where = engine.root
# if name in where:
# raise ForthException(f"{name}: error(-4): Word Already Defined")
if name in where.word_immediate:
del where.word_immediate[name]
if not meta is None:
if "i" in meta[0]:
where.word_immediate[name] = True
where.words[name] = code
argc = code.__code__.co_argcount - 3
doc = code.__doc__
#fname = self.to_fname[name]
#parts = fname.split("_")
if i == 20: break
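# Visitor protocol note (inferred from the method names above): Engine(vis=v)
# is expected to call before_imports once; then, per library, before_import,
# visit_sigil/visit_word for each definition, and after_import; and finally
# after_imports. That is why MagicVistor keys to_code/to_fname/to_sname/
# to_lname by the true word name (tname).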
__openapi_header__ = """
from dataclasses import dataclass
from datetime import datetime
from typing import Optional
from quart import Quart, websocket
from quart_schema import QuartSchema, validate_request, validate_response
app = Quart(__name__)
QuartSchema(app)
@dataclass
class Todo:
task: str
due: Optional[datetime]
"""
__openapi_body__ = """
@app.post("/")
@validate_request(Todo)
@validate_response(Todo, 201)
async def create_todo(data: Todo) -> Todo:
... # Do something with data, e.g. save to the DB
return data, 201
"""
__openapi_footer__ = """
"""
import hashlib, copy
|
[
"bs4.BeautifulSoup",
"copy.copy"
] |
[((4784, 4808), 'copy.copy', 'copy.copy', (['self.in_order'], {}), '(self.in_order)\n', (4793, 4808), False, 'import hashlib, copy\n'), ((2543, 2577), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (2556, 2577), False, 'from bs4 import BeautifulSoup\n')]
|
from django.db import models
from django_extensions.db.fields.json import JSONField
from olympia.amo.fields import PositiveAutoField
from olympia.amo.models import SearchMixin
def update_inc(initial, key, count):
"""Update or create a dict of `int` counters, for JSONField."""
initial = initial or {}
initial[key] = count + initial.get(key, 0)
return initial
class StatsSearchMixin(SearchMixin):
ES_ALIAS_KEY = 'stats'
class DownloadCount(StatsSearchMixin, models.Model):
id = PositiveAutoField(primary_key=True)
# has an index `addon_id` on this column...
addon = models.ForeignKey('addons.Addon')
# has an index named `count` in dev, stage and prod
count = models.PositiveIntegerField(db_index=True)
date = models.DateField()
sources = JSONField(db_column='src', null=True)
class Meta:
db_table = 'download_counts'
# additional indices on this table (in dev, stage and prod):
# * KEY `addon_and_count` (`addon_id`,`count`)
# * KEY `addon_date_idx` (`addon_id`,`date`)
# in our (dev, stage and prod) database:
# UNIQUE KEY `date_2` (`date`,`addon_id`)
unique_together = ('date', 'addon')
class UpdateCount(StatsSearchMixin, models.Model):
id = PositiveAutoField(primary_key=True)
# Has an index `addon_id` in our dev, stage and prod database
addon = models.ForeignKey('addons.Addon')
# Has an index named `count` in our dev, stage and prod database
count = models.PositiveIntegerField(db_index=True)
# Has an index named `date` in our dev, stage and prod database
date = models.DateField(db_index=True)
versions = JSONField(db_column='version', null=True)
statuses = JSONField(db_column='status', null=True)
applications = JSONField(db_column='application', null=True)
oses = JSONField(db_column='os', null=True)
locales = JSONField(db_column='locale', null=True)
class Meta:
db_table = 'update_counts'
# Additional indices on this table (on dev, stage and prod):
# * KEY `addon_and_count` (`addon_id`,`count`)
# * KEY `addon_date_idx` (`addon_id`,`date`)
class ThemeUpdateCountManager(models.Manager):
def get_range_days_avg(self, start, end, *extra_fields):
"""Return a a ValuesListQuerySet containing the addon_id and popularity
for each theme where popularity is the average number of users (count)
over the given range of days passed as start / end arguments.
If extra_fields are passed, then the list of fields is returned in the
queryset, inserted after addon_id but before popularity."""
return (self.values_list('addon_id', *extra_fields)
.filter(date__range=[start, end])
.annotate(avg=models.Avg('count')))
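# Hypothetical usage sketch for the manager above (the dates are made up):
#
#   from datetime import date
#   rows = ThemeUpdateCount.objects.get_range_days_avg(
#       date(2020, 1, 1), date(2020, 1, 7))
#   # -> ValuesListQuerySet of (addon_id, avg) tuples, one per addon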
class ThemeUpdateCount(StatsSearchMixin, models.Model):
"""Daily users taken from the ADI data (coming from Hive)."""
id = PositiveAutoField(primary_key=True)
addon = models.ForeignKey('addons.Addon')
count = models.PositiveIntegerField()
date = models.DateField()
objects = ThemeUpdateCountManager()
class Meta:
db_table = 'theme_update_counts'
class ThemeUpdateCountBulk(models.Model):
"""Used by the update_theme_popularity_movers command for perf reasons.
First bulk inserting all the averages over the last week and last three
weeks in this table allows us to bulk update (instead of running an update
per Persona).
"""
id = PositiveAutoField(primary_key=True)
persona_id = models.PositiveIntegerField()
popularity = models.PositiveIntegerField()
movers = models.FloatField()
class Meta:
db_table = 'theme_update_counts_bulk'
class GlobalStat(models.Model):
id = PositiveAutoField(primary_key=True)
name = models.CharField(max_length=255)
count = models.IntegerField()
date = models.DateField()
class Meta:
db_table = 'global_stats'
unique_together = ('name', 'date')
get_latest_by = 'date'
class ThemeUserCount(StatsSearchMixin, models.Model):
"""Theme popularity (weekly average of users).
This is filled in by a cron job reading the popularity from the theme
(Persona).
"""
addon = models.ForeignKey('addons.Addon')
count = models.PositiveIntegerField()
date = models.DateField()
class Meta:
db_table = 'theme_user_counts'
index_together = ('date', 'addon')
|
[
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.PositiveIntegerField",
"django_extensions.db.fields.json.JSONField",
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.DateField",
"django.db.models.Avg",
"olympia.amo.fields.PositiveAutoField"
] |
[((509, 544), 'olympia.amo.fields.PositiveAutoField', 'PositiveAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (526, 544), False, 'from olympia.amo.fields import PositiveAutoField\n'), ((605, 638), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""addons.Addon"""'], {}), "('addons.Addon')\n", (622, 638), False, 'from django.db import models\n'), ((708, 750), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (735, 750), False, 'from django.db import models\n'), ((762, 780), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (778, 780), False, 'from django.db import models\n'), ((795, 832), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""src"""', 'null': '(True)'}), "(db_column='src', null=True)\n", (804, 832), False, 'from django_extensions.db.fields.json import JSONField\n'), ((1271, 1306), 'olympia.amo.fields.PositiveAutoField', 'PositiveAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (1288, 1306), False, 'from olympia.amo.fields import PositiveAutoField\n'), ((1385, 1418), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""addons.Addon"""'], {}), "('addons.Addon')\n", (1402, 1418), False, 'from django.db import models\n'), ((1500, 1542), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (1527, 1542), False, 'from django.db import models\n'), ((1622, 1653), 'django.db.models.DateField', 'models.DateField', ([], {'db_index': '(True)'}), '(db_index=True)\n', (1638, 1653), False, 'from django.db import models\n'), ((1669, 1710), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""version"""', 'null': '(True)'}), "(db_column='version', null=True)\n", (1678, 1710), False, 'from django_extensions.db.fields.json import JSONField\n'), ((1726, 1766), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""status"""', 'null': '(True)'}), "(db_column='status', null=True)\n", (1735, 1766), False, 'from django_extensions.db.fields.json import JSONField\n'), ((1786, 1831), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""application"""', 'null': '(True)'}), "(db_column='application', null=True)\n", (1795, 1831), False, 'from django_extensions.db.fields.json import JSONField\n'), ((1843, 1879), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""os"""', 'null': '(True)'}), "(db_column='os', null=True)\n", (1852, 1879), False, 'from django_extensions.db.fields.json import JSONField\n'), ((1894, 1934), 'django_extensions.db.fields.json.JSONField', 'JSONField', ([], {'db_column': '"""locale"""', 'null': '(True)'}), "(db_column='locale', null=True)\n", (1903, 1934), False, 'from django_extensions.db.fields.json import JSONField\n'), ((2956, 2991), 'olympia.amo.fields.PositiveAutoField', 'PositiveAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (2973, 2991), False, 'from olympia.amo.fields import PositiveAutoField\n'), ((3004, 3037), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""addons.Addon"""'], {}), "('addons.Addon')\n", (3021, 3037), False, 'from django.db import models\n'), ((3050, 3079), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3077, 3079), False, 'from django.db import models\n'), ((3091, 3109), 'django.db.models.DateField', 'models.DateField', ([], {}), 
'()\n', (3107, 3109), False, 'from django.db import models\n'), ((3521, 3556), 'olympia.amo.fields.PositiveAutoField', 'PositiveAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (3538, 3556), False, 'from olympia.amo.fields import PositiveAutoField\n'), ((3574, 3603), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3601, 3603), False, 'from django.db import models\n'), ((3621, 3650), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (3648, 3650), False, 'from django.db import models\n'), ((3664, 3683), 'django.db.models.FloatField', 'models.FloatField', ([], {}), '()\n', (3681, 3683), False, 'from django.db import models\n'), ((3790, 3825), 'olympia.amo.fields.PositiveAutoField', 'PositiveAutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (3807, 3825), False, 'from olympia.amo.fields import PositiveAutoField\n'), ((3837, 3869), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (3853, 3869), False, 'from django.db import models\n'), ((3882, 3903), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (3901, 3903), False, 'from django.db import models\n'), ((3915, 3933), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (3931, 3933), False, 'from django.db import models\n'), ((4277, 4310), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""addons.Addon"""'], {}), "('addons.Addon')\n", (4294, 4310), False, 'from django.db import models\n'), ((4323, 4352), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (4350, 4352), False, 'from django.db import models\n'), ((4364, 4382), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (4380, 4382), False, 'from django.db import models\n'), ((2801, 2820), 'django.db.models.Avg', 'models.Avg', (['"""count"""'], {}), "('count')\n", (2811, 2820), False, 'from django.db import models\n')]
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from polyaxon.cli.options import OPTIONS_MODEL_VERSION, OPTIONS_NAME, OPTIONS_PROJECT
from polyaxon.cli.project_versions import (
delete_project_version,
get_project_version,
list_project_versions,
open_project_version_dashboard,
register_project_version,
stage_project_version,
update_project_version,
)
from polyaxon.env_vars.getters import get_project_or_local
from polyaxon.lifecycle import V1ProjectVersionKind, V1Stages
from polyaxon.logger import clean_outputs
from polyaxon.utils.formatting import Printer
@click.group()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.pass_context
@clean_outputs
def models(ctx, project, version):
"""Commands for managing models."""
ctx.obj = ctx.obj or {}
if project or version:
Printer.print_warning(
"Passing arguments to command groups is deprecated and will be removed in v2! "
"Please use arguments on the sub-command directly: `polyaxon ops SUB_COMMAND --help`"
)
ctx.obj["project"] = project
if ctx.invoked_subcommand not in ["ls"]:
ctx.obj["version"] = version
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(
"--query",
"-q",
type=str,
help="To filter the model versions based on this query spec.",
)
@click.option(
"--sort",
"-s",
type=str,
help="To order the model versions based on the sort spec.",
)
@click.option("--limit", type=int, help="To limit the list of model versions.")
@click.option("--offset", type=int, help="To offset the list of model versions.")
@click.pass_context
@clean_outputs
def ls(ctx, project, query, sort, limit, offset):
"""List model versions by owner or owner/model.
Example:
\b
$ polyaxon models ls -p=project-name
\b
$ polyaxon models ls -p=acme/project-name
"""
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
list_project_versions(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
query=query,
sort=sort,
limit=limit,
offset=offset,
)
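# Hedged example of query/sort specs (the spec grammar is assumed from
# Polyaxon's query-syntax docs; the values are hypothetical):
#
#   $ polyaxon models ls -p acme/project-name -q "name:~resnet" -s "-created_at"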
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.option("--description", type=str, help="Description of the version.")
@click.option("--tags", type=str, help="Tags of the version, comma separated values.")
@click.option(
"--content", type=str, help="Additional content/metadata fo the model version."
)
@click.option("--run-uid", type=str, help="The run to link to this model version.")
@click.option(
"--artifacts",
"artifacts_",
type=str,
help="The artifacts to link to this model version.",
)
@click.option(
"--connection", type=str, help="The connection to link to this model version."
)
@click.option(
"--force",
is_flag=True,
default=False,
help="Flag to force push if the version already exists.",
)
@click.pass_context
@clean_outputs
def push(
ctx,
project,
version,
description,
tags,
content,
run_uid,
artifacts_,
connection,
force,
):
"""Push a new model version.
If the name corresponds to an existing model version, it will be updated.
Example:
\b
$ polyaxon models push --artifacts=model,env --run=uuid
\b
$ polyaxon models push -f polyaxonfile.yaml --project=kaniko --description="..."
\b
$ polyaxon models push -f polyaxonfile.yaml -p kaniko -ver latest --run=uuid
\b
$ polyaxon models push -f polyaxonfile.yaml -p owner/name -ver v1 --tags="tag1,tag2"
"""
version = version or ctx.obj.get("version")
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
register_project_version(
owner=owner,
project_name=project_name,
version=version,
kind=V1ProjectVersionKind.MODEL,
description=description,
tags=tags,
content=content,
run=run_uid,
connection=connection,
artifacts=artifacts_,
force=force,
)
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.pass_context
@clean_outputs
def get(ctx, project, version):
"""Get info for a model version by name, name & version, owner/name & tag.
Examples:
\b
$ polyaxon models get // returns `latest` in current project
\b
$ polyaxon models get --project=my-project --version=test-version
\b
$ polyaxon models get -p owner/my-project -ver rc12
"""
version = version or ctx.obj.get("version") or "latest"
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
get_project_version(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
version=version,
)
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.pass_context
@clean_outputs
def delete(ctx, project, version):
"""Delete a model version.
Examples:
\b
$ polyaxon models delete // delete `latest` in current project
\b
$ polyaxon models delete --project=my-project --version=test-version
\b
$ polyaxon models get -p owner/my-project -ver rc12
"""
version = version or ctx.obj.get("version") or "latest"
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
delete_project_version(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
version=version,
)
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.option(
*OPTIONS_NAME["args"],
type=str,
help="Name of the model version, must be unique within the same project.",
)
@click.option("--description", type=str, help="Description of the model version.")
@click.option(
"--tags", type=str, help="Tags of the run, comma separated values (optional)."
)
@click.pass_context
@clean_outputs
def update(ctx, project, version, name, description, tags):
"""Update model version.
Uses /docs/core/cli/#caching
Example:
\b
$ polyaxon models update --version=foobar --description="..."
\b
$ polyaxon models update -p mike1/foobar -ver current-name --name=new-name
\b
$ polyaxon models update --tags="foo, bar"
"""
version = version or ctx.obj.get("version") or "latest"
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
update_project_version(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
version=version,
name=name,
description=description,
tags=tags,
)
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.option(
"--to",
"-to",
type=click.Choice(V1Stages.allowable_values, case_sensitive=True),
help="Stage to transition to.",
)
@click.option(
"--message", type=str, help="Additional information to set with this stage change."
)
@click.pass_context
@clean_outputs
def stage(ctx, project, version, to, message):
"""Update stage for a model version.
Uses /docs/core/cli/#caching
Example:
\b
$ polyaxon models stage -ver rc-12 --to=production
\b
    $ polyaxon models stage -p acme/foobar -ver rc-12 --to=staging --message="Use carefully!"
"""
version = version or ctx.obj.get("version") or "latest"
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
stage_project_version(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
version=version,
to=to,
message=message,
)
@models.command()
@click.option(*OPTIONS_PROJECT["args"], **OPTIONS_PROJECT["kwargs"])
@click.option(*OPTIONS_MODEL_VERSION["args"], **OPTIONS_MODEL_VERSION["kwargs"])
@click.option(
"--yes",
"-y",
is_flag=True,
default=False,
help="Automatic yes to prompts. "
'Assume "yes" as answer to all prompts and run non-interactively.',
)
@click.option(
"--url",
is_flag=True,
default=False,
help="Print the url of the dashboard for this model version.",
)
@click.pass_context
@clean_outputs
def dashboard(ctx, project, version, yes, url):
"""Open this operation's dashboard details in browser."""
version = version or ctx.obj.get("version") or "latest"
owner, project_name = get_project_or_local(
project or ctx.obj.get("project"), is_cli=True
)
open_project_version_dashboard(
owner=owner,
project_name=project_name,
kind=V1ProjectVersionKind.MODEL,
version=version,
url=url,
yes=yes,
)
|
[
"polyaxon.utils.formatting.Printer.print_warning",
"click.option",
"polyaxon.cli.project_versions.open_project_version_dashboard",
"click.Choice",
"polyaxon.cli.project_versions.get_project_version",
"polyaxon.cli.project_versions.list_project_versions",
"polyaxon.cli.project_versions.delete_project_version",
"polyaxon.cli.project_versions.register_project_version",
"click.group",
"polyaxon.cli.project_versions.stage_project_version",
"polyaxon.cli.project_versions.update_project_version"
] |
[((1164, 1177), 'click.group', 'click.group', ([], {}), '()\n', (1175, 1177), False, 'import click\n'), ((1179, 1246), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (1191, 1246), False, 'import click\n'), ((1248, 1327), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (1260, 1327), False, 'import click\n'), ((1860, 1927), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (1872, 1927), False, 'import click\n'), ((1929, 2036), 'click.option', 'click.option', (['"""--query"""', '"""-q"""'], {'type': 'str', 'help': '"""To filter the model versions based on this query spec."""'}), "('--query', '-q', type=str, help=\n 'To filter the model versions based on this query spec.')\n", (1941, 2036), False, 'import click\n'), ((2052, 2155), 'click.option', 'click.option', (['"""--sort"""', '"""-s"""'], {'type': 'str', 'help': '"""To order the model versions based on the sort spec."""'}), "('--sort', '-s', type=str, help=\n 'To order the model versions based on the sort spec.')\n", (2064, 2155), False, 'import click\n'), ((2171, 2249), 'click.option', 'click.option', (['"""--limit"""'], {'type': 'int', 'help': '"""To limit the list of model versions."""'}), "('--limit', type=int, help='To limit the list of model versions.')\n", (2183, 2249), False, 'import click\n'), ((2251, 2336), 'click.option', 'click.option', (['"""--offset"""'], {'type': 'int', 'help': '"""To offset the list of model versions."""'}), "('--offset', type=int, help='To offset the list of model versions.'\n )\n", (2263, 2336), False, 'import click\n'), ((2939, 3006), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (2951, 3006), False, 'import click\n'), ((3008, 3087), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (3020, 3087), False, 'import click\n'), ((3089, 3164), 'click.option', 'click.option', (['"""--description"""'], {'type': 'str', 'help': '"""Description of the version."""'}), "('--description', type=str, help='Description of the version.')\n", (3101, 3164), False, 'import click\n'), ((3166, 3256), 'click.option', 'click.option', (['"""--tags"""'], {'type': 'str', 'help': '"""Tags of the version, comma separated values."""'}), "('--tags', type=str, help=\n 'Tags of the version, comma separated values.')\n", (3178, 3256), False, 'import click\n'), ((3253, 3351), 'click.option', 'click.option', (['"""--content"""'], {'type': 'str', 'help': '"""Additional content/metadata fo the model version."""'}), "('--content', type=str, help=\n 'Additional content/metadata fo the model version.')\n", (3265, 3351), False, 'import click\n'), ((3354, 3441), 'click.option', 'click.option', (['"""--run-uid"""'], {'type': 'str', 'help': '"""The run to link to this model version."""'}), "('--run-uid', type=str, help=\n 'The run to link to this model version.')\n", (3366, 3441), False, 'import click\n'), ((3438, 3547), 'click.option', 'click.option', (['"""--artifacts"""', '"""artifacts_"""'], {'type': 'str', 'help': '"""The artifacts to link to this model version."""'}), "('--artifacts', 'artifacts_', type=str, help=\n 'The artifacts to link to this model version.')\n", (3450, 3547), False, 'import click\n'), 
((3563, 3660), 'click.option', 'click.option', (['"""--connection"""'], {'type': 'str', 'help': '"""The connection to link to this model version."""'}), "('--connection', type=str, help=\n 'The connection to link to this model version.')\n", (3575, 3660), False, 'import click\n'), ((3663, 3778), 'click.option', 'click.option', (['"""--force"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Flag to force push if the version already exists."""'}), "('--force', is_flag=True, default=False, help=\n 'Flag to force push if the version already exists.')\n", (3675, 3778), False, 'import click\n'), ((4968, 5035), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (4980, 5035), False, 'import click\n'), ((5037, 5116), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (5049, 5116), False, 'import click\n'), ((5845, 5912), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (5857, 5912), False, 'import click\n'), ((5914, 5993), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (5926, 5993), False, 'import click\n'), ((6685, 6752), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (6697, 6752), False, 'import click\n'), ((6754, 6833), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (6766, 6833), False, 'import click\n'), ((6835, 6960), 'click.option', 'click.option', (["*OPTIONS_NAME['args']"], {'type': 'str', 'help': '"""Name of the model version, must be unique within the same project."""'}), "(*OPTIONS_NAME['args'], type=str, help=\n 'Name of the model version, must be unique within the same project.')\n", (6847, 6960), False, 'import click\n'), ((6972, 7058), 'click.option', 'click.option', (['"""--description"""'], {'type': 'str', 'help': '"""Description of the model version."""'}), "('--description', type=str, help=\n 'Description of the model version.')\n", (6984, 7058), False, 'import click\n'), ((7055, 7152), 'click.option', 'click.option', (['"""--tags"""'], {'type': 'str', 'help': '"""Tags of the run, comma separated values (optional)."""'}), "('--tags', type=str, help=\n 'Tags of the run, comma separated values (optional).')\n", (7067, 7152), False, 'import click\n'), ((7967, 8034), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (7979, 8034), False, 'import click\n'), ((8036, 8115), 'click.option', 'click.option', (["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (8048, 8115), False, 'import click\n'), ((8264, 8366), 'click.option', 'click.option', (['"""--message"""'], {'type': 'str', 'help': '"""Additional information to set with this stage change."""'}), "('--message', type=str, help=\n 'Additional information to set with this stage change.')\n", (8276, 8366), False, 'import click\n'), ((9097, 9164), 'click.option', 'click.option', (["*OPTIONS_PROJECT['args']"], {}), "(*OPTIONS_PROJECT['args'], **OPTIONS_PROJECT['kwargs'])\n", (9109, 9164), False, 'import click\n'), ((9166, 9245), 'click.option', 'click.option', 
(["*OPTIONS_MODEL_VERSION['args']"], {}), "(*OPTIONS_MODEL_VERSION['args'], **OPTIONS_MODEL_VERSION['kwargs'])\n", (9178, 9245), False, 'import click\n'), ((9247, 9412), 'click.option', 'click.option', (['"""--yes"""', '"""-y"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively."""'}), '(\'--yes\', \'-y\', is_flag=True, default=False, help=\n \'Automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively.\'\n )\n', (9259, 9412), False, 'import click\n'), ((9434, 9552), 'click.option', 'click.option', (['"""--url"""'], {'is_flag': '(True)', 'default': '(False)', 'help': '"""Print the url of the dashboard for this model version."""'}), "('--url', is_flag=True, default=False, help=\n 'Print the url of the dashboard for this model version.')\n", (9446, 9552), False, 'import click\n'), ((2708, 2864), 'polyaxon.cli.project_versions.list_project_versions', 'list_project_versions', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'query': 'query', 'sort': 'sort', 'limit': 'limit', 'offset': 'offset'}), '(owner=owner, project_name=project_name, kind=\n V1ProjectVersionKind.MODEL, query=query, sort=sort, limit=limit, offset\n =offset)\n', (2729, 2864), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((4613, 4866), 'polyaxon.cli.project_versions.register_project_version', 'register_project_version', ([], {'owner': 'owner', 'project_name': 'project_name', 'version': 'version', 'kind': 'V1ProjectVersionKind.MODEL', 'description': 'description', 'tags': 'tags', 'content': 'content', 'run': 'run_uid', 'connection': 'connection', 'artifacts': 'artifacts_', 'force': 'force'}), '(owner=owner, project_name=project_name, version=\n version, kind=V1ProjectVersionKind.MODEL, description=description, tags\n =tags, content=content, run=run_uid, connection=connection, artifacts=\n artifacts_, force=force)\n', (4637, 4866), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((5675, 5789), 'polyaxon.cli.project_versions.get_project_version', 'get_project_version', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'version': 'version'}), '(owner=owner, project_name=project_name, kind=\n V1ProjectVersionKind.MODEL, version=version)\n', (5694, 5789), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((6512, 6629), 'polyaxon.cli.project_versions.delete_project_version', 'delete_project_version', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'version': 'version'}), '(owner=owner, project_name=project_name, kind=\n V1ProjectVersionKind.MODEL, version=version)\n', (6534, 6629), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((7723, 7892), 
'polyaxon.cli.project_versions.update_project_version', 'update_project_version', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'version': 'version', 'name': 'name', 'description': 'description', 'tags': 'tags'}), '(owner=owner, project_name=project_name, kind=\n V1ProjectVersionKind.MODEL, version=version, name=name, description=\n description, tags=tags)\n', (7745, 7892), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((8885, 9025), 'polyaxon.cli.project_versions.stage_project_version', 'stage_project_version', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'version': 'version', 'to': 'to', 'message': 'message'}), '(owner=owner, project_name=project_name, kind=\n V1ProjectVersionKind.MODEL, version=version, to=to, message=message)\n', (8906, 9025), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((9885, 10028), 'polyaxon.cli.project_versions.open_project_version_dashboard', 'open_project_version_dashboard', ([], {'owner': 'owner', 'project_name': 'project_name', 'kind': 'V1ProjectVersionKind.MODEL', 'version': 'version', 'url': 'url', 'yes': 'yes'}), '(owner=owner, project_name=project_name, kind\n =V1ProjectVersionKind.MODEL, version=version, url=url, yes=yes)\n', (9915, 10028), False, 'from polyaxon.cli.project_versions import delete_project_version, get_project_version, list_project_versions, open_project_version_dashboard, register_project_version, stage_project_version, update_project_version\n'), ((1501, 1696), 'polyaxon.utils.formatting.Printer.print_warning', 'Printer.print_warning', (['"""Passing arguments to command groups is deprecated and will be removed in v2! Please use arguments on the sub-command directly: `polyaxon ops SUB_COMMAND --help`"""'], {}), "(\n 'Passing arguments to command groups is deprecated and will be removed in v2! Please use arguments on the sub-command directly: `polyaxon ops SUB_COMMAND --help`'\n )\n", (1522, 1696), False, 'from polyaxon.utils.formatting import Printer\n'), ((8163, 8223), 'click.Choice', 'click.Choice', (['V1Stages.allowable_values'], {'case_sensitive': '(True)'}), '(V1Stages.allowable_values, case_sensitive=True)\n', (8175, 8223), False, 'import click\n')]
|
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import functools
import numpy as np
from sklearn.metrics import recall_score
from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction
def test_constructor_unnamed():
fc = AnnotatedMetricFunction(func=recall_score, name=None)
assert fc.name == recall_score.__name__
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
def test_constructor_no_name(recwarn):
    # Tests the case where no name is given and the function has no __name__
my_func = functools.partial(recall_score, pos_label=0)
fc = AnnotatedMetricFunction(func=my_func, name=None)
assert fc.name == "metric"
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
assert len(recwarn) == 1
assert str(recwarn[0].message) == "Supplied 'func' had no __name__ attribute"
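# Why the warning fires above: functools.partial objects carry no __name__
# attribute (hasattr(functools.partial(print), "__name__") is False), so the
# constructor falls back to the generic "metric" name.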
def test_constructor_named():
fc = AnnotatedMetricFunction(func=recall_score, name="OverrideName")
assert fc.name == "OverrideName"
assert np.array_equal(fc.postional_argument_names, ["y_true", "y_pred"])
assert isinstance(fc.kw_argument_mapping, dict)
assert len(fc.kw_argument_mapping) == 0
|
[
"numpy.array_equal",
"functools.partial",
"fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction"
] |
[((304, 357), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'recall_score', 'name': 'None'}), '(func=recall_score, name=None)\n', (327, 357), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((413, 478), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (427, 478), True, 'import numpy as np\n'), ((703, 747), 'functools.partial', 'functools.partial', (['recall_score'], {'pos_label': '(0)'}), '(recall_score, pos_label=0)\n', (720, 747), False, 'import functools\n'), ((758, 806), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'my_func', 'name': 'None'}), '(func=my_func, name=None)\n', (781, 806), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((849, 914), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (863, 914), True, 'import numpy as np\n'), ((1163, 1226), 'fairlearn.metrics._annotated_metric_function.AnnotatedMetricFunction', 'AnnotatedMetricFunction', ([], {'func': 'recall_score', 'name': '"""OverrideName"""'}), "(func=recall_score, name='OverrideName')\n", (1186, 1226), False, 'from fairlearn.metrics._annotated_metric_function import AnnotatedMetricFunction\n'), ((1275, 1340), 'numpy.array_equal', 'np.array_equal', (['fc.postional_argument_names', "['y_true', 'y_pred']"], {}), "(fc.postional_argument_names, ['y_true', 'y_pred'])\n", (1289, 1340), True, 'import numpy as np\n')]
|
import discord
import datetime, time
from discord.ext import commands
restart_data = {
'str': str(datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")),
'obj': time.time()
}
# Subclassing commands.Cog is what makes this loadable via setup() below.
class UPTIME(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
print(f'{self} has been loaded')
global startTime
startTime = time.time()
#create a command in the cog
@commands.command(name='uptime')
async def _uptime(self,ctx):
        # Build the human-readable uptime string: take a time.time() snapshot
        # now and subtract the start timestamp stored in restart_data['obj'].
uptime = str(datetime.timedelta(seconds=int(
round(time.time() - restart_data['obj']))))
        embed=discord.Embed(title='Uptime',description='Here is how long I have been online for you',color=discord.Color.blue())
embed.add_field(name='UPTIME',value=uptime)
embed.set_image(url=f"https://falsiskremlin.sirv.com/resim_2020-11-28_113400.png?text.0.text={uptime}&text.0.position.x=-20%25&text.0.position.y=-30%25&text.0.size=50&text.0.color=ffffff&watermark.0.image=%2FImages%2Fresim_2020-11-29_103837.png&watermark.0.position.x=-35%25&watermark.0.scale.width=170&watermark.0.scale.height=170")
await ctx.send(embed=embed)
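    # Worked example of the timedelta formatting above:
    #   str(datetime.timedelta(seconds=3661)) -> '1:01:01'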
@commands.command(
name='ping',
help='Shows my current response time.'
)
@commands.guild_only()
async def ping(self, ctx: commands.Context):
system_latency = round(self.bot.latency * 1000)
shard_id = ctx.guild.shard_id
shard = self.bot.get_shard(shard_id)
shard_ping = shard.latency
if shard_ping <= 4 :
xc = discord.Color.green()
elif shard_ping <=9:
xc = 0xFFFF00
else:
xc = discord.Color.red()
start_time = time.time()
message = await ctx.reply('Testing overall speed...')
end_time = time.time()
api_latency = round((end_time - start_time) * 1000)
uptime = str(datetime.timedelta(seconds=int(
round(time.time() - restart_data['obj']))))
embed = (
discord.Embed(
color=xc
).add_field(
name='System Latency',
value=f'{system_latency}ms [{self.bot.shard_count} shard(s)]',
inline=False
).add_field(
name="SHARD STATS",value=f"Shard Id :{shard_id} \n Shard Ping :{shard_ping}").add_field(
name='API Latency',
value=f'{api_latency}ms'
).add_field(
name='Startup Time',
value=restart_data['str'],
inline=False
).add_field(
name='Uptime',
value=uptime,
inline=False
).set_footer(
text='Ping'
)
)
await message.edit(content=None, embed=embed)
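    # The fluent chaining above works because discord.py's Embed.add_field()
    # returns the embed instance itself.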
def setup(bot):
bot.add_cog(UPTIME(bot))
|
[
"discord.ext.commands.command",
"discord.Embed",
"discord.Color.red",
"discord.Color.green",
"discord.Color.blue",
"discord.ext.commands.Cog.listener",
"time.time",
"discord.ext.commands.guild_only",
"datetime.datetime.now"
] |
[((172, 183), 'time.time', 'time.time', ([], {}), '()\n', (181, 183), False, 'import datetime, time\n'), ((315, 338), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (336, 338), False, 'from discord.ext import commands\n'), ((508, 539), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""uptime"""'}), "(name='uptime')\n", (524, 539), False, 'from discord.ext import commands\n'), ((1443, 1512), 'discord.ext.commands.command', 'commands.command', ([], {'name': '"""ping"""', 'help': '"""Shows my current response time."""'}), "(name='ping', help='Shows my current response time.')\n", (1459, 1512), False, 'from discord.ext import commands\n'), ((1541, 1562), 'discord.ext.commands.guild_only', 'commands.guild_only', ([], {}), '()\n', (1560, 1562), False, 'from discord.ext import commands\n'), ((457, 468), 'time.time', 'time.time', ([], {}), '()\n', (466, 468), False, 'import datetime, time\n'), ((1992, 2003), 'time.time', 'time.time', ([], {}), '()\n', (2001, 2003), False, 'import datetime, time\n'), ((2085, 2096), 'time.time', 'time.time', ([], {}), '()\n', (2094, 2096), False, 'import datetime, time\n'), ((1840, 1861), 'discord.Color.green', 'discord.Color.green', ([], {}), '()\n', (1859, 1861), False, 'import discord\n'), ((105, 128), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (126, 128), False, 'import datetime, time\n'), ((986, 1006), 'discord.Color.blue', 'discord.Color.blue', ([], {}), '()\n', (1004, 1006), False, 'import discord\n'), ((1944, 1963), 'discord.Color.red', 'discord.Color.red', ([], {}), '()\n', (1961, 1963), False, 'import discord\n'), ((851, 862), 'time.time', 'time.time', ([], {}), '()\n', (860, 862), False, 'import datetime, time\n'), ((2229, 2240), 'time.time', 'time.time', ([], {}), '()\n', (2238, 2240), False, 'import datetime, time\n'), ((2298, 2321), 'discord.Embed', 'discord.Embed', ([], {'color': 'xc'}), '(color=xc)\n', (2311, 2321), False, 'import discord\n')]
|
import urllib.request
from pymongo import MongoClient
import pandas as pd
import os
import json
class Wunderground:
"""Class that fetches weather from Wunderground and copies to MongoDB"""
def __init__(self, dbc, sid, api):
"""
Initializes class
"""
self.dbc = dbc
self.sid = sid
self.api = api
def convert(self, val):
"""
Convert values to float
"""
try:
val = float(val)
except Exception:
pass
if val == " ":
val = None
return val
def get_day(self, m, d, y):
"""
Get observations for the full day
"""
url = (
"https://api.weather.com/v2/pws/history/all?stationId="
+ str(self.sid)
+ "&format=json&units=e&apiKey="
+ str(self.api)
+ "&date="
+ str(y).zfill(4)
+ str(m).zfill(2)
+ str(d).zfill(2)
)
print(url)
        # Use implicit string concatenation: the original backslash
        # continuations embedded the next line's indentation inside the
        # User-Agent value.
        hdr = {
            "User-Agent": (
                "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
                "AppleWebKit/537.36 (KHTML, like Gecko) "
                "Chrome/39.0.2171.95 Safari/537.36"
            )
        }
req = urllib.request.Request(url, headers=hdr)
msg = urllib.request.urlopen(req).read()
msg = json.loads(msg)
# print(msg)
obs = msg["observations"]
for ob in obs:
ob["station_id"] = ob["stationID"]
ob.pop("stationID")
ob["obs_time_utc"] = pd.to_datetime(ob["obsTimeUtc"])
ob.pop("obsTimeUtc")
ob["obs_time_local"] = pd.to_datetime(ob["obsTimeLocal"])
ob.pop("obsTimeLocal")
ob.update(ob["imperial"])
ob.pop("imperial")
ob["solar"] = ob["solarRadiationHigh"]
ob.pop("solarRadiationHigh")
ob["uv"] = ob["uvHigh"]
ob.pop("uvHigh")
ob["wind_deg"] = ob["winddirAvg"]
ob.pop("winddirAvg")
ob["temp_f"] = ob["tempAvg"]
ob.pop("tempAvg")
ob.pop("tempHigh")
ob.pop("tempLow")
ob["dewpt_f"] = ob["dewptAvg"]
ob.pop("dewptAvg")
ob.pop("dewptHigh")
ob.pop("dewptLow")
ob["humidity"] = ob["humidityAvg"]
ob.pop("humidityAvg")
ob.pop("humidityHigh")
ob.pop("humidityLow")
ob["pressure_in"] = (ob["pressureMax"] + ob["pressureMin"]) / 2
ob.pop("pressureMax")
ob.pop("pressureMin")
ob["pressure_trend"] = ob["pressureTrend"]
ob.pop("pressureTrend")
ob["qc_status"] = ob["qcStatus"]
ob.pop("qcStatus")
ob["heat_index_f"] = ob["heatindexAvg"]
ob.pop("heatindexAvg")
ob.pop("heatindexHigh")
ob.pop("heatindexLow")
ob["windchill_f"] = ob["windchillAvg"]
ob.pop("windchillAvg")
ob.pop("windchillHigh")
ob.pop("windchillLow")
ob["wind_speed_mph"] = ob["windspeedAvg"]
ob.pop("windspeedAvg")
ob.pop("windspeedHigh")
ob.pop("windspeedLow")
ob["wind_gust_mph"] = ob["windgustAvg"]
ob.pop("windgustAvg")
ob.pop("windgustHigh")
ob.pop("windgustLow")
ob["precip_rate"] = ob["precipRate"]
ob.pop("precipRate")
ob["precip_total"] = ob["precipTotal"]
ob.pop("precipTotal")
ob.pop("epoch")
ob.pop("tz")
try:
self.raw.insert_one(ob)
print(ob)
except Exception:
print("duplicate observation")
def run(self):
"""
Main method to run
"""
client = MongoClient(self.dbc)
db = client.wx
self.raw = db.raw
y = 2021
for m in range(9, 10):
for d in range(12, 31):
try:
self.get_day(m, d, y)
print("got day {}-{}-{}".format(y, m, d))
except Exception:
print("failed day {}-{}-{}".format(y, m, d))
pass
if __name__ == "__main__":
wunderground = Wunderground(
dbc=os.environ["MONGODB_CLIENT"],
sid=os.environ["SID"],
api=os.environ["API"],
)
wunderground.run()
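# Required environment variables (names taken from the __main__ block above):
#   MONGODB_CLIENT  MongoDB connection string, e.g. "mongodb://localhost:27017"
#   SID             Wunderground personal weather station id
#   API             weather.com API key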
|
[
"pymongo.MongoClient",
"pandas.to_datetime",
"json.loads"
] |
[((1362, 1377), 'json.loads', 'json.loads', (['msg'], {}), '(msg)\n', (1372, 1377), False, 'import json\n'), ((3872, 3893), 'pymongo.MongoClient', 'MongoClient', (['self.dbc'], {}), '(self.dbc)\n', (3883, 3893), False, 'from pymongo import MongoClient\n'), ((1568, 1600), 'pandas.to_datetime', 'pd.to_datetime', (["ob['obsTimeUtc']"], {}), "(ob['obsTimeUtc'])\n", (1582, 1600), True, 'import pandas as pd\n'), ((1669, 1703), 'pandas.to_datetime', 'pd.to_datetime', (["ob['obsTimeLocal']"], {}), "(ob['obsTimeLocal'])\n", (1683, 1703), True, 'import pandas as pd\n')]
|
# coding: utf-8
""" demo on forward 2D """
# Copyright (c) <NAME>. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
import pyeit.eit.protocol as protocol
import pyeit.mesh as mesh
from pyeit.eit.fem import Forward
from pyeit.mesh.shape import thorax
from pyeit.mesh.wrapper import PyEITAnomaly_Circle
""" 0. build mesh """
n_el = 16 # nb of electrodes
use_customize_shape = False
if use_customize_shape:
# Mesh shape is specified with fd parameter in the instantiation, e.g : fd=thorax
mesh_obj = mesh.create(n_el, h0=0.1, fd=thorax)
else:
mesh_obj = mesh.create(n_el, h0=0.1)
el_pos = mesh_obj.el_pos
# extract node, element, alpha
pts = mesh_obj.node
tri = mesh_obj.element
x, y = pts[:, 0], pts[:, 1]
mesh_obj.print_stats()
# change permittivity
anomaly = PyEITAnomaly_Circle(center=[0.4, 0.5], r=0.2, perm=100.0)
mesh_new = mesh.set_perm(mesh_obj, anomaly=anomaly, background=1.0)
perm = mesh_new.perm
""" 1. FEM forward simulations """
# setup EIT scan conditions
protocol_obj = protocol.create(n_el, dist_exc=7, step_meas=1, parser_meas="std")
# Define electrode current sink and current source
ex_line = protocol_obj.ex_mat[0].ravel()
# calculate simulated data using FEM
fwd = Forward(mesh_new)
f = fwd.solve(ex_line)
f = np.real(f)
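# f now holds the (real part of the) simulated potential at each mesh node for
# the chosen excitation pair; the tricontour call below draws its level sets.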
""" 2. plot """
fig = plt.figure()
ax1 = fig.add_subplot(111)
# draw equi-potential lines
vf = np.linspace(min(f), max(f), 32)
# vf = np.sort(f[el_pos])
# Draw contour lines on an unstructured triangular grid.
ax1.tricontour(x, y, tri, f, vf, cmap=plt.cm.viridis)
# draw mesh structure
# Create a pseudocolor plot of an unstructured triangular grid
ax1.tripcolor(
x,
y,
tri,
np.real(perm),
edgecolors="k",
shading="flat",
alpha=0.5,
cmap=plt.cm.Greys,
)
# draw electrodes
ax1.plot(x[el_pos], y[el_pos], "ro")
for i, e in enumerate(el_pos):
ax1.text(x[e], y[e], str(i + 1), size=12)
ax1.set_title("equi-potential lines")
# clean up
ax1.set_aspect("equal")
ax1.set_ylim([-1.2, 1.2])
ax1.set_xlim([-1.2, 1.2])
fig.set_size_inches(6, 6)
# fig.savefig('demo_bp.png', dpi=96)
plt.show()
|
[
"pyeit.mesh.wrapper.PyEITAnomaly_Circle",
"matplotlib.pyplot.show",
"pyeit.eit.protocol.create",
"pyeit.mesh.set_perm",
"pyeit.eit.fem.Forward",
"matplotlib.pyplot.figure",
"numpy.real",
"pyeit.mesh.create"
] |
[((938, 995), 'pyeit.mesh.wrapper.PyEITAnomaly_Circle', 'PyEITAnomaly_Circle', ([], {'center': '[0.4, 0.5]', 'r': '(0.2)', 'perm': '(100.0)'}), '(center=[0.4, 0.5], r=0.2, perm=100.0)\n', (957, 995), False, 'from pyeit.mesh.wrapper import PyEITAnomaly_Circle\n'), ((1007, 1063), 'pyeit.mesh.set_perm', 'mesh.set_perm', (['mesh_obj'], {'anomaly': 'anomaly', 'background': '(1.0)'}), '(mesh_obj, anomaly=anomaly, background=1.0)\n', (1020, 1063), True, 'import pyeit.mesh as mesh\n'), ((1164, 1229), 'pyeit.eit.protocol.create', 'protocol.create', (['n_el'], {'dist_exc': '(7)', 'step_meas': '(1)', 'parser_meas': '"""std"""'}), "(n_el, dist_exc=7, step_meas=1, parser_meas='std')\n", (1179, 1229), True, 'import pyeit.eit.protocol as protocol\n'), ((1367, 1384), 'pyeit.eit.fem.Forward', 'Forward', (['mesh_new'], {}), '(mesh_new)\n', (1374, 1384), False, 'from pyeit.eit.fem import Forward\n'), ((1412, 1422), 'numpy.real', 'np.real', (['f'], {}), '(f)\n', (1419, 1422), True, 'import numpy as np\n'), ((1446, 1458), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1456, 1458), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2241), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2239, 2241), True, 'import matplotlib.pyplot as plt\n'), ((670, 706), 'pyeit.mesh.create', 'mesh.create', (['n_el'], {'h0': '(0.1)', 'fd': 'thorax'}), '(n_el, h0=0.1, fd=thorax)\n', (681, 706), True, 'import pyeit.mesh as mesh\n'), ((728, 753), 'pyeit.mesh.create', 'mesh.create', (['n_el'], {'h0': '(0.1)'}), '(n_el, h0=0.1)\n', (739, 753), True, 'import pyeit.mesh as mesh\n'), ((1816, 1829), 'numpy.real', 'np.real', (['perm'], {}), '(perm)\n', (1823, 1829), True, 'import numpy as np\n')]
|
from .. components import OutputPitchWidget
from .. import Defaults
import json
from kivy.properties import NumericProperty
from kivy.properties import ListProperty
from kivy.properties import ObjectProperty
from kivy.properties import BooleanProperty
from kivy.properties import StringProperty
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.screenmanager import Screen
from kivy.core.audio import SoundLoader
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.app import App
import copy
import numpy as np
class TutorialKey(Button):
alpha = NumericProperty(0.0)
pitch_class = NumericProperty()
def __init__(self, inPC, x, y, text, *kwargs):
"""Initializes the key
"""
Logger.info("Init TutorialKey")
super(TutorialKey,self).__init__(x=x,y=y,text=text)
self.pitch_class = inPC
def set_alpha(self, value):
self.alpha = value
class TutorialScreen(Screen):
currentStepNumber = NumericProperty(0)
outputLabels = ListProperty()
keys = ListProperty()
winHeight = NumericProperty()
winWidth = NumericProperty()
outputs = ListProperty()
presses = ListProperty()
measured = BooleanProperty()
buttonPositions = ListProperty()
keyWidgets = ListProperty()
text = Label()
    def __init__(self, **kwargs):
        # super(Screen, ...) would skip Screen's own __init__ in the MRO;
        # initialize from this class and forward kwargs.
        super(TutorialScreen, self).__init__(**kwargs)
Logger.info("Init Tutorial Screen")
self.text.pos_hint = {'center_x': 0.5, 'center_y': 0.7}
self.add_widget(self.text)
self.n = 5
self.currentStepNumber = 0
self.SOUND_PATH = 'src/assets/sounds/piano/'
self.SOUND_EXT = '.wav'
#self.pitches_from_num = Defaults.PitchesSharps().numbers
##CHANGED to pentatonic mode
self.pitches_from_num = Defaults.PitchesSharps().numbers5
self.pitchClasses = Defaults.PitchesSharps().pitchClasses['C_pentatonic']
self.winHeight = Window.size[1]
self.winWidth = Window.size[0]
self.currentTutorial = "tutorial1"
with open('src/screens/json/Tutorial1.json') as file:
self.data = json.load(file)
Logger.info('Tutorial Screen: Loaded tutorial json')
def on_enter(self):
Logger.info("Entered Tutorial Screen")
self.currentStepNumber = 0
self.keyslist = [None for i in range(self.n)]
for i in range(self.n):
self.keyslist[i] = SoundLoader.load(self.SOUND_PATH + str(self.pitches_from_num[str(i)]) + self.SOUND_EXT)
Logger.info('SoundLoader: Loaded ' + self.SOUND_PATH + str(self.pitches_from_num[str(i)]) + self.SOUND_EXT)
self.keys = self.keyslist
self.buttonPositions = self.setButtonPositions()
self.keyWidgets = [TutorialKey(i, int(self.buttonPositions[0][i]), int(self.buttonPositions[1][i]), str(self.pitches_from_num[str(i)])) for i in range(self.n)]
for i in range(self.n):
self.ids['main_window'].add_widget(self.keyWidgets[i])
Logger.info('TutorialApp: Tutorial App Initialized with ' + str(self.n) + ' keys')
self.start_tutorial()
def start_tutorial(self):
Logger.info('TutorialApp: Tutorial started')
self.currentStep = self.data[self.currentTutorial]["steps"][self.currentStepNumber]
self.text.text = self.currentStep["text"]
self.set_action_trigger(self.currentStep["trigger"])
for (i,j) in enumerate(self.keyWidgets):
j.set_alpha(self.currentStep["alpha"][i])
j.canvas.ask_update()
def set_action_trigger(self, trigger_key, *kwargs):
Logger.info('Setting new action trigger to ' + str(trigger_key))
self.keyWidgets[trigger_key].bind(on_press=self.next_step)
def unbindAction(self, trigger_key, *kwargs):
self.keyWidgets[trigger_key].funbind('on_press', self.next_step)
def end_tutorial(self, *kwargs):
App.screenManager.current = 'welcomeScreen'
def playSinglePitch(self):
if self.keys[self.outputLabels[1]].state == 'play':
self.keys[self.outputLabels[1]].stop()
self.keys[self.outputLabels[1]].play()
def addOutputWidget(self,output,key):
outputPitchWidget = OutputPitchWidget.OutputPitchWidget(self.pitchClasses[output],self.pitchClasses[key])
self.ids['output_window'].add_widget(outputPitchWidget)
outputPitchWidget.start()
def next_step(self, *kwargs):
#Advances tutorial to the next step
self.currentStep["completed"] = True
if self.currentStepNumber < len(self.data[self.currentTutorial]["steps"]) - 1:
Logger.info('Tutorial Screen: On step ' + str(self.currentStepNumber))
self.outputLabels = [self.currentStep["trigger"], self.currentStep["heard"]]
self.addOutputWidget(self.currentStep["trigger"], self.currentStep["heard"])
self.playSinglePitch()
self.unbindAction(self.currentStep["trigger"])
self.currentStepNumber += 1
self.currentStep = self.data[self.currentTutorial]["steps"][self.currentStepNumber]
self.text.text = self.currentStep["text"]
for (i,j) in enumerate(self.keyWidgets):
j.set_alpha(self.currentStep["alpha"][i])
j.canvas.ask_update()
Logger.info(str(j.alpha))
self.set_action_trigger(self.currentStep["trigger"])
Logger.info(self.currentStep["text"])
elif self.currentStepNumber == len(self.data[self.currentTutorial]["steps"]) - 1:
more_button = Button(on_press=self.walkthrough)
self.add_widget(more_button)
else:
Logger.info('Tutorial complete!')
    def walkthrough(self, *kwargs):
        # Bound as a Button on_press callback above, which passes the button
        # instance; accept it (file convention: *kwargs) to avoid a TypeError.
        pass
def setButtonPositions(self):
Logger.info("Set Button Positions, Tutorial Screen")
boundingFunction = lambda x: .5 - 1 / (self.winHeight * 4 * (x - 0.501) ** 2)
xoffset = .1 * self.winWidth
xstep = self.winWidth * .8 / self.n
x_rel_pos = np.array([.1 * self.winWidth + i * xstep for i in range(self.n)]) / self.winWidth
y_rel_pos = boundingFunction(x_rel_pos)
xpos = x_rel_pos * self.winWidth
ypos = y_rel_pos * self.winHeight
return [xpos, ypos]
|
[
"kivy.properties.ListProperty",
"json.load",
"kivy.properties.BooleanProperty",
"kivy.uix.button.Button",
"kivy.uix.label.Label",
"kivy.logger.Logger.info",
"kivy.properties.NumericProperty"
] |
[((735, 755), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0.0)'], {}), '(0.0)\n', (750, 755), False, 'from kivy.properties import NumericProperty\n'), ((774, 791), 'kivy.properties.NumericProperty', 'NumericProperty', ([], {}), '()\n', (789, 791), False, 'from kivy.properties import NumericProperty\n'), ((1139, 1157), 'kivy.properties.NumericProperty', 'NumericProperty', (['(0)'], {}), '(0)\n', (1154, 1157), False, 'from kivy.properties import NumericProperty\n'), ((1177, 1191), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1189, 1191), False, 'from kivy.properties import ListProperty\n'), ((1203, 1217), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1215, 1217), False, 'from kivy.properties import ListProperty\n'), ((1234, 1251), 'kivy.properties.NumericProperty', 'NumericProperty', ([], {}), '()\n', (1249, 1251), False, 'from kivy.properties import NumericProperty\n'), ((1267, 1284), 'kivy.properties.NumericProperty', 'NumericProperty', ([], {}), '()\n', (1282, 1284), False, 'from kivy.properties import NumericProperty\n'), ((1299, 1313), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1311, 1313), False, 'from kivy.properties import ListProperty\n'), ((1328, 1342), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1340, 1342), False, 'from kivy.properties import ListProperty\n'), ((1358, 1375), 'kivy.properties.BooleanProperty', 'BooleanProperty', ([], {}), '()\n', (1373, 1375), False, 'from kivy.properties import BooleanProperty\n'), ((1398, 1412), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1410, 1412), False, 'from kivy.properties import ListProperty\n'), ((1430, 1444), 'kivy.properties.ListProperty', 'ListProperty', ([], {}), '()\n', (1442, 1444), False, 'from kivy.properties import ListProperty\n'), ((1456, 1463), 'kivy.uix.label.Label', 'Label', ([], {}), '()\n', (1461, 1463), False, 'from kivy.uix.label import Label\n'), ((899, 930), 'kivy.logger.Logger.info', 'Logger.info', (['"""Init TutorialKey"""'], {}), "('Init TutorialKey')\n", (910, 930), False, 'from kivy.logger import Logger\n'), ((1548, 1583), 'kivy.logger.Logger.info', 'Logger.info', (['"""Init Tutorial Screen"""'], {}), "('Init Tutorial Screen')\n", (1559, 1583), False, 'from kivy.logger import Logger\n'), ((2305, 2357), 'kivy.logger.Logger.info', 'Logger.info', (['"""Tutorial Screen: Loaded tutorial json"""'], {}), "('Tutorial Screen: Loaded tutorial json')\n", (2316, 2357), False, 'from kivy.logger import Logger\n'), ((2391, 2429), 'kivy.logger.Logger.info', 'Logger.info', (['"""Entered Tutorial Screen"""'], {}), "('Entered Tutorial Screen')\n", (2402, 2429), False, 'from kivy.logger import Logger\n'), ((3308, 3352), 'kivy.logger.Logger.info', 'Logger.info', (['"""TutorialApp: Tutorial started"""'], {}), "('TutorialApp: Tutorial started')\n", (3319, 3352), False, 'from kivy.logger import Logger\n'), ((6221, 6273), 'kivy.logger.Logger.info', 'Logger.info', (['"""Set Button Positions, Tutorial Screen"""'], {}), "('Set Button Positions, Tutorial Screen')\n", (6232, 6273), False, 'from kivy.logger import Logger\n'), ((2281, 2296), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2290, 2296), False, 'import json\n'), ((5573, 5610), 'kivy.logger.Logger.info', 'Logger.info', (["self.currentStep['text']"], {}), "(self.currentStep['text'])\n", (5584, 5610), False, 'from kivy.logger import Logger\n'), ((5727, 5760), 'kivy.uix.button.Button', 'Button', ([], {'on_press': 'self.walkthrough'}), '(on_press=self.walkthrough)\n', (5733, 5760), False, 'from kivy.uix.button import Button\n'), ((5828, 5861), 'kivy.logger.Logger.info', 'Logger.info', (['"""Tutorial complete!"""'], {}), "('Tutorial complete!')\n", (5839, 5861), False, 'from kivy.logger import Logger\n')]
|
# coding: utf-8
# Raytheon BBN Technologies 2016
# Contributors: <NAME>
#
# This file contains the windows, view, and scene descriptions
from qtpy.QtGui import *
from qtpy.QtCore import *
from qtpy.QtSvg import *
from qtpy.QtWidgets import *
import glob
import time
import os
import os.path
import numpy as np
from .node import *
from .wire import *
from .param import *
from .graph import *
from .util import *
from .inspect import *
from .load import *
def strip_vendor_names(instr_name):
vns = ["Agilent", "Alazar", "Keysight", "Holzworth", "Yoko", "Yokogawa"]
for vn in vns:
instr_name = instr_name.replace(vn, "")
return instr_name
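# e.g. strip_vendor_names("AgilentN5183A") -> "N5183A" (hypothetical model name)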
def correct_resource_name(resource_name):
substs = {"USB::": "USB0::", }
for k, v in substs.items():
resource_name = resource_name.replace(k, v)
return resource_name
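# e.g. correct_resource_name("USB::INSTR") -> "USB0::INSTR"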
class NodeScene(QGraphicsScene):
"""docstring for NodeScene"""
def __init__(self, window=None):
super(NodeScene, self).__init__()
self.window = window
self.backdrop = QGraphicsRectItem()
self.backdrop.setRect(-10000,-10000,20000,20000)
self.backdrop.setZValue(-100)
self.qr = QRectF(-10000,-10000,20000,20000)
self.setSceneRect(self.qr)
self.setBackgroundBrush(QBrush(QColor(60,60,60)))
self.addItem(self.backdrop)
self.view = None
self.menu = QMenu()
self.sub_menus = {}
self.generate_menus()
self.menu.addSeparator()
clear_wires = QAction('Clear Wires', self)
clear_wires.triggered.connect(self.clear_wires)
self.menu.addAction(clear_wires)
self.last_click = self.backdrop.pos()
self.qt_settings = QSettings("BBN", "Quince")
self.undo_stack = QUndoStack(self)
self.update_screen()
def update_screen(self):
if hasattr(self.window, 'view'):
dpr = self.window.devicePixelRatio()
nodes = [i for i in self.items() if isinstance(i, Node)]
_ = [n.update_screen(dpr) for n in nodes]
def connectors_nearby(self, position, exclude=[]):
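        # Magnify input connectors within 30 px of `position` (capped at 1.5x)
        # and return the closest one, or None when nothing is in range.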
connectors = [i for i in self.items() if isinstance(i, Connector)
and i.connector_type == 'input'
and i not in exclude]
rs = {}
for i, conn in enumerate(connectors):
p = (position - conn.scenePos())
r = np.sqrt(p.x()*p.x() + p.y()*p.y())
if r < 30.0:
rs[conn] = r
scale = 1.0+3.0/(r+0.2)
if scale > 1.5:
scale = 1.5
conn.setRect(-5.0*scale, -5.0*scale, 10*scale, 10*scale)
else:
conn.setRect(-5.0, -5.0, 10, 10)
if len(rs) > 0:
return sorted(rs, key=rs.get)[0]
else:
return None
def mouseMoveEvent(self, event):
self.crowded_connectors_nearby(event.scenePos())
return super(NodeScene, self).mouseMoveEvent(event)
def crowded_connectors_nearby(self, position):
conns = [i for i in self.items() if isinstance(i, Connector)]
for conn in conns:
if conn.connector_type == 'input' and len(conn.wires_in) > 1:
p = (position - conn.scenePos())
r = np.sqrt(p.x()*p.x() + p.y()*p.y())
if r < 30.0:
conn.explode_wires()
else:
conn.implode_wires()
def clear_wires(self, only_clear_orphaned=False):
wires = [i for i in self.items() if isinstance(i, Wire)]
for wire in wires:
if only_clear_orphaned:
if wire.end_obj is None:
self.removeItem(wire)
elif wire.start_obj is None:
self.removeItem(wire)
else:
self.removeItem(wire)
def open_add_menu(self, location):
self.menu.exec_(location)
def contextMenuEvent(self, event):
self.last_click = event.scenePos()
self.open_add_menu(event.screenPos())
def generate_menus(self):
# Parse Auspex directly
parse_quince_modules(self)
def load_yaml(self):
load_from_yaml(self)
def reload_yaml(self):
# Store node settings before reloading
self.save_node_positions_to_settings()
# Don't retain any undo information, since it is outdated
self.undo_stack.clear()
# Reconstruct the scene
nodes = [i for i in self.items() if isinstance(i, Node)]
wires = [i for i in self.items() if isinstance(i, Wire)]
for o in nodes+wires:
self.removeItem(o)
self.load_yaml()
def save_node_positions_to_settings(self):
for n in [i for i in self.items() if isinstance(i, Node)]:
self.qt_settings.setValue("node_positions/" + n.label.toPlainText() + "_pos_x", n.pos().x())
self.qt_settings.setValue("node_positions/" + n.label.toPlainText() + "_pos_y", n.pos().y())
self.qt_settings.sync()
def save_for_yaml(self):
self.save_node_positions_to_settings()
nodes = [i for i in self.items() if isinstance(i, Node)]
node_names = [n.label.toPlainText() for n in nodes]
if not hasattr(self, 'settings'):
self.window.set_status("Not launched with yaml config. Cannot save to yaml.")
return
# Start from the original config file in order that we can save comments
# and other human-friendly conveniences.
for node, node_name in zip(nodes, node_names):
if node.is_instrument:
# Create a new entry if necessary
if node_name not in self.settings["instruments"].keys():
self.settings["instruments"][node_name] = {}
for k, v in node.dict_repr().items():
self.settings["instruments"][node_name][k] = v
else:
# Create a new entry if necessary
if node_name not in self.settings["filters"].keys():
self.settings["filters"][node_name] = {}
for k, v in node.dict_repr().items():
self.settings["filters"][node_name][k] = v
# Prune stale (deleted) filters from the config, but
# leave instruments other than digitizers alone
for section in ("filters", "instruments"):
for name in list(self.settings[section].keys()):
if name not in node_names:
if section=="instruments" and "rx_channels" not in self.settings[section][name].keys():
continue
self.settings[section].pop(name)
self.window.ignore_file_updates = True
self.window.ignore_timer.start()
yaml_dump(self.settings, self.window.meas_file)
def create_node_by_name(self, name):
create_node_func_name = "create_"+("".join(name.split()))
if hasattr(self, create_node_func_name):
new_node = getattr(self, create_node_func_name)()
return new_node
else:
self.window.set_status("Could not create a node of the requested type.")
return None
def removeItem(self, item):
super(NodeScene, self).removeItem(item)
def addItem(self, item):
super(NodeScene, self).addItem(item)
class NodeView(QGraphicsView):
"""docstring for NodeView"""
def __init__(self, scene):
super(NodeView, self).__init__(scene)
self.scene = scene
self.centerOn(self.scene.qr.center())
self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)
self.setRenderHint(QPainter.Antialiasing)
self.current_scale = 1.0
def wheelEvent(self, event):
change = 0.001*event.angleDelta().y()/2.0
self.scale(1+change, 1+change)
self.current_scale *= 1+change
def keyPressEvent(self, event):
if not self.scene.focusItem() and event.key() in [Qt.Key_Delete, Qt.Key_Backspace]:
selected_nodes = [i for i in self.scene.items() if isinstance(i, Node) and i.isSelected()]
self.scene.undo_stack.push(CommandDeleteNodes(selected_nodes, self.scene))
else:
return super(NodeView, self).keyPressEvent(event)
def mousePressEvent(self, event):
if (event.button() == Qt.MidButton) or (event.button() == Qt.LeftButton and event.modifiers() == Qt.ShiftModifier):
self.setDragMode(QGraphicsView.ScrollHandDrag)
fake = QMouseEvent(event.type(), event.pos(), Qt.LeftButton, Qt.LeftButton, event.modifiers())
return super(NodeView, self).mousePressEvent(fake)
elif event.button() == Qt.LeftButton:
self.setDragMode(QGraphicsView.RubberBandDrag)
return super(NodeView, self).mousePressEvent(event)
def mouseReleaseEvent(self, event):
if (event.button() == Qt.MidButton) or (event.button() == Qt.LeftButton and event.modifiers() == Qt.ShiftModifier):
self.setDragMode(QGraphicsView.NoDrag)
fake = QMouseEvent(event.type(), event.pos(), Qt.LeftButton, Qt.LeftButton, event.modifiers())
return super(NodeView, self).mouseReleaseEvent(fake)
elif event.button() == Qt.LeftButton:
a = super(NodeView, self).mouseReleaseEvent(event)
self.setDragMode(QGraphicsView.NoDrag)
return a
return super(NodeView, self).mouseReleaseEvent(event)
class NodeWindow(QMainWindow):
"""docstring for NodeWindow"""
def __init__(self, parent=None):
super(NodeWindow, self).__init__(parent=parent)
self.setWindowTitle("Nodes")
self.setGeometry(50,50,1300,600)
# Setup graphics
self.scene = NodeScene(window=self)
self.view = NodeView(self.scene)
# Setup menu
self.status_bar = self.statusBar()
exitAction = QAction('&Exit', self)
exitAction.setShortcut('Ctrl+Q')
exitAction.setStatusTip('Exit application')
exitAction.triggered.connect(QApplication.instance().quit)
saveAction = QAction('&Save', self)
saveAction.setShortcut('Ctrl+S')
saveAction.setStatusTip('Save')
saveAction.triggered.connect(self.save)
selectAllAction = QAction('&Select All', self)
selectAllAction.setShortcut('Ctrl+A')
selectAllAction.setStatusTip('Select All')
selectAllAction.triggered.connect(self.select_all)
selectAllConnectedAction = QAction('&Select All Connected', self)
selectAllConnectedAction.setShortcut('Shift+Ctrl+A')
selectAllConnectedAction.setStatusTip('Select All Connected')
selectAllConnectedAction.triggered.connect(self.select_all_connected)
constructExperimentAction = QAction('&Construct Experiment', self)
constructExperimentAction.setShortcut('Shift+Ctrl+E')
constructExperimentAction.setStatusTip('Construct Experiment')
constructExperimentAction.triggered.connect(self.construct_experiment)
collapseAllAction = QAction('&Collapse All', self)
collapseAllAction.setShortcut('Ctrl+K')
collapseAllAction.setStatusTip('Collapse All')
collapseAllAction.triggered.connect(self.collapse_all)
expandAllAction = QAction('&Expand All', self)
expandAllAction.setShortcut('Shift+Ctrl+K')
expandAllAction.setStatusTip('Expand All')
expandAllAction.triggered.connect(self.expand_all)
toggleEnabledAction = QAction('&Toggle Descendants', self)
toggleEnabledAction.setShortcut('Ctrl+E')
toggleEnabledAction.setStatusTip('Toggle the Enabled/Disabled status of all descendant nodes.')
toggleEnabledAction.triggered.connect(self.toggle_enable_descendants)
autoLayoutAction = QAction('&Auto Layout', self)
autoLayoutAction.setStatusTip('Auto-arrange the nodes.')
autoLayoutAction.triggered.connect(self.auto_layout)
duplicateAction = QAction('&Duplicate', self)
duplicateAction.setShortcut('Ctrl+D')
duplicateAction.setStatusTip('Duplicate')
duplicateAction.triggered.connect(self.duplicate)
undoAction = QAction('&Undo', self)
undoAction.setShortcut('Ctrl+Z')
undoAction.setStatusTip('Undo')
undoAction.triggered.connect(self.undo)
redoAction = QAction('&Redo', self)
redoAction.setShortcut('Shift+Ctrl+Z')
redoAction.setStatusTip('Redo')
redoAction.triggered.connect(self.redo)
debugAction = QAction('&Debug', self)
debugAction.setShortcut('Shift+Ctrl+Alt+D')
debugAction.setStatusTip('Debug!')
debugAction.triggered.connect(self.debug)
fileMenu = self.menuBar().addMenu('&File')
editMenu = self.menuBar().addMenu('&Edit')
helpMenu = self.menuBar().addMenu('&Help')
# fileMenu.addAction(openAction)
fileMenu.addAction(saveAction)
# fileMenu.addAction(exportAction)
fileMenu.addAction(exitAction)
editMenu.addAction(selectAllAction)
editMenu.addAction(selectAllConnectedAction)
editMenu.addAction(collapseAllAction)
editMenu.addAction(expandAllAction)
editMenu.addAction(constructExperimentAction)
editMenu.addAction(toggleEnabledAction)
editMenu.addAction(duplicateAction)
editMenu.addSeparator()
editMenu.addAction(autoLayoutAction)
editMenu.addSeparator()
editMenu.addAction(undoAction)
editMenu.addAction(redoAction)
helpMenu.addAction(debugAction)
# Setup layout
self.hbox = QHBoxLayout()
self.hbox.addWidget(self.view)
self.hbox.setContentsMargins(0,0,0,0)
self.main_widget = QWidget()
self.main_widget.setLayout(self.hbox)
self.setCentralWidget(self.main_widget)
# Establish automatic QSettings update timer that
# writes the node positions every 3s
self.settings_timer = QTimer(self)
self.settings_timer.setInterval(3000)
self.settings_timer.timeout.connect(self.scene.save_node_positions_to_settings)
self.settings_timer.start()
# Create the pipeline start node if possible
if hasattr(self.scene, 'create_PipelineStart'):
ps = self.scene.create_PipelineStart()
ps.setPos(-300,0)
svg_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "assets/quince.svg")
svgrend = QSvgRenderer(svg_path)
self.svgitem = QGraphicsSvgItem()
self.svgitem.setSharedRenderer(svgrend)
self.scene.addItem(self.svgitem)
self.svgitem.setScale(0.5)
self.svgitem.setPos(self.svgitem.pos().x()-self.svgitem.boundingRect().width()/4,
self.svgitem.pos().y()-self.svgitem.boundingRect().height()/4)
self.svg_animation = QPropertyAnimation(self.svgitem, bytes("opacity".encode("ascii")))
self.svg_animation.setDuration(2000)
self.svg_animation.setStartValue(1.0)
self.svg_animation.setEndValue(0.0)
self.svg_animation.start()
# Try to check on screen changes...
def moveEvent(self, event):
self.scene.update_screen()
return super(NodeWindow, self).moveEvent(event)
def set_status(self, text, time=2000):
self.status_bar.showMessage(text, time)
def debug(self):
import ipdb; ipdb.set_trace()
def load_yaml(self, meas_file):
self.set_status("Loading YAML configuration files...")
self.meas_file = meas_file
# Perform a preliminary loading to find all of the connected files...
_, self.filenames, self.dirname = yaml_load(self.meas_file)
# Delay timer to avoid multiple firings
self.update_timer = QTimer(self)
self.update_timer.setSingleShot(True)
self.update_timer.setInterval(100)
self.update_timer.timeout.connect(self.update_yaml)
# Delay timer to avoid multiple firings
self.ignore_file_updates = False
self.ignore_timer = QTimer(self)
self.ignore_timer.setSingleShot(True)
self.ignore_timer.setInterval(1500)
self.ignore_timer.timeout.connect(self.stop_ignoring_updates)
# Establish File Watchers for these config files:
self.watcher = QFileSystemWatcher()
# Note many editors make copies and delete files. This confuses the
        # file watchers in POSIX environments especially, so we just watch the
# config directory for changes in all non-Windows cases.
if (os.name == 'nt'):
for f in self.filenames:
self.watcher.addPath(f)
self.watcher.fileChanged.connect(self.yaml_needs_update)
else:
self.watcher.addPath(self.dirname)
self.watcher.directoryChanged.connect(self.yaml_needs_update)
self.update_timer.start()
def stop_ignoring_updates(self):
self.ignore_file_updates = False
def yaml_needs_update(self, path):
if not self.update_timer.isActive() and not self.ignore_file_updates:
self.update_timer.start()
def update_yaml(self):
self.set_status("Files changed on disk, reloading.")
self.scene.reload_yaml()
def save(self):
self.scene.save_for_yaml()
def undo(self):
self.scene.undo_stack.undo()
def redo(self):
self.scene.undo_stack.redo()
def construct_experiment(self):
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
wires = [i for i in self.scene.items() if isinstance(i, Wire)]
create_experiment_graph(nodes, wires)
def select_all(self):
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
for n in nodes:
n.setSelected(True)
def select_all_connected(self):
selected_nodes = [i.label.toPlainText() for i in self.scene.items() if isinstance(i, Node) and i.isSelected()]
wires = [i for i in self.scene.items() if isinstance(i, Wire)]
nodes_by_label = {i.label.toPlainText(): i for i in self.scene.items() if isinstance(i, Node)}
graph = generate_graph(wires)
items = []
for sn in selected_nodes:
sub_graph_items = items_on_subgraph(graph, sn)
items.extend(sub_graph_items)
for i in items:
nodes_by_label[i].setSelected(True)
def toggle_enable_descendants(self):
selected_nodes = [i.label.toPlainText() for i in self.scene.items() if isinstance(i, Node) and i.isSelected()]
if len(selected_nodes) == 0:
self.set_status("No nodes selected.")
return
wires = [i for i in self.scene.items() if isinstance(i, Wire)]
nodes_by_label = {i.label.toPlainText(): i for i in self.scene.items() if isinstance(i, Node)}
graph = generate_graph(wires, dag=True)
items = []
items.extend(selected_nodes)
for sn in selected_nodes:
if sn in graph.nodes():
descs = descendants(graph, sn)
items.extend(descs)
new_status = not nodes_by_label[selected_nodes[0]].enabled
for i in items:
nodes_by_label[i].enabled = new_status
def auto_layout(self):
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
wires = [i for i in self.scene.items() if isinstance(i, Wire)]
nodes_by_label = {i.label.toPlainText(): i for i in self.scene.items() if isinstance(i, Node)}
graph = generate_graph(wires, dag=True)
input_nodes = graph_input_nodes(graph)
for input_node in input_nodes:
pos = hierarchy_pos(graph, input_node)
for l, p in pos.items():
nodes_by_label[l].setPos(-p[1], p[0])
def collapse_all(self):
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
for n in nodes:
n.change_collapsed_state(True)
def expand_all(self):
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
for n in nodes:
n.change_collapsed_state(False)
def duplicate(self):
selected_nodes = [i for i in self.scene.items() if isinstance(i, Node) and i.isSelected()]
self.scene.undo_stack.push(CommandDuplicateNodes(selected_nodes, self.scene))
def cleanup(self):
# Have to manually close proxy widgets
nodes = [i for i in self.scene.items() if isinstance(i, Node)]
for n in nodes:
for k, v in n.parameters.items():
pass
|
[
"os.path.dirname",
"ipdb.set_trace"
] |
[((15543, 15559), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (15557, 15559), False, 'import ipdb\n'), ((14534, 14559), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (14549, 14559), False, 'import os\n')]
|
import pytest
from dlms_cosem import enumerations
from dlms_cosem.protocol.acse import ReleaseRequest
class TestDecodeRLRQ:
def test_simple(self):
data = bytes.fromhex("6203800100") # Normal no user-information
rlrq = ReleaseRequest.from_bytes(data)
assert rlrq.reason == enumerations.ReleaseRequestReason.NORMAL
assert rlrq.user_information is None
assert data == rlrq.to_bytes()
def test_with_ciphered_initiate_request(self):
data = bytes.fromhex(
"6239800100be34043221303001234567801302FF8A7874133D414CED25B42534D28DB0047720606B175BD52211BE6841DB204D39EE6FDB8E356855"
)
        # No support for ciphered APDUs yet
rlrq = ReleaseRequest.from_bytes(data)
assert rlrq.reason == enumerations.ReleaseRequestReason.NORMAL
|
[
"dlms_cosem.protocol.acse.ReleaseRequest.from_bytes"
] |
[((242, 273), 'dlms_cosem.protocol.acse.ReleaseRequest.from_bytes', 'ReleaseRequest.from_bytes', (['data'], {}), '(data)\n', (267, 273), False, 'from dlms_cosem.protocol.acse import ReleaseRequest\n'), ((715, 746), 'dlms_cosem.protocol.acse.ReleaseRequest.from_bytes', 'ReleaseRequest.from_bytes', (['data'], {}), '(data)\n', (740, 746), False, 'from dlms_cosem.protocol.acse import ReleaseRequest\n')]
|
import os
import time
from dagster_graphql.client.query import LAUNCH_PIPELINE_EXECUTION_MUTATION
from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector
from dagster import execute_pipeline
from dagster.utils import safe_tempfile_path
from .graphql_context_test_suite import GraphQLContextVariant, make_graphql_context_test_suite
from .setup import main_repo_location_name
RUN_CANCELLATION_QUERY = """
mutation($runId: String!) {
terminatePipelineExecution(runId: $runId){
__typename
... on TerminatePipelineExecutionSuccess{
run {
runId
}
}
... on TerminatePipelineExecutionFailure {
run {
runId
}
message
}
... on PipelineRunNotFoundError {
runId
}
}
}
"""
class TestRunVariantTermination(
make_graphql_context_test_suite(
context_variants=[GraphQLContextVariant.sqlite_with_default_run_launcher_in_process_env()]
)
):
def test_basic_termination(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "infinite_loop_pipeline")
with safe_tempfile_path() as path:
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"solids": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert (
result.data["launchPipelineExecution"]["__typename"] == "LaunchPipelineRunSuccess"
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
assert run_id
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert (
result.data["terminatePipelineExecution"]["__typename"]
== "TerminatePipelineExecutionSuccess"
)
def test_run_not_found(self, graphql_context):
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": "nope"}
)
assert result.data["terminatePipelineExecution"]["__typename"] == "PipelineRunNotFoundError"
def test_terminate_failed(self, graphql_context):
selector = infer_pipeline_selector(graphql_context, "infinite_loop_pipeline")
with safe_tempfile_path() as path:
old_terminate = graphql_context.instance.run_launcher.terminate
graphql_context.instance.run_launcher.terminate = lambda _run_id: False
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PIPELINE_EXECUTION_MUTATION,
variables={
"executionParams": {
"selector": selector,
"mode": "default",
"runConfigData": {"solids": {"loop": {"config": {"file": path}}}},
}
},
)
assert not result.errors
assert result.data
# just test existence
assert (
result.data["launchPipelineExecution"]["__typename"] == "LaunchPipelineRunSuccess"
)
run_id = result.data["launchPipelineExecution"]["run"]["runId"]
# ensure the execution has happened
while not os.path.exists(path):
time.sleep(0.1)
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert (
result.data["terminatePipelineExecution"]["__typename"]
== "TerminatePipelineExecutionFailure"
)
assert result.data["terminatePipelineExecution"]["message"].startswith(
"Unable to terminate run"
)
graphql_context.instance.run_launcher.terminate = old_terminate
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": run_id}
)
assert (
result.data["terminatePipelineExecution"]["__typename"]
== "TerminatePipelineExecutionSuccess"
)
def test_run_finished(self, graphql_context):
instance = graphql_context.instance
pipeline = graphql_context.get_repository_location(
main_repo_location_name()
).get_reconstructable_pipeline("noop_pipeline")
pipeline_result = execute_pipeline(pipeline, instance=instance)
assert pipeline_result.success
assert pipeline_result.run_id
time.sleep(0.05) # guarantee execution finish
result = execute_dagster_graphql(
graphql_context, RUN_CANCELLATION_QUERY, variables={"runId": pipeline_result.run_id}
)
assert (
result.data["terminatePipelineExecution"]["__typename"]
== "TerminatePipelineExecutionFailure"
)
assert (
"is not in a started state. Current status is SUCCESS"
in result.data["terminatePipelineExecution"]["message"]
)
|
[
"dagster_graphql.test.utils.infer_pipeline_selector",
"os.path.exists",
"dagster.execute_pipeline",
"time.sleep",
"dagster_graphql.test.utils.execute_dagster_graphql",
"dagster.utils.safe_tempfile_path"
] |
[((1034, 1100), 'dagster_graphql.test.utils.infer_pipeline_selector', 'infer_pipeline_selector', (['graphql_context', '"""infinite_loop_pipeline"""'], {}), "(graphql_context, 'infinite_loop_pipeline')\n", (1057, 1100), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((2423, 2521), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'RUN_CANCELLATION_QUERY'], {'variables': "{'runId': 'nope'}"}), "(graphql_context, RUN_CANCELLATION_QUERY, variables=\n {'runId': 'nope'})\n", (2446, 2521), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((2714, 2780), 'dagster_graphql.test.utils.infer_pipeline_selector', 'infer_pipeline_selector', (['graphql_context', '"""infinite_loop_pipeline"""'], {}), "(graphql_context, 'infinite_loop_pipeline')\n", (2737, 2780), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((4967, 5012), 'dagster.execute_pipeline', 'execute_pipeline', (['pipeline'], {'instance': 'instance'}), '(pipeline, instance=instance)\n', (4983, 5012), False, 'from dagster import execute_pipeline\n'), ((5099, 5115), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (5109, 5115), False, 'import time\n'), ((5164, 5278), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'RUN_CANCELLATION_QUERY'], {'variables': "{'runId': pipeline_result.run_id}"}), "(graphql_context, RUN_CANCELLATION_QUERY, variables=\n {'runId': pipeline_result.run_id})\n", (5187, 5278), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((1114, 1134), 'dagster.utils.safe_tempfile_path', 'safe_tempfile_path', ([], {}), '()\n', (1132, 1134), False, 'from dagster.utils import safe_tempfile_path\n'), ((1165, 1390), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'LAUNCH_PIPELINE_EXECUTION_MUTATION'], {'variables': "{'executionParams': {'selector': selector, 'mode': 'default', 'runConfigData': {'solids': {'loop': {'config': {'file': path}}}}}}"}), "(graphql_context, LAUNCH_PIPELINE_EXECUTION_MUTATION,\n variables={'executionParams': {'selector': selector, 'mode': 'default',\n 'runConfigData': {'solids': {'loop': {'config': {'file': path}}}}}})\n", (1188, 1390), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((2067, 2165), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'RUN_CANCELLATION_QUERY'], {'variables': "{'runId': run_id}"}), "(graphql_context, RUN_CANCELLATION_QUERY, variables=\n {'runId': run_id})\n", (2090, 2165), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((2794, 2814), 'dagster.utils.safe_tempfile_path', 'safe_tempfile_path', ([], {}), '()\n', (2812, 2814), False, 'from dagster.utils import safe_tempfile_path\n'), ((3005, 3230), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'LAUNCH_PIPELINE_EXECUTION_MUTATION'], {'variables': "{'executionParams': {'selector': selector, 'mode': 'default', 'runConfigData': {'solids': {'loop': {'config': {'file': path}}}}}}"}), "(graphql_context, LAUNCH_PIPELINE_EXECUTION_MUTATION,\n variables={'executionParams': {'selector': selector, 'mode': 'default',\n 'runConfigData': {'solids': {'loop': {'config': {'file': path}}}}}})\n", (3028, 3230), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((3879, 3977), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'RUN_CANCELLATION_QUERY'], {'variables': "{'runId': run_id}"}), "(graphql_context, RUN_CANCELLATION_QUERY, variables=\n {'runId': run_id})\n", (3902, 3977), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((4404, 4502), 'dagster_graphql.test.utils.execute_dagster_graphql', 'execute_dagster_graphql', (['graphql_context', 'RUN_CANCELLATION_QUERY'], {'variables': "{'runId': run_id}"}), "(graphql_context, RUN_CANCELLATION_QUERY, variables=\n {'runId': run_id})\n", (4427, 4502), False, 'from dagster_graphql.test.utils import execute_dagster_graphql, infer_pipeline_selector\n'), ((1991, 2011), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2005, 2011), False, 'import os\n'), ((2029, 2044), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2039, 2044), False, 'import time\n'), ((3803, 3823), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3817, 3823), False, 'import os\n'), ((3841, 3856), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (3851, 3856), False, 'import time\n')]
|
import torch
from .ClauseEnhancer import ClauseEnhancer
class KnowledgeEnhancer(torch.nn.Module):
def __init__(self, predicates: [str], clauses: [str], initial_clause_weight=0.5, save_training_data=False, device=0):
"""Initialize the knowledge base.
:param predicates: a list of predicates names
:param clauses: a list of constraints. Each constraint is a string on the form:
clause_weight:clause
The clause_weight should be either a real number (in such a case this value is fixed) or an underscore
        (in this case the weight is a learnable parameter updated during training).
The clause must be represented as a list of literals separated by commas (that represent disjunctions).
Negation must specified by adding the letter 'n' before the predicate name.
An example:
_:nDog,Animal
        :param initial_clause_weight: the initial value assigned to the clause weight. Used if the clause weight is learned.
"""
super().__init__()
self.clause_enhancers = []
self.save_training_data = save_training_data
for index, clause in enumerate(clauses):
clause_weight = initial_clause_weight[index] if isinstance(initial_clause_weight, list) else initial_clause_weight
enhancer = ClauseEnhancer(
predicates, clause[:-1], float(clause_weight), device=device)
self.clause_enhancers.append(enhancer)
self.add_module(f'clause-{index}', enhancer)
def forward(self, ground_atoms: torch.Tensor) -> (torch.Tensor, [torch.Tensor, torch.Tensor]):
"""Improve the satisfaction level of a set of clauses.
:param ground_atoms: the tensor containing the pre-activation values of the ground atoms
:return: final delta values"""
# scatter_deltas_list will be the list of deltas for each clause
# e.g. scatter_deltas_list[0] are the deltas relative to the first clause.
scatter_deltas_list: [torch.Tensor] = []
light_deltas_list = []
weights = []
        # TODO: parallelize over clauses
for enhancer in self.clause_enhancers:
scattered_delta, delta = enhancer(ground_atoms)
scatter_deltas_list.append(scattered_delta)
if self.save_training_data:
light_deltas_list.append(delta)
weights.append(enhancer.clause_weight.item())
deltas_data = [light_deltas_list, weights]
# The sum can be refactored into the for loop above.
return torch.stack(scatter_deltas_list).sum(dim=0), deltas_data # ORIGINAL
# return torch.stack(scatter_deltas_list).sum(dim=0), scatter_deltas_list
|
[
"torch.stack"
] |
[((2565, 2597), 'torch.stack', 'torch.stack', (['scatter_deltas_list'], {}), '(scatter_deltas_list)\n', (2576, 2597), False, 'import torch\n')]
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from elasticsearch import Elasticsearch
from ..test_cases import DummyTransportTestCase
class TestClient(DummyTransportTestCase):
def test_request_timeout_is_passed_through_unescaped(self):
self.client.ping(request_timeout=0.1)
calls = self.assert_url_called("HEAD", "/")
assert [({"request_timeout": 0.1}, {}, None)] == calls
def test_params_is_copied_when(self):
rt = object()
params = dict(request_timeout=rt)
self.client.ping(params=params)
self.client.ping(params=params)
calls = self.assert_url_called("HEAD", "/", 2)
assert [
({"request_timeout": rt}, {}, None),
({"request_timeout": rt}, {}, None),
] == calls
assert not (calls[0][0] is calls[1][0])
def test_headers_is_copied_when(self):
hv = "value"
headers = dict(Authentication=hv)
self.client.ping(headers=headers)
self.client.ping(headers=headers)
calls = self.assert_url_called("HEAD", "/", 2)
assert [
({}, {"authentication": hv}, None),
({}, {"authentication": hv}, None),
] == calls
assert not (calls[0][0] is calls[1][0])
def test_from_in_search(self):
self.client.search(index="i", from_=10)
calls = self.assert_url_called("POST", "/i/_search")
assert [({"from": "10"}, {}, None)] == calls
def test_repr_contains_hosts(self):
assert "<Elasticsearch([{}])>" == repr(self.client)
def test_repr_subclass(self):
class OtherElasticsearch(Elasticsearch):
pass
assert "<OtherElasticsearch([{}])>" == repr(OtherElasticsearch())
def test_repr_contains_hosts_passed_in(self):
assert "es.org" in repr(Elasticsearch(["es.org:123"]))
def test_repr_truncates_host_to_5(self):
hosts = [{"host": "es" + str(i)} for i in range(10)]
es = Elasticsearch(hosts)
assert "es5" not in repr(es)
assert "..." in repr(es)
def test_index_uses_post_if_id_is_empty(self):
self.client.index(index="my-index", id="", body={})
self.assert_url_called("POST", "/my-index/_doc")
def test_index_uses_put_if_id_is_not_empty(self):
self.client.index(index="my-index", id=0, body={})
self.assert_url_called("PUT", "/my-index/_doc/0")
|
[
"elasticsearch.Elasticsearch"
] |
[((2746, 2766), 'elasticsearch.Elasticsearch', 'Elasticsearch', (['hosts'], {}), '(hosts)\n', (2759, 2766), False, 'from elasticsearch import Elasticsearch\n'), ((2595, 2624), 'elasticsearch.Elasticsearch', 'Elasticsearch', (["['es.org:123']"], {}), "(['es.org:123'])\n", (2608, 2624), False, 'from elasticsearch import Elasticsearch\n')]
|
from collections import defaultdict
import pickle
from tqdm import tqdm
class ReverseIndex:
def __init__(self,docs, preprocessing):
self.lookup = defaultdict(set)
self.preprocess = preprocessing
if docs is not None:
for title,words in tqdm(docs):
self.add(title,self.preprocess(words))
def add(self,title,words):
for word in words:
self.lookup[word].add(title)
def docs(self,phrase):
ret = []
for word in self.preprocess(phrase):
ret.extend(self.lookup[word])
return ret
def save(self,file):
with open(file,"wb+") as f:
pickle.dump(self.lookup,f)
def load(self,file):
with open(file,"rb") as f:
self.lookup = pickle.load(f)
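# Example usage (hypothetical data): build an index over (title, text) pairs
# and query it:
#   idx = ReverseIndex([("doc1", "alpha beta")], preprocessing=str.split)
#   idx.docs("alpha")  # -> ["doc1"]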
|
[
"collections.defaultdict",
"tqdm.tqdm",
"pickle.load",
"pickle.dump"
] |
[((161, 177), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (172, 177), False, 'from collections import defaultdict\n'), ((279, 289), 'tqdm.tqdm', 'tqdm', (['docs'], {}), '(docs)\n', (283, 289), False, 'from tqdm import tqdm\n'), ((671, 698), 'pickle.dump', 'pickle.dump', (['self.lookup', 'f'], {}), '(self.lookup, f)\n', (682, 698), False, 'import pickle\n'), ((785, 799), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (796, 799), False, 'import pickle\n')]
|
import numpy as np
from scipy.spatial.distance import cdist
# reference vector generation
def das_dennis(n_part, n_obj):
if n_part == 0:
return np.full((1, n_obj), 1 / n_obj)
else:
ref_dirs = []
ref_dir = np.full(n_obj, np.nan)
das_dennis_recursion(ref_dirs, ref_dir, n_part, n_part, 0)
return np.concatenate(ref_dirs, axis=0)
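# Example: das_dennis(2, 3) yields the C(4, 2) = 6 simplex-lattice directions
# (0, 0, 1), (0, .5, .5), (0, 1, 0), (.5, 0, .5), (.5, .5, 0), (1, 0, 0).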
def das_dennis_recursion(ref_dirs, ref_dir, n_part, beta, depth):
if depth == len(ref_dir) - 1:
ref_dir[depth] = beta / (1.0 * n_part)
ref_dir = ref_dir / np.sqrt( np.sum(ref_dir ** 2) )
ref_dirs.append(ref_dir[None, :])
else:
for i in range(beta + 1):
ref_dir[depth] = 1.0 * i / (1.0 * n_part)
das_dennis_recursion(ref_dirs, np.copy(ref_dir), n_part, beta - i, depth + 1)
def neighboring_angle(ref_dirs):
cosine_refdirs = np.dot(ref_dirs, ref_dirs.T)
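    # Sort similarities in descending order: column 0 is each direction with
    # itself (cosine = 1), so column 1 holds its nearest neighbouring direction.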
sorted_cosine_refdirs = - np.sort(- cosine_refdirs, axis=1)
arccosine_refdirs = np.arccos( np.clip(sorted_cosine_refdirs[:,1], 0, 1) )
return arccosine_refdirs
|
[
"numpy.full",
"numpy.sum",
"numpy.copy",
"numpy.clip",
"numpy.sort",
"numpy.dot",
"numpy.concatenate"
] |
[((869, 897), 'numpy.dot', 'np.dot', (['ref_dirs', 'ref_dirs.T'], {}), '(ref_dirs, ref_dirs.T)\n', (875, 897), True, 'import numpy as np\n'), ((157, 187), 'numpy.full', 'np.full', (['(1, n_obj)', '(1 / n_obj)'], {}), '((1, n_obj), 1 / n_obj)\n', (164, 187), True, 'import numpy as np\n'), ((238, 260), 'numpy.full', 'np.full', (['n_obj', 'np.nan'], {}), '(n_obj, np.nan)\n', (245, 260), True, 'import numpy as np\n'), ((343, 375), 'numpy.concatenate', 'np.concatenate', (['ref_dirs'], {'axis': '(0)'}), '(ref_dirs, axis=0)\n', (357, 375), True, 'import numpy as np\n'), ((928, 960), 'numpy.sort', 'np.sort', (['(-cosine_refdirs)'], {'axis': '(1)'}), '(-cosine_refdirs, axis=1)\n', (935, 960), True, 'import numpy as np\n'), ((997, 1039), 'numpy.clip', 'np.clip', (['sorted_cosine_refdirs[:, 1]', '(0)', '(1)'], {}), '(sorted_cosine_refdirs[:, 1], 0, 1)\n', (1004, 1039), True, 'import numpy as np\n'), ((561, 581), 'numpy.sum', 'np.sum', (['(ref_dir ** 2)'], {}), '(ref_dir ** 2)\n', (567, 581), True, 'import numpy as np\n'), ((767, 783), 'numpy.copy', 'np.copy', (['ref_dir'], {}), '(ref_dir)\n', (774, 783), True, 'import numpy as np\n')]
|
# Copyright (c) 2019 <NAME>.
# Cura is released under the terms of the LGPLv3 or higher.
## Find_moveo ##
from copy import deepcopy
from typing import cast, Dict, List, Optional
from UM.Application import Application
from UM.Math.AxisAlignedBox import AxisAlignedBox
from UM.Math.Polygon import Polygon # For typing.
from UM.Scene.SceneNode import SceneNode
from UM.Scene.SceneNodeDecorator import SceneNodeDecorator # To cast the deepcopy of every decorator back to SceneNodeDecorator.
import cura.CuraApplication # To get the build plate.
from cura.Settings.ExtruderStack import ExtruderStack # For typing.
from cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator # For per-object settings.
import threading
import queue
## Scene nodes that are models are only seen when selecting the corresponding build plate
# Note that many other nodes can just be UM SceneNode objects.
class CuraSceneNode(SceneNode):
def __init__(self, parent: Optional["SceneNode"] = None, visible: bool = True, name: str = "", no_setting_override: bool = False) -> None:
super().__init__(parent = parent, visible = visible, name = name)
self._lock = threading.Lock()
self._check = True
self._queue = queue.Queue()
if not no_setting_override:
self.addDecorator(SettingOverrideDecorator()) # Now we always have a getActiveExtruderPosition, unless explicitly disabled
self._outside_buildarea = False
def setOutsideBuildArea(self, new_value: bool) -> None:
self._outside_buildarea = new_value
def isOutsideBuildArea(self) -> bool:
return self._outside_buildarea or self.callDecoration("getBuildPlateNumber") < 0
def isVisible(self) -> bool:
return super().isVisible() and self.callDecoration("getBuildPlateNumber") == cura.CuraApplication.CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate
def isSelectable(self) -> bool:
return super().isSelectable() and self.callDecoration("getBuildPlateNumber") == cura.CuraApplication.CuraApplication.getInstance().getMultiBuildPlateModel().activeBuildPlate
## Get the extruder used to print this node. If there is no active node, then the extruder in position zero is returned
# TODO The best way to do it is by adding the setActiveExtruder decorator to every node when is loaded
def getPrintingExtruder(self) -> Optional[ExtruderStack]:
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack is None:
return None
per_mesh_stack = self.callDecoration("getStack")
extruders = list(global_container_stack.extruders.values())
# Use the support extruder instead of the active extruder if this is a support_mesh
if per_mesh_stack:
if per_mesh_stack.getProperty("support_mesh", "value"):
return extruders[int(global_container_stack.getExtruderPositionValueWithDefault("support_extruder_nr"))]
# It's only set if you explicitly choose an extruder
extruder_id = self.callDecoration("getActiveExtruder")
for extruder in extruders:
# Find out the extruder if we know the id.
if extruder_id is not None:
if extruder_id == extruder.getId():
return extruder
else: # If the id is unknown, then return the extruder in the position 0
try:
if extruder.getMetaDataEntry("position", default = "0") == "0": # Check if the position is zero
return extruder
except ValueError:
continue
# This point should never be reached
return None
## Return the color of the material used to print this model
def getDiffuseColor(self) -> List[float]:
printing_extruder = self.getPrintingExtruder()
material_color = "#808080" # Fallback color
if printing_extruder is not None and printing_extruder.material:
material_color = printing_extruder.material.getMetaDataEntry("color_code", default = material_color)
# Colors are passed as rgb hex strings (eg "#ffffff"), and the shader needs
# an rgba list of floats (eg [1.0, 1.0, 1.0, 1.0])
return [
int(material_color[1:3], 16) / 255,
int(material_color[3:5], 16) / 255,
int(material_color[5:7], 16) / 255,
1.0
]
## Return if any area collides with the convex hull of this scene node
def collidesWithAreas(self, areas: List[Polygon]) -> bool:
convex_hull = self.callDecoration("getConvexHull")
if convex_hull:
if not convex_hull.isValid():
return False
# Check for collisions between provided areas and the object
for area in areas:
overlap = convex_hull.intersectsPolygon(area)
if overlap is None:
continue
return True
return False
## Return if any area collides with the convex hull of x-z plane and y-z plane
def collidesWithAreasForMoveo(self, areas: List[Polygon]) -> bool:
convex_hull = self.callDecoration("getConvexHullForMoveo")
self._check = True
threads = []
# Check for collisions between provided areas and the object
if convex_hull:
for area in areas:
self._queue.put(area)
for j in range(2):
# Use parallel to speedup
threads.append(threading.Thread(target=self.parallelCheck, args=(convex_hull,)))
threads[j].start()
for thread in threads:
thread.join()
if self._check:
return False
else:
return True
def parallelCheck(self,convex_hull = None):
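        # Drain the shared queue of disallowed areas; the first overlap found
        # clears self._check under the lock, after which the remaining
        # iterations skip the expensive polygon-intersection tests.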
while self._queue.qsize() > 0:
area = self._queue.get()
if self._check:
overlap_1 = convex_hull[0].intersectsPolygon(area)
overlap_2 = convex_hull[1].intersectsPolygon(area)
if overlap_1 is None and overlap_2 is None:
continue
# If we find an area out of range, then quickly break out this function
self._lock.acquire()
self._check = False
self._lock.release()
## Override of SceneNode._calculateAABB to exclude non-printing-meshes from bounding box
def _calculateAABB(self) -> None:
self._aabb = None
if self._mesh_data:
self._aabb = self._mesh_data.getExtents(self.getWorldTransformation())
else: # If there is no mesh_data, use a boundingbox that encompasses the local (0,0,0)
position = self.getWorldPosition()
self._aabb = AxisAlignedBox(minimum=position, maximum=position)
for child in self.getAllChildren():
if child.callDecoration("isNonPrintingMesh"):
# Non-printing-meshes inside a group should not affect push apart or drop to build plate
continue
if not child.getMeshData():
# Nodes without mesh data should not affect bounding boxes of their parents.
continue
if self._aabb is None:
self._aabb = child.getBoundingBox()
else:
self._aabb = self._aabb + child.getBoundingBox()
## Taken from SceneNode, but replaced SceneNode with CuraSceneNode
def __deepcopy__(self, memo: Dict[int, object]) -> "CuraSceneNode":
copy = CuraSceneNode(no_setting_override = True) # Setting override will be added later
copy.setTransformation(self.getLocalTransformation())
copy.setMeshData(self._mesh_data)
copy.setVisible(cast(bool, deepcopy(self._visible, memo)))
copy._selectable = cast(bool, deepcopy(self._selectable, memo))
copy._name = cast(str, deepcopy(self._name, memo))
for decorator in self._decorators:
copy.addDecorator(cast(SceneNodeDecorator, deepcopy(decorator, memo)))
for child in self._children:
copy.addChild(cast(SceneNode, deepcopy(child, memo)))
self.calculateBoundingBoxMesh()
return copy
def transformChanged(self) -> None:
self._transformChanged()
|
[
"threading.Thread",
"copy.deepcopy",
"UM.Math.AxisAlignedBox.AxisAlignedBox",
"cura.Settings.SettingOverrideDecorator.SettingOverrideDecorator",
"UM.Application.Application.getInstance",
"threading.Lock",
"queue.Queue"
] |
[((1178, 1194), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1192, 1194), False, 'import threading\n'), ((1244, 1257), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (1255, 1257), False, 'import queue\n'), ((6877, 6927), 'UM.Math.AxisAlignedBox.AxisAlignedBox', 'AxisAlignedBox', ([], {'minimum': 'position', 'maximum': 'position'}), '(minimum=position, maximum=position)\n', (6891, 6927), False, 'from UM.Math.AxisAlignedBox import AxisAlignedBox\n'), ((7940, 7972), 'copy.deepcopy', 'deepcopy', (['self._selectable', 'memo'], {}), '(self._selectable, memo)\n', (7948, 7972), False, 'from copy import deepcopy\n'), ((8005, 8031), 'copy.deepcopy', 'deepcopy', (['self._name', 'memo'], {}), '(self._name, memo)\n', (8013, 8031), False, 'from copy import deepcopy\n'), ((1324, 1350), 'cura.Settings.SettingOverrideDecorator.SettingOverrideDecorator', 'SettingOverrideDecorator', ([], {}), '()\n', (1348, 1350), False, 'from cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator\n'), ((2469, 2494), 'UM.Application.Application.getInstance', 'Application.getInstance', ([], {}), '()\n', (2492, 2494), False, 'from UM.Application import Application\n'), ((7870, 7899), 'copy.deepcopy', 'deepcopy', (['self._visible', 'memo'], {}), '(self._visible, memo)\n', (7878, 7899), False, 'from copy import deepcopy\n'), ((5612, 5676), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.parallelCheck', 'args': '(convex_hull,)'}), '(target=self.parallelCheck, args=(convex_hull,))\n', (5628, 5676), False, 'import threading\n'), ((8131, 8156), 'copy.deepcopy', 'deepcopy', (['decorator', 'memo'], {}), '(decorator, memo)\n', (8139, 8156), False, 'from copy import deepcopy\n'), ((8239, 8260), 'copy.deepcopy', 'deepcopy', (['child', 'memo'], {}), '(child, memo)\n', (8247, 8260), False, 'from copy import deepcopy\n')]
|
import re
import os
from .newsBase import NewsBaseSrc
from .newsResult import NewsResult
class CnnIndo(NewsBaseSrc):
def parse_url(self, url, date, page):
return url + "?p=" + str(page) + "&date=" + date.strftime("%Y/%m/%d")
def get_default_url(self):
return "https://www.cnnindonesia.com/nasional/indeks/3"
"""
<article>
<a href="..." target="_blank"></a>
</article>
"""
def get_linked_urls(self, url):
html = self.download_url(url)
soup = self.make_soup(html)
for link in soup.find_all("article"):
# not all <article> have the targeted link
try:
title = link.find("h2", attrs={'class': 'title'})
if(re.search("VIDEO:", title.get_text().strip())):
continue
item = link.find("a", attrs={'target': '_blank'})
path = item.get('href')
print(path)
yield path
except:
pass
"""
--TITLE--
<h1 class="title">...</h1>
--CONTENT--
<div id="detikdetailtext">
....
</div>
"""
def get_content(self, url):
html = self.download_url(url + "?page=all")
soup = self.make_soup(html)
result_text = ""
title = soup.find(
"h1", attrs={'class': 'title'}).get_text().strip()
content = soup.find("div", attrs={'id': 'detikdetailtext'})
try:
            temp_result: str = content.get_text()
for text in temp_result.splitlines():
if(re.search("Gambas:", text)):
continue
if(text != ""):
text = re.sub("^(.*?)--", "", text)
result_text = result_text + text
# remove meaningless (xxxx/yyyy)
result_text = (re.sub("\(([^)]+)\)$", "", result_text))
return NewsResult(url, title, result_text)
except:
pass
if __name__ == '__main__':
# result = CnnIndo().run(target_total=50)
result = CnnIndo().get_content(
# "https://www.cnnindonesia.com/nasional/20200530180616-20-508320/10-provinsi-di-indonesia-nihil-kasus-baru-corona-hari-ini")
"https://www.cnnindonesia.com/nasional/20200530191534-20-508359/polda-jateng-sekat-pemudik-arus-balik-hingga-7-juni")
|
[
"re.sub",
"re.search"
] |
[((1869, 1910), 're.sub', 're.sub', (['"""\\\\(([^)]+)\\\\)$"""', '""""""', 'result_text'], {}), "('\\\\(([^)]+)\\\\)$', '', result_text)\n", (1875, 1910), False, 'import re\n'), ((1596, 1622), 're.search', 're.search', (['"""Gambas:"""', 'text'], {}), "('Gambas:', text)\n", (1605, 1622), False, 'import re\n'), ((1714, 1742), 're.sub', 're.sub', (['"""^(.*?)--"""', '""""""', 'text'], {}), "('^(.*?)--', '', text)\n", (1720, 1742), False, 'import re\n')]
|
from discord.ext import commands
from cogs.utils import game
from cogs.utils.game import GamePlay
from cogs.utils import views_solomode as views
class Solomode(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def solo(self, ctx):
result = {}
i = 0
await ctx.channel.purge(limit=1)
generated_num_tuple = game.generate_num()
answer = ''.join(map(str, generated_num_tuple))
UI = await ctx.send(embed=views.embed_start(ctx))
def check_msg(msg):
return msg.author == ctx.author
while True:
predicted_num = await self.bot.wait_for("message", check=check_msg, timeout=None)
await ctx.channel.purge(limit=1)
display = GamePlay(predicted_num, generated_num_tuple)
draw = display.draw()
if predicted_num.content == 'end':
"""endと入力されたら処理を終了"""
                await ctx.send('The game has ended.')
break
if draw[0] is not True:
await ctx.send(draw[1])
continue
eat_bite = draw[1]
result[i] = f'{display.num} → **{eat_bite}**'
if display.EAT == 3:
"""EATが3の時に処理を終了"""
await UI.edit(embed=views.embed_gameclear(ctx, i, answer, result))
break
if i >= 10:
"""10回試行したら処理を終了"""
await UI.edit(embed=views.embed_gameover(ctx, answer))
break
await UI.edit(embed=views.embed_gameplay(ctx, i, display.num, eat_bite))
i += 1
def setup(bot):
bot.add_cog(Solomode(bot))
|
[
"discord.ext.commands.command",
"cogs.utils.game.generate_num",
"cogs.utils.views_solomode.embed_gameclear",
"cogs.utils.views_solomode.embed_start",
"cogs.utils.views_solomode.embed_gameplay",
"cogs.utils.game.GamePlay",
"cogs.utils.views_solomode.embed_gameover"
] |
[((236, 254), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (252, 254), False, 'from discord.ext import commands\n'), ((393, 412), 'cogs.utils.game.generate_num', 'game.generate_num', ([], {}), '()\n', (410, 412), False, 'from cogs.utils import game\n'), ((782, 826), 'cogs.utils.game.GamePlay', 'GamePlay', (['predicted_num', 'generated_num_tuple'], {}), '(predicted_num, generated_num_tuple)\n', (790, 826), False, 'from cogs.utils.game import GamePlay\n'), ((503, 525), 'cogs.utils.views_solomode.embed_start', 'views.embed_start', (['ctx'], {}), '(ctx)\n', (520, 525), True, 'from cogs.utils import views_solomode as views\n'), ((1569, 1620), 'cogs.utils.views_solomode.embed_gameplay', 'views.embed_gameplay', (['ctx', 'i', 'display.num', 'eat_bite'], {}), '(ctx, i, display.num, eat_bite)\n', (1589, 1620), True, 'from cogs.utils import views_solomode as views\n'), ((1313, 1358), 'cogs.utils.views_solomode.embed_gameclear', 'views.embed_gameclear', (['ctx', 'i', 'answer', 'result'], {}), '(ctx, i, answer, result)\n', (1334, 1358), True, 'from cogs.utils import views_solomode as views\n'), ((1479, 1512), 'cogs.utils.views_solomode.embed_gameover', 'views.embed_gameover', (['ctx', 'answer'], {}), '(ctx, answer)\n', (1499, 1512), True, 'from cogs.utils import views_solomode as views\n')]
|
from django.db import models
from django.utils.timezone import now
# Create your models here.
SEDAN = "SEDAN"
SUV = "SUV"
WAGON = "WAGON"
# (...)
CAR_MODELS_CHOICES = (
(SEDAN, "Sedan"),
(SUV, "SUV"),
(WAGON, "WAGON"),
)
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
class CarMake(models.Model):
name = models.CharField(max_length=30)
description = models.CharField(max_length=30)
def __str__(self):
return ("Car name: {0}, description: {1}".format(self.name, self.description))
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
class CarModel(models.Model):
car_makes = models.ManyToManyField(CarMake, related_name='carModels')
dealer_id = models.IntegerField()
name = models.CharField(max_length=30)
car_type = models.CharField(max_length=30, choices=CAR_MODELS_CHOICES)
year = models.DateField()
def __str__(self):
return ("Car name: {0}, type: {1}".format(self.name, self.car_type))
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
self.address = address
self.city = city
self.full_name = full_name
self.id = id
self.lat = lat
self.long = long
self.short_name = short_name
self.st = st
self.zip = zip
def __str__(self):
return "Dealer name: " + self.full_name + " " + str(self.id)
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview :
def __init__(self, dealership, name, purchase, review, purchase_date, car_make, car_model, car_year, sentiment, id):
self.dealership = dealership
self.name = name
self.purchase = purchase
self.review = review
self.purchase_date = purchase_date
self.car_make = car_make
self.car_model = car_model
self.car_year = car_year
self.sentiment = sentiment
self.id = id
def __str__(self):
return "Dealer review: " + self.review
|
[
"django.db.models.CharField",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.DateField"
] |
[((341, 372), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (357, 372), False, 'from django.db import models\n'), ((391, 422), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (407, 422), False, 'from django.db import models\n'), ((649, 706), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['CarMake'], {'related_name': '"""carModels"""'}), "(CarMake, related_name='carModels')\n", (671, 706), False, 'from django.db import models\n'), ((723, 744), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (742, 744), False, 'from django.db import models\n'), ((756, 787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (772, 787), False, 'from django.db import models\n'), ((803, 862), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'choices': 'CAR_MODELS_CHOICES'}), '(max_length=30, choices=CAR_MODELS_CHOICES)\n', (819, 862), False, 'from django.db import models\n'), ((874, 892), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (890, 892), False, 'from django.db import models\n')]
|
import torch
import torch.nn as nn
from typing import Optional
from scipy import special
import math
from .utils import kld_gaussian, rand_epanechnikov_trig
class GaussianDropout(nn.Module):
def __init__(self, p=0.0, truncate=None):
super().__init__()
self.alpha = p / (1.0 - p)
self.truncate = truncate
def forward(self, vector):
if self.training and self.alpha > 0.0:
epsilon = torch.randn(vector.size(), device=vector.device)
if self.truncate is not None:
epsilon = torch.fmod(epsilon, self.truncate)
epsilon = self.alpha * epsilon + 1.0
vector = vector * epsilon
return vector
class VariationalBase(nn.Module):
def __init__(self, input_size, momentum=0.99, eps=1e-8):
super().__init__()
self.input_size = input_size
self.momentum = momentum
self.eps = eps
self.register_buffer('running_mean', torch.zeros(input_size))
self.register_buffer('running_var', torch.zeros(input_size))
self.register_buffer('num_batches_tracked',
torch.tensor(0, dtype=torch.long))
def reset_running_stats(self):
self.running_mean.zero_()
self.running_var.zero_()
self.num_batches_tracked.zero_()
@torch.jit.unused
def update(self,
input_vector: torch.Tensor,
mask: Optional[torch.Tensor] = None):
input_vector = input_vector.view(-1, self.input_size)
if mask is not None:
mask = mask.view(-1)
input_vector = input_vector[mask > 0]
mean = input_vector.data.mean(dim=0)
self.running_mean.data.mul_(self.momentum).add_(mean, alpha=1-self.momentum)
std = input_vector.data.std(dim=0)
self.running_var.data.mul_(self.momentum).addcmul_(std, std, value=1-self.momentum)
self.num_batches_tracked.data.add_(1)
@property
def _correction(self):
return 1.0 - self.momentum ** self.num_batches_tracked
@property
def mean(self):
return self.running_mean / self._correction
@property
def var(self):
return self.running_var / self._correction
@property
def snr(self):
return self.mean / (torch.sqrt(self.var) + self.eps)
class VariationalGaussianDropout(VariationalBase):
"""Вариационный слой регуляризации с априорным и апостериорным
нормальными распределениями
"""
def __init__(self, input_size, truncate=None, momentum=0.99, eps=1e-8):
super().__init__(input_size, momentum=momentum, eps=eps)
self.truncate = truncate
self.log_sigma = nn.Parameter(torch.Tensor(input_size))
self.log_sigma.data.fill_(-1.0)
self._mean = None
def forward(self,
vector: torch.Tensor,
mask: Optional[torch.Tensor] = None):
epsilon = torch.randn(vector.size(), device=vector.device)
if self.truncate is not None:
epsilon = torch.fmod(epsilon, self.truncate)
if mask is not None:
epsilon = epsilon * mask[..., None]
variance = torch.exp(self.log_sigma)
if self.training:
self._save_stat(vector)
vector = vector + variance * epsilon
if self.training:
self.update(vector, mask)
return vector
@torch.jit.unused
def _save_stat(self, vector):
self._mean = vector
def kld(self, nu=0.0, rho=1.0):
return kld_gaussian(self._mean, self.log_sigma, nu=nu, rho=rho)
class VariationalNormalEpanechnikovDropout(VariationalBase):
def __init__(self, input_size, momentum=0.99, eps=1e-8):
super().__init__(input_size, momentum=momentum, eps=eps)
self.log_sigma = nn.Parameter(torch.Tensor(input_size))
self.log_sigma.data.fill_(-1.0)
self._mean = None
self._const = 0.5*math.log(90.0*math.pi) - 7./6.
self._shift = 0.5*math.log(5.0)
def forward(self,
vector: torch.Tensor,
mask: Optional[torch.Tensor] = None):
epsilon = rand_epanechnikov_trig(vector.size(), device=vector.device)
if mask is not None:
epsilon = epsilon * mask[..., None]
variance = torch.exp(self.log_sigma)
if self.training:
self._save_stat(vector)
vector = vector + variance * epsilon
if self.training:
self.update(vector, mask)
return vector
@torch.jit.unused
def _save_stat(self, vector):
self._mean = vector
def kld(self, nu=0.0, rho=1.0):
log_sigma = self.log_sigma - self._shift
normal_kld = kld_gaussian(self._mean, log_sigma, nu=nu, rho=rho)
return self._const + normal_kld
class VariationalLogNormalGammaDropout(VariationalBase):
"""Вариационный слой регуляризации с априорным гамма и апостериорным
логнормальным распределениями
"""
def __init__(self, input_size, truncate=None, momentum=0.99, eps=1e-8):
super().__init__(input_size, momentum=momentum, eps=eps)
self.truncate = truncate
self.sigma = nn.Parameter(torch.Tensor(input_size))
self.sigma.data.fill_(0.01)
self.eps = eps
self._mean = None
self._coeff = None
def forward(self, vector, mask=None):
epsilon = torch.randn(vector.size(), device=vector.device)
if self.truncate is not None:
epsilon = torch.fmod(epsilon, self.truncate)
xi = -0.5 * self.sigma * self.sigma + torch.abs(self.sigma) * epsilon
if self.training:
self._save_stat(vector)
if mask is not None:
xi = xi * mask[..., None]
vector = vector * torch.exp(xi)
if self.training:
self.update(vector, mask)
return vector
def kld(self, alpha=0.01, beta=0.1):
alpha, beta = float(alpha), float(beta)
if self._coeff is None:
self._coeff = {(alpha, beta): self._kld_coeff(alpha, beta)}
elif (alpha, beta) not in self._coeff:
self._coeff[(alpha, beta)] = self._kld_coeff(alpha, beta)
const_coeff = self._coeff[(alpha, beta)]
var_part = - torch.log(torch.abs(self.sigma) + self.eps) + 0.5 * alpha * self.sigma**2
mean_part = beta * torch.abs(self._mean) - alpha * torch.log(torch.abs(self._mean) + self.eps)
fval = const_coeff * torch.numel(var_part) + torch.sum(var_part) + torch.sum(mean_part)
return fval
@staticmethod
def _kld_coeff(alpha, beta):
return special.loggamma(alpha) - alpha * math.log(beta) \
- 0.5 * math.log(2 * math.pi) - 0.5
@torch.jit.unused
def _save_stat(self, vector):
self._mean = vector
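# A minimal, hypothetical training sketch (not part of the original module);
# the tensor shape and the 1e-3 KLD weight below are arbitrary illustration
# values.
def _example_usage():
    layer = VariationalGaussianDropout(input_size=128)
    x = torch.randn(32, 128)
    out = layer(x)  # stochastic pass; modules are constructed in train mode
    loss = out.pow(2).mean() + 1e-3 * layer.kld()  # task loss + KLD regularizer
    loss.backward()
    return layer.snr  # bias-corrected running signal-to-noise ratio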
|
[
"torch.numel",
"scipy.special.loggamma",
"torch.sqrt",
"torch.fmod",
"torch.abs",
"torch.exp",
"torch.Tensor",
"torch.zeros",
"math.log",
"torch.sum",
"torch.tensor"
] |
[((3146, 3171), 'torch.exp', 'torch.exp', (['self.log_sigma'], {}), '(self.log_sigma)\n', (3155, 3171), False, 'import torch\n'), ((4266, 4291), 'torch.exp', 'torch.exp', (['self.log_sigma'], {}), '(self.log_sigma)\n', (4275, 4291), False, 'import torch\n'), ((958, 981), 'torch.zeros', 'torch.zeros', (['input_size'], {}), '(input_size)\n', (969, 981), False, 'import torch\n'), ((1027, 1050), 'torch.zeros', 'torch.zeros', (['input_size'], {}), '(input_size)\n', (1038, 1050), False, 'import torch\n'), ((1133, 1166), 'torch.tensor', 'torch.tensor', (['(0)'], {'dtype': 'torch.long'}), '(0, dtype=torch.long)\n', (1145, 1166), False, 'import torch\n'), ((2681, 2705), 'torch.Tensor', 'torch.Tensor', (['input_size'], {}), '(input_size)\n', (2693, 2705), False, 'import torch\n'), ((3015, 3049), 'torch.fmod', 'torch.fmod', (['epsilon', 'self.truncate'], {}), '(epsilon, self.truncate)\n', (3025, 3049), False, 'import torch\n'), ((3787, 3811), 'torch.Tensor', 'torch.Tensor', (['input_size'], {}), '(input_size)\n', (3799, 3811), False, 'import torch\n'), ((3962, 3975), 'math.log', 'math.log', (['(5.0)'], {}), '(5.0)\n', (3970, 3975), False, 'import math\n'), ((5153, 5177), 'torch.Tensor', 'torch.Tensor', (['input_size'], {}), '(input_size)\n', (5165, 5177), False, 'import torch\n'), ((5462, 5496), 'torch.fmod', 'torch.fmod', (['epsilon', 'self.truncate'], {}), '(epsilon, self.truncate)\n', (5472, 5496), False, 'import torch\n'), ((5733, 5746), 'torch.exp', 'torch.exp', (['xi'], {}), '(xi)\n', (5742, 5746), False, 'import torch\n'), ((6466, 6486), 'torch.sum', 'torch.sum', (['mean_part'], {}), '(mean_part)\n', (6475, 6486), False, 'import torch\n'), ((552, 586), 'torch.fmod', 'torch.fmod', (['epsilon', 'self.truncate'], {}), '(epsilon, self.truncate)\n', (562, 586), False, 'import torch\n'), ((2276, 2296), 'torch.sqrt', 'torch.sqrt', (['self.var'], {}), '(self.var)\n', (2286, 2296), False, 'import torch\n'), ((3905, 3929), 'math.log', 'math.log', (['(90.0 * math.pi)'], {}), '(90.0 * math.pi)\n', (3913, 3929), False, 'import math\n'), ((5543, 5564), 'torch.abs', 'torch.abs', (['self.sigma'], {}), '(self.sigma)\n', (5552, 5564), False, 'import torch\n'), ((6315, 6336), 'torch.abs', 'torch.abs', (['self._mean'], {}), '(self._mean)\n', (6324, 6336), False, 'import torch\n'), ((6444, 6463), 'torch.sum', 'torch.sum', (['var_part'], {}), '(var_part)\n', (6453, 6463), False, 'import torch\n'), ((6420, 6441), 'torch.numel', 'torch.numel', (['var_part'], {}), '(var_part)\n', (6431, 6441), False, 'import torch\n'), ((6574, 6597), 'scipy.special.loggamma', 'special.loggamma', (['alpha'], {}), '(alpha)\n', (6590, 6597), False, 'from scipy import special\n'), ((6648, 6669), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (6656, 6669), False, 'import math\n'), ((6224, 6245), 'torch.abs', 'torch.abs', (['self.sigma'], {}), '(self.sigma)\n', (6233, 6245), False, 'import torch\n'), ((6357, 6378), 'torch.abs', 'torch.abs', (['self._mean'], {}), '(self._mean)\n', (6366, 6378), False, 'import torch\n'), ((6608, 6622), 'math.log', 'math.log', (['beta'], {}), '(beta)\n', (6616, 6622), False, 'import math\n')]
|
import PyPDF2 as p
from translate import translator
import os
filename = 'sample.pdf'
filename2 = 'output.pdf'
try:
    file = open(filename, mode="rb")
except:
print("File not Found, Please Enter Filename along with Directory [ex-Dowload/1.pdf]")
def translate():
cur_lang = 'en'
dest_lang = 'ko'
try:
data = p.PdfFileReader(file)
print(f"Total number of pages are...{data.getNumPages()}")
except:
print("...Oops! Something Error While opening Pdf file")
    final_data = ""
    for page in range(data.getNumPages()):
ok = data.getPage(int(page))
dat = ok.extractText()
try:
print(f"....Translation Started....pageNo.{data.getNumPages()}")
final_data = translator(cur_lang,dest_lang,dat)
except:
print("Sorry, The Translation cant happen Now.Please Try again after some Time.....")
return final_data
if __name__ == "__main__":
translated_pdf = translate()
print(translated_pdf)
finle = open(filename2+".pdf", mode="wb")
p.PdfFileWriter.write(finle,translated_pdf)
finle.save()
print("Your new file is saved...")
file.close()
|
[
"PyPDF2.PdfFileReader",
"PyPDF2.PdfFileWriter.write",
"translate.translator"
] |
[((940, 984), 'PyPDF2.PdfFileWriter.write', 'p.PdfFileWriter.write', (['finle', 'translated_pdf'], {}), '(finle, translated_pdf)\n', (961, 984), True, 'import PyPDF2 as p\n'), ((324, 345), 'PyPDF2.PdfFileReader', 'p.PdfFileReader', (['file'], {}), '(file)\n', (339, 345), True, 'import PyPDF2 as p\n'), ((662, 698), 'translate.translator', 'translator', (['cur_lang', 'dest_lang', 'dat'], {}), '(cur_lang, dest_lang, dat)\n', (672, 698), False, 'from translate import translator\n')]
|
from anvil.interfaces.maya.dependencies import DEFAULT_API
import anvil.interfaces.api_proxy as api_proxy
import anvil.config as cfg
default_properties = {
"layer": api_proxy.STR_TYPE,
"name": api_proxy.STR_TYPE,
"remove": api_proxy.BOOL_TYPE,
"targetList": api_proxy.BOOL_TYPE,
"weight": api_proxy.NUM_TYPE,
"weightAliasList": api_proxy.BOOL_TYPE,
}
cacheable_schema = {
"createCache": api_proxy.LINEAR_ANGLE_TYPE,
"deleteCache": api_proxy.BOOL_TYPE,
}
offset_schema = {
"maintainOffset": api_proxy.merge_dicts(api_proxy.BOOL_TYPE, {cfg.DEFAULT: True}),
"offset": api_proxy.POSITION_TYPE,
"skip": api_proxy.STR_TYPE,
}
aim_schema = {
"aimVector": api_proxy.POSITION_TYPE,
"upVector": api_proxy.POSITION_TYPE,
"worldUpObject": api_proxy.STR_TYPE,
"worldUpType": api_proxy.STR_TYPE,
"worldUpVector": api_proxy.POSITION_TYPE,
}
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA, {'properties': {'ignoreUnitConversion': api_proxy.BOOL_TYPE}}),
    DEFAULT_API, 'isConnected')
def connected_attr(attribute_dag_path_1, attribute_dag_path_2, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties':
{'disable': api_proxy.BOOL_TYPE,
'force': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'mute')
def mute(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties':
{'connection': api_proxy.BOOL_TYPE,
'datablock': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'isDirty')
def dirty_attr(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties':
{'destinationFromSource': api_proxy.BOOL_TYPE,
'getExactDestination': api_proxy.BOOL_TYPE, 'getExactSource': api_proxy.BOOL_TYPE,
'getLockedAncestor': api_proxy.BOOL_TYPE, 'isDestination': api_proxy.BOOL_TYPE,
'isExactDestination': api_proxy.BOOL_TYPE, 'isExactSource': api_proxy.BOOL_TYPE,
'isLocked': api_proxy.BOOL_TYPE, 'isSource': api_proxy.BOOL_TYPE,
'sourceFromDestination': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'connectionInfo')
def connection_info(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties':
{'allAttributes': api_proxy.BOOL_TYPE, 'bool': api_proxy.BOOL_TYPE,
'enumerated': api_proxy.BOOL_TYPE, 'hidden': api_proxy.BOOL_TYPE,
'internal': api_proxy.BOOL_TYPE, 'leaf': api_proxy.BOOL_TYPE,
'logicalAnd': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE,
'short': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE,
'userInterface': api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'attributeInfo')
def info_attr(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties':
{'allFuture': api_proxy.BOOL_TYPE, 'allGraphs': api_proxy.BOOL_TYPE,
'breadthFirst': api_proxy.BOOL_TYPE, 'future': api_proxy.BOOL_TYPE,
'futureLocalAttr': api_proxy.BOOL_TYPE, 'futureWorldAttr': api_proxy.BOOL_TYPE,
'groupLevels': api_proxy.BOOL_TYPE, 'historyAttr': api_proxy.BOOL_TYPE,
'interestLevel': api_proxy.INT_TYPE, 'leaf': api_proxy.BOOL_TYPE,
'levels': api_proxy.INT_TYPE, 'pruneDagObjects': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'listHistory')
def list_history(dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {'connections': api_proxy.BOOL_TYPE, 'destination': api_proxy.BOOL_TYPE,
'exactType': api_proxy.BOOL_TYPE, 'plugs': api_proxy.BOOL_TYPE,
'shapes': api_proxy.BOOL_TYPE, 'skipConversionNodes': api_proxy.BOOL_TYPE,
'source': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE}
}),
DEFAULT_API, 'listConnections')
def list_connections(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'remove': api_proxy.BOOL_TYPE,
'query': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'aliasAttr')
def alias_attr(attribute_dag_path, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'affectsAppearance': api_proxy.BOOL_TYPE, 'affectsWorldspace': api_proxy.BOOL_TYPE,
'attributeType': api_proxy.BOOL_TYPE, 'cachedInternally': api_proxy.BOOL_TYPE,
'categories': api_proxy.BOOL_TYPE, 'channelBox': api_proxy.BOOL_TYPE,
'connectable': api_proxy.BOOL_TYPE, 'enum': api_proxy.BOOL_TYPE,
'exists': api_proxy.BOOL_TYPE, 'hidden': api_proxy.BOOL_TYPE,
'indeterminant': api_proxy.BOOL_TYPE, 'indexMatters': api_proxy.BOOL_TYPE,
'internal': api_proxy.BOOL_TYPE, 'internalGet': api_proxy.BOOL_TYPE,
'internalSet': api_proxy.BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE,
'listChildren': api_proxy.BOOL_TYPE, 'listDefault': api_proxy.BOOL_TYPE,
'listEnum': api_proxy.BOOL_TYPE, 'listParent': api_proxy.BOOL_TYPE,
'listSiblings': api_proxy.BOOL_TYPE, 'longName': api_proxy.BOOL_TYPE,
'maxExists': api_proxy.BOOL_TYPE, 'maximum': api_proxy.BOOL_TYPE,
'message': api_proxy.BOOL_TYPE, 'minExists': api_proxy.BOOL_TYPE,
'minimum': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE,
'niceName': api_proxy.BOOL_TYPE, 'node': api_proxy.STR_TYPE,
'numberOfChildren': api_proxy.BOOL_TYPE, 'range': api_proxy.BOOL_TYPE,
'rangeExists': api_proxy.BOOL_TYPE, 'readable': api_proxy.BOOL_TYPE,
'renderSource': api_proxy.BOOL_TYPE, 'shortName': api_proxy.BOOL_TYPE,
'softMax': api_proxy.BOOL_TYPE, 'softMaxExists': api_proxy.BOOL_TYPE,
'softMin': api_proxy.BOOL_TYPE, 'softMinExists': api_proxy.BOOL_TYPE,
'softRange': api_proxy.BOOL_TYPE, 'softRangeExists': api_proxy.BOOL_TYPE,
'storable': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE,
'typeExact': api_proxy.STR_TYPE, 'usedAsColor': api_proxy.BOOL_TYPE,
'usedAsFilename': api_proxy.BOOL_TYPE, 'usesMultiBuilder': api_proxy.BOOL_TYPE,
'worldspace': api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'attributeQuery')
def query_attr(attribute, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'caching': api_proxy.BOOL_TYPE,
'category': api_proxy.STR_TYPE,
'changedSinceFileOpen': api_proxy.BOOL_TYPE,
'channelBox': api_proxy.BOOL_TYPE,
'connectable': api_proxy.BOOL_TYPE,
'extension': api_proxy.BOOL_TYPE,
'fromPlugin': api_proxy.BOOL_TYPE,
'hasData': api_proxy.BOOL_TYPE,
'hasNullData': api_proxy.BOOL_TYPE,
'inUse': api_proxy.BOOL_TYPE,
'keyable': api_proxy.BOOL_TYPE,
'leaf': api_proxy.BOOL_TYPE,
'locked': api_proxy.BOOL_TYPE,
'multi': api_proxy.BOOL_TYPE,
'output': api_proxy.BOOL_TYPE,
'ramp': api_proxy.BOOL_TYPE,
'read': api_proxy.BOOL_TYPE,
'readOnly': api_proxy.BOOL_TYPE,
'scalar': api_proxy.BOOL_TYPE,
'scalarAndArray': api_proxy.BOOL_TYPE,
'settable': api_proxy.BOOL_TYPE,
'shortNames': api_proxy.BOOL_TYPE,
'string': api_proxy.STR_TYPE,
'unlocked': api_proxy.BOOL_TYPE,
'usedAsFilename': api_proxy.BOOL_TYPE,
'userDefined': api_proxy.BOOL_TYPE,
'visible': api_proxy.BOOL_TYPE,
'write': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'listAttr')
def list_attr(node, *args, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'attribute': api_proxy.STR_TYPE,
'name': api_proxy.STR_TYPE}}),
DEFAULT_API, 'deleteAttr')
def delete_attr(attribute_dag_path_or_node, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'asString': api_proxy.BOOL_TYPE,
'caching': api_proxy.BOOL_TYPE,
'channelBox': api_proxy.BOOL_TYPE,
'expandEnvironmentVariables': api_proxy.BOOL_TYPE,
'keyable': api_proxy.BOOL_TYPE,
'lock': api_proxy.BOOL_TYPE,
'multiIndices': api_proxy.BOOL_TYPE,
'settable': api_proxy.BOOL_TYPE,
'silent': api_proxy.BOOL_TYPE,
'size': api_proxy.BOOL_TYPE,
'time': api_proxy.NUM_TYPE,
'type': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'getAttr')
def get_attr(attribute_dag_path, *args, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'alteredValue': api_proxy.BOOL_TYPE,
'caching': api_proxy.BOOL_TYPE,
'capacityHint': api_proxy.NUM_TYPE,
'channelBox': api_proxy.BOOL_TYPE,
'clamp': api_proxy.BOOL_TYPE,
'keyable': api_proxy.BOOL_TYPE,
'lock': api_proxy.BOOL_TYPE,
'size': api_proxy.NUM_TYPE,
'type': api_proxy.STR_TYPE}}),
DEFAULT_API, 'setAttr')
def set_attr(attribute, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'attributeType': api_proxy.STR_TYPE, 'binaryTag': api_proxy.STR_TYPE,
'cachedInternally': api_proxy.BOOL_TYPE, 'category': api_proxy.STR_TYPE,
'dataType': api_proxy.STR_TYPE, 'defaultValue': api_proxy.NUM_TYPE,
'disconnectBehaviour': api_proxy.INT_TYPE, 'enumName': api_proxy.STR_TYPE,
'exists': api_proxy.BOOL_TYPE, 'fromPlugin': api_proxy.BOOL_TYPE,
'hasMaxValue': api_proxy.BOOL_TYPE, 'hasMinValue': api_proxy.BOOL_TYPE,
'hasSoftMaxValue': api_proxy.BOOL_TYPE, 'hasSoftMinValue': api_proxy.BOOL_TYPE,
'hidden': api_proxy.BOOL_TYPE, 'indexMatters': api_proxy.BOOL_TYPE,
                          'internalSet': api_proxy.BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE,
'longName': api_proxy.STR_TYPE, 'maxValue': api_proxy.NUM_TYPE,
'minValue': api_proxy.NUM_TYPE, 'multi': api_proxy.BOOL_TYPE,
'niceName': api_proxy.STR_TYPE, 'numberOfChildren': api_proxy.INT_TYPE,
'parent': api_proxy.STR_TYPE, 'proxy': api_proxy.STR_TYPE,
'readable': api_proxy.BOOL_TYPE, 'shortName': api_proxy.STR_TYPE,
'softMaxValue': api_proxy.NUM_TYPE, 'softMinValue': api_proxy.NUM_TYPE,
'storable': api_proxy.BOOL_TYPE, 'usedAsColor': api_proxy.BOOL_TYPE,
'usedAsFilename': api_proxy.BOOL_TYPE, 'usedAsProxy': api_proxy.BOOL_TYPE,
'writable': api_proxy.BOOL_TYPE
}}),
DEFAULT_API, 'addAttr')
def add_attr(attribute, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': {
'force': api_proxy.BOOL_TYPE,
'lock': api_proxy.BOOL_TYPE,
'nextAvailable': api_proxy.STR_TYPE,
'referenceDest': api_proxy.STR_TYPE}}),
DEFAULT_API, 'connectAttr')
def connect_attr(source_attribute, destination_attribute, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA, {'properties': {'nextAvailable': api_proxy.BOOL_TYPE}}),
DEFAULT_API, 'disconnectAttr')
def disconnect_attr(*attributes, **kwargs):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties, offset_schema)}),
DEFAULT_API, 'pointConstraint')
def translate(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties, offset_schema, cacheable_schema)}),
DEFAULT_API, 'orientConstraint')
def rotate(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties, offset_schema, aim_schema)}),
DEFAULT_API, 'aimConstraint')
def aim(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties,
{"scaleCompensate": api_proxy.BOOL_TYPE,
"targetList": api_proxy.BOOL_TYPE},
offset_schema)}),
DEFAULT_API, 'scaleConstraint')
def scale(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties,
{"decompRotationToChild": api_proxy.BOOL_TYPE,
"skipRotate": api_proxy.STR_TYPE,
"skipTranslate": api_proxy.STR_TYPE,
},
offset_schema,
cacheable_schema)}),
DEFAULT_API, 'parentConstraint')
def parent(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties, aim_schema)}),
DEFAULT_API, 'tangentConstraint')
def tangent(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA, {'properties': api_proxy.merge_dicts(default_properties)}),
DEFAULT_API, 'geometryConstraint')
def geometry_point(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA,
{'properties': api_proxy.merge_dicts(default_properties, aim_schema)}),
DEFAULT_API, 'normalConstraint')
def geometry_normal(source, targets, **flags):
pass
@api_proxy.APIProxy.validate(
api_proxy.merge_dicts(api_proxy.DEFAULT_SCHEMA, {'properties': api_proxy.merge_dicts(default_properties)}),
DEFAULT_API, 'poleVectorConstraint')
def pole_vector(source, targets, **flags):
pass
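# Illustrative calls only (not part of the original module): they assume a
# live Maya session and two existing nodes named 'locA' and 'locB', which are
# invented for this sketch.
#
#   connect_attr('locA.translateX', 'locB.translateX', force=True)
#   parent('locA', ['locB'], maintainOffset=True)
#   get_attr('locB.translateX')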
|
[
"anvil.interfaces.api_proxy.merge_dicts"
] |
[((529, 592), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.BOOL_TYPE', '{cfg.DEFAULT: True}'], {}), '(api_proxy.BOOL_TYPE, {cfg.DEFAULT: True})\n', (550, 592), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((930, 1045), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'ignoreUnitConversion': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'ignoreUnitConversion': api_proxy.BOOL_TYPE}})\n", (951, 1045), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((1186, 1317), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'disable': api_proxy.BOOL_TYPE, 'force': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'disable':\n api_proxy.BOOL_TYPE, 'force': api_proxy.BOOL_TYPE}})\n", (1207, 1317), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((1514, 1653), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'connection': api_proxy.BOOL_TYPE, 'datablock': api_proxy.\n BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'connection': api_proxy.BOOL_TYPE, 'datablock': api_proxy.BOOL_TYPE}})\n", (1535, 1653), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((1858, 2351), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'destinationFromSource': api_proxy.BOOL_TYPE,\n 'getExactDestination': api_proxy.BOOL_TYPE, 'getExactSource': api_proxy\n .BOOL_TYPE, 'getLockedAncestor': api_proxy.BOOL_TYPE, 'isDestination':\n api_proxy.BOOL_TYPE, 'isExactDestination': api_proxy.BOOL_TYPE,\n 'isExactSource': api_proxy.BOOL_TYPE, 'isLocked': api_proxy.BOOL_TYPE,\n 'isSource': api_proxy.BOOL_TYPE, 'sourceFromDestination': api_proxy.\n BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'destinationFromSource': api_proxy.BOOL_TYPE, 'getExactDestination':\n api_proxy.BOOL_TYPE, 'getExactSource': api_proxy.BOOL_TYPE,\n 'getLockedAncestor': api_proxy.BOOL_TYPE, 'isDestination': api_proxy.\n BOOL_TYPE, 'isExactDestination': api_proxy.BOOL_TYPE, 'isExactSource':\n api_proxy.BOOL_TYPE, 'isLocked': api_proxy.BOOL_TYPE, 'isSource':\n api_proxy.BOOL_TYPE, 'sourceFromDestination': api_proxy.BOOL_TYPE}})\n", (1879, 2351), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((2675, 3154), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'allAttributes': api_proxy.BOOL_TYPE, 'bool': api_proxy.\n BOOL_TYPE, 'enumerated': api_proxy.BOOL_TYPE, 'hidden': api_proxy.\n BOOL_TYPE, 'internal': api_proxy.BOOL_TYPE, 'leaf': api_proxy.BOOL_TYPE,\n 'logicalAnd': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE,\n 'short': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE,\n 'userInterface': api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'allAttributes': api_proxy.BOOL_TYPE, 'bool': api_proxy.BOOL_TYPE,\n 'enumerated': api_proxy.BOOL_TYPE, 'hidden': api_proxy.BOOL_TYPE,\n 'internal': api_proxy.BOOL_TYPE, 'leaf': api_proxy.BOOL_TYPE,\n 'logicalAnd': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE,\n 'short': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE,\n 'userInterface': api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}})\n", (2696, 3154), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), 
((3472, 3985), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'allFuture': api_proxy.BOOL_TYPE, 'allGraphs': api_proxy.\n BOOL_TYPE, 'breadthFirst': api_proxy.BOOL_TYPE, 'future': api_proxy.\n BOOL_TYPE, 'futureLocalAttr': api_proxy.BOOL_TYPE, 'futureWorldAttr':\n api_proxy.BOOL_TYPE, 'groupLevels': api_proxy.BOOL_TYPE, 'historyAttr':\n api_proxy.BOOL_TYPE, 'interestLevel': api_proxy.INT_TYPE, 'leaf':\n api_proxy.BOOL_TYPE, 'levels': api_proxy.INT_TYPE, 'pruneDagObjects':\n api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'allFuture':\n api_proxy.BOOL_TYPE, 'allGraphs': api_proxy.BOOL_TYPE, 'breadthFirst':\n api_proxy.BOOL_TYPE, 'future': api_proxy.BOOL_TYPE, 'futureLocalAttr':\n api_proxy.BOOL_TYPE, 'futureWorldAttr': api_proxy.BOOL_TYPE,\n 'groupLevels': api_proxy.BOOL_TYPE, 'historyAttr': api_proxy.BOOL_TYPE,\n 'interestLevel': api_proxy.INT_TYPE, 'leaf': api_proxy.BOOL_TYPE,\n 'levels': api_proxy.INT_TYPE, 'pruneDagObjects': api_proxy.BOOL_TYPE}})\n", (3493, 3985), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((4295, 4648), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'connections': api_proxy.BOOL_TYPE, 'destination':\n api_proxy.BOOL_TYPE, 'exactType': api_proxy.BOOL_TYPE, 'plugs':\n api_proxy.BOOL_TYPE, 'shapes': api_proxy.BOOL_TYPE,\n 'skipConversionNodes': api_proxy.BOOL_TYPE, 'source': api_proxy.\n BOOL_TYPE, 'type': api_proxy.STR_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'connections': api_proxy.BOOL_TYPE, 'destination': api_proxy.BOOL_TYPE,\n 'exactType': api_proxy.BOOL_TYPE, 'plugs': api_proxy.BOOL_TYPE,\n 'shapes': api_proxy.BOOL_TYPE, 'skipConversionNodes': api_proxy.\n BOOL_TYPE, 'source': api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE}})\n", (4316, 4648), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((4945, 5075), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'remove': api_proxy.BOOL_TYPE, 'query': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'remove':\n api_proxy.BOOL_TYPE, 'query': api_proxy.BOOL_TYPE}})\n", (4966, 5075), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((5281, 7195), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'affectsAppearance': api_proxy.BOOL_TYPE,\n 'affectsWorldspace': api_proxy.BOOL_TYPE, 'attributeType': api_proxy.\n BOOL_TYPE, 'cachedInternally': api_proxy.BOOL_TYPE, 'categories':\n api_proxy.BOOL_TYPE, 'channelBox': api_proxy.BOOL_TYPE, 'connectable':\n api_proxy.BOOL_TYPE, 'enum': api_proxy.BOOL_TYPE, 'exists': api_proxy.\n BOOL_TYPE, 'hidden': api_proxy.BOOL_TYPE, 'indeterminant': api_proxy.\n BOOL_TYPE, 'indexMatters': api_proxy.BOOL_TYPE, 'internal': api_proxy.\n BOOL_TYPE, 'internalGet': api_proxy.BOOL_TYPE, 'internalSet': api_proxy\n .BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE, 'listChildren': api_proxy.\n BOOL_TYPE, 'listDefault': api_proxy.BOOL_TYPE, 'listEnum': api_proxy.\n BOOL_TYPE, 'listParent': api_proxy.BOOL_TYPE, 'listSiblings': api_proxy\n .BOOL_TYPE, 'longName': api_proxy.BOOL_TYPE, 'maxExists': api_proxy.\n BOOL_TYPE, 'maximum': api_proxy.BOOL_TYPE, 'message': api_proxy.\n BOOL_TYPE, 'minExists': api_proxy.BOOL_TYPE, 'minimum': api_proxy.\n BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE, 'niceName': api_proxy.\n BOOL_TYPE, 'node': api_proxy.STR_TYPE, 
'numberOfChildren': api_proxy.\n BOOL_TYPE, 'range': api_proxy.BOOL_TYPE, 'rangeExists': api_proxy.\n BOOL_TYPE, 'readable': api_proxy.BOOL_TYPE, 'renderSource': api_proxy.\n BOOL_TYPE, 'shortName': api_proxy.BOOL_TYPE, 'softMax': api_proxy.\n BOOL_TYPE, 'softMaxExists': api_proxy.BOOL_TYPE, 'softMin': api_proxy.\n BOOL_TYPE, 'softMinExists': api_proxy.BOOL_TYPE, 'softRange': api_proxy\n .BOOL_TYPE, 'softRangeExists': api_proxy.BOOL_TYPE, 'storable':\n api_proxy.BOOL_TYPE, 'type': api_proxy.STR_TYPE, 'typeExact': api_proxy\n .STR_TYPE, 'usedAsColor': api_proxy.BOOL_TYPE, 'usedAsFilename':\n api_proxy.BOOL_TYPE, 'usesMultiBuilder': api_proxy.BOOL_TYPE,\n 'worldspace': api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'affectsAppearance': api_proxy.BOOL_TYPE, 'affectsWorldspace':\n api_proxy.BOOL_TYPE, 'attributeType': api_proxy.BOOL_TYPE,\n 'cachedInternally': api_proxy.BOOL_TYPE, 'categories': api_proxy.\n BOOL_TYPE, 'channelBox': api_proxy.BOOL_TYPE, 'connectable': api_proxy.\n BOOL_TYPE, 'enum': api_proxy.BOOL_TYPE, 'exists': api_proxy.BOOL_TYPE,\n 'hidden': api_proxy.BOOL_TYPE, 'indeterminant': api_proxy.BOOL_TYPE,\n 'indexMatters': api_proxy.BOOL_TYPE, 'internal': api_proxy.BOOL_TYPE,\n 'internalGet': api_proxy.BOOL_TYPE, 'internalSet': api_proxy.BOOL_TYPE,\n 'keyable': api_proxy.BOOL_TYPE, 'listChildren': api_proxy.BOOL_TYPE,\n 'listDefault': api_proxy.BOOL_TYPE, 'listEnum': api_proxy.BOOL_TYPE,\n 'listParent': api_proxy.BOOL_TYPE, 'listSiblings': api_proxy.BOOL_TYPE,\n 'longName': api_proxy.BOOL_TYPE, 'maxExists': api_proxy.BOOL_TYPE,\n 'maximum': api_proxy.BOOL_TYPE, 'message': api_proxy.BOOL_TYPE,\n 'minExists': api_proxy.BOOL_TYPE, 'minimum': api_proxy.BOOL_TYPE,\n 'multi': api_proxy.BOOL_TYPE, 'niceName': api_proxy.BOOL_TYPE, 'node':\n api_proxy.STR_TYPE, 'numberOfChildren': api_proxy.BOOL_TYPE, 'range':\n api_proxy.BOOL_TYPE, 'rangeExists': api_proxy.BOOL_TYPE, 'readable':\n api_proxy.BOOL_TYPE, 'renderSource': api_proxy.BOOL_TYPE, 'shortName':\n api_proxy.BOOL_TYPE, 'softMax': api_proxy.BOOL_TYPE, 'softMaxExists':\n api_proxy.BOOL_TYPE, 'softMin': api_proxy.BOOL_TYPE, 'softMinExists':\n api_proxy.BOOL_TYPE, 'softRange': api_proxy.BOOL_TYPE,\n 'softRangeExists': api_proxy.BOOL_TYPE, 'storable': api_proxy.BOOL_TYPE,\n 'type': api_proxy.STR_TYPE, 'typeExact': api_proxy.STR_TYPE,\n 'usedAsColor': api_proxy.BOOL_TYPE, 'usedAsFilename': api_proxy.\n BOOL_TYPE, 'usesMultiBuilder': api_proxy.BOOL_TYPE, 'worldspace':\n api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}})\n", (5302, 7195), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((7983, 9043), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'caching': api_proxy.BOOL_TYPE, 'category': api_proxy.\n STR_TYPE, 'changedSinceFileOpen': api_proxy.BOOL_TYPE, 'channelBox':\n api_proxy.BOOL_TYPE, 'connectable': api_proxy.BOOL_TYPE, 'extension':\n api_proxy.BOOL_TYPE, 'fromPlugin': api_proxy.BOOL_TYPE, 'hasData':\n api_proxy.BOOL_TYPE, 'hasNullData': api_proxy.BOOL_TYPE, 'inUse':\n api_proxy.BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE, 'leaf': api_proxy.\n BOOL_TYPE, 'locked': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE,\n 'output': api_proxy.BOOL_TYPE, 'ramp': api_proxy.BOOL_TYPE, 'read':\n api_proxy.BOOL_TYPE, 'readOnly': api_proxy.BOOL_TYPE, 'scalar':\n api_proxy.BOOL_TYPE, 'scalarAndArray': api_proxy.BOOL_TYPE, 'settable':\n api_proxy.BOOL_TYPE, 'shortNames': api_proxy.BOOL_TYPE, 'string':\n 
api_proxy.STR_TYPE, 'unlocked': api_proxy.BOOL_TYPE, 'usedAsFilename':\n api_proxy.BOOL_TYPE, 'userDefined': api_proxy.BOOL_TYPE, 'visible':\n api_proxy.BOOL_TYPE, 'write': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'caching':\n api_proxy.BOOL_TYPE, 'category': api_proxy.STR_TYPE,\n 'changedSinceFileOpen': api_proxy.BOOL_TYPE, 'channelBox': api_proxy.\n BOOL_TYPE, 'connectable': api_proxy.BOOL_TYPE, 'extension': api_proxy.\n BOOL_TYPE, 'fromPlugin': api_proxy.BOOL_TYPE, 'hasData': api_proxy.\n BOOL_TYPE, 'hasNullData': api_proxy.BOOL_TYPE, 'inUse': api_proxy.\n BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE, 'leaf': api_proxy.BOOL_TYPE,\n 'locked': api_proxy.BOOL_TYPE, 'multi': api_proxy.BOOL_TYPE, 'output':\n api_proxy.BOOL_TYPE, 'ramp': api_proxy.BOOL_TYPE, 'read': api_proxy.\n BOOL_TYPE, 'readOnly': api_proxy.BOOL_TYPE, 'scalar': api_proxy.\n BOOL_TYPE, 'scalarAndArray': api_proxy.BOOL_TYPE, 'settable': api_proxy\n .BOOL_TYPE, 'shortNames': api_proxy.BOOL_TYPE, 'string': api_proxy.\n STR_TYPE, 'unlocked': api_proxy.BOOL_TYPE, 'usedAsFilename': api_proxy.\n BOOL_TYPE, 'userDefined': api_proxy.BOOL_TYPE, 'visible': api_proxy.\n BOOL_TYPE, 'write': api_proxy.BOOL_TYPE}})\n", (8004, 9043), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((9958, 10088), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'attribute': api_proxy.STR_TYPE, 'name': api_proxy.STR_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'attribute':\n api_proxy.STR_TYPE, 'name': api_proxy.STR_TYPE}})\n", (9979, 10088), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((10304, 10792), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'asString': api_proxy.BOOL_TYPE, 'caching': api_proxy.\n BOOL_TYPE, 'channelBox': api_proxy.BOOL_TYPE,\n 'expandEnvironmentVariables': api_proxy.BOOL_TYPE, 'keyable': api_proxy\n .BOOL_TYPE, 'lock': api_proxy.BOOL_TYPE, 'multiIndices': api_proxy.\n BOOL_TYPE, 'settable': api_proxy.BOOL_TYPE, 'silent': api_proxy.\n BOOL_TYPE, 'size': api_proxy.BOOL_TYPE, 'time': api_proxy.NUM_TYPE,\n 'type': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'asString':\n api_proxy.BOOL_TYPE, 'caching': api_proxy.BOOL_TYPE, 'channelBox':\n api_proxy.BOOL_TYPE, 'expandEnvironmentVariables': api_proxy.BOOL_TYPE,\n 'keyable': api_proxy.BOOL_TYPE, 'lock': api_proxy.BOOL_TYPE,\n 'multiIndices': api_proxy.BOOL_TYPE, 'settable': api_proxy.BOOL_TYPE,\n 'silent': api_proxy.BOOL_TYPE, 'size': api_proxy.BOOL_TYPE, 'time':\n api_proxy.NUM_TYPE, 'type': api_proxy.BOOL_TYPE}})\n", (10325, 10792), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((11281, 11655), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'alteredValue': api_proxy.BOOL_TYPE, 'caching': api_proxy.\n BOOL_TYPE, 'capacityHint': api_proxy.NUM_TYPE, 'channelBox': api_proxy.\n BOOL_TYPE, 'clamp': api_proxy.BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE,\n 'lock': api_proxy.BOOL_TYPE, 'size': api_proxy.NUM_TYPE, 'type':\n api_proxy.STR_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'alteredValue': api_proxy.BOOL_TYPE, 'caching': api_proxy.BOOL_TYPE,\n 'capacityHint': api_proxy.NUM_TYPE, 'channelBox': api_proxy.BOOL_TYPE,\n 'clamp': api_proxy.BOOL_TYPE, 'keyable': api_proxy.BOOL_TYPE, 'lock':\n api_proxy.BOOL_TYPE, 'size': api_proxy.NUM_TYPE, 'type': api_proxy.\n 
STR_TYPE}})\n", (11302, 11655), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((12040, 13412), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'attributeType': api_proxy.STR_TYPE, 'binaryTag': api_proxy\n .STR_TYPE, 'cachedInternally': api_proxy.BOOL_TYPE, 'category':\n api_proxy.STR_TYPE, 'dataType': api_proxy.STR_TYPE, 'defaultValue':\n api_proxy.NUM_TYPE, 'disconnectBehaviour': api_proxy.INT_TYPE,\n 'enumName': api_proxy.STR_TYPE, 'exists': api_proxy.BOOL_TYPE,\n 'fromPlugin': api_proxy.BOOL_TYPE, 'hasMaxValue': api_proxy.BOOL_TYPE,\n 'hasMinValue': api_proxy.BOOL_TYPE, 'hasSoftMaxValue': api_proxy.\n BOOL_TYPE, 'hasSoftMinValue': api_proxy.BOOL_TYPE, 'hidden': api_proxy.\n BOOL_TYPE, 'indexMatters': api_proxy.BOOL_TYPE,\n 'api_proxy.INT_TYPEernalSet': api_proxy.BOOL_TYPE, 'keyable': api_proxy\n .BOOL_TYPE, 'longName': api_proxy.STR_TYPE, 'maxValue': api_proxy.\n NUM_TYPE, 'minValue': api_proxy.NUM_TYPE, 'multi': api_proxy.BOOL_TYPE,\n 'niceName': api_proxy.STR_TYPE, 'numberOfChildren': api_proxy.INT_TYPE,\n 'parent': api_proxy.STR_TYPE, 'proxy': api_proxy.STR_TYPE, 'readable':\n api_proxy.BOOL_TYPE, 'shortName': api_proxy.STR_TYPE, 'softMaxValue':\n api_proxy.NUM_TYPE, 'softMinValue': api_proxy.NUM_TYPE, 'storable':\n api_proxy.BOOL_TYPE, 'usedAsColor': api_proxy.BOOL_TYPE,\n 'usedAsFilename': api_proxy.BOOL_TYPE, 'usedAsProxy': api_proxy.\n BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'attributeType': api_proxy.STR_TYPE, 'binaryTag': api_proxy.STR_TYPE,\n 'cachedInternally': api_proxy.BOOL_TYPE, 'category': api_proxy.STR_TYPE,\n 'dataType': api_proxy.STR_TYPE, 'defaultValue': api_proxy.NUM_TYPE,\n 'disconnectBehaviour': api_proxy.INT_TYPE, 'enumName': api_proxy.\n STR_TYPE, 'exists': api_proxy.BOOL_TYPE, 'fromPlugin': api_proxy.\n BOOL_TYPE, 'hasMaxValue': api_proxy.BOOL_TYPE, 'hasMinValue': api_proxy\n .BOOL_TYPE, 'hasSoftMaxValue': api_proxy.BOOL_TYPE, 'hasSoftMinValue':\n api_proxy.BOOL_TYPE, 'hidden': api_proxy.BOOL_TYPE, 'indexMatters':\n api_proxy.BOOL_TYPE, 'api_proxy.INT_TYPEernalSet': api_proxy.BOOL_TYPE,\n 'keyable': api_proxy.BOOL_TYPE, 'longName': api_proxy.STR_TYPE,\n 'maxValue': api_proxy.NUM_TYPE, 'minValue': api_proxy.NUM_TYPE, 'multi':\n api_proxy.BOOL_TYPE, 'niceName': api_proxy.STR_TYPE, 'numberOfChildren':\n api_proxy.INT_TYPE, 'parent': api_proxy.STR_TYPE, 'proxy': api_proxy.\n STR_TYPE, 'readable': api_proxy.BOOL_TYPE, 'shortName': api_proxy.\n STR_TYPE, 'softMaxValue': api_proxy.NUM_TYPE, 'softMinValue': api_proxy\n .NUM_TYPE, 'storable': api_proxy.BOOL_TYPE, 'usedAsColor': api_proxy.\n BOOL_TYPE, 'usedAsFilename': api_proxy.BOOL_TYPE, 'usedAsProxy':\n api_proxy.BOOL_TYPE, 'writable': api_proxy.BOOL_TYPE}})\n", (12061, 13412), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((14036, 14242), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', "{'properties': {'force': api_proxy.BOOL_TYPE, 'lock': api_proxy.BOOL_TYPE,\n 'nextAvailable': api_proxy.STR_TYPE, 'referenceDest': api_proxy.STR_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {'force':\n api_proxy.BOOL_TYPE, 'lock': api_proxy.BOOL_TYPE, 'nextAvailable':\n api_proxy.STR_TYPE, 'referenceDest': api_proxy.STR_TYPE}})\n", (14057, 14242), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((14529, 14637), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['api_proxy.DEFAULT_SCHEMA', 
"{'properties': {'nextAvailable': api_proxy.BOOL_TYPE}}"], {}), "(api_proxy.DEFAULT_SCHEMA, {'properties': {\n 'nextAvailable': api_proxy.BOOL_TYPE}})\n", (14550, 14637), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((14847, 14903), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', 'offset_schema'], {}), '(default_properties, offset_schema)\n', (14868, 14903), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((15118, 15192), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', 'offset_schema', 'cacheable_schema'], {}), '(default_properties, offset_schema, cacheable_schema)\n', (15139, 15192), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((15405, 15473), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', 'offset_schema', 'aim_schema'], {}), '(default_properties, offset_schema, aim_schema)\n', (15426, 15473), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((15680, 15818), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', "{'scaleCompensate': api_proxy.BOOL_TYPE, 'targetList': api_proxy.BOOL_TYPE}", 'offset_schema'], {}), "(default_properties, {'scaleCompensate': api_proxy.\n BOOL_TYPE, 'targetList': api_proxy.BOOL_TYPE}, offset_schema)\n", (15701, 15818), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((16214, 16415), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', "{'decompRotationToChild': api_proxy.BOOL_TYPE, 'skipRotate': api_proxy.\n STR_TYPE, 'skipTranslate': api_proxy.STR_TYPE}", 'offset_schema', 'cacheable_schema'], {}), "(default_properties, {'decompRotationToChild':\n api_proxy.BOOL_TYPE, 'skipRotate': api_proxy.STR_TYPE, 'skipTranslate':\n api_proxy.STR_TYPE}, offset_schema, cacheable_schema)\n", (16235, 16415), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((17003, 17056), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', 'aim_schema'], {}), '(default_properties, aim_schema)\n', (17024, 17056), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((17245, 17286), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties'], {}), '(default_properties)\n', (17266, 17286), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((17509, 17562), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties', 'aim_schema'], {}), '(default_properties, aim_schema)\n', (17530, 17562), True, 'import anvil.interfaces.api_proxy as api_proxy\n'), ((17758, 17799), 'anvil.interfaces.api_proxy.merge_dicts', 'api_proxy.merge_dicts', (['default_properties'], {}), '(default_properties)\n', (17779, 17799), True, 'import anvil.interfaces.api_proxy as api_proxy\n')]
|
"""
Django settings for angular_site project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import datetime, environ, os, ldap
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Set up environ vars
env = environ.Env(
ALLOWED_HOSTS=(list, ['127.0.0.1']),
)
environ.Env.read_env('.env')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
COMPRESS_ENABLED = True
ALLOWED_HOSTS = env('ALLOWED_HOSTS')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authentication',
'compressor',
'ldap',
'microblog',
'rest_framework'
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
}
JWT_AUTH = {
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(seconds=3600),
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'angular_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'angular_site.wsgi.application'
# LDAP Auth
# https://django-auth-ldap.readthedocs.io/en/latest/
AUTHENTICATION_BACKENDS = [
'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
]
AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = True
AUTH_LDAP_SERVER_URI = "ldap://10.10.10.1"
AUTH_LDAP_USER_DN_TEMPLATE = "uid=%(user)s,ou=People,dc=example,dc=org"
AUTH_LDAP_USER_ATTR_MAP = {"username": "uid", "first_name": "cn", "last_name": "sn", "email": "mail"}
AUTH_USER_MODEL = 'authentication.Account'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/opt/src/angular_site/staticfiles'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
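# Hypothetical URL wiring for the JWT_AUTH settings above (belongs in urls.py,
# not in this settings module; the URL paths are invented):
#
#   from rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token
#   urlpatterns += [
#       path('api/token/', obtain_jwt_token),
#       path('api/token/refresh/', refresh_jwt_token),  # uses JWT_ALLOW_REFRESH
#   ]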
|
[
"os.path.abspath",
"environ.Env.read_env",
"datetime.timedelta",
"os.path.join",
"environ.Env"
] |
[((519, 567), 'environ.Env', 'environ.Env', ([], {'ALLOWED_HOSTS': "(list, ['127.0.0.1'])"}), "(ALLOWED_HOSTS=(list, ['127.0.0.1']))\n", (530, 567), False, 'import datetime, environ, os, ldap\n'), ((579, 607), 'environ.Env.read_env', 'environ.Env.read_env', (['""".env"""'], {}), "('.env')\n", (599, 607), False, 'import datetime, environ, os, ldap\n'), ((1742, 1774), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (1760, 1774), False, 'import datetime, environ, os, ldap\n'), ((4380, 4412), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""static"""'], {}), "(BASE_DIR, 'static')\n", (4392, 4412), False, 'import datetime, environ, os, ldap\n'), ((462, 487), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (477, 487), False, 'import datetime, environ, os, ldap\n'), ((3431, 3467), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""db.sqlite3"""'], {}), "(BASE_DIR, 'db.sqlite3')\n", (3443, 3467), False, 'import datetime, environ, os, ldap\n')]
|
import re
from django.http import HttpResponse, Http404
from mauveinternet.markdown import get_models, get_model
# This needs to be added to the URL configuration as /admin/markdown/links
# so that the Javascript can find it.
def links(request):
"""Returns a list of the models and instances of that model
that can be linked to in the admin models page."""
if 'model' not in request.GET:
# output a JSON serialisation of the model list
        models = u',\n'.join([u"'%s': '%s'" % (mname, vname.replace(u"'", u"\\'"))
                              for mname, vname in get_models()])
        return HttpResponse('LinkDialog.updateModels({%s});' % models.encode('utf8'),
                content_type='application/javascript; charset=UTF-8')
else:
try:
model, queryset = get_model(request.GET['model'])
except ValueError:
raise Http404()
links = [(i.pk, unicode(i)) for i in queryset]
return HttpResponse('LinkDialog.updateInstances({%s})' %
(u',\n'.join([u"%d: '%s'" % (pk, title.replace("'", "\\'")) for pk, title in links])).encode('utf8'),
            content_type='application/javascript; charset=UTF-8')
|
[
"mauveinternet.markdown.get_model",
"mauveinternet.markdown.get_models",
"django.http.Http404"
] |
[((747, 778), 'mauveinternet.markdown.get_model', 'get_model', (["request.GET['model']"], {}), "(request.GET['model'])\n", (756, 778), False, 'from mauveinternet.markdown import get_models, get_model\n'), ((824, 833), 'django.http.Http404', 'Http404', ([], {}), '()\n', (831, 833), False, 'from django.http import HttpResponse, Http404\n'), ((608, 620), 'mauveinternet.markdown.get_models', 'get_models', ([], {}), '()\n', (618, 620), False, 'from mauveinternet.markdown import get_models, get_model\n')]
|
"""
Quintic Polynomials Planner
author: <NAME> (@Atsushi_twi)
Ref:
- [Local Path Planning And Motion Control For Agv In Positioning](http://ieeexplore.ieee.org/document/637936/)
"""
import numpy as np
import matplotlib.pyplot as plt
import math
# parameter
MAX_T = 100.0 # maximum time to the goal [s]
MIN_T = 5.0 # minimum time to the goal[s]
show_animation = True
class quintic_polynomial:
def __init__(self, xs, vxs, axs, xe, vxe, axe, T):
        # calc coefficients of the quintic polynomial
self.xs = xs
self.vxs = vxs
self.axs = axs
self.xe = xe
self.vxe = vxe
self.axe = axe
self.a0 = xs
self.a1 = vxs
self.a2 = axs / 2.0
A = np.array([[T**3, T**4, T**5],
[3 * T ** 2, 4 * T ** 3, 5 * T ** 4],
[6 * T, 12 * T ** 2, 20 * T ** 3]])
b = np.array([xe - self.a0 - self.a1 * T - self.a2 * T**2,
vxe - self.a1 - 2 * self.a2 * T,
axe - 2 * self.a2])
x = np.linalg.solve(A, b)
self.a3 = x[0]
self.a4 = x[1]
self.a5 = x[2]
def calc_point(self, t):
xt = self.a0 + self.a1 * t + self.a2 * t**2 + \
self.a3 * t**3 + self.a4 * t**4 + self.a5 * t**5
return xt
def calc_first_derivative(self, t):
xt = self.a1 + 2 * self.a2 * t + \
3 * self.a3 * t**2 + 4 * self.a4 * t**3 + 5 * self.a5 * t**4
return xt
def calc_second_derivative(self, t):
xt = 2 * self.a2 + 6 * self.a3 * t + 12 * self.a4 * t**2 + 20 * self.a5 * t**3
return xt
def calc_third_derivative(self, t):
xt = 6 * self.a3 + 24 * self.a4 * t + 60 * self.a5 * t**2
return xt
def quintic_polynomials_planner(sx, sy, syaw, sv, sa, gx, gy, gyaw, gv, ga, max_accel, max_jerk, dt):
"""
    quintic polynomial planner
input
sx: start x position [m]
sy: start y position [m]
        syaw: start yaw angle [rad]
        sv: start speed [m/s]
        sa: start accel [m/ss]
gx: goal x position [m]
gy: goal y position [m]
        gyaw: goal yaw angle [rad]
        gv: goal speed [m/s]
        ga: goal accel [m/ss]
max_accel: maximum accel [m/ss]
max_jerk: maximum jerk [m/sss]
dt: time tick [s]
return
time: time result
rx: x position result list
ry: y position result list
ryaw: yaw angle result list
rv: velocity result list
ra: accel result list
"""
vxs = sv * math.cos(syaw)
vys = sv * math.sin(syaw)
vxg = gv * math.cos(gyaw)
vyg = gv * math.sin(gyaw)
axs = sa * math.cos(syaw)
ays = sa * math.sin(syaw)
axg = ga * math.cos(gyaw)
ayg = ga * math.sin(gyaw)
for T in np.arange(MIN_T, MAX_T, MIN_T):
        xqp = quintic_polynomial(sx, vxs, axs, gx, vxg, axg, T)
        yqp = quintic_polynomial(sy, vys, ays, gy, vyg, ayg, T)
time, rx, ry, ryaw, rv, ra, rj = [], [], [], [], [], [], []
for t in np.arange(0.0, T + dt, dt):
time.append(t)
rx.append(xqp.calc_point(t))
ry.append(yqp.calc_point(t))
vx = xqp.calc_first_derivative(t)
vy = yqp.calc_first_derivative(t)
v = np.hypot(vx, vy)
yaw = math.atan2(vy, vx)
rv.append(v)
ryaw.append(yaw)
ax = xqp.calc_second_derivative(t)
ay = yqp.calc_second_derivative(t)
a = np.hypot(ax, ay)
if len(rv) >= 2 and rv[-1] - rv[-2] < 0.0:
a *= -1
ra.append(a)
jx = xqp.calc_third_derivative(t)
jy = yqp.calc_third_derivative(t)
j = np.hypot(jx, jy)
if len(ra) >= 2 and ra[-1] - ra[-2] < 0.0:
j *= -1
rj.append(j)
if max([abs(i) for i in ra]) <= max_accel and max([abs(i) for i in rj]) <= max_jerk:
print("find path!!")
break
if show_animation:
for i in range(len(rx)):
plt.cla()
plt.grid(True)
plt.axis("equal")
plot_arrow(sx, sy, syaw)
plot_arrow(gx, gy, gyaw)
plot_arrow(rx[i], ry[i], ryaw[i])
plt.title("Time[s]:" + str(time[i])[0:4] +
" v[m/s]:" + str(rv[i])[0:4] +
" a[m/ss]:" + str(ra[i])[0:4] +
" jerk[m/sss]:" + str(rj[i])[0:4],
)
plt.pause(0.001)
return time, rx, ry, ryaw, rv, ra, rj
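def _check_boundary_conditions():
    # Minimal self-check sketch (not part of the original file): the fitted
    # quintic must reproduce its start/end position, velocity and acceleration.
    # All numbers are arbitrary test values.
    qp = quintic_polynomial(xs=0.0, vxs=1.0, axs=0.1, xe=5.0, vxe=0.5, axe=-0.1, T=8.0)
    assert abs(qp.calc_point(0.0)) < 1e-9
    assert abs(qp.calc_point(8.0) - 5.0) < 1e-6
    assert abs(qp.calc_first_derivative(8.0) - 0.5) < 1e-6
    assert abs(qp.calc_second_derivative(8.0) + 0.1) < 1e-6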
def plot_arrow(x, y, yaw, length=1.0, width=0.5, fc="r", ec="k"):
"""
Plot arrow
"""
if not isinstance(x, float):
for (ix, iy, iyaw) in zip(x, y, yaw):
plot_arrow(ix, iy, iyaw)
else:
plt.arrow(x, y, length * math.cos(yaw), length * math.sin(yaw),
fc=fc, ec=ec, head_width=width, head_length=width)
plt.plot(x, y)
def main():
print(__file__ + " start!!")
sx = 10.0 # start x position [m]
sy = 10.0 # start y position [m]
syaw = math.radians(10.0) # start yaw angle [rad]
sv = 1.0 # start speed [m/s]
sa = 0.1 # start accel [m/ss]
gx = 30.0 # goal x position [m]
gy = -10.0 # goal y position [m]
gyaw = math.radians(20.0) # goal yaw angle [rad]
gv = 1.0 # goal speed [m/s]
ga = 0.1 # goal accel [m/ss]
max_accel = 1.0 # max accel [m/ss]
max_jerk = 0.5 # max jerk [m/sss]
dt = 0.1 # time tick [s]
    time, x, y, yaw, v, a, j = quintic_polynomials_planner(
sx, sy, syaw, sv, sa, gx, gy, gyaw, gv, ga, max_accel, max_jerk, dt)
if show_animation:
plt.plot(x, y, "-r")
# plt.subplots()
# plt.plot(time, [math.degrees(i) for i in yaw], "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("Yaw[deg]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, v, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("Speed[m/s]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, a, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("accel[m/ss]")
# plt.grid(True)
#
# plt.subplots()
# plt.plot(time, j, "-r")
# plt.xlabel("Time[s]")
# plt.ylabel("jerk[m/sss]")
# plt.grid(True)
plt.show()
if __name__ == '__main__':
main()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.atan2",
"math.radians",
"matplotlib.pyplot.axis",
"math.sin",
"numpy.hypot",
"numpy.arange",
"math.cos",
"numpy.array",
"matplotlib.pyplot.cla",
"matplotlib.pyplot.pause",
"numpy.linalg.solve",
"matplotlib.pyplot.grid"
] |
[((2750, 2780), 'numpy.arange', 'np.arange', (['MIN_T', 'MAX_T', 'MIN_T'], {}), '(MIN_T, MAX_T, MIN_T)\n', (2759, 2780), True, 'import numpy as np\n'), ((5052, 5070), 'math.radians', 'math.radians', (['(10.0)'], {}), '(10.0)\n', (5064, 5070), False, 'import math\n'), ((5251, 5269), 'math.radians', 'math.radians', (['(20.0)'], {}), '(20.0)\n', (5263, 5269), False, 'import math\n'), ((727, 841), 'numpy.array', 'np.array', (["[[T ** 3, T ** 4, T ** 5], [3 * T ** 2, 4 * T ** 3, 5 * T ** 4], [6 * T, 12 *\n T ** 2, 20 * T ** 3]]"], {}), '([[T ** 3, T ** 4, T ** 5], [3 * T ** 2, 4 * T ** 3, 5 * T ** 4], [\n 6 * T, 12 * T ** 2, 20 * T ** 3]])\n', (735, 841), True, 'import numpy as np\n'), ((887, 1000), 'numpy.array', 'np.array', (['[xe - self.a0 - self.a1 * T - self.a2 * T ** 2, vxe - self.a1 - 2 * self.a2 *\n T, axe - 2 * self.a2]'], {}), '([xe - self.a0 - self.a1 * T - self.a2 * T ** 2, vxe - self.a1 - 2 *\n self.a2 * T, axe - 2 * self.a2])\n', (895, 1000), True, 'import numpy as np\n'), ((1051, 1072), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (1066, 1072), True, 'import numpy as np\n'), ((2510, 2524), 'math.cos', 'math.cos', (['syaw'], {}), '(syaw)\n', (2518, 2524), False, 'import math\n'), ((2540, 2554), 'math.sin', 'math.sin', (['syaw'], {}), '(syaw)\n', (2548, 2554), False, 'import math\n'), ((2570, 2584), 'math.cos', 'math.cos', (['gyaw'], {}), '(gyaw)\n', (2578, 2584), False, 'import math\n'), ((2600, 2614), 'math.sin', 'math.sin', (['gyaw'], {}), '(gyaw)\n', (2608, 2614), False, 'import math\n'), ((2631, 2645), 'math.cos', 'math.cos', (['syaw'], {}), '(syaw)\n', (2639, 2645), False, 'import math\n'), ((2661, 2675), 'math.sin', 'math.sin', (['syaw'], {}), '(syaw)\n', (2669, 2675), False, 'import math\n'), ((2691, 2705), 'math.cos', 'math.cos', (['gyaw'], {}), '(gyaw)\n', (2699, 2705), False, 'import math\n'), ((2721, 2735), 'math.sin', 'math.sin', (['gyaw'], {}), '(gyaw)\n', (2729, 2735), False, 'import math\n'), ((2995, 3021), 'numpy.arange', 'np.arange', (['(0.0)', '(T + dt)', 'dt'], {}), '(0.0, T + dt, dt)\n', (3004, 3021), True, 'import numpy as np\n'), ((4902, 4916), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4910, 4916), True, 'import matplotlib.pyplot as plt\n'), ((5639, 5659), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""-r"""'], {}), "(x, y, '-r')\n", (5647, 5659), True, 'import matplotlib.pyplot as plt\n'), ((6333, 6343), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6341, 6343), True, 'import matplotlib.pyplot as plt\n'), ((3241, 3257), 'numpy.hypot', 'np.hypot', (['vx', 'vy'], {}), '(vx, vy)\n', (3249, 3257), True, 'import numpy as np\n'), ((3276, 3294), 'math.atan2', 'math.atan2', (['vy', 'vx'], {}), '(vy, vx)\n', (3286, 3294), False, 'import math\n'), ((3460, 3476), 'numpy.hypot', 'np.hypot', (['ax', 'ay'], {}), '(ax, ay)\n', (3468, 3476), True, 'import numpy as np\n'), ((3690, 3706), 'numpy.hypot', 'np.hypot', (['jx', 'jy'], {}), '(jx, jy)\n', (3698, 3706), True, 'import numpy as np\n'), ((4025, 4034), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (4032, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4047, 4061), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4055, 4061), True, 'import matplotlib.pyplot as plt\n'), ((4074, 4091), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4082, 4091), True, 'import matplotlib.pyplot as plt\n'), ((4467, 4483), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (4476, 4483), True, 'import matplotlib.pyplot as plt\n'), ((4786, 4799), 'math.cos', 'math.cos', (['yaw'], {}), '(yaw)\n', (4794, 4799), False, 'import math\n'), ((4810, 4823), 'math.sin', 'math.sin', (['yaw'], {}), '(yaw)\n', (4818, 4823), False, 'import math\n')]
|
import turtle
turtle.mode("logo")
turtle.shape("turtle")
turtle.bgcolor("black")
turtle.pensize(7)
turtle.colormode(255)
turtle.pencolor(157, 208, 228)
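# logo mode: heading 0 points north, so forward() draws each stroke upward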
for i in range(4):
turtle.pu()
turtle.setpos(30*i, 0)
turtle.pd()
turtle.forward(80)
turtle.done()
|
[
"turtle.shape",
"turtle.pensize",
"turtle.mode",
"turtle.done",
"turtle.colormode",
"turtle.pd",
"turtle.forward",
"turtle.pu",
"turtle.setpos",
"turtle.bgcolor",
"turtle.pencolor"
] |
[((15, 34), 'turtle.mode', 'turtle.mode', (['"""logo"""'], {}), "('logo')\n", (26, 34), False, 'import turtle\n'), ((35, 57), 'turtle.shape', 'turtle.shape', (['"""turtle"""'], {}), "('turtle')\n", (47, 57), False, 'import turtle\n'), ((58, 81), 'turtle.bgcolor', 'turtle.bgcolor', (['"""black"""'], {}), "('black')\n", (72, 81), False, 'import turtle\n'), ((82, 99), 'turtle.pensize', 'turtle.pensize', (['(7)'], {}), '(7)\n', (96, 99), False, 'import turtle\n'), ((100, 121), 'turtle.colormode', 'turtle.colormode', (['(255)'], {}), '(255)\n', (116, 121), False, 'import turtle\n'), ((122, 152), 'turtle.pencolor', 'turtle.pencolor', (['(157)', '(208)', '(228)'], {}), '(157, 208, 228)\n', (137, 152), False, 'import turtle\n'), ((258, 271), 'turtle.done', 'turtle.done', ([], {}), '()\n', (269, 271), False, 'import turtle\n'), ((177, 188), 'turtle.pu', 'turtle.pu', ([], {}), '()\n', (186, 188), False, 'import turtle\n'), ((194, 218), 'turtle.setpos', 'turtle.setpos', (['(30 * i)', '(0)'], {}), '(30 * i, 0)\n', (207, 218), False, 'import turtle\n'), ((222, 233), 'turtle.pd', 'turtle.pd', ([], {}), '()\n', (231, 233), False, 'import turtle\n'), ((239, 257), 'turtle.forward', 'turtle.forward', (['(80)'], {}), '(80)\n', (253, 257), False, 'import turtle\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-27 17:09
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('household', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='EnergieType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='e.g. Gas, Water', max_length=256, unique=True, verbose_name='energie type')),
('e_unit', models.CharField(help_text='e.g. KW', max_length=128, verbose_name='energy unit')),
],
options={
'verbose_name_plural': 'EnergieTypes',
},
),
migrations.CreateModel(
name='MeterReading',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('meter_reading', models.FloatField(verbose_name='meter reading')),
('ts', models.DateTimeField(verbose_name='timestamp measurement')),
('household', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='household.Household')),
('meter_register', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='MeterType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='e.g. day counter elektricity', max_length=256, verbose_name='meter type')),
('variant', models.CharField(default='standard', max_length=256, verbose_name='variant')),
('color_whole', models.CharField(default='White digits on black blackground', max_length=128, verbose_name='color whole meter part')),
('max_whole', models.IntegerField(default=6, verbose_name='maximum number of figures whole meter part')),
('color_fraction', models.CharField(blank=True, default='White digits on black blackground', max_length=128, null=True, verbose_name='Color fraction figure')),
('max_fraction', models.IntegerField(default=4, verbose_name='maximum number of figures fraction part')),
('photo', models.ImageField(blank=True, null=True, upload_to='', verbose_name='photo meter (200x200)')),
('energie_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meter.EnergieType')),
],
),
migrations.AddField(
model_name='meterreading',
name='meter_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='meter.MeterType'),
),
migrations.AlterUniqueTogether(
name='meterreading',
unique_together=set([('ts', 'household', 'meter_type')]),
),
]
|
[
"django.db.migrations.swappable_dependency",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.FloatField",
"django.db.models.AutoField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((311, 368), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (342, 368), False, 'from django.db import migrations, models\n'), ((3031, 3120), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""meter.MeterType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'meter.MeterType')\n", (3048, 3120), False, 'from django.db import migrations, models\n'), ((543, 636), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (559, 636), False, 'from django.db import migrations, models\n'), ((660, 767), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""e.g. Gas, Water"""', 'max_length': '(256)', 'unique': '(True)', 'verbose_name': '"""energie type"""'}), "(help_text='e.g. Gas, Water', max_length=256, unique=True,\n verbose_name='energie type')\n", (676, 767), False, 'from django.db import migrations, models\n'), ((793, 879), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""e.g. KW"""', 'max_length': '(128)', 'verbose_name': '"""energy unit"""'}), "(help_text='e.g. KW', max_length=128, verbose_name=\n 'energy unit')\n", (809, 879), False, 'from django.db import migrations, models\n'), ((1104, 1197), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1120, 1197), False, 'from django.db import migrations, models\n'), ((1230, 1277), 'django.db.models.FloatField', 'models.FloatField', ([], {'verbose_name': '"""meter reading"""'}), "(verbose_name='meter reading')\n", (1247, 1277), False, 'from django.db import migrations, models\n'), ((1303, 1361), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'verbose_name': '"""timestamp measurement"""'}), "(verbose_name='timestamp measurement')\n", (1323, 1361), False, 'from django.db import migrations, models\n'), ((1394, 1487), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""household.Household"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'household.Household')\n", (1411, 1487), False, 'from django.db import migrations, models\n'), ((1520, 1616), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': 'settings.AUTH_USER_MODEL'}), '(on_delete=django.db.models.deletion.CASCADE, to=settings.\n AUTH_USER_MODEL)\n', (1537, 1616), False, 'from django.db import migrations, models\n'), ((1746, 1839), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1762, 1839), False, 'from django.db import migrations, models\n'), ((1863, 1968), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""e.g. day counter elektricity"""', 'max_length': '(256)', 'verbose_name': '"""meter type"""'}), "(help_text='e.g. day counter elektricity', max_length=256,\n verbose_name='meter type')\n", (1879, 1968), False, 'from django.db import migrations, models\n'), ((1995, 2071), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""standard"""', 'max_length': '(256)', 'verbose_name': '"""variant"""'}), "(default='standard', max_length=256, verbose_name='variant')\n", (2011, 2071), False, 'from django.db import migrations, models\n'), ((2106, 2227), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""White digits on black blackground"""', 'max_length': '(128)', 'verbose_name': '"""color whole meter part"""'}), "(default='White digits on black blackground', max_length=\n 128, verbose_name='color whole meter part')\n", (2122, 2227), False, 'from django.db import migrations, models\n'), ((2255, 2349), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(6)', 'verbose_name': '"""maximum number of figures whole meter part"""'}), "(default=6, verbose_name=\n 'maximum number of figures whole meter part')\n", (2274, 2349), False, 'from django.db import migrations, models\n'), ((2382, 2524), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""White digits on black blackground"""', 'max_length': '(128)', 'null': '(True)', 'verbose_name': '"""Color fraction figure"""'}), "(blank=True, default='White digits on black blackground',\n max_length=128, null=True, verbose_name='Color fraction figure')\n", (2398, 2524), False, 'from django.db import migrations, models\n'), ((2556, 2647), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(4)', 'verbose_name': '"""maximum number of figures fraction part"""'}), "(default=4, verbose_name=\n 'maximum number of figures fraction part')\n", (2575, 2647), False, 'from django.db import migrations, models\n'), ((2671, 2768), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '""""""', 'verbose_name': '"""photo meter (200x200)"""'}), "(blank=True, null=True, upload_to='', verbose_name=\n 'photo meter (200x200)')\n", (2688, 2768), False, 'from django.db import migrations, models\n'), ((2799, 2890), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""meter.EnergieType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'meter.EnergieType')\n", (2816, 2890), False, 'from django.db import migrations, models\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypothesis import assume, given
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class DropoutTest(hu.HypothesisTestCase):
@given(X=hu.tensor(),
in_place=st.booleans(),
ratio=st.floats(0, 0.999),
**mu.gcs)
def test_dropout_is_test(self, X, in_place, ratio, gc, dc):
"""Test with is_test=True for a deterministic reference impl."""
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'],
ratio=ratio, is_test=True)
self.assertDeviceChecks(dc, op, [X], [0])
# No sense in checking gradients for test phase
def reference_dropout_test(x):
return x, np.ones(x.shape, dtype=np.bool)
self.assertReferenceChecks(
gc, op, [X], reference_dropout_test,
# The 'mask' output may be uninitialized
outputs_to_check=[0])
@given(X=hu.tensor(),
in_place=st.booleans(),
output_mask=st.booleans(),
**mu.gcs)
    @unittest.skipIf(True, "Skip due to different rand seed.")
def test_dropout_ratio0(self, X, in_place, output_mask, gc, dc):
"""Test with ratio=0 for a deterministic reference impl."""
is_test = not output_mask
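        # with ratio=0 dropout is an identity op, so the reference just passes x through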
op = core.CreateOperator('Dropout', ['X'],
['X' if in_place else 'Y'] +
(['mask'] if output_mask else []),
ratio=0.0, is_test=is_test)
self.assertDeviceChecks(dc, op, [X], [0])
def reference_dropout_ratio0(x):
return (x,) if is_test else (x, np.ones(x.shape, dtype=np.bool))
self.assertReferenceChecks(
gc, op, [X], reference_dropout_ratio0, outputs_to_check=[0])
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"unittest.skipIf",
"caffe2.python.hypothesis_test_util.tensor",
"numpy.ones",
"hypothesis.strategies.booleans",
"caffe2.python.core.CreateOperator",
"hypothesis.strategies.floats"
] |
[((430, 495), 'unittest.skipIf', 'unittest.skipIf', (['(not workspace.C.use_mkldnn)', '"""No MKLDNN support."""'], {}), "(not workspace.C.use_mkldnn, 'No MKLDNN support.')\n", (445, 495), False, 'import unittest\n'), ((1467, 1524), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""Skip duo to different rand seed."""'], {}), "(True, 'Skip duo to different rand seed.')\n", (1482, 1524), False, 'import unittest\n'), ((2249, 2264), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2262, 2264), False, 'import unittest\n'), ((809, 906), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Dropout"""', "['X']", "['X' if in_place else 'Y']"], {'ratio': 'ratio', 'is_test': '(True)'}), "('Dropout', ['X'], ['X' if in_place else 'Y'], ratio=\n ratio, is_test=True)\n", (828, 906), False, 'from caffe2.python import core, workspace\n'), ((1709, 1843), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Dropout"""', "['X']", "(['X' if in_place else 'Y'] + (['mask'] if output_mask else []))"], {'ratio': '(0.0)', 'is_test': 'is_test'}), "('Dropout', ['X'], ['X' if in_place else 'Y'] + (['mask'\n ] if output_mask else []), ratio=0.0, is_test=is_test)\n", (1728, 1843), False, 'from caffe2.python import core, workspace\n'), ((552, 563), 'caffe2.python.hypothesis_test_util.tensor', 'hu.tensor', ([], {}), '()\n', (561, 563), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((585, 598), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (596, 598), True, 'import hypothesis.strategies as st\n'), ((617, 636), 'hypothesis.strategies.floats', 'st.floats', (['(0)', '(0.999)'], {}), '(0, 0.999)\n', (626, 636), True, 'import hypothesis.strategies as st\n'), ((1355, 1366), 'caffe2.python.hypothesis_test_util.tensor', 'hu.tensor', ([], {}), '()\n', (1364, 1366), True, 'import caffe2.python.hypothesis_test_util as hu\n'), ((1388, 1401), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1399, 1401), True, 'import hypothesis.strategies as st\n'), ((1426, 1439), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (1437, 1439), True, 'import hypothesis.strategies as st\n'), ((1137, 1168), 'numpy.ones', 'np.ones', (['x.shape'], {'dtype': 'np.bool'}), '(x.shape, dtype=np.bool)\n', (1144, 1168), True, 'import numpy as np\n'), ((2075, 2106), 'numpy.ones', 'np.ones', (['x.shape'], {'dtype': 'np.bool'}), '(x.shape, dtype=np.bool)\n', (2082, 2106), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
"""Tests the Netstat client action."""
from absl import app
from grr_response_client.client_actions import network
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr.test_lib import client_test_lib
from grr.test_lib import test_lib
class NetstatActionTest(client_test_lib.EmptyActionTest):
"""Tests the Netstat client action."""
def testListNetworkConnections(self):
result = self.RunAction(
network.ListNetworkConnections,
arg=rdf_client_action.ListNetworkConnectionsArgs())
for r in result:
self.assertTrue(r.process_name)
self.assertTrue(r.local_address)
def testListNetworkConnectionsFilter(self):
result = self.RunAction(
network.ListNetworkConnections,
arg=rdf_client_action.ListNetworkConnectionsArgs(listening_only=True))
for r in result:
self.assertTrue(r.process_name)
self.assertTrue(r.local_address)
self.assertEqual(r.state, "LISTEN")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
[
"grr_response_core.lib.rdfvalues.client_action.ListNetworkConnectionsArgs",
"absl.app.run",
"grr.test_lib.test_lib.main"
] |
[((1017, 1036), 'grr.test_lib.test_lib.main', 'test_lib.main', (['argv'], {}), '(argv)\n', (1030, 1036), False, 'from grr.test_lib import test_lib\n'), ((1068, 1081), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1075, 1081), False, 'from absl import app\n'), ((516, 562), 'grr_response_core.lib.rdfvalues.client_action.ListNetworkConnectionsArgs', 'rdf_client_action.ListNetworkConnectionsArgs', ([], {}), '()\n', (560, 562), True, 'from grr_response_core.lib.rdfvalues import client_action as rdf_client_action\n'), ((790, 855), 'grr_response_core.lib.rdfvalues.client_action.ListNetworkConnectionsArgs', 'rdf_client_action.ListNetworkConnectionsArgs', ([], {'listening_only': '(True)'}), '(listening_only=True)\n', (834, 855), True, 'from grr_response_core.lib.rdfvalues import client_action as rdf_client_action\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import textwrap
import tokenize
from collections import deque
from dataclasses import dataclass
from enum import Enum
from io import BytesIO
from typing import Iterable, Mapping, Optional, Sequence
from fixit.common.line_mapping import LineMappingInfo
BODY_PREFIX = "# lint:"
BODY_PREFIX_WITH_SPACE = f"{BODY_PREFIX} "
MAX_LINES_PLACEHOLDER = " ..."
DEFAULT_CODE_WIDTH = 88
DEFAULT_MIN_COMMENT_WIDTH = 40
class SuppressionCommentKind(Enum):
IGNORE = "lint-ignore"
FIXME = "lint-fixme"
@dataclass(frozen=True, order=True)
class SuppressionComment:
"""
Represents a `# lint-fixme` or `# lint-ignore` comment that prevents the linter from
firing on the subsequent line. If the `message` is too long, the comment is wrapped
to fill multiple lines.
Wrapped lines are prefixed with `# lint:`, which allows us to figure out where a
suppression comment starts and ends, so we can automate removal of unused multiline
comments.
At some point in the future, we may extend this to support `# lint-ignore` comments
too.
"""
kind: SuppressionCommentKind
before_line: int # 1-indexed
code: str
message: Optional[str] = None
max_lines: int = 3
def to_lines(self, width: int) -> Sequence[str]:
message = self.message
if message is None:
return [f"# {self.kind.value}: {self.code}"]
# chunk the message up splitting by newlines
raw_message_lines = message.split("\n")
lines = []
lines.extend(
textwrap.wrap(
f"# {self.kind.value}: {self.code}: " + raw_message_lines[0],
width=width,
subsequent_indent=BODY_PREFIX_WITH_SPACE,
)
)
# textwrap replaces newlines (`\n`) with a space. This isn't the behavior we
# want, so we need to wrap each line independently.
for rml in raw_message_lines[1:]:
if rml == "":
lines.append(BODY_PREFIX)
else:
lines.extend(
textwrap.wrap(
rml,
width=width,
initial_indent=BODY_PREFIX_WITH_SPACE,
subsequent_indent=BODY_PREFIX_WITH_SPACE,
)
)
# Unfortunately our custom handling of newlines also means that we can't use
# textwrap's `max_lines` and `placeholder` features, and we have to do it
# ourselves.
if len(lines) > self.max_lines:
lines = lines[: self.max_lines]
last_line = lines[-1]
# keep removing words from the end until we have room for our placeholder
while last_line and len(last_line) > (width - len(MAX_LINES_PLACEHOLDER)):
# this must remove at least one character each time, otherwise we could
# get stuck in an infinite loop
last_line = re.sub(r"(\s*\S+|\s+)\Z", "", last_line)
# if we removed too much, add the `# lint:` prefix back
if len(last_line) < len(BODY_PREFIX):
last_line = BODY_PREFIX
last_line += MAX_LINES_PLACEHOLDER
lines[-1] = last_line
return lines
@dataclass(frozen=True)
class InsertSuppressionsResult:
# An updated sequence of lines with included newlines. These lines can be joined to
# generate the resulting source code.
updated_source: bytes
# It may not be possible to insert a comment where one was needed (e.g. we can't
# insert a comment inside a multiline string). In those cases, we need to mark these
# attempted insertions as failures.
failed_insertions: Sequence[SuppressionComment]
def _get_indentations(tokens: Iterable[tokenize.TokenInfo]) -> Mapping[int, str]:
"""
Maps logical lines to their indentations.
This only works for logical lines because logical lines are the only lines with
measurable indentation.
"""
result = {}
# We're 1-indexed, so 0 isn't a real line. The ENCODING dummy token uses it, but we
# want to skip ENCODING.
prev_line = 0
for tok in tokens:
if tok.type in (tokenize.INDENT, tokenize.DEDENT):
# These dummy tokens have the wrong start column and always exist alongside
# a non-dummy token. Skip them.
continue
# we only want to know about the first token on each unique line
if tok.start[0] != prev_line:
result[tok.start[0]] = tok.line[: tok.start[1]]
prev_line = tok.start[0]
return result
def insert_suppressions(
source: bytes,
comments: Iterable[SuppressionComment],
*,
code_width: int = DEFAULT_CODE_WIDTH,
min_comment_width: int = DEFAULT_MIN_COMMENT_WIDTH,
) -> InsertSuppressionsResult:
"""
Given an iterable of `lines`, forms a new sequence of lines with `comments`
inserted.
"""
encoding = tokenize.detect_encoding(BytesIO(source).readline)[0]
tokens = tuple(tokenize.tokenize(BytesIO(source).readline))
indentations = _get_indentations(tokens)
physical_to_logical = LineMappingInfo.compute(tokens=tokens).physical_to_logical
comments_queue = deque(sorted(comments)) # sort by line number
updated_lines = []
for line_number, line_bytes in enumerate(BytesIO(source).readlines(), start=1):
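        # emit any queued comments whose logical target line maps to this physical line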
while comments_queue:
target_line = physical_to_logical[comments_queue[0].before_line]
if target_line == line_number:
indent = indentations[line_number]
width = max(code_width - len(indent), min_comment_width)
for line in comments_queue.popleft().to_lines(width):
updated_lines.append(f"{indent}{line}\n".encode(encoding))
else:
break
updated_lines.append(line_bytes)
return InsertSuppressionsResult(
updated_source=b"".join(updated_lines), failed_insertions=tuple(comments_queue)
)
|
[
"io.BytesIO",
"fixit.common.line_mapping.LineMappingInfo.compute",
"textwrap.wrap",
"re.sub",
"dataclasses.dataclass"
] |
[((689, 723), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)', 'order': '(True)'}), '(frozen=True, order=True)\n', (698, 723), False, 'from dataclasses import dataclass\n'), ((3449, 3471), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (3458, 3471), False, 'from dataclasses import dataclass\n'), ((5338, 5376), 'fixit.common.line_mapping.LineMappingInfo.compute', 'LineMappingInfo.compute', ([], {'tokens': 'tokens'}), '(tokens=tokens)\n', (5361, 5376), False, 'from fixit.common.line_mapping import LineMappingInfo\n'), ((1722, 1856), 'textwrap.wrap', 'textwrap.wrap', (["(f'# {self.kind.value}: {self.code}: ' + raw_message_lines[0])"], {'width': 'width', 'subsequent_indent': 'BODY_PREFIX_WITH_SPACE'}), "(f'# {self.kind.value}: {self.code}: ' + raw_message_lines[0],\n width=width, subsequent_indent=BODY_PREFIX_WITH_SPACE)\n", (1735, 1856), False, 'import textwrap\n'), ((3144, 3187), 're.sub', 're.sub', (['"""(\\\\s*\\\\S+|\\\\s+)\\\\Z"""', '""""""', 'last_line'], {}), "('(\\\\s*\\\\S+|\\\\s+)\\\\Z', '', last_line)\n", (3150, 3187), False, 'import re\n'), ((5174, 5189), 'io.BytesIO', 'BytesIO', (['source'], {}), '(source)\n', (5181, 5189), False, 'from io import BytesIO\n'), ((5240, 5255), 'io.BytesIO', 'BytesIO', (['source'], {}), '(source)\n', (5247, 5255), False, 'from io import BytesIO\n'), ((5534, 5549), 'io.BytesIO', 'BytesIO', (['source'], {}), '(source)\n', (5541, 5549), False, 'from io import BytesIO\n'), ((2250, 2366), 'textwrap.wrap', 'textwrap.wrap', (['rml'], {'width': 'width', 'initial_indent': 'BODY_PREFIX_WITH_SPACE', 'subsequent_indent': 'BODY_PREFIX_WITH_SPACE'}), '(rml, width=width, initial_indent=BODY_PREFIX_WITH_SPACE,\n subsequent_indent=BODY_PREFIX_WITH_SPACE)\n', (2263, 2366), False, 'import textwrap\n')]
|
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.6.4'
from . import layers
from . import preprocessing
from . import utils
from . import data
from . import initializers
from . import losses
from . import metrics
from . import eval_metrics
from . import regularizers
from . import hyopt
from . import optimizers
from .legacy.get_data import prepare_data
# Add all the custom objects to keras
from keras.utils.generic_utils import get_custom_objects
custom_objects_modules = [initializers, metrics, regularizers, layers,
losses, optimizers]
custom_objects = {}
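# register every available custom object with Keras and keep a local copy as well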
for mod in custom_objects_modules:
for f in mod.AVAILABLE:
        get_custom_objects()[f] = mod.get(f)
        custom_objects[f] = mod.get(f)
# remove variables from the scope
del get_custom_objects
# Setup logging
import logging
log_formatter = \
logging.Formatter('%(levelname)s:%(asctime)s:%(name)s] %(message)s')
_logger = logging.getLogger('concise')
_handler = logging.StreamHandler()
_handler.setLevel(logging.DEBUG)
_handler.setFormatter(log_formatter)
_logger.setLevel(logging.DEBUG)
_logger.addHandler(_handler)
|
[
"logging.Formatter",
"logging.StreamHandler",
"keras.utils.generic_utils.get_custom_objects",
"logging.getLogger"
] |
[((883, 951), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s:%(asctime)s:%(name)s] %(message)s"""'], {}), "('%(levelname)s:%(asctime)s:%(name)s] %(message)s')\n", (900, 951), False, 'import logging\n'), ((962, 990), 'logging.getLogger', 'logging.getLogger', (['"""concise"""'], {}), "('concise')\n", (979, 990), False, 'import logging\n'), ((1002, 1025), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1023, 1025), False, 'import logging\n'), ((696, 716), 'keras.utils.generic_utils.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (714, 716), False, 'from keras.utils.generic_utils import get_custom_objects\n')]
|
# TODO: remove this once WPILib is public, and use the real thing
import sys
import pytest
from unittest.mock import MagicMock
def pytest_runtest_setup():
pass
def pytest_runtest_teardown():
pass
@pytest.fixture(scope="function")
def wpimock(monkeypatch):
mock = MagicMock(name="wpimock")
monkeypatch.setitem(sys.modules, "wpilib", mock)
return mock
@pytest.fixture(scope="function")
def wpitime():
import hal.simulation
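    # step the simulated HAL clock manually so time-dependent tests stay deterministic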
class FakeTime:
def step(self, seconds):
delta = int(seconds * 1000000)
hal.simulation.stepTimingAsync(delta)
hal.simulation.pauseTiming()
hal.simulation.restartTiming()
yield FakeTime()
hal.simulation.resumeTiming()
@pytest.fixture(scope="function")
def hal(wpitime):
import hal.simulation
yield
# Reset the HAL handles
hal.simulation.resetGlobalHandles()
# Reset the HAL data
hal.simulation.resetAllData()
|
[
"pytest.fixture",
"unittest.mock.MagicMock"
] |
[((213, 245), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (227, 245), False, 'import pytest\n'), ((381, 413), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (395, 413), False, 'import pytest\n'), ((731, 763), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (745, 763), False, 'import pytest\n'), ((283, 308), 'unittest.mock.MagicMock', 'MagicMock', ([], {'name': '"""wpimock"""'}), "(name='wpimock')\n", (292, 308), False, 'from unittest.mock import MagicMock\n')]
|
from threading import Thread, current_thread, RLock
from typing import NamedTuple, Callable, List, Optional
from utils.libs.chatbridge.common.logger import ChatBridgeLogger
from .cryptor import AESCryptor
class Address(NamedTuple):
hostname: str
port: int
def __str__(self):
return '{}:{}'.format(self.hostname, self.port)
class ChatBridgeBase:
def __init__(self, name: str, aes_key: str):
super().__init__()
self.__name = name
self.logger = ChatBridgeLogger(self.get_logging_name(), file_name=self.get_logging_file_name())
self.aes_key = aes_key
self._cryptor = AESCryptor(aes_key)
self.__thread_run: Optional[Thread] = None
self.__thread_run_lock = RLock()
def get_name(self) -> str:
return self.__name
def get_logging_name(self) -> str:
return self.get_name()
def get_logging_file_name(self) -> Optional[str]:
"""
None for no file handler
"""
return type(self).__name__
def _start_thread(self, target: Callable, name: str) -> Thread:
thread = Thread(target=target, args=(), name=name, daemon=True)
thread.start()
self.logger.debug('Started thread {}: {}'.format(name, thread))
return thread
@classmethod
def _get_main_loop_thread_name(cls):
return 'MainLoop'
def start(self):
def func():
self._main_loop()
self.logger.close_file()
with self.__thread_run_lock:
self.__thread_run = None
with self.__thread_run_lock:
if self.__thread_run is not None:
raise RuntimeError('Already running')
self.__thread_run = self._start_thread(func, self._get_main_loop_thread_name())
def stop(self):
"""
Stop the client/server, and wait until the MainLoop thread exits
Need to be called on a non-MainLoop thread
"""
self.logger.debug('Joining MainLoop thread')
with self.__thread_run_lock:
thread = self.__thread_run
if thread is not None:
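                # joining the current thread would deadlock, so only log a warning in that case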
if thread is not current_thread():
thread.join()
else:
self.logger.warning('Joining current thread {}'.format(thread))
self.logger.debug('Joined MainLoop thread')
def _main_loop(self):
pass
|
[
"threading.Thread",
"threading.RLock",
"threading.current_thread"
] |
[((677, 684), 'threading.RLock', 'RLock', ([], {}), '()\n', (682, 684), False, 'from threading import Thread, current_thread, RLock\n'), ((994, 1048), 'threading.Thread', 'Thread', ([], {'target': 'target', 'args': '()', 'name': 'name', 'daemon': '(True)'}), '(target=target, args=(), name=name, daemon=True)\n', (1000, 1048), False, 'from threading import Thread, current_thread, RLock\n'), ((1853, 1869), 'threading.current_thread', 'current_thread', ([], {}), '()\n', (1867, 1869), False, 'from threading import Thread, current_thread, RLock\n')]
|
import torch
import torch.nn as nn
from torchvision import models
import os
class MRnet(nn.Module):
"""MRnet uses pretrained resnet50 as a backbone to extract features, this is multilabel classifying model
"""
def __init__(self): # add conf file
super(MRnet,self).__init__()
# init three backbones for three axis
self.axial = models.alexnet(pretrained=True).features
self.coronal = models.alexnet(pretrained=True).features
self.saggital = models.alexnet(pretrained=True).features
self.pool_axial = nn.AdaptiveAvgPool2d(1)
self.pool_coronal = nn.AdaptiveAvgPool2d(1)
self.pool_saggital = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_features=3*256,out_features=3)
)
def forward(self,x):
""" Input is given in the form of `[image1, image2, image3]` where
`image1 = [1, slices, 3, 224, 224]`. Note that `1` is due to the
dataloader assigning it a single batch.
"""
# squeeze the first dimension as there
# is only one patient in each batch
images = [torch.squeeze(img, dim=0) for img in x]
image1 = self.axial(images[0])
image2 = self.coronal(images[1])
image3 = self.saggital(images[2])
image1 = self.pool_axial(image1).view(image1.size(0), -1)
image2 = self.pool_coronal(image2).view(image2.size(0), -1)
image3 = self.pool_saggital(image3).view(image3.size(0), -1)
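        # max over the slice dimension (dim 0) collapses a variable number of slices into one feature vector per plane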
image1 = torch.max(image1,dim=0,keepdim=True)[0]
image2 = torch.max(image2,dim=0,keepdim=True)[0]
image3 = torch.max(image3,dim=0,keepdim=True)[0]
output = torch.cat([image1,image2,image3], dim=1)
output = self.fc(output)
return output
    def _load_weights(self):
"""load pretrained weights"""
pass
|
[
"torch.nn.AdaptiveAvgPool2d",
"torchvision.models.alexnet",
"torch.cat",
"torch.squeeze",
"torch.max",
"torch.nn.Linear"
] |
[((567, 590), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (587, 590), True, 'import torch.nn as nn\n'), ((619, 642), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (639, 642), True, 'import torch.nn as nn\n'), ((672, 695), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (692, 695), True, 'import torch.nn as nn\n'), ((1699, 1741), 'torch.cat', 'torch.cat', (['[image1, image2, image3]'], {'dim': '(1)'}), '([image1, image2, image3], dim=1)\n', (1708, 1741), False, 'import torch\n'), ((370, 401), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (384, 401), False, 'from torchvision import models\n'), ((434, 465), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (448, 465), False, 'from torchvision import models\n'), ((499, 530), 'torchvision.models.alexnet', 'models.alexnet', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (513, 530), False, 'from torchvision import models\n'), ((742, 788), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(3 * 256)', 'out_features': '(3)'}), '(in_features=3 * 256, out_features=3)\n', (751, 788), True, 'import torch.nn as nn\n'), ((1142, 1167), 'torch.squeeze', 'torch.squeeze', (['img'], {'dim': '(0)'}), '(img, dim=0)\n', (1155, 1167), False, 'import torch\n'), ((1527, 1565), 'torch.max', 'torch.max', (['image1'], {'dim': '(0)', 'keepdim': '(True)'}), '(image1, dim=0, keepdim=True)\n', (1536, 1565), False, 'import torch\n'), ((1584, 1622), 'torch.max', 'torch.max', (['image2'], {'dim': '(0)', 'keepdim': '(True)'}), '(image2, dim=0, keepdim=True)\n', (1593, 1622), False, 'import torch\n'), ((1641, 1679), 'torch.max', 'torch.max', (['image3'], {'dim': '(0)', 'keepdim': '(True)'}), '(image3, dim=0, keepdim=True)\n', (1650, 1679), False, 'import torch\n')]
|
from unittest.mock import MagicMock, patch
from scheduleServer import app
import unittest
from helperFunctions.helperFunctions import stdRet, AuthenticatedUser
class TestStaff_importStaff(unittest.TestCase):
def setUp(self):
# Set up a number of items that will be used for these tests.
# -- Mock the os.environ method so that we can create the server. --
# Helper Dict for holding the os.environ configuration
self.helper_osEnviron = {
"CLIENT_ID": "TEST CLIENT_ID",
"PROJECT_ID": "TEST PROJECT_ID",
"AUTH_URI": "TEST AUTH_URI",
"TOKEN_URI": "TEST TOKEN_URI",
"AUTH_PROVIDER_X509_CERT_URL": "TEST AUTH_PROVIDER_X509_CERT_URL",
"CLIENT_SECRET": "TEST CLIENT_SECRET",
"REDIRECT_URIS": "TEST1,TEST2,TEST3,TEST4",
"JAVASCRIPT_ORIGINS": "TEST5,TEST6",
"EXPLAIN_TEMPLATE_LOADING": "FALSE",
"LOG_LEVEL": "WARNING",
"USE_ADHOC": "FALSE",
"SECRET_KEY": "TEST SECRET KEY",
"OAUTHLIB_RELAX_TOKEN_SCOPE": "1",
"OAUTHLIB_INSECURE_TRANSPORT": "1",
"HOST_URL": "https://localhost:5000",
"DATABASE_URL": "postgres://ra_sched"
}
# Create a dictionary patcher for the os.environ method
self.patcher_osEnviron = patch.dict("os.environ",
self.helper_osEnviron)
# Start the os patchers (No mock object is returned since we used patch.dict())
self.patcher_osEnviron.start()
# -- Create an instance of ScheduleServer that we may test against. --
# Mark the application as being tested
app.config["TESTING"] = True
# Disable the login_required decorator
app.config["LOGIN_DISABLED"] = True
# Reinitialize the Login Manager to accept the new configuration
app.login_manager.init_app(app)
# Create the test server
self.server = app.test_client()
# -- Create a patcher for the getAuth() method from helperFunctions --
# since we have disabled the login manager for testing
# First we must create an object for the auth_level that we can manipulate
# as needed for the tests. By default, the auth_level is set to 1.
self.mocked_authLevel = MagicMock(return_value=1)
# In order for the authLevel to respond to __lt__, __gt__, and __eq__ calls,
# we need to create lambda functions that can effectively implement the
# respective magic methods.
self.mocked_authLevel_ltMock = lambda me, other: me.return_value < other
self.mocked_authLevel_gtMock = lambda me, other: me.return_value > other
self.mocked_authLevel_eqMock = lambda me, other: me.return_value == other
# We then set the auth_level mock to return the __lt__ Mock
self.mocked_authLevel.__lt__ = self.mocked_authLevel_ltMock
# We then set the auth_level mock to return the __gt__ Mock
        self.mocked_authLevel.__gt__ = self.mocked_authLevel_gtMock
        # We then set the auth_level mock to return the __eq__ Mock
        self.mocked_authLevel.__eq__ = self.mocked_authLevel_eqMock
# Set the ra_id and hall_id to values that can be used throughout
self.user_ra_id = 1
self.user_hall_id = 1
self.associatedResHalls = [
{
"id": self.user_hall_id,
"auth_level": self.mocked_authLevel,
"name": "<NAME>"
}
]
# Assemble all of the desired values into an Authenticated User Object
self.helper_getAuth = AuthenticatedUser(
"<EMAIL>",
self.user_ra_id,
"Test",
"User",
self.associatedResHalls
)
# Create the patcher for the getAuth() method
self.patcher_getAuth = patch("staff.staff.getAuth", autospec=True)
# Start the patcher - mock returned
self.mocked_getAuth = self.patcher_getAuth.start()
# Configure the mocked_getAuth to return the helper_getAuth dictionary
self.mocked_getAuth.return_value = self.helper_getAuth
# -- Create a patcher for the appGlobals file --
self.patcher_appGlobals = patch("staff.staff.ag", autospec=True)
# Start the patcher - mock returned
self.mocked_appGlobals = self.patcher_appGlobals.start()
# Configure the mocked appGlobals as desired
self.mocked_appGlobals.baseOpts = {"HOST_URL": "https://localhost:5000"}
self.mocked_appGlobals.conn = MagicMock()
self.mocked_appGlobals.UPLOAD_FOLDER = "./static"
self.mocked_appGlobals.ALLOWED_EXTENSIONS = {"txt", "csv"}
# -- Create a patchers for the logging --
self.patcher_loggingDEBUG = patch("logging.debug", autospec=True)
self.patcher_loggingINFO = patch("logging.info", autospec=True)
self.patcher_loggingWARNING = patch("logging.warning", autospec=True)
self.patcher_loggingCRITICAL = patch("logging.critical", autospec=True)
self.patcher_loggingERROR = patch("logging.error", autospec=True)
# Start the patcher - mock returned
self.mocked_loggingDEBUG = self.patcher_loggingDEBUG.start()
self.mocked_loggingINFO = self.patcher_loggingINFO.start()
self.mocked_loggingWARNING = self.patcher_loggingWARNING.start()
self.mocked_loggingCRITICAL = self.patcher_loggingCRITICAL.start()
self.mocked_loggingERROR = self.patcher_loggingERROR.start()
def tearDown(self):
# Stop all of the patchers
self.patcher_getAuth.stop()
self.patcher_appGlobals.stop()
self.patcher_osEnviron.stop()
# Stop all of the logging patchers
self.patcher_loggingDEBUG.stop()
self.patcher_loggingINFO.stop()
self.patcher_loggingWARNING.stop()
self.patcher_loggingCRITICAL.stop()
self.patcher_loggingERROR.stop()
def resetAuthLevel(self):
# This function serves to reset the auth_level of the session
# to the default value which is 1.
self.mocked_authLevel.return_value = 1
def test_withUnauthorizedUser_returnsNotAuthorizedResponse(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withMissingFilePart_returnsAppropriateResponse(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withBlankFilename_returnsAppropriateResponse(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withInvalidFileType_returnsRedirectToErrorPage(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withValidFile_withInvalidFormatting_returnsAppropriateResponse(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withValidFile_withDuplicateRA_rollsBackDB(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
def test_withAuthorizedUser_withValidFile_withoutDuplicateRA_createsNewRARecord(self):
# -- Arrange --
# -- Act --
# -- Assert --
pass
|
[
"unittest.mock.MagicMock",
"scheduleServer.app.test_client",
"unittest.mock.patch.dict",
"helperFunctions.helperFunctions.AuthenticatedUser",
"unittest.mock.patch",
"scheduleServer.app.login_manager.init_app"
] |
[((1352, 1399), 'unittest.mock.patch.dict', 'patch.dict', (['"""os.environ"""', 'self.helper_osEnviron'], {}), "('os.environ', self.helper_osEnviron)\n", (1362, 1399), False, 'from unittest.mock import MagicMock, patch\n'), ((1909, 1940), 'scheduleServer.app.login_manager.init_app', 'app.login_manager.init_app', (['app'], {}), '(app)\n', (1935, 1940), False, 'from scheduleServer import app\n'), ((1996, 2013), 'scheduleServer.app.test_client', 'app.test_client', ([], {}), '()\n', (2011, 2013), False, 'from scheduleServer import app\n'), ((2353, 2378), 'unittest.mock.MagicMock', 'MagicMock', ([], {'return_value': '(1)'}), '(return_value=1)\n', (2362, 2378), False, 'from unittest.mock import MagicMock, patch\n'), ((3680, 3771), 'helperFunctions.helperFunctions.AuthenticatedUser', 'AuthenticatedUser', (['"""<EMAIL>"""', 'self.user_ra_id', '"""Test"""', '"""User"""', 'self.associatedResHalls'], {}), "('<EMAIL>', self.user_ra_id, 'Test', 'User', self.\n associatedResHalls)\n", (3697, 3771), False, 'from helperFunctions.helperFunctions import stdRet, AuthenticatedUser\n'), ((3923, 3966), 'unittest.mock.patch', 'patch', (['"""staff.staff.getAuth"""'], {'autospec': '(True)'}), "('staff.staff.getAuth', autospec=True)\n", (3928, 3966), False, 'from unittest.mock import MagicMock, patch\n'), ((4306, 4344), 'unittest.mock.patch', 'patch', (['"""staff.staff.ag"""'], {'autospec': '(True)'}), "('staff.staff.ag', autospec=True)\n", (4311, 4344), False, 'from unittest.mock import MagicMock, patch\n'), ((4628, 4639), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (4637, 4639), False, 'from unittest.mock import MagicMock, patch\n'), ((4852, 4889), 'unittest.mock.patch', 'patch', (['"""logging.debug"""'], {'autospec': '(True)'}), "('logging.debug', autospec=True)\n", (4857, 4889), False, 'from unittest.mock import MagicMock, patch\n'), ((4925, 4961), 'unittest.mock.patch', 'patch', (['"""logging.info"""'], {'autospec': '(True)'}), "('logging.info', autospec=True)\n", (4930, 4961), False, 'from unittest.mock import MagicMock, patch\n'), ((5000, 5039), 'unittest.mock.patch', 'patch', (['"""logging.warning"""'], {'autospec': '(True)'}), "('logging.warning', autospec=True)\n", (5005, 5039), False, 'from unittest.mock import MagicMock, patch\n'), ((5079, 5119), 'unittest.mock.patch', 'patch', (['"""logging.critical"""'], {'autospec': '(True)'}), "('logging.critical', autospec=True)\n", (5084, 5119), False, 'from unittest.mock import MagicMock, patch\n'), ((5156, 5193), 'unittest.mock.patch', 'patch', (['"""logging.error"""'], {'autospec': '(True)'}), "('logging.error', autospec=True)\n", (5161, 5193), False, 'from unittest.mock import MagicMock, patch\n')]
|
# Python imports
import unittest
import numpy as np
import os
import shutil
import xarray as xr
import pytest
import oggm
from scipy import optimize as optimization
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
# Locals
import oggm.cfg as cfg
from oggm import tasks, utils, workflow
from oggm.workflow import execute_entity_task
from oggm.tests.funcs import get_test_dir
from oggm.utils import get_demo_file
from oggm.core import gis, centerlines
from oggm.core.massbalance import ConstantMassBalance
pytestmark = pytest.mark.test_env("benchmark")
do_plot = False
class TestSouthGlacier(unittest.TestCase):
# Test case optained from ITMIX
# Data available at:
# oggm-sample-data/tree/master/benchmarks/south_glacier
#
# Citation:
#
# <NAME>., <NAME>, <NAME>, and <NAME> (2011). Present
# dynamics and future prognosis of a slowly surging glacier.
# The Cryosphere, 5, 299-313. DOI: 10.5194/tc-5-299-2011, 2011.
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
# Init
cfg.initialize()
cfg.PARAMS['use_multiprocessing'] = False
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['working_dir'] = self.testdir
cfg.PATHS['dem_file'] = get_demo_file('dem_SouthGlacier.tif')
cfg.PARAMS['border'] = 10
self.tf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc')
self.pf = get_demo_file('cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc')
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def get_ref_data(self, gdir):
# Reference data
df = salem.read_shapefile(get_demo_file('IceThick_SouthGlacier.shp'))
coords = np.array([p.xy for p in df.geometry]).squeeze()
df['lon'] = coords[:, 0]
df['lat'] = coords[:, 1]
df = df[['lon', 'lat', 'thick']]
ii, jj = gdir.grid.transform(df['lon'], df['lat'], crs=salem.wgs84,
nearest=True)
df['i'] = ii
df['j'] = jj
df['ij'] = ['{:04d}_{:04d}'.format(i, j) for i, j in zip(ii, jj)]
return df.groupby('ij').mean()
def test_mb(self):
# This is a function to produce the MB function needed by Anna
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
mbref = salem.GeoTiff(get_demo_file('mb_SouthGlacier.tif'))
demref = salem.GeoTiff(get_demo_file('dem_SouthGlacier.tif'))
mbref = mbref.get_vardata()
mbref[mbref == -9999] = np.NaN
demref = demref.get_vardata()[np.isfinite(mbref)]
mbref = mbref[np.isfinite(mbref)] * 1000
# compute the bias to make it 0 SMB on the 2D DEM
rho = cfg.PARAMS['ice_density']
mbmod = ConstantMassBalance(gdirs[0], bias=0)
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
mbmod = ConstantMassBalance(gdirs[0], bias=np.average(mymb))
mymb = mbmod.get_annual_mb(demref) * cfg.SEC_IN_YEAR * rho
np.testing.assert_allclose(np.average(mymb), 0., atol=1e-3)
# Same for ref
mbref = mbref - np.average(mbref)
np.testing.assert_allclose(np.average(mbref), 0., atol=1e-3)
# Fit poly
p = np.polyfit(demref, mbref, deg=2)
poly = np.poly1d(p)
myfit = poly(demref)
np.testing.assert_allclose(np.average(myfit), 0., atol=1e-3)
if do_plot:
import matplotlib.pyplot as plt
plt.scatter(mbref, demref, s=5,
label='Obs (2007-2012), shifted to Avg(SMB) = 0')
plt.scatter(mymb, demref, s=5, label='OGGM MB at t*')
plt.scatter(myfit, demref, s=5, label='Polyfit', c='C3')
plt.xlabel('MB (mm w.e yr-1)')
plt.ylabel('Altidude (m)')
plt.legend()
plt.show()
def test_inversion_attributes(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Tested tasks
task_list = [
tasks.gridded_attributes,
tasks.gridded_mb_attributes,
]
for task in task_list:
execute_entity_task(task, gdirs)
# Check certain things
gdir = gdirs[0]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
# The max catchment area should be area of glacier
assert (ds['catchment_area'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
assert (ds['catchment_area_on_catch'].max() ==
ds['glacier_mask'].sum() * gdir.grid.dx**2)
# In the lowest parts of the glaciers the data should be equivalent
ds_low = ds.isel(y=ds.y < 6741500)
np.testing.assert_allclose(ds_low['lin_mb_above_z'],
ds_low['lin_mb_above_z_on_catch'])
np.testing.assert_allclose(ds_low['oggm_mb_above_z'],
ds_low['oggm_mb_above_z_on_catch'])
# Build some loose tests based on correlation
df = self.get_ref_data(gdir)
vns = ['topo',
'slope',
'aspect',
'slope_factor',
'dis_from_border',
'catchment_area',
'catchment_area_on_catch',
'lin_mb_above_z',
'lin_mb_above_z_on_catch',
'oggm_mb_above_z',
'oggm_mb_above_z_on_catch',
]
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
for vn in vns:
df[vn] = ds[vn].isel(x=('z', df['i']), y=('z', df['j']))
# Loose tests based on correlations
cf = df.corr()
assert cf.loc['slope', 'slope_factor'] < -0.9
assert cf.loc['slope', 'thick'] < -0.4
assert cf.loc['dis_from_border', 'thick'] > 0.2
assert cf.loc['oggm_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'thick'] > 0.5
assert cf.loc['lin_mb_above_z', 'oggm_mb_above_z'] > 0.95
def test_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
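        # produce two gridded thickness maps: altitude-based and interpolation-based redistribution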
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs,
varname_suffix='_alt')
execute_entity_task(tasks.distribute_thickness_interp, gdirs,
varname_suffix='_int')
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
v = ds.distributed_thickness_alt
df['oggm_alt'] = v.isel(x=('z', df['i']), y=('z', df['j']))
v = ds.distributed_thickness_int
df['oggm_int'] = v.isel(x=('z', df['i']), y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness_int) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
rmsd_int = ((df.oggm_int - df.thick) ** 2).mean() ** .5
        rmsd_alt = ((df.oggm_alt - df.thick) ** 2).mean() ** .5
assert rmsd_int < 85
assert rmsd_alt < 85
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm_int, 50)
np.testing.assert_allclose(dfm.thick, dfm.oggm_alt, 50)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm_int', y='thick')
plt.axis('equal')
df.plot(kind='scatter', x='oggm_alt', y='thick')
plt.axis('equal')
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(12, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness_int.plot(ax=ax2)
ds.distributed_thickness_alt.plot(ax=ax3)
plt.tight_layout()
plt.show()
def test_optimize_inversion(self):
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Reference data
gdir = gdirs[0]
df = self.get_ref_data(gdir)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
glen_a = cfg.PARAMS['inversion_glen_a']
fs = cfg.PARAMS['inversion_fs']
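        # objective: mean absolute mismatch between modelled and observed thickness at the survey points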
def to_optimize(x):
tasks.mass_conservation_inversion(gdir,
glen_a=glen_a * x[0],
fs=fs * x[1])
tasks.distribute_thickness_per_altitude(gdir)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
thick = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
out = (np.abs(thick - df.thick)).mean()
return out
opti = optimization.minimize(to_optimize, [1., 1.],
bounds=((0.01, 10), (0.01, 10)),
tol=0.1)
# Check results and save.
execute_entity_task(tasks.mass_conservation_inversion, gdirs,
glen_a=glen_a*opti['x'][0],
fs=0)
execute_entity_task(tasks.distribute_thickness_per_altitude, gdirs)
with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
df['oggm'] = ds.distributed_thickness.isel(x=('z', df['i']),
y=('z', df['j']))
ds['ref'] = xr.zeros_like(ds.distributed_thickness) * np.NaN
ds['ref'].data[df['j'], df['i']] = df['thick']
rmsd = ((df.oggm - df.thick) ** 2).mean() ** .5
assert rmsd < 30
dfm = df.mean()
np.testing.assert_allclose(dfm.thick, dfm.oggm, 10)
if do_plot:
import matplotlib.pyplot as plt
df.plot(kind='scatter', x='oggm', y='thick')
plt.axis('equal')
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
ds.ref.plot(ax=ax1)
ds.distributed_thickness.plot(ax=ax2)
plt.tight_layout()
plt.show()
def test_workflow(self):
# This is a check that the inversion workflow works fine
# Download the RGI file for the run
# Make a new dataframe of those
rgidf = gpd.read_file(get_demo_file('SouthGlacier.shp'))
# Go - initialize working directories
gdirs = workflow.init_glacier_directories(rgidf)
# Preprocessing tasks
task_list = [
tasks.define_glacier_region,
tasks.glacier_masks,
tasks.compute_centerlines,
tasks.initialize_flowlines,
tasks.catchment_area,
tasks.catchment_intersections,
tasks.catchment_width_geom,
tasks.catchment_width_correction,
tasks.compute_downstream_line,
tasks.compute_downstream_bedshape,
]
for task in task_list:
execute_entity_task(task, gdirs)
execute_entity_task(tasks.process_cru_data, gdirs,
tmp_file=self.tf,
pre_file=self.pf)
execute_entity_task(tasks.local_t_star, gdirs)
execute_entity_task(tasks.mu_star_calibration, gdirs)
# Inversion tasks
execute_entity_task(tasks.prepare_for_inversion, gdirs)
# We use the default parameters for this run
execute_entity_task(tasks.mass_conservation_inversion, gdirs)
execute_entity_task(tasks.filter_inversion_output, gdirs)
df = utils.compile_glacier_statistics(gdirs)
assert df.inv_thickness_m[0] < 100
if do_plot:
import matplotlib.pyplot as plt
from oggm.graphics import plot_inversion
plot_inversion(gdirs)
plt.show()
class TestCoxeGlacier(unittest.TestCase):
# Test case for a tidewater glacier
def setUp(self):
# test directory
self.testdir = os.path.join(get_test_dir(), 'tmp')
if not os.path.exists(self.testdir):
os.makedirs(self.testdir)
self.clean_dir()
self.rgi_file = get_demo_file('rgi_RGI50-01.10299.shp')
# Init
cfg.initialize()
cfg.PARAMS['use_intersects'] = False
cfg.PATHS['dem_file'] = get_demo_file('dem_RGI50-01.10299.tif')
cfg.PARAMS['border'] = 40
def tearDown(self):
self.rm_dir()
def rm_dir(self):
shutil.rmtree(self.testdir)
def clean_dir(self):
shutil.rmtree(self.testdir)
os.makedirs(self.testdir)
def test_set_width(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
        # Test that the area and the area-altitude distribution are fine
with utils.ncDataset(gdir.get_filepath('gridded_data')) as nc:
mask = nc.variables['glacier_mask'][:]
topo = nc.variables['topo_smoothed'][:]
rhgt = topo[np.where(mask)][:]
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
bs = 100
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
centerlines.terminus_width_correction(gdir, new_width=714)
fls = gdir.read_pickle('inversion_flowlines')
hgt, widths = gdir.get_inversion_flowline_hw()
# Check that the width is ok
np.testing.assert_allclose(fls[-1].widths[-1] * gdir.grid.dx, 714)
# Check for area distrib
bins = np.arange(utils.nicenumber(np.min(hgt), bs, lower=True),
utils.nicenumber(np.max(hgt), bs) + 1,
bs)
h1, b = np.histogram(hgt, weights=widths, density=True, bins=bins)
h2, b = np.histogram(rhgt, density=True, bins=bins)
h1 = h1 / np.sum(h1)
h2 = h2 / np.sum(h2)
assert utils.rmsd(h1, h2) < 0.02 # less than 2% error
new_area = np.sum(widths * fls[-1].dx * gdir.grid.dx)
np.testing.assert_allclose(new_area, gdir.rgi_area_m2)
def test_run(self):
entity = gpd.read_file(self.rgi_file).iloc[0]
gdir = oggm.GlacierDirectory(entity, base_dir=self.testdir)
gis.define_glacier_region(gdir)
gis.glacier_masks(gdir)
centerlines.compute_centerlines(gdir)
centerlines.initialize_flowlines(gdir)
centerlines.compute_downstream_line(gdir)
centerlines.compute_downstream_bedshape(gdir)
centerlines.catchment_area(gdir)
centerlines.catchment_intersections(gdir)
centerlines.catchment_width_geom(gdir)
centerlines.catchment_width_correction(gdir)
# Climate tasks -- only data IO and tstar interpolation!
tasks.process_dummy_cru_file(gdir, seed=0)
tasks.local_t_star(gdir)
tasks.mu_star_calibration(gdir)
# Inversion tasks
tasks.find_inversion_calving(gdir)
# Final preparation for the run
tasks.init_present_time_glacier(gdir)
# check that calving happens in the real context as well
tasks.run_constant_climate(gdir, bias=0, nyears=200,
temperature_bias=-0.5)
with xr.open_dataset(gdir.get_filepath('model_diagnostics')) as ds:
assert ds.calving_m3[-1] > 10
|
[
"oggm.tasks.init_present_time_glacier",
"oggm.core.centerlines.compute_centerlines",
"oggm.core.centerlines.catchment_width_correction",
"numpy.sum",
"oggm.cfg.initialize",
"numpy.polyfit",
"numpy.abs",
"oggm.core.centerlines.compute_downstream_bedshape",
"xarray.zeros_like",
"oggm.core.centerlines.initialize_flowlines",
"numpy.histogram",
"oggm.graphics.plot_inversion",
"oggm.core.massbalance.ConstantMassBalance",
"shutil.rmtree",
"oggm.core.centerlines.catchment_width_geom",
"matplotlib.pyplot.tight_layout",
"oggm.tasks.run_constant_climate",
"scipy.optimize.minimize",
"oggm.core.centerlines.compute_downstream_line",
"os.path.exists",
"numpy.isfinite",
"numpy.max",
"numpy.testing.assert_allclose",
"matplotlib.pyplot.subplots",
"oggm.tasks.mu_star_calibration",
"numpy.average",
"matplotlib.pyplot.show",
"oggm.tests.funcs.get_test_dir",
"oggm.core.gis.define_glacier_region",
"matplotlib.pyplot.legend",
"oggm.core.centerlines.catchment_intersections",
"oggm.utils.get_demo_file",
"oggm.core.centerlines.terminus_width_correction",
"oggm.tasks.local_t_star",
"oggm.tasks.find_inversion_calving",
"oggm.core.centerlines.catchment_area",
"numpy.min",
"pytest.mark.test_env",
"matplotlib.pyplot.ylabel",
"numpy.poly1d",
"pytest.importorskip",
"oggm.workflow.execute_entity_task",
"oggm.core.gis.glacier_masks",
"os.makedirs",
"oggm.GlacierDirectory",
"matplotlib.pyplot.scatter",
"oggm.workflow.init_glacier_directories",
"matplotlib.pyplot.axis",
"oggm.tasks.mass_conservation_inversion",
"oggm.tasks.distribute_thickness_per_altitude",
"numpy.where",
"numpy.array",
"oggm.utils.rmsd",
"oggm.tasks.process_dummy_cru_file",
"matplotlib.pyplot.xlabel",
"oggm.utils.compile_glacier_statistics"
] |
[((174, 202), 'pytest.importorskip', 'pytest.importorskip', (['"""salem"""'], {}), "('salem')\n", (193, 202), False, 'import pytest\n'), ((209, 241), 'pytest.importorskip', 'pytest.importorskip', (['"""geopandas"""'], {}), "('geopandas')\n", (228, 241), False, 'import pytest\n'), ((547, 580), 'pytest.mark.test_env', 'pytest.mark.test_env', (['"""benchmark"""'], {}), "('benchmark')\n", (567, 580), False, 'import pytest\n'), ((1221, 1237), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (1235, 1237), True, 'import oggm.cfg as cfg\n'), ((1413, 1450), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_SouthGlacier.tif"""'], {}), "('dem_SouthGlacier.tif')\n", (1426, 1450), False, 'from oggm.utils import get_demo_file\n'), ((1504, 1565), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc"""'], {}), "('cru_ts4.01.1901.2016.SouthGlacier.tmp.dat.nc')\n", (1517, 1565), False, 'from oggm.utils import get_demo_file\n'), ((1584, 1645), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc"""'], {}), "('cru_ts4.01.1901.2016.SouthGlacier.pre.dat.nc')\n", (1597, 1645), False, 'from oggm.utils import get_demo_file\n'), ((1724, 1751), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (1737, 1751), False, 'import shutil\n'), ((1786, 1813), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (1799, 1813), False, 'import shutil\n'), ((1822, 1847), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (1833, 1847), False, 'import os\n'), ((2750, 2790), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (2783, 2790), False, 'from oggm import tasks, utils, workflow\n'), ((3255, 3345), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (3274, 3345), False, 'from oggm.workflow import execute_entity_task\n'), ((3406, 3452), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (3425, 3452), False, 'from oggm.workflow import execute_entity_task\n'), ((3461, 3514), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (3480, 3514), False, 'from oggm.workflow import execute_entity_task\n'), ((3952, 3989), 'oggm.core.massbalance.ConstantMassBalance', 'ConstantMassBalance', (['gdirs[0]'], {'bias': '(0)'}), '(gdirs[0], bias=0)\n', (3971, 3989), False, 'from oggm.core.massbalance import ConstantMassBalance\n'), ((4428, 4460), 'numpy.polyfit', 'np.polyfit', (['demref', 'mbref'], {'deg': '(2)'}), '(demref, mbref, deg=2)\n', (4438, 4460), True, 'import numpy as np\n'), ((4476, 4488), 'numpy.poly1d', 'np.poly1d', (['p'], {}), '(p)\n', (4485, 4488), True, 'import numpy as np\n'), ((5290, 5330), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (5323, 5330), False, 'from oggm import tasks, utils, workflow\n'), ((5795, 5885), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (5814, 5885), False, 'from oggm.workflow import 
execute_entity_task\n'), ((5946, 5992), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (5965, 5992), False, 'from oggm.workflow import execute_entity_task\n'), ((6001, 6054), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (6020, 6054), False, 'from oggm.workflow import execute_entity_task\n'), ((8393, 8433), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (8426, 8433), False, 'from oggm import tasks, utils, workflow\n'), ((8898, 8988), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (8917, 8988), False, 'from oggm.workflow import execute_entity_task\n'), ((9049, 9095), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (9068, 9095), False, 'from oggm.workflow import execute_entity_task\n'), ((9104, 9157), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (9123, 9157), False, 'from oggm.workflow import execute_entity_task\n'), ((9193, 9248), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (9212, 9248), False, 'from oggm.workflow import execute_entity_task\n'), ((9310, 9371), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {}), '(tasks.mass_conservation_inversion, gdirs)\n', (9329, 9371), False, 'from oggm.workflow import execute_entity_task\n'), ((9380, 9474), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_per_altitude', 'gdirs'], {'varname_suffix': '"""_alt"""'}), "(tasks.distribute_thickness_per_altitude, gdirs,\n varname_suffix='_alt')\n", (9399, 9474), False, 'from oggm.workflow import execute_entity_task\n'), ((9507, 9595), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_interp', 'gdirs'], {'varname_suffix': '"""_int"""'}), "(tasks.distribute_thickness_interp, gdirs,\n varname_suffix='_int')\n", (9526, 9595), False, 'from oggm.workflow import execute_entity_task\n'), ((10371, 10426), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm_int', '(50)'], {}), '(dfm.thick, dfm.oggm_int, 50)\n', (10397, 10426), True, 'import numpy as np\n'), ((10435, 10490), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm_alt', '(50)'], {}), '(dfm.thick, dfm.oggm_alt, 50)\n', (10461, 10490), True, 'import numpy as np\n'), ((11254, 11294), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (11287, 11294), False, 'from oggm import tasks, utils, workflow\n'), ((11759, 11849), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (11778, 11849), False, 'from oggm.workflow import execute_entity_task\n'), ((11910, 11956), 'oggm.workflow.execute_entity_task', 'execute_entity_task', 
(['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (11929, 11956), False, 'from oggm.workflow import execute_entity_task\n'), ((11965, 12018), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (11984, 12018), False, 'from oggm.workflow import execute_entity_task\n'), ((12141, 12196), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (12160, 12196), False, 'from oggm.workflow import execute_entity_task\n'), ((12867, 12960), 'scipy.optimize.minimize', 'optimization.minimize', (['to_optimize', '[1.0, 1.0]'], {'bounds': '((0.01, 10), (0.01, 10))', 'tol': '(0.1)'}), '(to_optimize, [1.0, 1.0], bounds=((0.01, 10), (0.01, \n 10)), tol=0.1)\n', (12888, 12960), True, 'from scipy import optimize as optimization\n'), ((13070, 13171), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {'glen_a': "(glen_a * opti['x'][0])", 'fs': '(0)'}), "(tasks.mass_conservation_inversion, gdirs, glen_a=glen_a *\n opti['x'][0], fs=0)\n", (13089, 13171), False, 'from oggm.workflow import execute_entity_task\n'), ((13230, 13297), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.distribute_thickness_per_altitude', 'gdirs'], {}), '(tasks.distribute_thickness_per_altitude, gdirs)\n', (13249, 13297), False, 'from oggm.workflow import execute_entity_task\n'), ((13763, 13814), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['dfm.thick', 'dfm.oggm', '(10)'], {}), '(dfm.thick, dfm.oggm, 10)\n', (13789, 13814), True, 'import numpy as np\n'), ((14474, 14514), 'oggm.workflow.init_glacier_directories', 'workflow.init_glacier_directories', (['rgidf'], {}), '(rgidf)\n', (14507, 14514), False, 'from oggm import tasks, utils, workflow\n'), ((15069, 15159), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.process_cru_data', 'gdirs'], {'tmp_file': 'self.tf', 'pre_file': 'self.pf'}), '(tasks.process_cru_data, gdirs, tmp_file=self.tf,\n pre_file=self.pf)\n', (15088, 15159), False, 'from oggm.workflow import execute_entity_task\n'), ((15220, 15266), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.local_t_star', 'gdirs'], {}), '(tasks.local_t_star, gdirs)\n', (15239, 15266), False, 'from oggm.workflow import execute_entity_task\n'), ((15275, 15328), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mu_star_calibration', 'gdirs'], {}), '(tasks.mu_star_calibration, gdirs)\n', (15294, 15328), False, 'from oggm.workflow import execute_entity_task\n'), ((15364, 15419), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.prepare_for_inversion', 'gdirs'], {}), '(tasks.prepare_for_inversion, gdirs)\n', (15383, 15419), False, 'from oggm.workflow import execute_entity_task\n'), ((15481, 15542), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.mass_conservation_inversion', 'gdirs'], {}), '(tasks.mass_conservation_inversion, gdirs)\n', (15500, 15542), False, 'from oggm.workflow import execute_entity_task\n'), ((15551, 15608), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['tasks.filter_inversion_output', 'gdirs'], {}), '(tasks.filter_inversion_output, gdirs)\n', (15570, 15608), False, 'from oggm.workflow import execute_entity_task\n'), ((15623, 15662), 'oggm.utils.compile_glacier_statistics', 'utils.compile_glacier_statistics', 
(['gdirs'], {}), '(gdirs)\n', (15655, 15662), False, 'from oggm import tasks, utils, workflow\n'), ((16206, 16245), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""rgi_RGI50-01.10299.shp"""'], {}), "('rgi_RGI50-01.10299.shp')\n", (16219, 16245), False, 'from oggm.utils import get_demo_file\n'), ((16270, 16286), 'oggm.cfg.initialize', 'cfg.initialize', ([], {}), '()\n', (16284, 16286), True, 'import oggm.cfg as cfg\n'), ((16364, 16403), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_RGI50-01.10299.tif"""'], {}), "('dem_RGI50-01.10299.tif')\n", (16377, 16403), False, 'from oggm.utils import get_demo_file\n'), ((16516, 16543), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (16529, 16543), False, 'import shutil\n'), ((16578, 16605), 'shutil.rmtree', 'shutil.rmtree', (['self.testdir'], {}), '(self.testdir)\n', (16591, 16605), False, 'import shutil\n'), ((16614, 16639), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (16625, 16639), False, 'import os\n'), ((16741, 16793), 'oggm.GlacierDirectory', 'oggm.GlacierDirectory', (['entity'], {'base_dir': 'self.testdir'}), '(entity, base_dir=self.testdir)\n', (16762, 16793), False, 'import oggm\n'), ((16802, 16833), 'oggm.core.gis.define_glacier_region', 'gis.define_glacier_region', (['gdir'], {}), '(gdir)\n', (16827, 16833), False, 'from oggm.core import gis, centerlines\n'), ((16842, 16865), 'oggm.core.gis.glacier_masks', 'gis.glacier_masks', (['gdir'], {}), '(gdir)\n', (16859, 16865), False, 'from oggm.core import gis, centerlines\n'), ((16874, 16911), 'oggm.core.centerlines.compute_centerlines', 'centerlines.compute_centerlines', (['gdir'], {}), '(gdir)\n', (16905, 16911), False, 'from oggm.core import gis, centerlines\n'), ((16920, 16958), 'oggm.core.centerlines.initialize_flowlines', 'centerlines.initialize_flowlines', (['gdir'], {}), '(gdir)\n', (16952, 16958), False, 'from oggm.core import gis, centerlines\n'), ((16967, 17008), 'oggm.core.centerlines.compute_downstream_line', 'centerlines.compute_downstream_line', (['gdir'], {}), '(gdir)\n', (17002, 17008), False, 'from oggm.core import gis, centerlines\n'), ((17017, 17062), 'oggm.core.centerlines.compute_downstream_bedshape', 'centerlines.compute_downstream_bedshape', (['gdir'], {}), '(gdir)\n', (17056, 17062), False, 'from oggm.core import gis, centerlines\n'), ((17071, 17103), 'oggm.core.centerlines.catchment_area', 'centerlines.catchment_area', (['gdir'], {}), '(gdir)\n', (17097, 17103), False, 'from oggm.core import gis, centerlines\n'), ((17112, 17153), 'oggm.core.centerlines.catchment_intersections', 'centerlines.catchment_intersections', (['gdir'], {}), '(gdir)\n', (17147, 17153), False, 'from oggm.core import gis, centerlines\n'), ((17162, 17200), 'oggm.core.centerlines.catchment_width_geom', 'centerlines.catchment_width_geom', (['gdir'], {}), '(gdir)\n', (17194, 17200), False, 'from oggm.core import gis, centerlines\n'), ((17209, 17253), 'oggm.core.centerlines.catchment_width_correction', 'centerlines.catchment_width_correction', (['gdir'], {}), '(gdir)\n', (17247, 17253), False, 'from oggm.core import gis, centerlines\n'), ((17833, 17891), 'numpy.histogram', 'np.histogram', (['hgt'], {'weights': 'widths', 'density': '(True)', 'bins': 'bins'}), '(hgt, weights=widths, density=True, bins=bins)\n', (17845, 17891), True, 'import numpy as np\n'), ((17908, 17951), 'numpy.histogram', 'np.histogram', (['rhgt'], {'density': '(True)', 'bins': 'bins'}), '(rhgt, density=True, bins=bins)\n', (17920, 17951), True, 'import numpy as 
np\n'), ((18092, 18134), 'numpy.sum', 'np.sum', (['(widths * fls[-1].dx * gdir.grid.dx)'], {}), '(widths * fls[-1].dx * gdir.grid.dx)\n', (18098, 18134), True, 'import numpy as np\n'), ((18143, 18197), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['new_area', 'gdir.rgi_area_m2'], {}), '(new_area, gdir.rgi_area_m2)\n', (18169, 18197), True, 'import numpy as np\n'), ((18207, 18265), 'oggm.core.centerlines.terminus_width_correction', 'centerlines.terminus_width_correction', (['gdir'], {'new_width': '(714)'}), '(gdir, new_width=714)\n', (18244, 18265), False, 'from oggm.core import gis, centerlines\n'), ((18422, 18488), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['(fls[-1].widths[-1] * gdir.grid.dx)', '(714)'], {}), '(fls[-1].widths[-1] * gdir.grid.dx, 714)\n', (18448, 18488), True, 'import numpy as np\n'), ((18704, 18762), 'numpy.histogram', 'np.histogram', (['hgt'], {'weights': 'widths', 'density': '(True)', 'bins': 'bins'}), '(hgt, weights=widths, density=True, bins=bins)\n', (18716, 18762), True, 'import numpy as np\n'), ((18779, 18822), 'numpy.histogram', 'np.histogram', (['rhgt'], {'density': '(True)', 'bins': 'bins'}), '(rhgt, density=True, bins=bins)\n', (18791, 18822), True, 'import numpy as np\n'), ((18963, 19005), 'numpy.sum', 'np.sum', (['(widths * fls[-1].dx * gdir.grid.dx)'], {}), '(widths * fls[-1].dx * gdir.grid.dx)\n', (18969, 19005), True, 'import numpy as np\n'), ((19014, 19068), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['new_area', 'gdir.rgi_area_m2'], {}), '(new_area, gdir.rgi_area_m2)\n', (19040, 19068), True, 'import numpy as np\n'), ((19165, 19217), 'oggm.GlacierDirectory', 'oggm.GlacierDirectory', (['entity'], {'base_dir': 'self.testdir'}), '(entity, base_dir=self.testdir)\n', (19186, 19217), False, 'import oggm\n'), ((19226, 19257), 'oggm.core.gis.define_glacier_region', 'gis.define_glacier_region', (['gdir'], {}), '(gdir)\n', (19251, 19257), False, 'from oggm.core import gis, centerlines\n'), ((19266, 19289), 'oggm.core.gis.glacier_masks', 'gis.glacier_masks', (['gdir'], {}), '(gdir)\n', (19283, 19289), False, 'from oggm.core import gis, centerlines\n'), ((19298, 19335), 'oggm.core.centerlines.compute_centerlines', 'centerlines.compute_centerlines', (['gdir'], {}), '(gdir)\n', (19329, 19335), False, 'from oggm.core import gis, centerlines\n'), ((19344, 19382), 'oggm.core.centerlines.initialize_flowlines', 'centerlines.initialize_flowlines', (['gdir'], {}), '(gdir)\n', (19376, 19382), False, 'from oggm.core import gis, centerlines\n'), ((19391, 19432), 'oggm.core.centerlines.compute_downstream_line', 'centerlines.compute_downstream_line', (['gdir'], {}), '(gdir)\n', (19426, 19432), False, 'from oggm.core import gis, centerlines\n'), ((19441, 19486), 'oggm.core.centerlines.compute_downstream_bedshape', 'centerlines.compute_downstream_bedshape', (['gdir'], {}), '(gdir)\n', (19480, 19486), False, 'from oggm.core import gis, centerlines\n'), ((19495, 19527), 'oggm.core.centerlines.catchment_area', 'centerlines.catchment_area', (['gdir'], {}), '(gdir)\n', (19521, 19527), False, 'from oggm.core import gis, centerlines\n'), ((19536, 19577), 'oggm.core.centerlines.catchment_intersections', 'centerlines.catchment_intersections', (['gdir'], {}), '(gdir)\n', (19571, 19577), False, 'from oggm.core import gis, centerlines\n'), ((19586, 19624), 'oggm.core.centerlines.catchment_width_geom', 'centerlines.catchment_width_geom', (['gdir'], {}), '(gdir)\n', (19618, 19624), False, 'from oggm.core import gis, centerlines\n'), 
((19633, 19677), 'oggm.core.centerlines.catchment_width_correction', 'centerlines.catchment_width_correction', (['gdir'], {}), '(gdir)\n', (19671, 19677), False, 'from oggm.core import gis, centerlines\n'), ((19752, 19794), 'oggm.tasks.process_dummy_cru_file', 'tasks.process_dummy_cru_file', (['gdir'], {'seed': '(0)'}), '(gdir, seed=0)\n', (19780, 19794), False, 'from oggm import tasks, utils, workflow\n'), ((19803, 19827), 'oggm.tasks.local_t_star', 'tasks.local_t_star', (['gdir'], {}), '(gdir)\n', (19821, 19827), False, 'from oggm import tasks, utils, workflow\n'), ((19836, 19867), 'oggm.tasks.mu_star_calibration', 'tasks.mu_star_calibration', (['gdir'], {}), '(gdir)\n', (19861, 19867), False, 'from oggm import tasks, utils, workflow\n'), ((19903, 19937), 'oggm.tasks.find_inversion_calving', 'tasks.find_inversion_calving', (['gdir'], {}), '(gdir)\n', (19931, 19937), False, 'from oggm import tasks, utils, workflow\n'), ((19987, 20024), 'oggm.tasks.init_present_time_glacier', 'tasks.init_present_time_glacier', (['gdir'], {}), '(gdir)\n', (20018, 20024), False, 'from oggm import tasks, utils, workflow\n'), ((20099, 20174), 'oggm.tasks.run_constant_climate', 'tasks.run_constant_climate', (['gdir'], {'bias': '(0)', 'nyears': '(200)', 'temperature_bias': '(-0.5)'}), '(gdir, bias=0, nyears=200, temperature_bias=-0.5)\n', (20125, 20174), False, 'from oggm import tasks, utils, workflow\n'), ((1066, 1080), 'oggm.tests.funcs.get_test_dir', 'get_test_dir', ([], {}), '()\n', (1078, 1080), False, 'from oggm.tests.funcs import get_test_dir\n'), ((1104, 1132), 'os.path.exists', 'os.path.exists', (['self.testdir'], {}), '(self.testdir)\n', (1118, 1132), False, 'import os\n'), ((1146, 1171), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (1157, 1171), False, 'import os\n'), ((1943, 1985), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""IceThick_SouthGlacier.shp"""'], {}), "('IceThick_SouthGlacier.shp')\n", (1956, 1985), False, 'from oggm.utils import get_demo_file\n'), ((2652, 2685), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (2665, 2685), False, 'from oggm.utils import get_demo_file\n'), ((3213, 3245), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (3232, 3245), False, 'from oggm.workflow import execute_entity_task\n'), ((3546, 3582), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""mb_SouthGlacier.tif"""'], {}), "('mb_SouthGlacier.tif')\n", (3559, 3582), False, 'from oggm.utils import get_demo_file\n'), ((3615, 3652), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""dem_SouthGlacier.tif"""'], {}), "('dem_SouthGlacier.tif')\n", (3628, 3652), False, 'from oggm.utils import get_demo_file\n'), ((3768, 3786), 'numpy.isfinite', 'np.isfinite', (['mbref'], {}), '(mbref)\n', (3779, 3786), True, 'import numpy as np\n'), ((4228, 4244), 'numpy.average', 'np.average', (['mymb'], {}), '(mymb)\n', (4238, 4244), True, 'import numpy as np\n'), ((4309, 4326), 'numpy.average', 'np.average', (['mbref'], {}), '(mbref)\n', (4319, 4326), True, 'import numpy as np\n'), ((4362, 4379), 'numpy.average', 'np.average', (['mbref'], {}), '(mbref)\n', (4372, 4379), True, 'import numpy as np\n'), ((4553, 4570), 'numpy.average', 'np.average', (['myfit'], {}), '(myfit)\n', (4563, 4570), True, 'import numpy as np\n'), ((4664, 4750), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mbref', 'demref'], {'s': '(5)', 'label': '"""Obs (2007-2012), shifted to Avg(SMB) = 0"""'}), "(mbref, demref, 
s=5, label=\n 'Obs (2007-2012), shifted to Avg(SMB) = 0')\n", (4675, 4750), True, 'import matplotlib.pyplot as plt\n'), ((4782, 4835), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mymb', 'demref'], {'s': '(5)', 'label': '"""OGGM MB at t*"""'}), "(mymb, demref, s=5, label='OGGM MB at t*')\n", (4793, 4835), True, 'import matplotlib.pyplot as plt\n'), ((4848, 4904), 'matplotlib.pyplot.scatter', 'plt.scatter', (['myfit', 'demref'], {'s': '(5)', 'label': '"""Polyfit"""', 'c': '"""C3"""'}), "(myfit, demref, s=5, label='Polyfit', c='C3')\n", (4859, 4904), True, 'import matplotlib.pyplot as plt\n'), ((4917, 4947), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""MB (mm w.e yr-1)"""'], {}), "('MB (mm w.e yr-1)')\n", (4927, 4947), True, 'import matplotlib.pyplot as plt\n'), ((4960, 4986), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Altidude (m)"""'], {}), "('Altidude (m)')\n", (4970, 4986), True, 'import matplotlib.pyplot as plt\n'), ((4999, 5011), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5009, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5024, 5034), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5032, 5034), True, 'import matplotlib.pyplot as plt\n'), ((5192, 5225), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (5205, 5225), False, 'from oggm.utils import get_demo_file\n'), ((5753, 5785), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (5772, 5785), False, 'from oggm.workflow import execute_entity_task\n'), ((6233, 6265), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (6252, 6265), False, 'from oggm.workflow import execute_entity_task\n'), ((6834, 6926), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ds_low['lin_mb_above_z']", "ds_low['lin_mb_above_z_on_catch']"], {}), "(ds_low['lin_mb_above_z'], ds_low[\n 'lin_mb_above_z_on_catch'])\n", (6860, 6926), True, 'import numpy as np\n'), ((6973, 7067), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (["ds_low['oggm_mb_above_z']", "ds_low['oggm_mb_above_z_on_catch']"], {}), "(ds_low['oggm_mb_above_z'], ds_low[\n 'oggm_mb_above_z_on_catch'])\n", (6999, 7067), True, 'import numpy as np\n'), ((8295, 8328), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (8308, 8328), False, 'from oggm.utils import get_demo_file\n'), ((8856, 8888), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (8875, 8888), False, 'from oggm.workflow import execute_entity_task\n'), ((10629, 10646), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10637, 10646), True, 'import matplotlib.pyplot as plt\n'), ((10720, 10737), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (10728, 10737), True, 'import matplotlib.pyplot as plt\n'), ((10771, 10806), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(12, 3)'}), '(1, 3, figsize=(12, 3))\n', (10783, 10806), True, 'import matplotlib.pyplot as plt\n'), ((10959, 10977), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10975, 10977), True, 'import matplotlib.pyplot as plt\n'), ((10990, 11000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10998, 11000), True, 'import matplotlib.pyplot as plt\n'), ((11156, 11189), 'oggm.utils.get_demo_file', 'get_demo_file', 
(['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (11169, 11189), False, 'from oggm.utils import get_demo_file\n'), ((11717, 11749), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (11736, 11749), False, 'from oggm.workflow import execute_entity_task\n'), ((12327, 12402), 'oggm.tasks.mass_conservation_inversion', 'tasks.mass_conservation_inversion', (['gdir'], {'glen_a': '(glen_a * x[0])', 'fs': '(fs * x[1])'}), '(gdir, glen_a=glen_a * x[0], fs=fs * x[1])\n', (12360, 12402), False, 'from oggm import tasks, utils, workflow\n'), ((12507, 12552), 'oggm.tasks.distribute_thickness_per_altitude', 'tasks.distribute_thickness_per_altitude', (['gdir'], {}), '(gdir)\n', (12546, 12552), False, 'from oggm import tasks, utils, workflow\n'), ((13948, 13965), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (13956, 13965), True, 'import matplotlib.pyplot as plt\n'), ((13994, 14028), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 3)'}), '(1, 2, figsize=(8, 3))\n', (14006, 14028), True, 'import matplotlib.pyplot as plt\n'), ((14123, 14141), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14139, 14141), True, 'import matplotlib.pyplot as plt\n'), ((14154, 14164), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14162, 14164), True, 'import matplotlib.pyplot as plt\n'), ((14376, 14409), 'oggm.utils.get_demo_file', 'get_demo_file', (['"""SouthGlacier.shp"""'], {}), "('SouthGlacier.shp')\n", (14389, 14409), False, 'from oggm.utils import get_demo_file\n'), ((15027, 15059), 'oggm.workflow.execute_entity_task', 'execute_entity_task', (['task', 'gdirs'], {}), '(task, gdirs)\n', (15046, 15059), False, 'from oggm.workflow import execute_entity_task\n'), ((15836, 15857), 'oggm.graphics.plot_inversion', 'plot_inversion', (['gdirs'], {}), '(gdirs)\n', (15850, 15857), False, 'from oggm.graphics import plot_inversion\n'), ((15870, 15880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15878, 15880), True, 'import matplotlib.pyplot as plt\n'), ((16050, 16064), 'oggm.tests.funcs.get_test_dir', 'get_test_dir', ([], {}), '()\n', (16062, 16064), False, 'from oggm.tests.funcs import get_test_dir\n'), ((16088, 16116), 'os.path.exists', 'os.path.exists', (['self.testdir'], {}), '(self.testdir)\n', (16102, 16116), False, 'import os\n'), ((16130, 16155), 'os.makedirs', 'os.makedirs', (['self.testdir'], {}), '(self.testdir)\n', (16141, 16155), False, 'import os\n'), ((17970, 17980), 'numpy.sum', 'np.sum', (['h1'], {}), '(h1)\n', (17976, 17980), True, 'import numpy as np\n'), ((17999, 18009), 'numpy.sum', 'np.sum', (['h2'], {}), '(h2)\n', (18005, 18009), True, 'import numpy as np\n'), ((18025, 18043), 'oggm.utils.rmsd', 'utils.rmsd', (['h1', 'h2'], {}), '(h1, h2)\n', (18035, 18043), False, 'from oggm import tasks, utils, workflow\n'), ((18841, 18851), 'numpy.sum', 'np.sum', (['h1'], {}), '(h1)\n', (18847, 18851), True, 'import numpy as np\n'), ((18870, 18880), 'numpy.sum', 'np.sum', (['h2'], {}), '(h2)\n', (18876, 18880), True, 'import numpy as np\n'), ((18896, 18914), 'oggm.utils.rmsd', 'utils.rmsd', (['h1', 'h2'], {}), '(h1, h2)\n', (18906, 18914), False, 'from oggm import tasks, utils, workflow\n'), ((2004, 2041), 'numpy.array', 'np.array', (['[p.xy for p in df.geometry]'], {}), '([p.xy for p in df.geometry])\n', (2012, 2041), True, 'import numpy as np\n'), ((3810, 3828), 'numpy.isfinite', 'np.isfinite', (['mbref'], {}), '(mbref)\n', (3821, 3828), True, 'import 
numpy as np\n'), ((4108, 4124), 'numpy.average', 'np.average', (['mymb'], {}), '(mymb)\n', (4118, 4124), True, 'import numpy as np\n'), ((10039, 10082), 'xarray.zeros_like', 'xr.zeros_like', (['ds.distributed_thickness_int'], {}), '(ds.distributed_thickness_int)\n', (10052, 10082), True, 'import xarray as xr\n'), ((13540, 13579), 'xarray.zeros_like', 'xr.zeros_like', (['ds.distributed_thickness'], {}), '(ds.distributed_thickness)\n', (13553, 13579), True, 'import xarray as xr\n'), ((17505, 17519), 'numpy.where', 'np.where', (['mask'], {}), '(mask)\n', (17513, 17519), True, 'import numpy as np\n'), ((17694, 17705), 'numpy.min', 'np.min', (['hgt'], {}), '(hgt)\n', (17700, 17705), True, 'import numpy as np\n'), ((18565, 18576), 'numpy.min', 'np.min', (['hgt'], {}), '(hgt)\n', (18571, 18576), True, 'import numpy as np\n'), ((17766, 17777), 'numpy.max', 'np.max', (['hgt'], {}), '(hgt)\n', (17772, 17777), True, 'import numpy as np\n'), ((18637, 18648), 'numpy.max', 'np.max', (['hgt'], {}), '(hgt)\n', (18643, 18648), True, 'import numpy as np\n'), ((12795, 12819), 'numpy.abs', 'np.abs', (['(thick - df.thick)'], {}), '(thick - df.thick)\n', (12801, 12819), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# Copyright © 2018 Broadcom. All Rights Reserved. The term “Broadcom” refers to
# Broadcom Inc. and/or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`license_remove` - PyFOS util to trigger the license_remove operation.
*******************************************************************************
The :mod:`license_remove` util is used to execute the license_remove operation.
This module is a stand-alone script and API that can be used to remove a \
license key from a switch.
* Input:
| Infrastructure Options:
| -i,--ipaddr=IPADDR: The IP address of the FOS switch.
| -L,--login=LOGIN: The login name.
    | -P,--password=PASSWORD: The password.
| -f,--vfid=VFID: The VFID to which the request is directed [OPTIONAL].
| -s,--secured=MODE: The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose: Verbose mode [OPTIONAL].
| Util Script Options:
| --name=LICENSE-KEY: License key string.
* Output:
* Status of the license remove operation.
"""
import sys
from pyfos import pyfos_auth
from pyfos.pyfos_brocade_operation_license import license
from pyfos import pyfos_util
from pyfos.utils import brcd_util
def usage():
print(" Util Script Options:")
print()
print(" --name=LICENSE-KEY License key string.")
print()
def main(argv):
valid_options = ["name"]
inputs = brcd_util.generic_input(argv, usage, valid_options)
session = pyfos_auth.login(inputs["login"], inputs["password"],
inputs["ipaddr"], inputs["secured"],
verbose=inputs["verbose"])
if pyfos_auth.is_failed_login(session):
print("login failed because",
session.get(pyfos_auth.CREDENTIAL_KEY)[pyfos_auth.LOGIN_ERROR_KEY])
brcd_util.full_usage(usage, valid_options)
sys.exit()
brcd_util.exit_register(session)
if "name" not in inputs:
print("License key is required")
brcd_util.full_usage(usage, valid_options)
sys.exit()
l_obj = license()
l_obj.set_action("remove")
l_obj.set_name(inputs["name"])
l_rsp_obj = l_obj.post(session)
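    # Older FOS versions do not implement this request object; in that case
    # the response carries the info-message below, so report it and exit.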
if ("info-message" in l_rsp_obj and
l_rsp_obj["info-message"] ==
"Switch version is lower than the object"):
pyfos_util.response_print(l_rsp_obj)
pyfos_auth.logout(session)
sys.exit()
if pyfos_util.is_failed_resp(l_rsp_obj):
print("Firmwaredownload operation failed.\n")
pyfos_util.response_print(l_rsp_obj)
else:
pyfos_util.response_print(l_rsp_obj)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"pyfos.pyfos_auth.login",
"pyfos.pyfos_util.response_print",
"pyfos.pyfos_util.is_failed_resp",
"pyfos.utils.brcd_util.generic_input",
"pyfos.utils.brcd_util.exit_register",
"pyfos.pyfos_brocade_operation_license.license",
"pyfos.pyfos_auth.is_failed_login",
"pyfos.utils.brcd_util.full_usage",
"pyfos.pyfos_auth.logout",
"sys.exit"
] |
[((1980, 2031), 'pyfos.utils.brcd_util.generic_input', 'brcd_util.generic_input', (['argv', 'usage', 'valid_options'], {}), '(argv, usage, valid_options)\n', (2003, 2031), False, 'from pyfos.utils import brcd_util\n'), ((2047, 2168), 'pyfos.pyfos_auth.login', 'pyfos_auth.login', (["inputs['login']", "inputs['password']", "inputs['ipaddr']", "inputs['secured']"], {'verbose': "inputs['verbose']"}), "(inputs['login'], inputs['password'], inputs['ipaddr'],\n inputs['secured'], verbose=inputs['verbose'])\n", (2063, 2168), False, 'from pyfos import pyfos_auth\n'), ((2234, 2269), 'pyfos.pyfos_auth.is_failed_login', 'pyfos_auth.is_failed_login', (['session'], {}), '(session)\n', (2260, 2269), False, 'from pyfos import pyfos_auth\n'), ((2466, 2498), 'pyfos.utils.brcd_util.exit_register', 'brcd_util.exit_register', (['session'], {}), '(session)\n', (2489, 2498), False, 'from pyfos.utils import brcd_util\n'), ((2653, 2662), 'pyfos.pyfos_brocade_operation_license.license', 'license', ([], {}), '()\n', (2660, 2662), False, 'from pyfos.pyfos_brocade_operation_license import license\n'), ((2998, 3034), 'pyfos.pyfos_util.is_failed_resp', 'pyfos_util.is_failed_resp', (['l_rsp_obj'], {}), '(l_rsp_obj)\n', (3023, 3034), False, 'from pyfos import pyfos_util\n'), ((3195, 3221), 'pyfos.pyfos_auth.logout', 'pyfos_auth.logout', (['session'], {}), '(session)\n', (3212, 3221), False, 'from pyfos import pyfos_auth\n'), ((2399, 2441), 'pyfos.utils.brcd_util.full_usage', 'brcd_util.full_usage', (['usage', 'valid_options'], {}), '(usage, valid_options)\n', (2419, 2441), False, 'from pyfos.utils import brcd_util\n'), ((2450, 2460), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2458, 2460), False, 'import sys\n'), ((2578, 2620), 'pyfos.utils.brcd_util.full_usage', 'brcd_util.full_usage', (['usage', 'valid_options'], {}), '(usage, valid_options)\n', (2598, 2620), False, 'from pyfos.utils import brcd_util\n'), ((2629, 2639), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2637, 2639), False, 'import sys\n'), ((2900, 2936), 'pyfos.pyfos_util.response_print', 'pyfos_util.response_print', (['l_rsp_obj'], {}), '(l_rsp_obj)\n', (2925, 2936), False, 'from pyfos import pyfos_util\n'), ((2945, 2971), 'pyfos.pyfos_auth.logout', 'pyfos_auth.logout', (['session'], {}), '(session)\n', (2962, 2971), False, 'from pyfos import pyfos_auth\n'), ((2980, 2990), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2988, 2990), False, 'import sys\n'), ((3098, 3134), 'pyfos.pyfos_util.response_print', 'pyfos_util.response_print', (['l_rsp_obj'], {}), '(l_rsp_obj)\n', (3123, 3134), False, 'from pyfos import pyfos_util\n'), ((3153, 3189), 'pyfos.pyfos_util.response_print', 'pyfos_util.response_print', (['l_rsp_obj'], {}), '(l_rsp_obj)\n', (3178, 3189), False, 'from pyfos import pyfos_util\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains training and sampling functions an autoregressive model."""
import functools
from typing import Any, Callable
from absl import logging
from flax import linen as nn
from flax import struct
from flax.training import common_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
from autoregressive_diffusion.model import distributions
from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils
from autoregressive_diffusion.utils import util_fns
def cross_entropy(logits, targets):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
onehot_targets = common_utils.onehot(targets, vocab_size)
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
d = np.prod(targets.shape[1:])
loss = util_fns.sum_except_batch(loss) / d / np.log(2)
return loss
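# Minimal usage sketch (hypothetical shapes, not part of the original module):
# with uniform logits over 256 classes the cross entropy is exactly 8 bits
# per dimension:
#   logits = jnp.zeros((8, 16, 256))
#   targets = jnp.zeros((8, 16), dtype=jnp.int32)
#   bpd = cross_entropy(logits, targets)  # float array of shape (8,), all 8.0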
def compute_accuracy(logits, targets):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.shape[:-1] != targets.shape[:-1]:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
logits = logits[:, :, None, :] # Insert empty channel axis.
d = float(np.prod(logits.shape[1:-1]))
acc = jnp.equal(jnp.argmax(logits, axis=-1), targets) / d
acc = util_fns.sum_except_batch(acc)
return acc
class ARM(struct.PyTreeNode):
"""Static model object that wraps important model functions."""
config: ml_collections.config_dict.config_dict.ConfigDict
apply_fn: Callable[Ellipsis, Any]
logprob_fn: Callable[Ellipsis, Any]
sample_fn: Callable[Ellipsis, Any]
neural_net: Any
num_steps: int
policy_support: bool = False
num_stages: int = 1
absorbing_state: int = 0
random_order: bool = False
def log_px(self, rng, params, x, train, context=None):
batch_size = x.shape[0]
if self.random_order:
logging.info('Log-likelihood for a random-order ARM XLNet style.')
rng, rng_perm = jax.random.split(rng)
permutations = ardm_utils.get_batch_permutations(rng_perm, batch_size,
self.num_steps)
else:
logging.info('Log-likelihood for a standard ARM.')
permutations = None
net_out = self.apply_fn(
{'params': params}, x, t=None, mask=None, train=train, context=context,
permutations=permutations,
rngs={'dropout': rng} if train else None)
d = float(np.prod(net_out.shape[1:-1]))
log_px_elementwise = util_fns.sum_except_batch(self.logprob_fn(x, net_out))
log_px = log_px_elementwise / d / np.log(2)
neg_acc = -compute_accuracy(logits=net_out, targets=x)
t_batch_dummy = jnp.zeros((batch_size,), dtype=jnp.int32)
loss_components_dummy = jnp.zeros((batch_size,))
return log_px, loss_components_dummy, neg_acc, t_batch_dummy
def elbo(self, rng, params, x, train, context=None):
return self.log_px(rng, params, x, train, context)
def sample(self, rng, params, batch_size, context=None):
chain_sharded = self.p_sample(rng, params, batch_size, context)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
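  # pmapped over local devices; out_axes=1 places the device axis second, so
  # the per-step chain axis stays first in the gathered output.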
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, 0),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample(self, rng, params, batch_size, context):
"""Samples from the model, calls sample_step for every timestep."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
logging.info('Sampling from model, hope you are patient...')
if self.random_order:
rng, rng_perm = jax.random.split(rng)
orders = ardm_utils.get_batch_permutations(rng_perm,
per_device_batch_size,
self.num_steps)
else:
orders = jnp.arange(0, self.num_steps)[None, :]
orders = jnp.repeat(orders, repeats=per_device_batch_size, axis=0)
chain = []
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
chain.append(x)
def next_sample_step(x, t):
x = self.sample_step(
jax.random.fold_in(rng, t), x,
t, orders, params, context)
return x, x
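    # lax.scan compiles the sampling loop into a single fused program; it
    # returns the final carry and the stacked per-step outputs (the chain).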
ts = jnp.arange(self.num_steps)
_, chain = jax.lax.scan(next_sample_step, init=x, xs=ts)
return chain
def get_naive_policy(self, budget=250):
assert budget <= self.num_steps
# We use budget+1 because a linspace contains the last step.
naive_policy = ardm_utils.integer_linspace(0, self.num_steps, budget+1)
# Last index does not need to be in policy.
naive_policy = naive_policy[:-1]
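    # E.g. (hypothetical numbers): with num_steps=3072 and budget=256 the
    # policy visits every 12th generation step.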
return naive_policy
def sample_with_naive_policy(self,
rng,
params,
batch_size,
budget=250):
logging.info('Sampling with naive policy.')
naive_policy = self.get_naive_policy(budget)
return self.sample_with_policy(rng, params, batch_size, naive_policy)
def sample_with_policy(self, rng, params, batch_size, policy):
"""Wrapper for p_sample_with_policy that takes care of unsharding."""
logging.info('Sampling from model (quickly)...')
chain_sharded = self.p_sample_with_policy(rng, params, batch_size, policy)
chain = chain_sharded.reshape(
chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])
return chain
@functools.partial(jax.pmap, in_axes=(None, None, 0, None, None),
out_axes=1,
static_broadcasted_argnums=(0, 3), axis_name='batch')
def p_sample_with_policy(self, rng, params, batch_size, policy):
"""Samples from the model, calls sample_step for every policy step."""
rng = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
assert batch_size % jax.local_device_count() == 0
per_device_batch_size = batch_size // jax.local_device_count()
rng, rng_perm = jax.random.split(rng)
sigmas = ardm_utils.get_batch_permutations(rng_perm, per_device_batch_size,
self.num_steps)
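    # Append the final step index so the last policy interval runs through
    # the remaining positions up to num_steps.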
policy_extended = jnp.concatenate(
[policy, jnp.array([self.num_steps], dtype=jnp.int32)], axis=0)
x = jnp.full((per_device_batch_size, *self.config.data_shape),
fill_value=self.absorbing_state,
dtype=jnp.int32)
def next_sample_step(x, idx):
left_t = policy_extended[idx]
right_t = policy_extended[idx + 1]
x = self.sample_step_with_policy(
jax.random.fold_in(rng, idx), x, left_t, right_t, sigmas, params)
return x, x
x, chain = jax.lax.scan(next_sample_step, x, jnp.arange(len(policy)))
return chain
def sample_step_with_policy(self, rng, x, left_t, right_t, sigmas, params):
"""Sampling code for a single step starting at left_t until right_t."""
batch_size = x.shape[0]
left_t = jnp.full(batch_size, fill_value=left_t)
right_t = jnp.full(batch_size, fill_value=right_t)
prev_selection, current_selection = ardm_utils.get_selections_for_sigma_and_range(
sigmas, left_t, right_t, self.config.data_shape)
params_px = self.apply_fn(
{'params': params},
x, left_t, prev_selection, train=False)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def sample_step(self, rng, x, t, sigmas, params, context):
"""Sampling code for a single step t."""
batch_size = x.shape[0]
t_batch = jnp.full(batch_size, fill_value=t)
prev_selection, current_selection = ardm_utils.get_selection_for_sigma_and_t(
sigmas, t_batch, self.config.data_shape)
if self.random_order:
permutations = sigmas
else:
permutations = None
params_px = self.apply_fn(
{'params': params},
x, t_batch, prev_selection, train=False, context=context,
permutations=permutations)
new_x = self.sample_fn(rng, params_px)
x = (1 - current_selection) * x + current_selection * new_x
x = jnp.asarray(x, jnp.int32)
return x
def init_architecture(self, init_rng, tmp_x, tmp_t, context=None):
tmp_mask = None
if context is None:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask, train=False)
else:
return self.neural_net.init(init_rng, tmp_x, tmp_t, tmp_mask,
train=False, context=context)
@classmethod
def create(cls, config, get_architecture, random_order):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
required_num_outputs = config.num_classes
num_steps = int(np.prod(config.data_shape))
# We set num_steps=0 since this disables time conditioning, which is not
# necessary for ARMs.
neural_net = get_architecture(
config.num_classes, required_num_outputs, num_steps=0, is_causal=True)
out_dist = distributions.SoftmaxCategorical(config.data_shape[-1],
config.num_classes)
return cls(
config,
apply_fn=neural_net.apply,
logprob_fn=out_dist.log_prob,
sample_fn=out_dist.sample,
neural_net=neural_net,
num_steps=num_steps,
random_order=random_order
)
|
[
"autoregressive_diffusion.utils.util_fns.sum_except_batch",
"absl.logging.info",
"jax.lax.axis_index",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selections_for_sigma_and_range",
"jax.numpy.repeat",
"numpy.prod",
"jax.local_device_count",
"jax.random.fold_in",
"jax.numpy.argmax",
"jax.numpy.full",
"autoregressive_diffusion.model.distributions.SoftmaxCategorical",
"functools.partial",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.integer_linspace",
"jax.lax.scan",
"jax.numpy.asarray",
"jax.numpy.zeros",
"jax.numpy.array",
"flax.linen.log_softmax",
"numpy.log",
"flax.training.common_utils.onehot",
"jax.numpy.arange",
"jax.random.split",
"autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selection_for_sigma_and_t"
] |
[((1649, 1689), 'flax.training.common_utils.onehot', 'common_utils.onehot', (['targets', 'vocab_size'], {}), '(targets, vocab_size)\n', (1668, 1689), False, 'from flax.training import common_utils\n'), ((1766, 1792), 'numpy.prod', 'np.prod', (['targets.shape[1:]'], {}), '(targets.shape[1:])\n', (1773, 1792), True, 'import numpy as np\n'), ((2517, 2547), 'autoregressive_diffusion.utils.util_fns.sum_except_batch', 'util_fns.sum_except_batch', (['acc'], {}), '(acc)\n', (2542, 2547), False, 'from autoregressive_diffusion.utils import util_fns\n'), ((4418, 4549), 'functools.partial', 'functools.partial', (['jax.pmap'], {'in_axes': '(None, None, 0, None, 0)', 'out_axes': '(1)', 'static_broadcasted_argnums': '(0, 3)', 'axis_name': '"""batch"""'}), "(jax.pmap, in_axes=(None, None, 0, None, 0), out_axes=1,\n static_broadcasted_argnums=(0, 3), axis_name='batch')\n", (4435, 4549), False, 'import functools\n'), ((6930, 7064), 'functools.partial', 'functools.partial', (['jax.pmap'], {'in_axes': '(None, None, 0, None, None)', 'out_axes': '(1)', 'static_broadcasted_argnums': '(0, 3)', 'axis_name': '"""batch"""'}), "(jax.pmap, in_axes=(None, None, 0, None, None), out_axes=1,\n static_broadcasted_argnums=(0, 3), axis_name='batch')\n", (6947, 7064), False, 'import functools\n'), ((1841, 1850), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1847, 1850), True, 'import numpy as np\n'), ((2420, 2447), 'numpy.prod', 'np.prod', (['logits.shape[1:-1]'], {}), '(logits.shape[1:-1])\n', (2427, 2447), True, 'import numpy as np\n'), ((3892, 3933), 'jax.numpy.zeros', 'jnp.zeros', (['(batch_size,)'], {'dtype': 'jnp.int32'}), '((batch_size,), dtype=jnp.int32)\n', (3901, 3933), True, 'import jax.numpy as jnp\n'), ((3962, 3986), 'jax.numpy.zeros', 'jnp.zeros', (['(batch_size,)'], {}), '((batch_size,))\n', (3971, 3986), True, 'import jax.numpy as jnp\n'), ((4905, 4965), 'absl.logging.info', 'logging.info', (['"""Sampling from model, hope you are patient..."""'], {}), "('Sampling from model, hope you are patient...')\n", (4917, 4965), False, 'from absl import logging\n'), ((5394, 5507), 'jax.numpy.full', 'jnp.full', (['(per_device_batch_size, *self.config.data_shape)'], {'fill_value': 'self.absorbing_state', 'dtype': 'jnp.int32'}), '((per_device_batch_size, *self.config.data_shape), fill_value=self.\n absorbing_state, dtype=jnp.int32)\n', (5402, 5507), True, 'import jax.numpy as jnp\n'), ((5725, 5751), 'jax.numpy.arange', 'jnp.arange', (['self.num_steps'], {}), '(self.num_steps)\n', (5735, 5751), True, 'import jax.numpy as jnp\n'), ((5767, 5812), 'jax.lax.scan', 'jax.lax.scan', (['next_sample_step'], {'init': 'x', 'xs': 'ts'}), '(next_sample_step, init=x, xs=ts)\n', (5779, 5812), False, 'import jax\n'), ((5994, 6052), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.integer_linspace', 'ardm_utils.integer_linspace', (['(0)', 'self.num_steps', '(budget + 1)'], {}), '(0, self.num_steps, budget + 1)\n', (6021, 6052), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((6365, 6408), 'absl.logging.info', 'logging.info', (['"""Sampling with naive policy."""'], {}), "('Sampling with naive policy.')\n", (6377, 6408), False, 'from absl import logging\n'), ((6676, 6724), 'absl.logging.info', 'logging.info', (['"""Sampling from model (quickly)..."""'], {}), "('Sampling from model (quickly)...')\n", (6688, 6724), False, 'from absl import logging\n'), ((7451, 7472), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (7467, 7472), False, 'import jax\n'), ((7486, 
7573), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'per_device_batch_size', 'self.num_steps'], {}), '(rng_perm, per_device_batch_size, self.\n num_steps)\n', (7519, 7573), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((7737, 7850), 'jax.numpy.full', 'jnp.full', (['(per_device_batch_size, *self.config.data_shape)'], {'fill_value': 'self.absorbing_state', 'dtype': 'jnp.int32'}), '((per_device_batch_size, *self.config.data_shape), fill_value=self.\n absorbing_state, dtype=jnp.int32)\n', (7745, 7850), True, 'import jax.numpy as jnp\n'), ((8414, 8453), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 'left_t'}), '(batch_size, fill_value=left_t)\n', (8422, 8453), True, 'import jax.numpy as jnp\n'), ((8468, 8508), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 'right_t'}), '(batch_size, fill_value=right_t)\n', (8476, 8508), True, 'import jax.numpy as jnp\n'), ((8550, 8649), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selections_for_sigma_and_range', 'ardm_utils.get_selections_for_sigma_and_range', (['sigmas', 'left_t', 'right_t', 'self.config.data_shape'], {}), '(sigmas, left_t, right_t, self\n .config.data_shape)\n', (8595, 8649), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((8878, 8903), 'jax.numpy.asarray', 'jnp.asarray', (['x', 'jnp.int32'], {}), '(x, jnp.int32)\n', (8889, 8903), True, 'import jax.numpy as jnp\n'), ((9066, 9100), 'jax.numpy.full', 'jnp.full', (['batch_size'], {'fill_value': 't'}), '(batch_size, fill_value=t)\n', (9074, 9100), True, 'import jax.numpy as jnp\n'), ((9142, 9228), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_selection_for_sigma_and_t', 'ardm_utils.get_selection_for_sigma_and_t', (['sigmas', 't_batch', 'self.config.data_shape'], {}), '(sigmas, t_batch, self.config.\n data_shape)\n', (9182, 9228), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((9601, 9626), 'jax.numpy.asarray', 'jnp.asarray', (['x', 'jnp.int32'], {}), '(x, jnp.int32)\n', (9612, 9626), True, 'import jax.numpy as jnp\n'), ((10457, 10532), 'autoregressive_diffusion.model.distributions.SoftmaxCategorical', 'distributions.SoftmaxCategorical', (['config.data_shape[-1]', 'config.num_classes'], {}), '(config.data_shape[-1], config.num_classes)\n', (10489, 10532), False, 'from autoregressive_diffusion.model import distributions\n'), ((1803, 1834), 'autoregressive_diffusion.utils.util_fns.sum_except_batch', 'util_fns.sum_except_batch', (['loss'], {}), '(loss)\n', (1828, 1834), False, 'from autoregressive_diffusion.utils import util_fns\n'), ((2467, 2494), 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (2477, 2494), True, 'import jax.numpy as jnp\n'), ((3092, 3158), 'absl.logging.info', 'logging.info', (['"""Log-likelihood for a random-order ARM XLNet style."""'], {}), "('Log-likelihood for a random-order ARM XLNet style.')\n", (3104, 3158), False, 'from absl import logging\n'), ((3181, 3202), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (3197, 3202), False, 'import jax\n'), ((3224, 3295), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'batch_size', 'self.num_steps'], {}), '(rng_perm, batch_size, self.num_steps)\n', (3257, 3295), 
False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((3367, 3417), 'absl.logging.info', 'logging.info', (['"""Log-likelihood for a standard ARM."""'], {}), "('Log-likelihood for a standard ARM.')\n", (3379, 3417), False, 'from absl import logging\n'), ((3654, 3682), 'numpy.prod', 'np.prod', (['net_out.shape[1:-1]'], {}), '(net_out.shape[1:-1])\n', (3661, 3682), True, 'import numpy as np\n'), ((3802, 3811), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3808, 3811), True, 'import numpy as np\n'), ((4750, 4777), 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), "('batch')\n", (4768, 4777), False, 'import jax\n'), ((4876, 4900), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (4898, 4900), False, 'import jax\n'), ((5015, 5036), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (5031, 5036), False, 'import jax\n'), ((5052, 5139), 'autoregressive_diffusion.model.autoregressive_diffusion.ardm_utils.get_batch_permutations', 'ardm_utils.get_batch_permutations', (['rng_perm', 'per_device_batch_size', 'self.num_steps'], {}), '(rng_perm, per_device_batch_size, self.\n num_steps)\n', (5085, 5139), False, 'from autoregressive_diffusion.model.autoregressive_diffusion import ardm_utils\n'), ((5312, 5369), 'jax.numpy.repeat', 'jnp.repeat', (['orders'], {'repeats': 'per_device_batch_size', 'axis': '(0)'}), '(orders, repeats=per_device_batch_size, axis=0)\n', (5322, 5369), True, 'import jax.numpy as jnp\n'), ((7279, 7306), 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), "('batch')\n", (7297, 7306), False, 'import jax\n'), ((7405, 7429), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (7427, 7429), False, 'import jax\n'), ((10195, 10221), 'numpy.prod', 'np.prod', (['config.data_shape'], {}), '(config.data_shape)\n', (10202, 10221), True, 'import numpy as np\n'), ((1726, 1748), 'flax.linen.log_softmax', 'nn.log_softmax', (['logits'], {}), '(logits)\n', (1740, 1748), True, 'from flax import linen as nn\n'), ((4804, 4828), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (4826, 4828), False, 'import jax\n'), ((5258, 5287), 'jax.numpy.arange', 'jnp.arange', (['(0)', 'self.num_steps'], {}), '(0, self.num_steps)\n', (5268, 5287), True, 'import jax.numpy as jnp\n'), ((5628, 5654), 'jax.random.fold_in', 'jax.random.fold_in', (['rng', 't'], {}), '(rng, t)\n', (5646, 5654), False, 'import jax\n'), ((7333, 7357), 'jax.local_device_count', 'jax.local_device_count', ([], {}), '()\n', (7355, 7357), False, 'import jax\n'), ((7673, 7717), 'jax.numpy.array', 'jnp.array', (['[self.num_steps]'], {'dtype': 'jnp.int32'}), '([self.num_steps], dtype=jnp.int32)\n', (7682, 7717), True, 'import jax.numpy as jnp\n'), ((8042, 8070), 'jax.random.fold_in', 'jax.random.fold_in', (['rng', 'idx'], {}), '(rng, idx)\n', (8060, 8070), False, 'import jax\n')]
|
# Copyright 2018 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import six
from oslo_log import log as logging
from octavia_f5.common import constants
from octavia_f5.restclient import as3exceptions
LOG = logging.getLogger(__name__)
def unpack(obj):
if isinstance(obj, BaseDescription):
return obj.to_dict()
elif isinstance(obj, list):
return [unpack(o) for o in obj]
else:
return obj
class BaseDescription(object):
def __init__(self, data):
for item in data:
if item == 'self':
continue
if item.startswith('_'):
continue
if item == 'kwargs':
self.__dict__.update(data['kwargs'])
continue
self.__dict__.update({item: data[item]})
def __hash__(self):
return hash(frozenset(self.to_dict().items()))
def __eq__(self, other):
if isinstance(other, BaseDescription):
return self.to_dict() == other.to_dict()
return False
def require(self, key):
if getattr(self, key, None) is None:
raise as3exceptions.RequiredKeyMissingException(key)
def to_dict(self):
data = self.__dict__.copy()
for key, value in six.iteritems(data):
if isinstance(value, BaseDescription):
data[key] = value.to_dict()
elif isinstance(value, list):
data[key] = []
for item in self.__dict__[key]:
data[key].append(unpack(item))
return data
def to_json(self):
return json.dumps(self.to_dict(), sort_keys=True,
indent=4, separators=(',', ': '))
class AS3(BaseDescription):
ACTIONS = ['deploy', 'dry-run', 'patch', 'redeploy', 'retrieve', 'remove']
LOG_MAP = {
logging.CRITICAL: 'critical',
logging.FATAL: 'emergency',
logging.ERROR: 'error',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'debug',
logging.NOTSET: 'warning',
logging.TRACE: 'debug'
}
def __init__(self, persist=True, action='deploy', _log_level=logging.WARNING, **kwargs):
if action not in self.ACTIONS:
raise as3exceptions.TypeNotSupportedException
super(AS3, self).__init__(locals())
setattr(self, 'class', 'AS3')
setattr(self, 'logLevel', self.LOG_MAP.get(_log_level, 'warning'))
setattr(self, 'trace', _log_level == logging.TRACE)
def set_action(self, action):
if action not in self.ACTIONS:
raise as3exceptions.TypeNotSupportedException
setattr(self, 'action', action)
def set_adc(self, adc):
setattr(self, 'declaration', adc)
def set_sync_to_group(self, group):
setattr(self, 'syncToGroup', group)
def set_bigip_target_host(self, host):
setattr(self, 'targetHost', host)
def set_target_username(self, username):
setattr(self, 'targetUsername', username)
def set_target_passphrase(self, passphrase):
setattr(self, 'targetPassphrase', passphrase)
def set_target_tokens(self, tokens):
setattr(self, 'targetTokens', tokens)
class ADC(BaseDescription):
def __init__(self, schemaVersion='3.19.0', updateMode='selective', **kwargs): # noqa
super(ADC, self).__init__(locals())
setattr(self, 'class', 'ADC')
self.require('id')
self.require('label')
def set_tenant(self, name, tenant):
setattr(self, name, tenant)
class Tenant(BaseDescription):
def __init__(self, **kwargs):
super(Tenant, self).__init__(locals())
setattr(self, 'class', 'Tenant')
def add_application(self, name, application):
setattr(self, name, application)
class Application(BaseDescription):
def __init__(self, template, **kwargs):
if template not in constants.SUPPORTED_APPLICATION_TEMPLATES:
raise as3exceptions.TypeNotSupportedException
super(Application, self).__init__(locals())
setattr(self, 'class', 'Application')
def set_service_main(self, service):
self.serviceMain = service # noqa
def add_entities(self, entities):
for name, entity in entities:
setattr(self, name, entity)
def add_endpoint_policy(self, name, policy_endpoint):
if hasattr(self, name):
raise as3exceptions.DuplicatedKeyException
setattr(self, name, policy_endpoint)
def add_tls_server(self, name, tls_server):
setattr(self, name, tls_server)
def add_certificate(self, name, certificate):
setattr(self, name, certificate)
class Service(BaseDescription):
def __init__(self, _servicetype, virtualAddresses=None, # noqa
virtualPort=None, **kwargs): # noqa
if _servicetype not in constants.SUPPORTED_SERVICES:
raise as3exceptions.TypeNotSupportedException
super(Service, self).__init__(locals())
setattr(self, 'class', _servicetype)
class ServiceAddress(BaseDescription):
def __init__(self, **kwargs):
super(ServiceAddress, self).__init__(locals())
setattr(self, 'class', 'Service_Address')
class Pool(BaseDescription):
def __init__(self, **kwargs):
super(Pool, self).__init__(locals())
setattr(self, 'class', 'Pool')
class Member(BaseDescription):
def __init__(self, enable=True, **kwargs):
super(Member, self).__init__(locals())
self.require('servicePort')
self.require('serverAddresses')
class Monitor(BaseDescription):
def __init__(self, **kwargs):
super(Monitor, self).__init__(locals())
setattr(self, 'class', 'Monitor')
class BigIP(BaseDescription):
def __init__(self, bigip, _common=True):
super(BigIP, self).__init__(locals())
self.require('bigip')
if _common:
setattr(self, 'bigip', '/Common/{}'.format(bigip))
class Service_Generic_profileTCP(BaseDescription):
def __init__(self, ingress, egress):
super(Service_Generic_profileTCP, self).__init__(locals())
self.require('ingress')
self.require('egress')
class IRule(BaseDescription):
def __init__(self, iRule, **kwargs):
super(IRule, self).__init__(locals())
setattr(self, 'class', 'iRule')
setattr(self, 'iRule', iRule)
class Persist(BaseDescription):
def __init__(self, **kwargs):
super(Persist, self).__init__(locals())
setattr(self, 'class', 'Persist')
class Endpoint_Policy(BaseDescription):
STRATEGY = ['all-match', 'best-match', 'first-match', 'custom']
def __init__(self, strategy, **kwargs):
super(Endpoint_Policy, self).__init__(locals())
setattr(self, 'class', 'Endpoint_Policy')
if strategy not in self.STRATEGY:
raise as3exceptions.TypeNotSupportedException
class Endpoint_Policy_Rule(BaseDescription):
def __init__(self, **kwargs):
super(Endpoint_Policy_Rule, self).__init__(locals())
class Policy_Condition(BaseDescription):
TYPE = ['httpHeader', 'httpUri', 'httpCookie', 'sslExtension']
def __init__(self, type, **kwargs):
if type not in self.TYPE:
raise as3exceptions.TypeNotSupportedException
super(Policy_Condition, self).__init__(locals())
class Policy_Action(BaseDescription):
def __init__(self, **kwargs):
super(Policy_Action, self).__init__(locals())
class Policy_Compare_String(BaseDescription):
def __init__(self, values, operand='equals', _case_sensitive=False):
super(Policy_Compare_String, self).__init__(locals())
setattr(self, 'caseSensitive', _case_sensitive)
class Pointer(BaseDescription):
def __init__(self, use):
super(Pointer, self).__init__(locals())
self.require('use')
class TLS_Server(BaseDescription):
def __init__(self, **kwargs):
super(TLS_Server, self).__init__(locals())
setattr(self, 'class', 'TLS_Server')
class TLS_Client(BaseDescription):
def __init__(self, **kwargs):
super(TLS_Client, self).__init__(locals())
setattr(self, 'class', 'TLS_Client')
class Certificate(BaseDescription):
def __init__(self, **kwargs):
super(Certificate, self).__init__(locals())
setattr(self, 'class', 'Certificate')
self.require('certificate')
class CA_Bundle(BaseDescription):
def __init__(self, **kwargs):
super(CA_Bundle, self).__init__(locals())
setattr(self, 'class', 'CA_Bundle')
self.require('bundle')
class HTTP_Profile(BaseDescription):
def __init__(self, **kwargs):
super(HTTP_Profile, self).__init__(locals())
setattr(self, 'class', 'HTTP_Profile')
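# --- Illustrative usage (not part of the module) ---------------------------
# A minimal sketch of how these builder classes compose into an AS3 payload.
# The template and service-type strings are assumptions: substitute whatever
# values constants.SUPPORTED_APPLICATION_TEMPLATES / SUPPORTED_SERVICES allow.
#
#   adc = ADC(id='example-id', label='example-label')
#   app = Application('generic')
#   app.set_service_main(Service('Service_Generic',
#                                virtualAddresses=['192.0.2.10'],
#                                virtualPort=80))
#   tenant = Tenant()
#   tenant.add_application('app1', app)
#   adc.set_tenant('tenant1', tenant)
#   declaration = AS3()
#   declaration.set_adc(adc)
#   print(declaration.to_json())  # nested dicts with the 'class' keys set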
|
[
"oslo_log.log.getLogger",
"six.iteritems",
"octavia_f5.restclient.as3exceptions.RequiredKeyMissingException"
] |
[((723, 750), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (740, 750), True, 'from oslo_log import log as logging\n'), ((1769, 1788), 'six.iteritems', 'six.iteritems', (['data'], {}), '(data)\n', (1782, 1788), False, 'import six\n'), ((1636, 1682), 'octavia_f5.restclient.as3exceptions.RequiredKeyMissingException', 'as3exceptions.RequiredKeyMissingException', (['key'], {}), '(key)\n', (1677, 1682), False, 'from octavia_f5.restclient import as3exceptions\n')]
|
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
MarketServiceUpAndDown.py
This Python class contains methods and attributes that help model market
services that provide service through discharging more OR charging less
relative to the power set points.
"""
from storagevet.ValueStreams.ValueStream import ValueStream
import cvxpy as cvx
import pandas as pd
import numpy as np
import storagevet.Library as Lib
class MarketServiceUpAndDown(ValueStream):
""" A market service that can provide services in the "up" and "down"
directions
"""
def __init__(self, name, full_name, params):
""" Generates the objective function, finds and creates constraints.
Args:
name (str): abbreviated name
full_name (str): the expanded name of the service
params (Dict): input parameters
"""
ValueStream.__init__(self, name, params)
self.full_name = full_name
self.combined_market = params['CombinedMarket']
self.duration = params['duration']
self.energy_growth = params['energyprice_growth']/100
self.eod_avg = params['eod']
self.eou_avg = params['eou']
self.growth = params['growth']/100
self.price_down = params['regd_price']
self.price_up = params['regu_price']
self.price_energy = params['energy_price']
self.variable_names = {'up_ch', 'up_dis', 'down_ch', 'down_dis'}
self.variables_df = pd.DataFrame(columns=self.variable_names)
def grow_drop_data(self, years, frequency, load_growth):
""" Adds data by growing the given data OR drops any extra data that
        might have slipped in. Update variables that hold timeseries data
        after adding growth data. This method should be called after
        add_growth_data and before the optimization is run.
Args:
years (List): list of years for which analysis will occur on
frequency (str): period frequency of the timeseries data
load_growth (float): percent/ decimal value of the growth rate of
loads in this simulation
"""
self.price_energy = Lib.fill_extra_data(self.price_energy, years,
self.energy_growth, frequency)
self.price_energy = Lib.drop_extra_data(self.price_energy, years)
self.price_up = Lib.fill_extra_data(self.price_up, years,
self.growth, frequency)
self.price_up = Lib.drop_extra_data(self.price_up, years)
self.price_down = Lib.fill_extra_data(self.price_down, years,
self.growth, frequency)
self.price_down = Lib.drop_extra_data(self.price_down, years)
def initialize_variables(self, size):
""" Updates the optimization variable attribute with new optimization
variables of size SIZE
Variables added:
up_ch (Variable): A cvxpy variable for freq regulation capacity to
increase charging power
down_ch (Variable): A cvxpy variable for freq regulation capacity to
decrease charging power
up_dis (Variable): A cvxpy variable for freq regulation capacity to
increase discharging power
down_dis (Variable): A cvxpy variable for freq regulation capacity to
decrease discharging power
Args:
size (Int): Length of optimization variables to create
Returns:
Dictionary of optimization variables
"""
self.variables = {
'up_ch': cvx.Variable(shape=size, name=f'{self.name}_up_c'),
'down_ch': cvx.Variable(shape=size, name=f'{self.name}_regd_c'),
'up_dis': cvx.Variable(shape=size, name=f'{self.name}_up_dis'),
'down_dis': cvx.Variable(shape=size, name=f'{self.name}_regd_d')
}
def objective_function(self, mask, load_sum, tot_variable_gen,
generator_out_sum, net_ess_power, annuity_scalar=1):
""" Generates the full objective function, including the optimization
variables.
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent
generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional
generation within the system
net_ess_power (list, Expression): the sum of the net power of all
the ESS in the system. [= charge - discharge]
annuity_scalar (float): a scalar value to be multiplied by any
yearly cost or benefit that helps capture the cost/benefit over
the entire project lifetime (only to be set iff sizing)
Returns:
A dictionary with the portion of the objective function that it
affects, labeled by the expression's key. Default is {}.
"""
# pay for reg down energy, get paid for reg up energy
# paid revenue for capacity to do both
size = sum(mask)
p_regu = cvx.Parameter(size, value=self.price_up.loc[mask].values,
name=f'{self.name}_p_regu')
p_regd = cvx.Parameter(size, value=self.price_down.loc[mask].values,
name=f'{self.name}_p_regd')
p_ene = cvx.Parameter(size, value=self.price_energy.loc[mask].values,
name=f'{self.name}_price')
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
# REGULATION DOWN: PAYMENT
regdown_disch_payment \
= cvx.sum(self.variables['down_dis'] * -p_regd) * annuity_scalar
regdown_charge_payment \
= cvx.sum(self.variables['down_ch'] * -p_regd) * annuity_scalar
reg_down_tot = regdown_charge_payment + regdown_disch_payment
# REGULATION UP: PAYMENT
regup_disch_payment \
= cvx.sum(self.variables['up_dis'] * -p_regu) * annuity_scalar
regup_charge_payment \
= cvx.sum(self.variables['up_ch'] * -p_regu) * annuity_scalar
reg_up_tot = regup_charge_payment + regup_disch_payment
# REGULATION UP & DOWN: ENERGY SETTLEMENT
regdown_disch_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['down_dis'],
p_ene),
eod)) * self.dt * annuity_scalar
regdown_charge_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['down_ch'],
p_ene),
eod)) * self.dt * annuity_scalar
e_settlement = regdown_disch_settlement + regdown_charge_settlement
regup_disch_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['up_dis'],
-p_ene),
eou)) * self.dt * annuity_scalar
regup_charge_settlement \
= cvx.sum(cvx.multiply(cvx.multiply(self.variables['up_ch'],
-p_ene),
eou)) * self.dt * annuity_scalar
e_settlement += regup_disch_settlement + regup_charge_settlement
return {f'{self.name}_regup_prof': reg_up_tot,
f'{self.name}_regdown_prof': reg_down_tot,
f'{self.name}_energy_settlement': e_settlement}
def get_energy_option_up(self, mask):
""" transform the energy option up into a n x 1 vector
Args:
mask:
Returns: a CVXPY vector
"""
return cvx.promote(self.eou_avg, mask.loc[mask].shape)
def get_energy_option_down(self, mask):
""" transform the energy option down into a n x 1 vector
Args:
mask:
Returns: a CVXPY vector
"""
return cvx.promote(self.eod_avg, mask.loc[mask].shape)
def constraints(self, mask, load_sum, tot_variable_gen, generator_out_sum,
net_ess_power, combined_rating):
"""build constraint list method for the optimization engine
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
tot_variable_gen (Expression): the sum of the variable/intermittent
generation sources
load_sum (list, Expression): the sum of load within the system
generator_out_sum (list, Expression): the sum of conventional
generation within the system
net_ess_power (list, Expression): the sum of the net power of all
the ESS in the system. flow out into the grid is negative
combined_rating (Dictionary): the combined rating of each DER class
type
Returns:
An list of constraints for the optimization variables added to
the system of equations
"""
constraint_list = []
constraint_list += [cvx.NonPos(-self.variables['up_ch'])]
constraint_list += [cvx.NonPos(-self.variables['down_ch'])]
constraint_list += [cvx.NonPos(-self.variables['up_dis'])]
constraint_list += [cvx.NonPos(-self.variables['down_dis'])]
if self.combined_market:
constraint_list += [
cvx.Zero(self.variables['down_dis'] + self.variables['down_ch'] -
self.variables['up_dis'] - self.variables['up_ch'])
]
return constraint_list
def p_reservation_charge_up(self, mask):
""" the amount of charging power in the up direction (supplying power
up into the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['up_ch']
def p_reservation_charge_down(self, mask):
""" the amount of charging power in the up direction (pulling power
down from the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['down_ch']
def p_reservation_discharge_up(self, mask):
""" the amount of charging power in the up direction (supplying power
up into the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['up_dis']
def p_reservation_discharge_down(self, mask):
""" the amount of charging power in the up direction (pulling power
down from the grid) that needs to be reserved for this value stream
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: CVXPY parameter/variable
"""
return self.variables['down_dis']
def uenergy_option_stored(self, mask):
""" the deviation in energy due to changes in charge
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns:
"""
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
e_ch_less = cvx.multiply(self.variables['up_ch'], eou) * self.dt
e_ch_more = cvx.multiply(self.variables['down_ch'], eod) * self.dt
return e_ch_less - e_ch_more
def uenergy_option_provided(self, mask):
""" the deviation in energy due to changes in discharge
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns:
"""
eou = self.get_energy_option_up(mask)
eod = self.get_energy_option_down(mask)
e_dis_less = cvx.multiply(self.variables['down_dis'], eod) * self.dt
e_dis_more = cvx.multiply(self.variables['up_dis'], eou) * self.dt
return e_dis_more - e_dis_less
def worst_case_uenergy_stored(self, mask):
""" the amount of energy, from the current SOE that needs to be
        reserved for this value stream to prevent any violations between the
        steps in time that are not captured in our timeseries.
Note: stored energy should be positive and provided energy should be
negative
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: tuple (stored, provided),
where the first value is the case where the systems would end up
with more energy than expected and the second corresponds to the
case where the systems would end up with less energy than expected
"""
stored \
= self.variables['down_ch'] * self.duration \
+ self.variables['down_dis'] * self.duration
return stored
def worst_case_uenergy_provided(self, mask):
""" the amount of energy, from the current SOE that needs to be
        reserved for this value stream to prevent any violations between the
        steps in time that are not captured in our timeseries.
Note: stored energy should be positive and provided energy should be
negative
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns: tuple (stored, provided),
where the first value is the case where the systems would end up
with more energy than expected and the second corresponds to the
case where the systems would end up with less energy than expected
"""
provided \
= self.variables['up_ch'] * -self.duration \
+ self.variables['up_dis'] * -self.duration
return provided
def timeseries_report(self):
""" Summaries the optimization results for this Value Stream.
Returns: A timeseries dataframe with user-friendly column headers that
summarize the results pertaining to this instance
"""
report = pd.DataFrame(index=self.price_energy.index)
# GIVEN
report.loc[:, f"{self.name} Up Price ($/kW)"] \
= self.price_up
report.loc[:, f"{self.name} Down Price ($/kW)"] \
= self.price_down
report.loc[:, f"{self.name} Energy Settlement Price ($/kWh)"] = \
self.price_energy
# OPTIMIZATION VARIABLES
report.loc[:, f'{self.full_name} Down (Charging) (kW)'] \
= self.variables_df['down_ch']
report.loc[:, f'{self.full_name} Down (Discharging) (kW)'] \
= self.variables_df['down_dis']
report.loc[:, f'{self.full_name} Up (Charging) (kW)'] \
= self.variables_df['up_ch']
report.loc[:, f'{self.full_name} Up (Discharging) (kW)'] \
= self.variables_df['up_dis']
# CALCULATED EXPRESSIONS (ENERGY THROUGH-PUTS)
e_thru_down_dis = np.multiply(self.eod_avg,
self.variables_df['down_dis']) * self.dt
e_thru_down_ch = np.multiply(self.eod_avg,
self.variables_df['down_ch']) * self.dt
e_thru_up_dis = -np.multiply(self.eou_avg,
self.variables_df['up_dis']) * self.dt
e_thru_up_ch = -np.multiply(self.eou_avg,
self.variables_df['up_ch']) * self.dt
uenergy_down = e_thru_down_dis + e_thru_down_ch
uenergy_up = e_thru_up_dis + e_thru_up_ch
column_start = f"{self.name} Energy Throughput"
report.loc[:, f"{column_start} (kWh)"] = uenergy_down + uenergy_up
report.loc[:, f"{column_start} Up (Charging) (kWh)"] = e_thru_up_ch
report.loc[:, f"{column_start} Up (Discharging) (kWh)"] = e_thru_up_dis
report.loc[:, f"{column_start} Down (Charging) (kWh)"] = e_thru_down_ch
report.loc[:, f"{column_start} Down (Discharging) (kWh)"] \
= e_thru_down_dis
return report
def proforma_report(self, opt_years, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
opt_years (list): list of years the optimization problem ran for
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame): DataFrame with all the optimization variable solutions
        Returns: A DataFrame with each year in opt_years as the
        index and the corresponding value this stream provided.
"""
proforma = super().proforma_report(opt_years, apply_inflation_rate_func,
fill_forward_func, results)
pref = self.full_name
reg_up =\
results.loc[:, f'{pref} Up (Charging) (kW)'] \
+ results.loc[:, f'{pref} Up (Discharging) (kW)']
regulation_up_prof = np.multiply(reg_up, self.price_up)
reg_down = \
results.loc[:, f'{pref} Down (Charging) (kW)'] \
+ results.loc[:, f'{pref} Down (Discharging) (kW)']
regulation_down_prof = np.multiply(reg_down, self.price_down)
energy_throughput \
= results.loc[:, f"{self.name} Energy Throughput (kWh)"]
energy_through_prof = np.multiply(energy_throughput, self.price_energy)
# combine all potential value streams into one df for faster
# splicing into years
fr_results = pd.DataFrame({'E': energy_through_prof,
'RU': regulation_up_prof,
'RD': regulation_down_prof},
index=results.index)
market_results_only = proforma.copy(deep=True)
for year in opt_years:
year_subset = fr_results[fr_results.index.year == year]
yr_pd = pd.Period(year=year, freq='y')
proforma.loc[yr_pd, f'{self.name} Energy Throughput'] \
= -year_subset['E'].sum()
market_results_only.loc[yr_pd, f'{pref} Up'] \
= year_subset['RU'].sum()
market_results_only.loc[yr_pd, f'{pref} Down'] \
= year_subset['RD'].sum()
# forward fill growth columns with inflation at their corresponding growth rates
market_results_only = fill_forward_func(market_results_only, self.growth)
proforma = fill_forward_func(proforma, self.energy_growth)
# concat the two together
proforma = pd.concat([proforma, market_results_only], axis=1)
return proforma
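# --- Illustrative construction (not part of the module) --------------------
# A minimal sketch of the params dict the constructor above expects; the
# price entries would normally be pandas Series aligned with the analysis
# timeseries, and every numeric value below is a placeholder assumption.
#
#   params = {
#       'CombinedMarket': True,          # bid up and down capacity together
#       'duration': 0.25,                # hours of energy reserved per kW bid
#       'energyprice_growth': 2.0,       # percent per year
#       'eod': 0.3, 'eou': 0.3,          # average energy option down / up
#       'growth': 2.0,                   # percent per year for capacity prices
#       'regu_price': price_up_series,
#       'regd_price': price_down_series,
#       'energy_price': energy_price_series,
#   }
#   service = MarketServiceUpAndDown('FR', 'Frequency Regulation', params)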
|
[
"pandas.DataFrame",
"numpy.multiply",
"cvxpy.Parameter",
"cvxpy.multiply",
"cvxpy.Zero",
"storagevet.Library.drop_extra_data",
"storagevet.ValueStreams.ValueStream.ValueStream.__init__",
"cvxpy.promote",
"cvxpy.NonPos",
"cvxpy.sum",
"storagevet.Library.fill_extra_data",
"pandas.Period",
"cvxpy.Variable",
"pandas.concat"
] |
[((2376, 2416), 'storagevet.ValueStreams.ValueStream.ValueStream.__init__', 'ValueStream.__init__', (['self', 'name', 'params'], {}), '(self, name, params)\n', (2396, 2416), False, 'from storagevet.ValueStreams.ValueStream import ValueStream\n'), ((2974, 3015), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.variable_names'}), '(columns=self.variable_names)\n', (2986, 3015), True, 'import pandas as pd\n'), ((3675, 3751), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_energy', 'years', 'self.energy_growth', 'frequency'], {}), '(self.price_energy, years, self.energy_growth, frequency)\n', (3694, 3751), True, 'import storagevet.Library as Lib\n'), ((3828, 3873), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_energy', 'years'], {}), '(self.price_energy, years)\n', (3847, 3873), True, 'import storagevet.Library as Lib\n'), ((3899, 3964), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_up', 'years', 'self.growth', 'frequency'], {}), '(self.price_up, years, self.growth, frequency)\n', (3918, 3964), True, 'import storagevet.Library as Lib\n'), ((4033, 4074), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_up', 'years'], {}), '(self.price_up, years)\n', (4052, 4074), True, 'import storagevet.Library as Lib\n'), ((4102, 4169), 'storagevet.Library.fill_extra_data', 'Lib.fill_extra_data', (['self.price_down', 'years', 'self.growth', 'frequency'], {}), '(self.price_down, years, self.growth, frequency)\n', (4121, 4169), True, 'import storagevet.Library as Lib\n'), ((4242, 4285), 'storagevet.Library.drop_extra_data', 'Lib.drop_extra_data', (['self.price_down', 'years'], {}), '(self.price_down, years)\n', (4261, 4285), True, 'import storagevet.Library as Lib\n'), ((6873, 6963), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_up.loc[mask].values', 'name': 'f"""{self.name}_p_regu"""'}), "(size, value=self.price_up.loc[mask].values, name=\n f'{self.name}_p_regu')\n", (6886, 6963), True, 'import cvxpy as cvx\n'), ((7007, 7099), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_down.loc[mask].values', 'name': 'f"""{self.name}_p_regd"""'}), "(size, value=self.price_down.loc[mask].values, name=\n f'{self.name}_p_regd')\n", (7020, 7099), True, 'import cvxpy as cvx\n'), ((7142, 7235), 'cvxpy.Parameter', 'cvx.Parameter', (['size'], {'value': 'self.price_energy.loc[mask].values', 'name': 'f"""{self.name}_price"""'}), "(size, value=self.price_energy.loc[mask].values, name=\n f'{self.name}_price')\n", (7155, 7235), True, 'import cvxpy as cvx\n'), ((9500, 9547), 'cvxpy.promote', 'cvx.promote', (['self.eou_avg', 'mask.loc[mask].shape'], {}), '(self.eou_avg, mask.loc[mask].shape)\n', (9511, 9547), True, 'import cvxpy as cvx\n'), ((9752, 9799), 'cvxpy.promote', 'cvx.promote', (['self.eod_avg', 'mask.loc[mask].shape'], {}), '(self.eod_avg, mask.loc[mask].shape)\n', (9763, 9799), True, 'import cvxpy as cvx\n'), ((16674, 16717), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'self.price_energy.index'}), '(index=self.price_energy.index)\n', (16686, 16717), True, 'import pandas as pd\n'), ((19578, 19612), 'numpy.multiply', 'np.multiply', (['reg_up', 'self.price_up'], {}), '(reg_up, self.price_up)\n', (19589, 19612), True, 'import numpy as np\n'), ((19791, 19829), 'numpy.multiply', 'np.multiply', (['reg_down', 'self.price_down'], {}), '(reg_down, self.price_down)\n', (19802, 19829), True, 'import numpy as np\n'), ((19958, 20007), 'numpy.multiply', 'np.multiply', 
(['energy_throughput', 'self.price_energy'], {}), '(energy_throughput, self.price_energy)\n', (19969, 20007), True, 'import numpy as np\n'), ((20131, 20250), 'pandas.DataFrame', 'pd.DataFrame', (["{'E': energy_through_prof, 'RU': regulation_up_prof, 'RD': regulation_down_prof\n }"], {'index': 'results.index'}), "({'E': energy_through_prof, 'RU': regulation_up_prof, 'RD':\n regulation_down_prof}, index=results.index)\n", (20143, 20250), True, 'import pandas as pd\n'), ((21161, 21211), 'pandas.concat', 'pd.concat', (['[proforma, market_results_only]'], {'axis': '(1)'}), '([proforma, market_results_only], axis=1)\n', (21170, 21211), True, 'import pandas as pd\n'), ((5161, 5211), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_up_c"""'}), "(shape=size, name=f'{self.name}_up_c')\n", (5173, 5211), True, 'import cvxpy as cvx\n'), ((5236, 5288), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_regd_c"""'}), "(shape=size, name=f'{self.name}_regd_c')\n", (5248, 5288), True, 'import cvxpy as cvx\n'), ((5312, 5364), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_up_dis"""'}), "(shape=size, name=f'{self.name}_up_dis')\n", (5324, 5364), True, 'import cvxpy as cvx\n'), ((5390, 5442), 'cvxpy.Variable', 'cvx.Variable', ([], {'shape': 'size', 'name': 'f"""{self.name}_regd_d"""'}), "(shape=size, name=f'{self.name}_regd_d')\n", (5402, 5442), True, 'import cvxpy as cvx\n'), ((7436, 7481), 'cvxpy.sum', 'cvx.sum', (["(self.variables['down_dis'] * -p_regd)"], {}), "(self.variables['down_dis'] * -p_regd)\n", (7443, 7481), True, 'import cvxpy as cvx\n'), ((7546, 7590), 'cvxpy.sum', 'cvx.sum', (["(self.variables['down_ch'] * -p_regd)"], {}), "(self.variables['down_ch'] * -p_regd)\n", (7553, 7590), True, 'import cvxpy as cvx\n'), ((7756, 7799), 'cvxpy.sum', 'cvx.sum', (["(self.variables['up_dis'] * -p_regu)"], {}), "(self.variables['up_dis'] * -p_regu)\n", (7763, 7799), True, 'import cvxpy as cvx\n'), ((7862, 7904), 'cvxpy.sum', 'cvx.sum', (["(self.variables['up_ch'] * -p_regu)"], {}), "(self.variables['up_ch'] * -p_regu)\n", (7869, 7904), True, 'import cvxpy as cvx\n'), ((10927, 10963), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['up_ch'])"], {}), "(-self.variables['up_ch'])\n", (10937, 10963), True, 'import cvxpy as cvx\n'), ((10993, 11031), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['down_ch'])"], {}), "(-self.variables['down_ch'])\n", (11003, 11031), True, 'import cvxpy as cvx\n'), ((11061, 11098), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['up_dis'])"], {}), "(-self.variables['up_dis'])\n", (11071, 11098), True, 'import cvxpy as cvx\n'), ((11128, 11167), 'cvxpy.NonPos', 'cvx.NonPos', (["(-self.variables['down_dis'])"], {}), "(-self.variables['down_dis'])\n", (11138, 11167), True, 'import cvxpy as cvx\n'), ((13708, 13750), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_ch']", 'eou'], {}), "(self.variables['up_ch'], eou)\n", (13720, 13750), True, 'import cvxpy as cvx\n'), ((13781, 13825), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_ch']", 'eod'], {}), "(self.variables['down_ch'], eod)\n", (13793, 13825), True, 'import cvxpy as cvx\n'), ((14291, 14336), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_dis']", 'eod'], {}), "(self.variables['down_dis'], eod)\n", (14303, 14336), True, 'import cvxpy as cvx\n'), ((14368, 14411), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_dis']", 'eou'], {}), "(self.variables['up_dis'], eou)\n", (14380, 14411), True, 
'import cvxpy as cvx\n'), ((17562, 17618), 'numpy.multiply', 'np.multiply', (['self.eod_avg', "self.variables_df['down_dis']"], {}), "(self.eod_avg, self.variables_df['down_dis'])\n", (17573, 17618), True, 'import numpy as np\n'), ((17692, 17747), 'numpy.multiply', 'np.multiply', (['self.eod_avg', "self.variables_df['down_ch']"], {}), "(self.eod_avg, self.variables_df['down_ch'])\n", (17703, 17747), True, 'import numpy as np\n'), ((20525, 20555), 'pandas.Period', 'pd.Period', ([], {'year': 'year', 'freq': '"""y"""'}), "(year=year, freq='y')\n", (20534, 20555), True, 'import pandas as pd\n'), ((11251, 11373), 'cvxpy.Zero', 'cvx.Zero', (["(self.variables['down_dis'] + self.variables['down_ch'] - self.variables[\n 'up_dis'] - self.variables['up_ch'])"], {}), "(self.variables['down_dis'] + self.variables['down_ch'] - self.\n variables['up_dis'] - self.variables['up_ch'])\n", (11259, 11373), True, 'import cvxpy as cvx\n'), ((17820, 17874), 'numpy.multiply', 'np.multiply', (['self.eou_avg', "self.variables_df['up_dis']"], {}), "(self.eou_avg, self.variables_df['up_dis'])\n", (17831, 17874), True, 'import numpy as np\n'), ((17946, 17999), 'numpy.multiply', 'np.multiply', (['self.eou_avg', "self.variables_df['up_ch']"], {}), "(self.eou_avg, self.variables_df['up_ch'])\n", (17957, 17999), True, 'import numpy as np\n'), ((8107, 8154), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_dis']", 'p_ene'], {}), "(self.variables['down_dis'], p_ene)\n", (8119, 8154), True, 'import cvxpy as cvx\n'), ((8343, 8389), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['down_ch']", 'p_ene'], {}), "(self.variables['down_ch'], p_ene)\n", (8355, 8389), True, 'import cvxpy as cvx\n'), ((8652, 8698), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_dis']", '(-p_ene)'], {}), "(self.variables['up_dis'], -p_ene)\n", (8664, 8698), True, 'import cvxpy as cvx\n'), ((8885, 8930), 'cvxpy.multiply', 'cvx.multiply', (["self.variables['up_ch']", '(-p_ene)'], {}), "(self.variables['up_ch'], -p_ene)\n", (8897, 8930), True, 'import cvxpy as cvx\n')]
|
import numpy as np
import tensorflow as tf
# the activation function
def g_1(x):
assert len(x.shape) == 1
rand = tf.random_uniform([x.shape.as_list()[0]], dtype=tf.float32)
t = tf.nn.sigmoid(x) - rand
return 0.5*(1 + t / (tf.abs(t) + 1e-8))
def g_2(x):
return tf.nn.sigmoid(x)
def g(x):
return tf.nn.leaky_relu(x)
def merge(inputs, weights):
assert len(inputs.shape)==1
assert len(weights.shape)==2
inputs = tf.reshape(inputs, [inputs.shape.as_list()[0], 1])
return tf.reshape(tf.matmul(weights, inputs), [weights.shape.as_list()[0]])
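# Illustrative: merge(tf.ones([3]), tf.ones([2, 3])) evaluates to [3., 3.],
# i.e. a matrix-vector product reshaped back to rank 1.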
def rand_init(sizes):
assert len(sizes)<=2
if len(sizes)==0:
return np.float32(np.random.rand())
elif len(sizes)==1:
return np.float32(np.random.rand(sizes[0]))
elif len(sizes)==2:
return np.float32(np.random.rand(sizes[0], sizes[1]))
else:
assert False
class RealNN(object):
def __init__(self, feats):
# generate weight variables
self.weights = []
self.biases = []
self.in_dim = feats[0]
self.inputs = tf.placeholder(shape=[self.in_dim], dtype=tf.float32)
self.layers = [self.inputs]
self.before_act = []
self.alpha = 0.0
self.reg = None
self.opt = None
self.loss = None
self.minimizer = None
self.sess = None
for i in range(1, len(feats)):
w = tf.get_variable(initializer=rand_init([feats[i], feats[i-1]]), name='L%dW' % i)
self.weights.append(w)
b = tf.get_variable(initializer=rand_init([feats[i]]), name='L%dB' % i)
self.biases.append(b)
if i==len(feats)-1:
self.layers.append(merge(self.layers[-1], w)+b)
else:
self.before_act.append(merge(self.layers[-1], w)+b)
self.layers.append(g(self.before_act[-1]))
self.out_dim = feats[-1]
self.outputs = self.layers[-1]
self.truth = tf.placeholder(shape=[self.out_dim], dtype=tf.float32)
def train(self, x, y, max_iter):
self.opt = tf.train.GradientDescentOptimizer(learning_rate=1e-2)
self.loss = tf.reduce_mean(tf.abs(self.truth - self.outputs))
self.reg = 0.00
for i in range(len(self.before_act)):
self.reg = self.reg + tf.reduce_mean(tf.maximum(tf.abs(self.before_act[i])-3.0, 0))
self.reg = self.reg / len(self.before_act)
self.minimizer = self.opt.minimize((1-self.alpha)*self.loss + self.alpha*self.reg)
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
_cnt = 0
while _cnt < max_iter:
ind = np.random.randint(0, len(x), [])
_, _loss, _reg, _output = self.sess.run(
[self.minimizer, self.loss, self.reg, self.layers[-1]],
feed_dict={
self.inputs: x[ind],
self.truth: y[ind]
})
print('ITR# %d\t LOSS=%.6f REG=%.6f' % (_cnt, _loss, _reg))
#print(_output)
_cnt += 1
saver.save(self.sess, 'models/model.ckpt')
print('model saved to path: models/....')
    def infer(self, x):
        # the input placeholder holds one sample, so feed row by row
        return np.stack([self.sess.run(self.outputs, feed_dict={self.inputs: xi})
                         for xi in x])
def int2bins(x):
x = np.uint8(x)
op = 0b10000000
bins = np.array([0.0] * 8)
for i in range(8):
if op & x == op:
bins[i]=1
else:
bins[i]=0
op = op >> 1
return bins
def concat(bins_1, bins_2):
return np.concatenate((bins_1, bins_2), axis=0)
def observe(size):
x = np.random.randint(0,256,[size,2])
_x = np.zeros([size, 16], dtype=np.float32)
_y = np.zeros([size, 2], dtype=np.float32)
for i in range(size):
_x[i] = concat(int2bins(x[i,0]), int2bins(x[i,1]))
if x[i,0] > x[i, 1]:
_y[i, 0] = 0
_y[i, 1] = 1
        elif x[i, 0] < x[i, 1]:  # strict '<' so the equality branch is reachable
_y[i, 0] = 1
_y[i, 1] = 0
else:
_y[i, 0] = 1
_y[i, 1] = 1
return _x, _y
def check_acc(y, y_i):
    # y and y_i are numpy arrays, so use the plain tuple .shape
    _score = 0.0
    for i in range(y.shape[0]):
        if y[i, 0] == y_i[i, 0] and y[i, 1] == y_i[i, 1]:
            _score += 1
    return _score / y.shape[0]
if __name__ == '__main__':
nn = RealNN([16, 32, 16, 8, 2])
x, y = observe(10)
print(x)
print(y)
nn.train(x, y, 100000)
#x, y = observe(10)
#y_i = nn.infer(x)
#check_acc(y_i, y)
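    # Quick sanity check (illustrative): int2bins unpacks a uint8 MSB-first,
    # e.g. int2bins(5) -> [0. 0. 0. 0. 0. 1. 0. 1.], and concat of two such
    # vectors yields the 16-feature input the network expects.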
|
[
"numpy.uint8",
"tensorflow.abs",
"tensorflow.global_variables_initializer",
"numpy.zeros",
"tensorflow.Session",
"tensorflow.matmul",
"tensorflow.placeholder",
"numpy.random.randint",
"numpy.array",
"tensorflow.global_variables",
"numpy.random.rand",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.sigmoid",
"numpy.concatenate"
] |
[((284, 300), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (297, 300), True, 'import tensorflow as tf\n'), ((324, 343), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['x'], {}), '(x)\n', (340, 343), True, 'import tensorflow as tf\n'), ((3318, 3329), 'numpy.uint8', 'np.uint8', (['x'], {}), '(x)\n', (3326, 3329), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.array', 'np.array', (['([0.0] * 8)'], {}), '([0.0] * 8)\n', (3369, 3380), True, 'import numpy as np\n'), ((3565, 3605), 'numpy.concatenate', 'np.concatenate', (['(bins_1, bins_2)'], {'axis': '(0)'}), '((bins_1, bins_2), axis=0)\n', (3579, 3605), True, 'import numpy as np\n'), ((3635, 3671), 'numpy.random.randint', 'np.random.randint', (['(0)', '(256)', '[size, 2]'], {}), '(0, 256, [size, 2])\n', (3652, 3671), True, 'import numpy as np\n'), ((3678, 3716), 'numpy.zeros', 'np.zeros', (['[size, 16]'], {'dtype': 'np.float32'}), '([size, 16], dtype=np.float32)\n', (3686, 3716), True, 'import numpy as np\n'), ((3726, 3763), 'numpy.zeros', 'np.zeros', (['[size, 2]'], {'dtype': 'np.float32'}), '([size, 2], dtype=np.float32)\n', (3734, 3763), True, 'import numpy as np\n'), ((191, 207), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['x'], {}), '(x)\n', (204, 207), True, 'import tensorflow as tf\n'), ((525, 551), 'tensorflow.matmul', 'tf.matmul', (['weights', 'inputs'], {}), '(weights, inputs)\n', (534, 551), True, 'import tensorflow as tf\n'), ((1086, 1139), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.in_dim]', 'dtype': 'tf.float32'}), '(shape=[self.in_dim], dtype=tf.float32)\n', (1100, 1139), True, 'import tensorflow as tf\n'), ((1980, 2034), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.out_dim]', 'dtype': 'tf.float32'}), '(shape=[self.out_dim], dtype=tf.float32)\n', (1994, 2034), True, 'import tensorflow as tf\n'), ((2096, 2149), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(0.01)'}), '(learning_rate=0.01)\n', (2129, 2149), True, 'import tensorflow as tf\n'), ((2548, 2560), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2558, 2560), True, 'import tensorflow as tf\n'), ((680, 696), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (694, 696), True, 'import numpy as np\n'), ((2185, 2218), 'tensorflow.abs', 'tf.abs', (['(self.truth - self.outputs)'], {}), '(self.truth - self.outputs)\n', (2191, 2218), True, 'import tensorflow as tf\n'), ((2583, 2616), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2614, 2616), True, 'import tensorflow as tf\n'), ((2649, 2670), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2668, 2670), True, 'import tensorflow as tf\n'), ((748, 772), 'numpy.random.rand', 'np.random.rand', (['sizes[0]'], {}), '(sizes[0])\n', (762, 772), True, 'import numpy as np\n'), ((240, 249), 'tensorflow.abs', 'tf.abs', (['t'], {}), '(t)\n', (246, 249), True, 'import tensorflow as tf\n'), ((824, 858), 'numpy.random.rand', 'np.random.rand', (['sizes[0]', 'sizes[1]'], {}), '(sizes[0], sizes[1])\n', (838, 858), True, 'import numpy as np\n'), ((2350, 2376), 'tensorflow.abs', 'tf.abs', (['self.before_act[i]'], {}), '(self.before_act[i])\n', (2356, 2376), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import find_packages, setup
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
pkg_name = "tsgettoolbox"
version = open("VERSION").readline().strip()
if sys.argv[-1] == "publish":
os.system("cleanpy .")
os.system("python setup.py sdist")
os.system("twine upload dist/{pkg_name}-{version}.tar.gz".format(**locals()))
sys.exit()
README = open("README.rst").read()
install_requires = [
# List your project dependencies here.
# For more details, see:
# http://packages.python.org/distribute/setuptools.html#declaring-dependencies
"appdirs",
"beautifulsoup4",
"future",
"geojson",
"isodate",
"lxml",
"mechanize",
"requests",
"tables",
"tstoolbox >= 103",
"zeep",
"xarray",
"suds-jurko",
"netCDF4",
]
extras_require = {
"dev": [
"black",
"cleanpy",
"twine",
"pytest",
"coverage",
"flake8",
"pytest-cov",
"pytest-mpl",
"pre-commit",
"black-nbconvert",
"blacken-docs",
"velin",
"isort",
"pyroma",
"pyupgrade",
"commitizen",
]
}
setup(
name=pkg_name,
version=version,
description="Will get time series from different sources on the internet.",
long_description=README,
classifiers=[
# Get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Science/Research",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Environment :: Console",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
keywords="time_series uri url web_services rest",
author="<NAME>, PE",
author_email="<EMAIL>",
url="http://timcera.bitbucket.io/{pkg_name}/docs/index.html".format(**locals()),
license="BSD",
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
extras_require=extras_require,
entry_points={
"console_scripts": ["{pkg_name}={pkg_name}.{pkg_name}:main".format(**locals())]
},
test_suite="tests",
python_requires=">=3.7.1",
)
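# Typical workflows against this setup.py (illustrative):
#   pip install .                                  # runtime dependencies only
#   pip install -e .[dev]                          # plus the 'dev' extras above
#   python setup.py sdist && twine upload dist/*   # mirrors the 'publish' branch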
|
[
"setuptools.find_packages",
"os.system",
"sys.exit"
] |
[((410, 432), 'os.system', 'os.system', (['"""cleanpy ."""'], {}), "('cleanpy .')\n", (419, 432), False, 'import os\n'), ((437, 471), 'os.system', 'os.system', (['"""python setup.py sdist"""'], {}), "('python setup.py sdist')\n", (446, 471), False, 'import os\n'), ((558, 568), 'sys.exit', 'sys.exit', ([], {}), '()\n', (566, 568), False, 'import sys\n'), ((2380, 2400), 'setuptools.find_packages', 'find_packages', (['"""src"""'], {}), "('src')\n", (2393, 2400), False, 'from setuptools import find_packages, setup\n')]
|
import os
from django.apps import AppConfig
from ctf.settings import *
class SystemConfig(AppConfig):
name = 'system'
verbose_name = open(os.path.join(BASE_DIR, 'ctf', 'eventname'), 'r').read().strip()
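# Illustrative activation (assuming a standard Django layout): list
# 'system.apps.SystemConfig' (or just 'system') in INSTALLED_APPS so Django
# loads this config and reads the event name from ctf/eventname at startup.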
|
[
"os.path.join"
] |
[((146, 188), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""ctf"""', '"""eventname"""'], {}), "(BASE_DIR, 'ctf', 'eventname')\n", (158, 188), False, 'import os\n')]
|