from . import gxapi_cy
from geosoft.gxapi import GXContext, float_ref, int_ref, str_ref
### endblock ClassImports
### block Header
# NOTICE: The code generator will not replace the code in this block
### endblock Header
### block ClassImplementation
# NOTICE: Do not edit anything here, it is generated code
class GXSHD(gxapi_cy.WrapSHD):
"""
GXSHD class.
This class supports fast interactive shadowing in a map or grid document.
The SHD object is created using the
StartShading_EMAP method.
"""
def __init__(self, handle=0):
super(GXSHD, self).__init__(GXContext._get_tls_geo(), handle)
@classmethod
def null(cls):
"""
A null (undefined) instance of `GXSHD <geosoft.gxapi.GXSHD>`
:returns: A null `GXSHD <geosoft.gxapi.GXSHD>`
:rtype: GXSHD
"""
return GXSHD()
def is_null(self):
"""
Check if this is a null (undefined) instance
:returns: True if this is a null (undefined) instance, False otherwise.
:rtype: bool
"""
return self._internal_handle() == 0
# Miscellaneous
def refresh(self, grid_path, inclination, declination, scale, contrast, brightness, wet_look):
"""
Refresh the SHD with new shading parameters.
:param grid_path: Grid path returned
:param inclination: inclination (degrees)
:param declination: declination (degrees)
:param scale: vertical scale relative to base scale
:param contrast: contrast 0-1 (recommended >0.1, can change with wet_look changes)
:param brightness: brightness 0-1 (can change with wet_look changes)
:param wet_look: Apply wet-look effect (shading layer uses lighter distribution)?
:type grid_path: str
:type inclination: float
:type declination: float
:type scale: float
:type contrast: float_ref
:type brightness: float_ref
:type wet_look: bool
.. versionadded:: 2021.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
contrast.value, brightness.value = self._refresh(grid_path.encode(), inclination, declination, scale, contrast.value, brightness.value, wet_look)
def track_interactive(self, constraint, inclination, declination):
"""
Track a line on map and get shading parameters based on its length and direction.
:param constraint: :ref:`SHD_FIX`
:param inclination: returned inclination
:param declination: returned declination
:type constraint: int
:type inclination: float_ref
:type declination: float_ref
:returns: 0 if tracking completed successfully.
1 if user cancelled or tracking failed.
:rtype: int
.. versionadded:: 2021.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
"""
ret_val, inclination.value, declination.value = self._track_interactive(constraint, inclination.value, declination.value)
return ret_val
def end_shading(self, apply_changes):
"""
        This ends interactive shading and must be called if any interactive changes should be applied. Passing false for apply_changes is equivalent to simply disposing of the handle.
:param apply_changes: Apply changes to map.
:type apply_changes: bool
.. versionadded:: 2021.2
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
.. seealso::
StartShading_EMAP
"""
self._end_shading(apply_changes)
### endblock ClassImplementation
### block ClassExtend
# NOTICE: The code generator will not replace the code in this block
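# A minimal usage sketch, kept in this hand-edit block (assumptions: `shd`
# comes from an interactive shading session started elsewhere, e.g. via the
# EMAP start-shading call mentioned in the class docstring, and the grid
# path below is hypothetical):
#
#     contrast = float_ref()
#     contrast.value = 0.5
#     brightness = float_ref()
#     brightness.value = 0.5
#     shd.refresh('shading.grd', 45.0, 30.0, 1.0, contrast, brightness, False)
#     shd.end_shading(True)  # apply the interactive changes to the map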
### endblock ClassExtend
### block Footer
# NOTICE: The code generator will not replace the code in this block
### endblock Footer
|
{
"content_hash": "fe5fb9730b32c08a2aa89deb0b0e0f4f",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 175,
"avg_line_length": 31.858156028368793,
"alnum_prop": 0.6351291184327693,
"repo_name": "GeosoftInc/gxpy",
"id": "3828435b80337b9b3753f6c4bc90065a6b07a077",
"size": "4602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxapi/GXSHD.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
}
|
import pytest
from sqldoc import sqldoc
from sqldoc.parser.parser import Parser
from sqldoc.renderer.renderer import Renderer
@pytest.fixture
def mock_load_plugins(mocker):
mocker.patch('sqldoc.sqldoc.load_plugins')
sqldoc.load_plugins.return_value = [
Plugin('parser_plugin', ParserPlugin),
Plugin('renderer_plugin', RendererPlugin)
]
@pytest.fixture
def mock_wrong_plugin_type(mocker):
mocker.patch('sqldoc.sqldoc.load_plugins')
sqldoc.load_plugins.return_value = [
Plugin('parser_plugin', InvalidPlugin)
]
class Plugin:
def __init__(self, name, cls):
self.name = name
self.cls = cls
def load(self):
return self.get_component
def get_component(self):
return self.cls
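# The fake Plugin above mimics a two-step resolution (assumed to mirror how
# sqldoc resolves real entry points): `load()` returns a factory, and calling
# that factory yields the component class, e.g.
#     Plugin('parser_plugin', ParserPlugin).load()() is ParserPlugin  # True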
class InvalidPlugin:
def __init__(self, configuration):
pass
class ParserPlugin(Parser):
def __init__(self, configuration):
super().__init__(configuration)
def validate_configuration(self):
return True
def build_database_metadata(self, database_name):
return None
class RendererPlugin(Renderer):
def __init__(self, configuration):
super().__init__(configuration)
def validate_configuration(self):
return True
def render(self, databases, output_file):
pass
def test_plugin_added_to_map(mock_load_plugins):
parser_map = sqldoc.build_parser_map()
renderer_map = sqldoc.build_renderer_map()
default_parsers = len(parser_map)
default_renderers = len(renderer_map)
plugins = sqldoc.load_plugins()
sqldoc.register_plugins(parser_map, renderer_map, plugins)
assert len(parser_map) == default_parsers + 1
assert 'parser_plugin' in parser_map
assert len(renderer_map) == default_renderers + 1
assert 'renderer_plugin' in renderer_map
def test_plugin_conflict(mock_load_plugins):
parser_map = sqldoc.build_parser_map()
renderer_map = sqldoc.build_renderer_map()
plugins = sqldoc.load_plugins()
sqldoc.register_plugins(parser_map, renderer_map, plugins)
# registering same plugins twice should cause conflict
with pytest.raises(sqldoc.PluginConflictException):
sqldoc.register_plugins(parser_map, renderer_map, plugins)
def test_unknown_plugin_type(mock_wrong_plugin_type):
parser_map = sqldoc.build_parser_map()
renderer_map = sqldoc.build_renderer_map()
plugins = sqldoc.load_plugins()
with pytest.raises(sqldoc.InvalidPluginException):
sqldoc.register_plugins(parser_map, renderer_map, plugins)
|
{
"content_hash": "32eae8805e46086d08fe0a163e7d6c56",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 66,
"avg_line_length": 26.625,
"alnum_prop": 0.6889671361502347,
"repo_name": "korhner/sqldoc",
"id": "b0768b86aced4891e0840681544a163343b1a986",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sqldoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2270"
},
{
"name": "Python",
"bytes": "16595"
}
],
"symlink_target": ""
}
|
"""
User interface Controls for the layout.
"""
import time
from abc import ABCMeta, abstractmethod
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Hashable,
Iterable,
List,
NamedTuple,
Optional,
Union,
)
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.data_structures import Point
from prompt_toolkit.document import Document
from prompt_toolkit.filters import FilterOrBool, to_filter
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from prompt_toolkit.formatted_text.utils import (
fragment_list_to_text,
fragment_list_width,
split_lines,
)
from prompt_toolkit.lexers import Lexer, SimpleLexer
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from prompt_toolkit.search import SearchState
from prompt_toolkit.selection import SelectionType
from prompt_toolkit.utils import get_cwidth
from .processors import (
DisplayMultipleCursors,
HighlightIncrementalSearchProcessor,
HighlightSearchProcessor,
HighlightSelectionProcessor,
Processor,
TransformationInput,
merge_processors,
)
if TYPE_CHECKING:
from prompt_toolkit.key_binding.key_bindings import KeyBindingsBase
from prompt_toolkit.utils import Event
# The only two return values for a mouse handler are `None` and
# `NotImplemented`. For the type checker it's best to annotate this as
# `object`. (The consumer never expects a more specific instance: checking
# for NotImplemented can be done using `is NotImplemented`.)
NotImplementedOrNone = object
# Other non-working options are:
# * Optional[Literal[NotImplemented]]
# --> Doesn't work, Literal can't take an Any.
# * None
# --> Doesn't work. We can't assign the result of a function that
# returns `None` to a variable.
# * Any
# --> Works, but too broad.
__all__ = [
"BufferControl",
"SearchBufferControl",
"DummyControl",
"FormattedTextControl",
"UIControl",
"UIContent",
]
GetLinePrefixCallable = Callable[[int, int], AnyFormattedText]
class UIControl(metaclass=ABCMeta):
"""
Base class for all user interface controls.
"""
def reset(self) -> None:
# Default reset. (Doesn't have to be implemented.)
pass
def preferred_width(self, max_available_width: int) -> Optional[int]:
return None
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
return None
def is_focusable(self) -> bool:
"""
Tell whether this user control is focusable.
"""
return False
@abstractmethod
def create_content(self, width: int, height: int) -> "UIContent":
"""
Generate the content for this user control.
Returns a :class:`.UIContent` instance.
"""
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Handle mouse events.
When `NotImplemented` is returned, it means that the given event is not
handled by the `UIControl` itself. The `Window` or key bindings can
decide to handle this event as scrolling or changing focus.
:param mouse_event: `MouseEvent` instance.
"""
return NotImplemented
def move_cursor_down(self) -> None:
"""
Request to move the cursor down.
This happens when scrolling down and the cursor is completely at the
top.
"""
def move_cursor_up(self) -> None:
"""
Request to move the cursor up.
"""
def get_key_bindings(self) -> Optional["KeyBindingsBase"]:
"""
The key bindings that are specific for this user control.
Return a :class:`.KeyBindings` object if some key bindings are
specified, or `None` otherwise.
"""
def get_invalidate_events(self) -> Iterable["Event[object]"]:
"""
Return a list of `Event` objects. This can be a generator.
(The application collects all these events, in order to bind redraw
handlers to these events.)
"""
return []
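# Illustrative sketch (not part of prompt_toolkit): the smallest useful
# `UIControl` subclass. Only `create_content` is mandatory; it returns a
# `UIContent` whose `get_line` callable yields (style, text) fragments.
class _StaticLinesControl(UIControl):
    def __init__(self, lines: List[str]) -> None:
        self.lines = lines

    def create_content(self, width: int, height: int) -> "UIContent":
        def get_line(i: int) -> StyleAndTextTuples:
            return [("", self.lines[i])]

        return UIContent(get_line=get_line, line_count=len(self.lines))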
class UIContent:
"""
Content generated by a user control. This content consists of a list of
lines.
:param get_line: Callable that takes a line number and returns the current
line. This is a list of (style_str, text) tuples.
:param line_count: The number of lines.
:param cursor_position: a :class:`.Point` for the cursor position.
:param menu_position: a :class:`.Point` for the menu position.
:param show_cursor: Make the cursor visible.
"""
def __init__(
self,
get_line: Callable[[int], StyleAndTextTuples] = (lambda i: []),
line_count: int = 0,
cursor_position: Optional[Point] = None,
menu_position: Optional[Point] = None,
show_cursor: bool = True,
):
self.get_line = get_line
self.line_count = line_count
self.cursor_position = cursor_position or Point(x=0, y=0)
self.menu_position = menu_position
self.show_cursor = show_cursor
# Cache for line heights. Maps cache key -> height
self._line_heights_cache: Dict[Hashable, int] = {}
def __getitem__(self, lineno: int) -> StyleAndTextTuples:
" Make it iterable (iterate line by line). "
if lineno < self.line_count:
return self.get_line(lineno)
else:
raise IndexError
def get_height_for_line(
self,
lineno: int,
width: int,
get_line_prefix: Optional[GetLinePrefixCallable],
slice_stop: Optional[int] = None,
) -> int:
"""
Return the height that a given line would need if it is rendered in a
space with the given width (using line wrapping).
:param get_line_prefix: None or a `Window.get_line_prefix` callable
that returns the prefix to be inserted before this line.
:param slice_stop: Wrap only "line[:slice_stop]" and return that
partial result. This is needed for scrolling the window correctly
when line wrapping.
:returns: The computed height.
"""
# Instead of using `get_line_prefix` as key, we use render_counter
# instead. This is more reliable, because this function could still be
# the same, while the content would change over time.
key = get_app().render_counter, lineno, width, slice_stop
try:
return self._line_heights_cache[key]
except KeyError:
if width == 0:
height = 10 ** 8
else:
# Calculate line width first.
line = fragment_list_to_text(self.get_line(lineno))[:slice_stop]
text_width = get_cwidth(line)
if get_line_prefix:
# Add prefix width.
text_width += fragment_list_width(
to_formatted_text(get_line_prefix(lineno, 0))
)
                    # Slower path: compute the height when there's a line prefix.
height = 1
# Keep wrapping as long as the line doesn't fit.
# Keep adding new prefixes for every wrapped line.
while text_width > width:
height += 1
text_width -= width
fragments2 = to_formatted_text(
get_line_prefix(lineno, height - 1)
)
prefix_width = get_cwidth(fragment_list_to_text(fragments2))
if prefix_width >= width: # Prefix doesn't fit.
height = 10 ** 8
break
text_width += prefix_width
else:
# Fast path: compute height when there's no line prefix.
try:
quotient, remainder = divmod(text_width, width)
except ZeroDivisionError:
height = 10 ** 8
else:
if remainder:
quotient += 1 # Like math.ceil.
height = max(1, quotient)
# Cache and return
self._line_heights_cache[key] = height
return height
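# Illustrative sketch (not part of prompt_toolkit): for a plain line without
# a prefix, `get_height_for_line` computes ceil(text_width / width), so a
# 25-cell line in a 10-cell-wide window wraps onto 3 rows. (Assumes that
# `get_app()` falls back to a dummy application outside of a running app.)
def _wrapped_height_example() -> int:
    content = UIContent(get_line=lambda i: [("", "x" * 25)], line_count=1)
    return content.get_height_for_line(0, width=10, get_line_prefix=None)  # 3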
class FormattedTextControl(UIControl):
"""
Control that displays formatted text. This can be either plain text, an
    :class:`~prompt_toolkit.formatted_text.HTML` object, an
:class:`~prompt_toolkit.formatted_text.ANSI` object, a list of ``(style_str,
text)`` tuples or a callable that takes no argument and returns one of
those, depending on how you prefer to do the formatting. See
``prompt_toolkit.layout.formatted_text`` for more information.
(It's mostly optimized for rather small widgets, like toolbars, menus, etc...)
When this UI control has the focus, the cursor will be shown in the upper
left corner of this control by default. There are two ways for specifying
the cursor position:
- Pass a `get_cursor_position` function which returns a `Point` instance
with the current cursor position.
- If the (formatted) text is passed as a list of ``(style, text)`` tuples
and there is one that looks like ``('[SetCursorPosition]', '')``, then
this will specify the cursor position.
Mouse support:
The list of fragments can also contain tuples of three items, looking like:
(style_str, text, handler). When mouse support is enabled and the user
clicks on this fragment, then the given handler is called. That handler
    should accept a single `MouseEvent` argument and it should
either handle the event or return `NotImplemented` in case we want the
containing Window to handle this event.
:param focusable: `bool` or :class:`.Filter`: Tell whether this control is
focusable.
:param text: Text or formatted text to be displayed.
:param style: Style string applied to the content. (If you want to style
the whole :class:`~prompt_toolkit.layout.Window`, pass the style to the
:class:`~prompt_toolkit.layout.Window` instead.)
:param key_bindings: a :class:`.KeyBindings` object.
:param get_cursor_position: A callable that returns the cursor position as
a `Point` instance.
"""
def __init__(
self,
text: AnyFormattedText = "",
style: str = "",
focusable: FilterOrBool = False,
key_bindings: Optional["KeyBindingsBase"] = None,
show_cursor: bool = True,
modal: bool = False,
get_cursor_position: Optional[Callable[[], Optional[Point]]] = None,
) -> None:
self.text = text # No type check on 'text'. This is done dynamically.
self.style = style
self.focusable = to_filter(focusable)
# Key bindings.
self.key_bindings = key_bindings
self.show_cursor = show_cursor
self.modal = modal
self.get_cursor_position = get_cursor_position
#: Cache for the content.
self._content_cache: SimpleCache[Hashable, UIContent] = SimpleCache(maxsize=18)
self._fragment_cache: SimpleCache[int, StyleAndTextTuples] = SimpleCache(
maxsize=1
)
# Only cache one fragment list. We don't need the previous item.
# Render info for the mouse support.
self._fragments: Optional[StyleAndTextTuples] = None
def reset(self) -> None:
self._fragments = None
def is_focusable(self) -> bool:
return self.focusable()
def __repr__(self) -> str:
return "%s(%r)" % (self.__class__.__name__, self.text)
def _get_formatted_text_cached(self) -> StyleAndTextTuples:
"""
Get fragments, but only retrieve fragments once during one render run.
(This function is called several times during one rendering, because
we also need those for calculating the dimensions.)
"""
return self._fragment_cache.get(
get_app().render_counter, lambda: to_formatted_text(self.text, self.style)
)
def preferred_width(self, max_available_width: int) -> int:
"""
Return the preferred width for this control.
That is the width of the longest line.
"""
text = fragment_list_to_text(self._get_formatted_text_cached())
line_lengths = [get_cwidth(l) for l in text.split("\n")]
return max(line_lengths)
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
content = self.create_content(width, None)
return content.line_count
def create_content(self, width: int, height: Optional[int]) -> UIContent:
# Get fragments
fragments_with_mouse_handlers = self._get_formatted_text_cached()
fragment_lines_with_mouse_handlers = list(
split_lines(fragments_with_mouse_handlers)
)
# Strip mouse handlers from fragments.
fragment_lines: List[StyleAndTextTuples] = [
[(item[0], item[1]) for item in line]
for line in fragment_lines_with_mouse_handlers
]
# Keep track of the fragments with mouse handler, for later use in
# `mouse_handler`.
self._fragments = fragments_with_mouse_handlers
# If there is a `[SetCursorPosition]` in the fragment list, set the
# cursor position here.
def get_cursor_position(
fragment: str = "[SetCursorPosition]",
) -> Optional[Point]:
for y, line in enumerate(fragment_lines):
x = 0
for style_str, text, *_ in line:
if fragment in style_str:
return Point(x=x, y=y)
x += len(text)
return None
# If there is a `[SetMenuPosition]`, set the menu over here.
def get_menu_position() -> Optional[Point]:
return get_cursor_position("[SetMenuPosition]")
cursor_position = (self.get_cursor_position or get_cursor_position)()
# Create content, or take it from the cache.
key = (tuple(fragments_with_mouse_handlers), width, cursor_position)
def get_content() -> UIContent:
return UIContent(
get_line=lambda i: fragment_lines[i],
line_count=len(fragment_lines),
show_cursor=self.show_cursor,
cursor_position=cursor_position,
menu_position=get_menu_position(),
)
return self._content_cache.get(key, get_content)
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Handle mouse events.
        (When the fragment list contained mouse handlers and the user clicked
on any of these, the matching handler is called. This handler can still
return `NotImplemented` in case we want the
:class:`~prompt_toolkit.layout.Window` to handle this particular
event.)
"""
if self._fragments:
# Read the generator.
fragments_for_line = list(split_lines(self._fragments))
try:
fragments = fragments_for_line[mouse_event.position.y]
except IndexError:
return NotImplemented
else:
# Find position in the fragment list.
xpos = mouse_event.position.x
# Find mouse handler for this character.
count = 0
for item in fragments:
count += len(item[1])
if count >= xpos:
if len(item) >= 3:
# Handler found. Call it.
# (Handler can return NotImplemented, so return
# that result.)
handler = item[2] # type: ignore
return handler(mouse_event)
else:
break
# Otherwise, don't handle here.
return NotImplemented
def is_modal(self) -> bool:
return self.modal
def get_key_bindings(self) -> Optional["KeyBindingsBase"]:
return self.key_bindings
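# Illustrative sketch (not part of prompt_toolkit): a fragment may carry a
# mouse handler as its third item; `mouse_handler` above calls it with the
# `MouseEvent` and propagates a `NotImplemented` result to the Window.
def _make_clickable_label() -> FormattedTextControl:
    def on_click(mouse_event: MouseEvent) -> "NotImplementedOrNone":
        if mouse_event.event_type == MouseEventType.MOUSE_UP:
            return None  # Handled here.
        return NotImplemented  # Let the surrounding Window handle it.

    fragments: StyleAndTextTuples = [("class:label", "Click me", on_click)]
    return FormattedTextControl(text=fragments, focusable=True)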
class DummyControl(UIControl):
"""
A dummy control object that doesn't paint any content.
Useful for filling a :class:`~prompt_toolkit.layout.Window`. (The
`fragment` and `char` attributes of the `Window` class can be used to
define the filling.)
"""
def create_content(self, width: int, height: int) -> UIContent:
def get_line(i: int) -> StyleAndTextTuples:
return []
return UIContent(
get_line=get_line, line_count=100 ** 100
) # Something very big.
def is_focusable(self) -> bool:
return False
_ProcessedLine = NamedTuple(
"_ProcessedLine",
[
("fragments", StyleAndTextTuples),
("source_to_display", Callable[[int], int]),
("display_to_source", Callable[[int], int]),
],
)
class BufferControl(UIControl):
"""
Control for visualising the content of a :class:`.Buffer`.
:param buffer: The :class:`.Buffer` object to be displayed.
:param input_processors: A list of
:class:`~prompt_toolkit.layout.processors.Processor` objects.
:param include_default_input_processors: When True, include the default
processors for highlighting of selection, search and displaying of
multiple cursors.
:param lexer: :class:`.Lexer` instance for syntax highlighting.
:param preview_search: `bool` or :class:`.Filter`: Show search while
typing. When this is `True`, probably you want to add a
``HighlightIncrementalSearchProcessor`` as well. Otherwise only the
cursor position will move, but the text won't be highlighted.
:param focusable: `bool` or :class:`.Filter`: Tell whether this control is focusable.
    :param focus_on_click: Focus this buffer when it's clicked but not yet focused.
:param key_bindings: a :class:`.KeyBindings` object.
"""
def __init__(
self,
buffer: Optional[Buffer] = None,
input_processors: Optional[List[Processor]] = None,
include_default_input_processors: bool = True,
lexer: Optional[Lexer] = None,
preview_search: FilterOrBool = False,
focusable: FilterOrBool = True,
search_buffer_control: Union[
None, "SearchBufferControl", Callable[[], "SearchBufferControl"]
] = None,
menu_position: Optional[Callable] = None,
focus_on_click: FilterOrBool = False,
key_bindings: Optional["KeyBindingsBase"] = None,
):
self.input_processors = input_processors
self.include_default_input_processors = include_default_input_processors
self.default_input_processors = [
HighlightSearchProcessor(),
HighlightIncrementalSearchProcessor(),
HighlightSelectionProcessor(),
DisplayMultipleCursors(),
]
self.preview_search = to_filter(preview_search)
self.focusable = to_filter(focusable)
self.focus_on_click = to_filter(focus_on_click)
self.buffer = buffer or Buffer()
self.menu_position = menu_position
self.lexer = lexer or SimpleLexer()
self.key_bindings = key_bindings
self._search_buffer_control = search_buffer_control
#: Cache for the lexer.
#: Often, due to cursor movement, undo/redo and window resizing
        #: operations, it happens that, in a short time, the same document has to be
#: lexed. This is a fairly easy way to cache such an expensive operation.
self._fragment_cache: SimpleCache[
Hashable, Callable[[int], StyleAndTextTuples]
] = SimpleCache(maxsize=8)
self._last_click_timestamp: Optional[float] = None
self._last_get_processed_line: Optional[Callable[[int], _ProcessedLine]] = None
def __repr__(self) -> str:
return "<%s buffer=%r at %r>" % (self.__class__.__name__, self.buffer, id(self))
@property
def search_buffer_control(self) -> Optional["SearchBufferControl"]:
result: Optional[SearchBufferControl]
if callable(self._search_buffer_control):
result = self._search_buffer_control()
else:
result = self._search_buffer_control
assert result is None or isinstance(result, SearchBufferControl)
return result
@property
def search_buffer(self) -> Optional[Buffer]:
control = self.search_buffer_control
if control is not None:
return control.buffer
return None
@property
def search_state(self) -> SearchState:
"""
Return the `SearchState` for searching this `BufferControl`. This is
always associated with the search control. If one search bar is used
for searching multiple `BufferControls`, then they share the same
`SearchState`.
"""
search_buffer_control = self.search_buffer_control
if search_buffer_control:
return search_buffer_control.searcher_search_state
else:
return SearchState()
def is_focusable(self) -> bool:
return self.focusable()
def preferred_width(self, max_available_width: int) -> Optional[int]:
"""
This should return the preferred width.
Note: We don't specify a preferred width according to the content,
because it would be too expensive. Calculating the preferred
width can be done by calculating the longest line, but this would
require applying all the processors to each line. This is
unfeasible for a larger document, and doing it for small
documents only would result in inconsistent behaviour.
"""
return None
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
# Calculate the content height, if it was drawn on a screen with the
# given width.
height = 0
content = self.create_content(width, height=1) # Pass a dummy '1' as height.
# When line wrapping is off, the height should be equal to the amount
# of lines.
if not wrap_lines:
return content.line_count
# When the number of lines exceeds the max_available_height, just
# return max_available_height. No need to calculate anything.
if content.line_count >= max_available_height:
return max_available_height
for i in range(content.line_count):
height += content.get_height_for_line(i, width, get_line_prefix)
if height >= max_available_height:
return max_available_height
return height
def _get_formatted_text_for_line_func(
self, document: Document
) -> Callable[[int], StyleAndTextTuples]:
"""
Create a function that returns the fragments for a given line.
"""
# Cache using `document.text`.
def get_formatted_text_for_line() -> Callable[[int], StyleAndTextTuples]:
return self.lexer.lex_document(document)
key = (document.text, self.lexer.invalidation_hash())
return self._fragment_cache.get(key, get_formatted_text_for_line)
def _create_get_processed_line_func(
self, document: Document, width: int, height: int
) -> Callable[[int], _ProcessedLine]:
"""
Create a function that takes a line number of the current document and
returns a _ProcessedLine(processed_fragments, source_to_display, display_to_source)
tuple.
"""
# Merge all input processors together.
input_processors = self.input_processors or []
if self.include_default_input_processors:
input_processors = self.default_input_processors + input_processors
merged_processor = merge_processors(input_processors)
def transform(lineno: int, fragments: StyleAndTextTuples) -> _ProcessedLine:
" Transform the fragments for a given line number. "
# Get cursor position at this line.
def source_to_display(i: int) -> int:
""" X position from the buffer to the x position in the
processed fragment list. By default, we start from the 'identity'
operation. """
return i
transformation = merged_processor.apply_transformation(
TransformationInput(
self, document, lineno, source_to_display, fragments, width, height
)
)
return _ProcessedLine(
transformation.fragments,
transformation.source_to_display,
transformation.display_to_source,
)
def create_func() -> Callable[[int], _ProcessedLine]:
get_line = self._get_formatted_text_for_line_func(document)
cache: Dict[int, _ProcessedLine] = {}
def get_processed_line(i: int) -> _ProcessedLine:
try:
return cache[i]
except KeyError:
processed_line = transform(i, get_line(i))
cache[i] = processed_line
return processed_line
return get_processed_line
return create_func()
def create_content(
self, width: int, height: int, preview_search: bool = False
) -> UIContent:
"""
Create a UIContent.
"""
buffer = self.buffer
# Get the document to be shown. If we are currently searching (the
# search buffer has focus, and the preview_search filter is enabled),
# then use the search document, which has possibly a different
# text/cursor position.)
search_control = self.search_buffer_control
preview_now = preview_search or bool(
# Only if this feature is enabled.
self.preview_search()
and
# And something was typed in the associated search field.
search_control
and search_control.buffer.text
and
# And we are searching in this control. (Many controls can point to
# the same search field, like in Pyvim.)
get_app().layout.search_target_buffer_control == self
)
if preview_now and search_control is not None:
ss = self.search_state
document = buffer.document_for_search(
SearchState(
text=search_control.buffer.text,
direction=ss.direction,
ignore_case=ss.ignore_case,
)
)
else:
document = buffer.document
get_processed_line = self._create_get_processed_line_func(
document, width, height
)
self._last_get_processed_line = get_processed_line
def translate_rowcol(row: int, col: int) -> Point:
" Return the content column for this coordinate. "
return Point(x=get_processed_line(row).source_to_display(col), y=row)
def get_line(i: int) -> StyleAndTextTuples:
" Return the fragments for a given line number. "
fragments = get_processed_line(i).fragments
# Add a space at the end, because that is a possible cursor
# position. (When inserting after the input.) We should do this on
# all the lines, not just the line containing the cursor. (Because
# otherwise, line wrapping/scrolling could change when moving the
# cursor around.)
fragments = fragments + [("", " ")]
return fragments
content = UIContent(
get_line=get_line,
line_count=document.line_count,
cursor_position=translate_rowcol(
document.cursor_position_row, document.cursor_position_col
),
)
# If there is an auto completion going on, use that start point for a
# pop-up menu position. (But only when this buffer has the focus --
# there is only one place for a menu, determined by the focused buffer.)
if get_app().layout.current_control == self:
menu_position = self.menu_position() if self.menu_position else None
if menu_position is not None:
assert isinstance(menu_position, int)
menu_row, menu_col = buffer.document.translate_index_to_position(
menu_position
)
content.menu_position = translate_rowcol(menu_row, menu_col)
elif buffer.complete_state:
# Position for completion menu.
# Note: We use 'min', because the original cursor position could be
# behind the input string when the actual completion is for
# some reason shorter than the text we had before. (A completion
# can change and shorten the input.)
menu_row, menu_col = buffer.document.translate_index_to_position(
min(
buffer.cursor_position,
buffer.complete_state.original_document.cursor_position,
)
)
content.menu_position = translate_rowcol(menu_row, menu_col)
else:
content.menu_position = None
return content
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Mouse handler for this control.
"""
buffer = self.buffer
position = mouse_event.position
# Focus buffer when clicked.
if get_app().layout.current_control == self:
if self._last_get_processed_line:
processed_line = self._last_get_processed_line(position.y)
# Translate coordinates back to the cursor position of the
# original input.
xpos = processed_line.display_to_source(position.x)
index = buffer.document.translate_row_col_to_index(position.y, xpos)
# Set the cursor position.
if mouse_event.event_type == MouseEventType.MOUSE_DOWN:
buffer.exit_selection()
buffer.cursor_position = index
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
# When the cursor was moved to another place, select the text.
# (The >1 is actually a small but acceptable workaround for
# selecting text in Vi navigation mode. In navigation mode,
# the cursor can never be after the text, so the cursor
# will be repositioned automatically.)
if abs(buffer.cursor_position - index) > 1:
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position = index
# Select word around cursor on double click.
# Two MOUSE_UP events in a short timespan are considered a double click.
double_click = (
self._last_click_timestamp
and time.time() - self._last_click_timestamp < 0.3
)
self._last_click_timestamp = time.time()
if double_click:
start, end = buffer.document.find_boundaries_of_current_word()
buffer.cursor_position += start
buffer.start_selection(selection_type=SelectionType.CHARACTERS)
buffer.cursor_position += end - start
else:
# Don't handle scroll events here.
return NotImplemented
# Not focused, but focusing on click events.
else:
if (
self.focus_on_click()
and mouse_event.event_type == MouseEventType.MOUSE_UP
):
# Focus happens on mouseup. (If we did this on mousedown, the
# up event will be received at the point where this widget is
# focused and be handled anyway.)
get_app().layout.current_control = self
else:
return NotImplemented
return None
def move_cursor_down(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_down_position()
def move_cursor_up(self) -> None:
b = self.buffer
b.cursor_position += b.document.get_cursor_up_position()
def get_key_bindings(self) -> Optional["KeyBindingsBase"]:
"""
When additional key bindings are given. Return these.
"""
return self.key_bindings
def get_invalidate_events(self) -> Iterable["Event[object]"]:
"""
Return the Window invalidate events.
"""
# Whenever the buffer changes, the UI has to be updated.
yield self.buffer.on_text_changed
yield self.buffer.on_cursor_position_changed
yield self.buffer.on_completions_changed
yield self.buffer.on_suggestion_set
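# Illustrative sketch (not part of prompt_toolkit): wiring a `Buffer` into a
# `BufferControl`; the lexer defaults to `SimpleLexer`. A real application
# would wrap this control in a layout `Window`.
def _make_editor_control() -> BufferControl:
    buf = Buffer(document=Document("hello\nworld"))
    return BufferControl(buffer=buf, focusable=True)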
class SearchBufferControl(BufferControl):
"""
:class:`.BufferControl` which is used for searching another
:class:`.BufferControl`.
:param ignore_case: Search case insensitive.
"""
def __init__(
self,
buffer: Optional[Buffer] = None,
input_processors: Optional[List[Processor]] = None,
lexer: Optional[Lexer] = None,
focus_on_click: FilterOrBool = False,
key_bindings: Optional["KeyBindingsBase"] = None,
ignore_case: FilterOrBool = False,
):
super().__init__(
buffer=buffer,
input_processors=input_processors,
lexer=lexer,
focus_on_click=focus_on_click,
key_bindings=key_bindings,
)
# If this BufferControl is used as a search field for one or more other
        # BufferControls, then this represents the search state.
self.searcher_search_state = SearchState(ignore_case=ignore_case)
|
{
"content_hash": "722d744b82919d126759cd258aa80ad0",
"timestamp": "",
"source": "github",
"line_count": 939,
"max_line_length": 92,
"avg_line_length": 37.33759318423855,
"alnum_prop": 0.5952082144894466,
"repo_name": "sserrot/champion_relationships",
"id": "75c51753619b0af202dcec2364f324471c5f3a45",
"size": "35060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/prompt_toolkit/layout/controls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
}
|
"""Definition and setup of the Omnilogic Sensors for Home Assistant."""
from typing import Any
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
ELECTRIC_POTENTIAL_MILLIVOLT,
MASS_GRAMS,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLUME_LITERS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .common import OmniLogicEntity, OmniLogicUpdateCoordinator, check_guard
from .const import COORDINATOR, DEFAULT_PH_OFFSET, DOMAIN, PUMP_TYPES
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the sensor platform."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
entities = []
for item_id, item in coordinator.data.items():
id_len = len(item_id)
item_kind = item_id[-2]
entity_settings = SENSOR_TYPES.get((id_len, item_kind))
if not entity_settings:
continue
for entity_setting in entity_settings:
entity_classes: dict[str, type] = entity_setting["entity_classes"]
for state_key, entity_class in entity_classes.items():
if check_guard(state_key, item, entity_setting):
continue
entity = entity_class(
coordinator=coordinator,
state_key=state_key,
name=entity_setting["name"],
kind=entity_setting["kind"],
item_id=item_id,
device_class=entity_setting["device_class"],
icon=entity_setting["icon"],
unit=entity_setting["unit"],
)
entities.append(entity)
async_add_entities(entities)
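# How the lookup above works: each item_id is a tuple path into the
# coordinator data, and SENSOR_TYPES (defined at the bottom of this module)
# is keyed on (len(item_id), item_id[-2]). For example, a hypothetical
# 6-element item_id whose second-to-last element is "Filter" resolves to the
# (6, "Filter") entry and yields one sensor per entity class listed there.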
class OmnilogicSensor(OmniLogicEntity, SensorEntity):
"""Defines an Omnilogic sensor entity."""
def __init__(
self,
coordinator: OmniLogicUpdateCoordinator,
kind: str,
name: str,
device_class: str,
icon: str,
unit: str,
item_id: tuple,
state_key: str,
) -> None:
"""Initialize Entities."""
super().__init__(
coordinator=coordinator,
kind=kind,
name=name,
item_id=item_id,
icon=icon,
)
backyard_id = item_id[:2]
unit_type = coordinator.data[backyard_id].get("Unit-of-Measurement")
self._unit_type = unit_type
self._device_class = device_class
self._unit = unit
self._state_key = state_key
@property
def device_class(self):
"""Return the device class of the entity."""
return self._device_class
@property
def native_unit_of_measurement(self):
"""Return the right unit of measure."""
return self._unit
class OmniLogicTemperatureSensor(OmnilogicSensor):
"""Define an OmniLogic Temperature (Air/Water) Sensor."""
@property
def native_value(self):
"""Return the state for the temperature sensor."""
sensor_data = self.coordinator.data[self._item_id][self._state_key]
hayward_state = sensor_data
hayward_unit_of_measure = TEMP_FAHRENHEIT
state = sensor_data
if self._unit_type == "Metric":
hayward_state = round((int(hayward_state) - 32) * 5 / 9, 1)
hayward_unit_of_measure = TEMP_CELSIUS
if int(sensor_data) == -1:
hayward_state = None
state = None
self._attrs["hayward_temperature"] = hayward_state
self._attrs["hayward_unit_of_measure"] = hayward_unit_of_measure
self._unit = TEMP_FAHRENHEIT
return state
class OmniLogicPumpSpeedSensor(OmnilogicSensor):
"""Define an OmniLogic Pump Speed Sensor."""
@property
def native_value(self):
"""Return the state for the pump speed sensor."""
pump_type = PUMP_TYPES[
self.coordinator.data[self._item_id].get(
"Filter-Type", self.coordinator.data[self._item_id].get("Type", {})
)
]
        pump_speed = self.coordinator.data[self._item_id][self._state_key]
        # Default for pump types/speeds not matched below, so `state` is
        # never unbound.
        state = None
if pump_type == "VARIABLE":
self._unit = PERCENTAGE
state = pump_speed
elif pump_type == "DUAL":
self._unit = None
if pump_speed == 0:
state = "off"
elif pump_speed == self.coordinator.data[self._item_id].get(
"Min-Pump-Speed"
):
state = "low"
elif pump_speed == self.coordinator.data[self._item_id].get(
"Max-Pump-Speed"
):
state = "high"
self._attrs["pump_type"] = pump_type
return state
class OmniLogicSaltLevelSensor(OmnilogicSensor):
"""Define an OmniLogic Salt Level Sensor."""
@property
def native_value(self):
"""Return the state for the salt level sensor."""
salt_return = self.coordinator.data[self._item_id][self._state_key]
unit_of_measurement = self._unit
if self._unit_type == "Metric":
salt_return = round(int(salt_return) / 1000, 2)
unit_of_measurement = f"{MASS_GRAMS}/{VOLUME_LITERS}"
self._unit = unit_of_measurement
return salt_return
class OmniLogicChlorinatorSensor(OmnilogicSensor):
"""Define an OmniLogic Chlorinator Sensor."""
@property
def native_value(self):
"""Return the state for the chlorinator sensor."""
state = self.coordinator.data[self._item_id][self._state_key]
return state
class OmniLogicPHSensor(OmnilogicSensor):
"""Define an OmniLogic pH Sensor."""
@property
def native_value(self):
"""Return the state for the pH sensor."""
ph_state = self.coordinator.data[self._item_id][self._state_key]
if ph_state == 0:
ph_state = None
else:
ph_state = float(ph_state) + float(
self.coordinator.config_entry.options.get(
"ph_offset", DEFAULT_PH_OFFSET
)
)
return ph_state
class OmniLogicORPSensor(OmnilogicSensor):
"""Define an OmniLogic ORP Sensor."""
def __init__(
self,
coordinator: OmniLogicUpdateCoordinator,
state_key: str,
name: str,
kind: str,
item_id: tuple,
device_class: str,
icon: str,
unit: str,
) -> None:
"""Initialize the sensor."""
super().__init__(
coordinator=coordinator,
kind=kind,
name=name,
device_class=device_class,
icon=icon,
unit=unit,
item_id=item_id,
state_key=state_key,
)
@property
def native_value(self):
"""Return the state for the ORP sensor."""
orp_state = int(self.coordinator.data[self._item_id][self._state_key])
if orp_state == -1:
orp_state = None
return orp_state
SENSOR_TYPES: dict[tuple[int, str], list[dict[str, Any]]] = {
(2, "Backyard"): [
{
"entity_classes": {"airTemp": OmniLogicTemperatureSensor},
"name": "Air Temperature",
"kind": "air_temperature",
"device_class": SensorDeviceClass.TEMPERATURE,
"icon": None,
"unit": TEMP_FAHRENHEIT,
"guard_condition": [{}],
},
],
(4, "BOWS"): [
{
"entity_classes": {"waterTemp": OmniLogicTemperatureSensor},
"name": "Water Temperature",
"kind": "water_temperature",
"device_class": SensorDeviceClass.TEMPERATURE,
"icon": None,
"unit": TEMP_FAHRENHEIT,
"guard_condition": [{}],
},
],
(6, "Filter"): [
{
"entity_classes": {"filterSpeed": OmniLogicPumpSpeedSensor},
"name": "Speed",
"kind": "filter_pump_speed",
"device_class": None,
"icon": "mdi:speedometer",
"unit": PERCENTAGE,
"guard_condition": [
{"Filter-Type": "FMT_SINGLE_SPEED"},
],
},
],
(6, "Pumps"): [
{
"entity_classes": {"pumpSpeed": OmniLogicPumpSpeedSensor},
"name": "Pump Speed",
"kind": "pump_speed",
"device_class": None,
"icon": "mdi:speedometer",
"unit": PERCENTAGE,
"guard_condition": [
{"Type": "PMP_SINGLE_SPEED"},
],
},
],
(6, "Chlorinator"): [
{
"entity_classes": {"Timed-Percent": OmniLogicChlorinatorSensor},
"name": "Setting",
"kind": "chlorinator",
"device_class": None,
"icon": "mdi:gauge",
"unit": PERCENTAGE,
"guard_condition": [
{
"Shared-Type": "BOW_SHARED_EQUIPMENT",
"status": "0",
},
{
"operatingMode": "2",
},
],
},
{
"entity_classes": {"avgSaltLevel": OmniLogicSaltLevelSensor},
"name": "Salt Level",
"kind": "salt_level",
"device_class": None,
"icon": "mdi:gauge",
"unit": CONCENTRATION_PARTS_PER_MILLION,
"guard_condition": [
{
"Shared-Type": "BOW_SHARED_EQUIPMENT",
"status": "0",
},
],
},
],
(6, "CSAD"): [
{
"entity_classes": {"ph": OmniLogicPHSensor},
"name": "pH",
"kind": "csad_ph",
"device_class": None,
"icon": "mdi:gauge",
"unit": "pH",
"guard_condition": [
{"ph": ""},
],
},
{
"entity_classes": {"orp": OmniLogicORPSensor},
"name": "ORP",
"kind": "csad_orp",
"device_class": None,
"icon": "mdi:gauge",
"unit": ELECTRIC_POTENTIAL_MILLIVOLT,
"guard_condition": [
{"orp": ""},
],
},
],
}
|
{
"content_hash": "cd1e5fbf086ffe108599dcda717b157f",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 84,
"avg_line_length": 29.183333333333334,
"alnum_prop": 0.5242718446601942,
"repo_name": "nkgilley/home-assistant",
"id": "04bb1abf3e8cb5d0dbb2d4db3eb833bbf0f75064",
"size": "10506",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/omnilogic/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from numpy import mean
from numpy import std
from ml.GradientDescent import *
from logic import DataConvertor, Parser
URL_COPYPASTE_AVTOVAZ_TEST = 'http://spb.am.ru/all/search/?p465=87&p1009=1602&p1129ge=1900&p1129le=2014&p2le=9000000&p1145[]=13,29'
def feature_normalize(data):
mileage_mean = mean([int(x) for x in data['mileage']])
mileage_sigma = std([int(x) for x in data['mileage']])
data['mileage'] = [(int(x) - mileage_mean) / mileage_sigma for x in data['mileage']]
price_mean = mean([int(x) for x in data['price']])
price_sigma = std([int(x) for x in data['price']])
data['price'] = [(int(x) - price_mean) / price_sigma for x in data['price']]
def feature_scaling(data):
mileage_max = max([int(x) for x in data['mileage']])
data['mileage'] = [(int(x) - mileage_max / 2.0) / mileage_max for x in data['mileage']]
price_max = max([int(x) for x in data['price']])
data['price'] = [(int(x) - price_max / 2.0) / price_max for x in data['price']]
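# Worked example of the z-score normalization above: for mileages
# [10000, 20000, 30000] the mean is 20000 and the population standard
# deviation (numpy's default, ddof=0) is ~8164.97, so the normalized
# values come out to roughly [-1.2247, 0.0, 1.2247].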
def main():
auto_name = 'VAZ'
page_parser = Parser('http://msk.am.ru/all/search/?p465=87&p1009=1602&p739=1%2C2')
data = page_parser.parse_auto()
data_convertor = DataConvertor(data)
    # feature_normalize(data)
data_convertor.print_data(True)
data_convertor.save_data('C:\\data\\' + auto_name, False)
feature_matrix = data_convertor.convert_features_into_matrix()
answer_matrix = data_convertor.convert_answers_into_matrix()
theta_matrix, history = GradientDescent(feature_matrix, answer_matrix, 0.01, 1000)
print(theta_matrix)
if __name__ == '__main__':
main()
|
{
"content_hash": "994572a586248bf57604fe8755acf05f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 131,
"avg_line_length": 41.17948717948718,
"alnum_prop": 0.6569115815691158,
"repo_name": "YuriShporhun/TheCarMonitor",
"id": "c53c6d699887d1de462b06d304cdd05f46e56554",
"size": "1650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logic/Main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17431"
}
],
"symlink_target": ""
}
|
"""
Standalone file utils.
Nothing in this module should have any knowledge of config or the layout
and structure of the site and pages in the site.
"""
from __future__ import unicode_literals
import logging
import markdown
import os
import pkg_resources
import shutil
import re
import sys
import yaml
import fnmatch
from mkdocs import toc, exceptions
try: # pragma: no cover
from urllib.parse import urlparse, urlunparse, urljoin # noqa
from urllib.request import pathname2url # noqa
from collections import UserDict # noqa
except ImportError: # pragma: no cover
from urlparse import urlparse, urlunparse, urljoin # noqa
from urllib import pathname2url # noqa
from UserDict import UserDict # noqa
PY3 = sys.version_info[0] == 3
if PY3: # pragma: no cover
string_types = str, # noqa
text_type = str # noqa
else: # pragma: no cover
string_types = basestring, # noqa
text_type = unicode # noqa
log = logging.getLogger(__name__)
def yaml_load(source, loader=yaml.Loader):
"""
Wrap PyYaml's loader so we can extend it to suit our needs.
Load all strings as unicode.
http://stackoverflow.com/a/2967461/3609487
"""
def construct_yaml_str(self, node):
"""
Override the default string handling function to always return
unicode objects.
"""
return self.construct_scalar(node)
class Loader(loader):
"""
Define a custom loader derived from the global loader to leave the
global loader unaltered.
"""
# Attach our unicode constructor to our custom loader ensuring all strings
# will be unicode on translation.
Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
try:
return yaml.load(source, Loader)
finally:
# TODO: Remove this when external calls are properly cleaning up file
# objects. Some mkdocs internal calls, sometimes in test lib, will
# load configs with a file object but never close it. On some
# systems, if a delete action is performed on that file without Python
# closing that object, there will be an access error. This will
# process the file and close it as there should be no more use for the
# file once we process the yaml content.
if hasattr(source, 'close'):
source.close()
def modified_time(file_path):
"""
    Return the modified time of the supplied file. If the file does not exist, zero is returned.
see build_pages for use.
"""
if os.path.exists(file_path):
return os.path.getmtime(file_path)
else:
return 0.0
def reduce_list(data_set):
""" Reduce duplicate items in a list and preserve order """
seen = set()
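    # set.add() returns None, so `not seen.add(item)` is truthy exactly once
    # per distinct item, e.g. reduce_list([1, 2, 1, 3, 2]) == [1, 2, 3].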
return [item for item in data_set if
item not in seen and not seen.add(item)]
def copy_file(source_path, output_path):
"""
Copy source_path to output_path, making sure any parent directories exist.
"""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
shutil.copy(source_path, output_path)
def write_file(content, output_path):
"""
Write content to output_path, making sure any parent directories exist.
"""
output_dir = os.path.dirname(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_path, 'wb') as f:
f.write(content)
def clean_directory(directory):
"""
Remove the content of a directory recursively but not the directory itself.
"""
if not os.path.exists(directory):
return
for entry in os.listdir(directory):
# Don't remove hidden files from the directory. We never copy files
# that are hidden, so we shouldn't delete them either.
if entry.startswith('.'):
continue
path = os.path.join(directory, entry)
if os.path.isdir(path):
shutil.rmtree(path, True)
else:
os.unlink(path)
def copy_media_files(from_dir, to_dir, exclude=None, dirty=False):
"""
Recursively copy all files except markdown and exclude[ed] files into another directory.
`exclude` accepts a list of Unix shell-style wildcards (`['*.py', '*.pyc']`).
Note that `exclude` only operates on file names, not directories.
"""
for (source_dir, dirnames, filenames) in os.walk(from_dir, followlinks=True):
relative_path = os.path.relpath(source_dir, from_dir)
output_dir = os.path.normpath(os.path.join(to_dir, relative_path))
# Filter file names using Unix pattern matching
# Always filter file names starting with a '.'
exclude_patterns = ['.*']
exclude_patterns.extend(exclude or [])
for pattern in exclude_patterns:
filenames = [f for f in filenames if not fnmatch.fnmatch(f, pattern)]
# Filter the dirnames that start with a '.' and update the list in
# place to prevent us walking these.
dirnames[:] = [d for d in dirnames if not d.startswith('.')]
for filename in filenames:
if not is_markdown_file(filename):
source_path = os.path.join(source_dir, filename)
output_path = os.path.join(output_dir, filename)
# Do not copy when using --dirty if the file has not been modified
if dirty and (modified_time(source_path) < modified_time(output_path)):
continue
copy_file(source_path, output_path)
def get_html_path(path):
"""
Map a source file path to an output html path.
Paths like 'index.md' will be converted to 'index.html'
Paths like 'about.md' will be converted to 'about/index.html'
Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html'
"""
path = os.path.splitext(path)[0]
if os.path.basename(path) == 'index':
return path + '.html'
return "/".join((path, 'index.html'))
def get_url_path(path, use_directory_urls=True):
"""
Map a source file path to an output html path.
Paths like 'index.md' will be converted to '/'
Paths like 'about.md' will be converted to '/about/'
Paths like 'api-guide/core.md' will be converted to '/api-guide/core/'
    If `use_directory_urls` is `False`, returned URLs will include a trailing
`index.html` rather than just returning the directory path.
"""
path = get_html_path(path)
url = '/' + path.replace(os.path.sep, '/')
if use_directory_urls:
return url[:-len('index.html')]
return url
def is_homepage(path):
return os.path.splitext(path)[0] == 'index'
def is_markdown_file(path):
"""
Return True if the given file path is a Markdown file.
http://superuser.com/questions/249436/file-extension-for-markdown-files
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.markdown',
'.mdown',
'.mkdn',
'.mkd',
'.md',
]
def is_css_file(path):
"""
Return True if the given file path is a CSS file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.css',
]
def is_javascript_file(path):
"""
Return True if the given file path is a Javascript file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.js',
'.javascript'
]
def is_html_file(path):
"""
Return True if the given file path is an HTML file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.html',
'.htm',
]
def is_template_file(path):
"""
Return True if the given file path is an HTML file.
"""
ext = os.path.splitext(path)[1].lower()
return ext in [
'.html',
'.htm',
'.xml',
]
def create_media_urls(nav, path_list):
"""
Return a list of URLs that have been processed correctly for inclusion in
a page.
"""
final_urls = []
for path in path_list:
# Allow links to fully qualified URL's
parsed = urlparse(path)
if parsed.netloc:
final_urls.append(path)
continue
# We must be looking at a local path.
url = path_to_url(path)
relative_url = '%s/%s' % (nav.url_context.make_relative('/').rstrip('/'), url)
final_urls.append(relative_url)
return final_urls
def create_relative_media_url(nav, url):
"""
For a current page, create a relative url based on the given URL.
On index.md (which becomes /index.html):
image.png -> ./image.png
/image.png -> ./image.png
On sub/page.md (which becomes /sub/page/index.html):
image.png -> ../image.png
/image.png -> ../../image.png
On sub/index.md (which becomes /sub/index.html):
image.png -> ./image.png
/image.png -> ./image.png
"""
# Allow links to fully qualified URL's
parsed = urlparse(url)
if parsed.netloc:
return url
# If the URL we are looking at starts with a /, then it should be
# considered as absolute and will be 'relative' to the root.
if url.startswith('/'):
base = '/'
url = url[1:]
else:
base = nav.url_context.base_path
relative_base = nav.url_context.make_relative(base)
if relative_base == "." and url.startswith("./"):
relative_url = url
else:
relative_url = '%s/%s' % (relative_base, url)
# TODO: Fix this, this is a hack. Relative urls are not being calculated
# correctly for images in the same directory as the markdown. I think this
# is due to us moving it into a directory with index.html, but I'm not sure
# win32 platform uses backslash "\". eg. "\level1\level2"
notindex = re.match(r'.*(?:\\|/)index.md$', nav.file_context.current_file) is None
if notindex and nav.url_context.base_path != '/' and relative_url.startswith("./"):
relative_url = ".%s" % relative_url
return relative_url
def path_to_url(path):
"""Convert a system path to a URL."""
if os.path.sep == '/':
return path
if sys.version_info < (3, 0):
path = path.encode('utf8')
return pathname2url(path)
def convert_markdown(markdown_source, extensions=None, extension_configs=None):
"""
Convert the Markdown source file to HTML content, and additionally
return the parsed table of contents, and a dictionary of any metadata
that was specified in the Markdown file.
`extensions` is an optional sequence of Python Markdown extensions to add
to the default set.
"""
md = markdown.Markdown(
extensions=extensions or [],
extension_configs=extension_configs or {}
)
html_content = md.convert(markdown_source)
    # On completely blank markdown files, no Meta or toc properties are added
# to the generated document.
meta = getattr(md, 'Meta', {})
toc_html = getattr(md, 'toc', '')
# Post process the generated table of contents into a data structure
table_of_contents = toc.TableOfContents(toc_html)
return (html_content, table_of_contents, meta)
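# Illustrative sketch (not part of mkdocs): a minimal call with the default
# extension set; `toc_obj` avoids shadowing the imported `toc` module.
#     html, toc_obj, meta = convert_markdown("# Title\n\nBody text.")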
def get_theme_dir(name):
""" Return the directory of an installed theme by name. """
theme = get_themes()[name]
return os.path.dirname(os.path.abspath(theme.load().__file__))
def get_themes():
""" Return a dict of all installed themes as (name, entry point) pairs. """
themes = {}
builtins = pkg_resources.get_entry_map(dist='mkdocs', group='mkdocs.themes')
for theme in pkg_resources.iter_entry_points(group='mkdocs.themes'):
if theme.name in builtins and theme.dist.key != 'mkdocs':
raise exceptions.ConfigurationError(
"The theme {0} is a builtin theme but {1} provides a theme "
"with the same name".format(theme.name, theme.dist.key))
elif theme.name in themes:
multiple_packages = [themes[theme.name].dist.key, theme.dist.key]
log.warning("The theme %s is provided by the Python packages "
"'%s'. The one in %s will be used.",
theme.name, ','.join(multiple_packages), theme.dist.key)
themes[theme.name] = theme
return themes
def get_theme_names():
"""Return a list of all installed themes by name."""
return get_themes().keys()
def filename_to_title(filename):
title = os.path.splitext(filename)[0]
title = title.replace('-', ' ').replace('_', ' ')
# Capitalize if the filename was all lowercase, otherwise leave it as-is.
if title.lower() == title:
title = title.capitalize()
return title
def dirname_to_title(dirname):
title = dirname
title = title.replace('-', ' ').replace('_', ' ')
# Capitalize if the dirname was all lowercase, otherwise leave it as-is.
if title.lower() == title:
title = title.capitalize()
return title
def find_or_create_node(branch, key):
"""
    Given a list, look for a dictionary with a key matching `key` and return its
value. If it doesn't exist, create it with the value of an empty list and
return that.
"""
for node in branch:
if not isinstance(node, dict):
continue
if key in node:
return node[key]
new_branch = []
node = {key: new_branch}
branch.append(node)
return new_branch
def nest_paths(paths):
"""
Given a list of paths, convert them into a nested structure that will match
the pages config.
"""
nested = []
for path in paths:
if os.path.sep not in path:
nested.append(path)
continue
directory, _ = os.path.split(path)
parts = directory.split(os.path.sep)
branch = nested
for part in parts:
part = dirname_to_title(part)
branch = find_or_create_node(branch, part)
branch.append(path)
return nested
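# Worked example for nest_paths (a sketch; the sample paths are made up and a
# POSIX path separator is assumed):
#
#   nest_paths(['index.md', 'user-guide/install.md', 'user-guide/config.md'])
#   # -> ['index.md',
#   #     {'User guide': ['user-guide/install.md', 'user-guide/config.md']}]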
|
{
"content_hash": "e01736f437656f31722f65d7f5c97c78",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 96,
"avg_line_length": 29.605809128630707,
"alnum_prop": 0.6124036440084093,
"repo_name": "lukfor/mkdocs",
"id": "eb9f7a42dae7b3897d349343f91bf8240aae95ee",
"size": "14287",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkdocs/utils/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "32758"
},
{
"name": "HTML",
"bytes": "17882"
},
{
"name": "JavaScript",
"bytes": "20976"
},
{
"name": "Python",
"bytes": "185783"
}
],
"symlink_target": ""
}
|
from urlmapper import settings
from .models import URLMap
def get_mapped_url(key, request=None):
"""
Return the URL for a given key, or None if one does not exist.
"""
if key not in settings.URLMAPPER_KEYS:
if settings.URLMAPPER_RAISE_EXCEPTION:
raise KeyError(
"Key '{key}' does not exist in settings.URLMAPPER_KEYS".format(
key=key
)
)
return ''
if key in settings.URLMAPPER_FUNCTIONS:
try:
try:
return settings.URLMAPPER_FUNCTIONS[key](request)
except TypeError:
return settings.URLMAPPER_FUNCTIONS[key]()
except Exception as e:
if settings.URLMAPPER_RAISE_EXCEPTION:
raise e
return ''
try:
return URLMap.objects.get(key=key).get_url()
except URLMap.DoesNotExist:
return ''
def check_mapped_url(key):
"""
Check whether a URL is mapped.
"""
return bool(
key in settings.URLMAPPER_KEYS
and (
key in settings.URLMAPPER_FUNCTIONS
or URLMap.objects.filter(key=key).exists()
)
and get_mapped_url(key)
)
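# Usage sketch (illustrative; the 'contact' key and the settings values below
# are assumptions, not part of this module, and running this requires a
# configured Django project):
#
#   # settings.py
#   URLMAPPER_KEYS = ['contact']
#   URLMAPPER_FUNCTIONS = {'contact': lambda request: '/contact-us/'}
#
#   # somewhere in view code
#   from urlmapper.helpers import get_mapped_url
#   get_mapped_url('contact', request)  # -> '/contact-us/'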
|
{
"content_hash": "bcaa1dcc9a515d3c926503d7c864fb6c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 26.361702127659573,
"alnum_prop": 0.5544794188861986,
"repo_name": "TwigWorld/django-url-mapper",
"id": "7fa84e3799006bf74e10d70e9bd02573c8da1751",
"size": "1239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urlmapper/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22718"
}
],
"symlink_target": ""
}
|
"""
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core._ufunc_config import errstate
from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
@set_module('numpy')
class MachAr(object):
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
added to 1.0, gives something different from 1.0
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating point number ``beta**minexp`` (the smallest [in
magnitude] usable floating value).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
In ``range(6)``, information on what kind of rounding is done
in addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
Same as `xmin`.
huge : float
Same as `xmax`.
precision : float
``- int(-log10(eps))``
resolution : float
``- 10**(-precision)``
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
"""
def __init__(self, float_conv=float,int_conv=int,
float_to_float=float,
float_to_str=lambda v:'%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running architecture.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp-a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp-a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp-tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp-one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp-one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp*one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y*y
a = z*one # Check here for underflow
temp = z*t
if any(a+a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1*beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1*beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax*one != xmax):
xmax = one - beta*epsneg
xmax = xmax / (xmin*beta*beta*beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
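# Sanity-check sketch (not part of the original module): for IEEE-754 double
# precision, the empirically determined parameters should agree with
# numpy.finfo, e.g. eps == 2.0**-52 and ibeta == 2.
def _demo_machar_vs_finfo():
    import numpy as np
    machar = MachAr()
    return machar.ibeta == 2 and machar.eps == np.finfo(float).eps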
|
{
"content_hash": "ca79bfee3dbb2d7fbb1e1e2b86d5522f",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 88,
"avg_line_length": 31.569767441860463,
"alnum_prop": 0.49806629834254146,
"repo_name": "jorisvandenbossche/numpy",
"id": "202580bdb0649a6f3995c126f081df97f8423c41",
"size": "10860",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "numpy/core/machar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9068647"
},
{
"name": "C++",
"bytes": "189527"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8307898"
},
{
"name": "Shell",
"bytes": "8482"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import optimizer
_default_hyperparam = optimizer.Hyperparameter()
_default_hyperparam.lr = 0.01
_default_hyperparam.momentum = 0.9
class MomentumSGDRule(optimizer.UpdateRule):
"""Update rule for the classical momentum SGD.
See :class:`~chainer.optimizers.MomentumSGD` for the default values of the
hyperparameters.
Args:
parent_hyperparam (~chainer.optimizer.Hyperparameter): Hyperparameter
that provides the default values.
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, parent_hyperparam=None, lr=None, momentum=None):
super(MomentumSGDRule, self).__init__(
parent_hyperparam or _default_hyperparam)
if lr is not None:
self.hyperparam.lr = lr
if momentum is not None:
self.hyperparam.momentum = momentum
def init_state(self, param):
xp = cuda.get_array_module(param.data)
with cuda.get_device_from_array(param.data):
self.state['v'] = xp.zeros_like(param.data)
# For iDeep
if intel64.inputs_all_ready((self.state['v'],)):
self.state['v'] = intel64.ideep.array(
self.state['v'], itype=intel64.ideep.wgt_array)
def update_core_cpu(self, param):
grad = param.grad
if grad is None:
return
v = self.state['v']
if isinstance(v, intel64.mdarray):
v.inplace_axpby(self.hyperparam.momentum, -
self.hyperparam.lr, grad)
param.data += v
else:
v *= self.hyperparam.momentum
v -= self.hyperparam.lr * grad
param.data += v
def update_core_gpu(self, param):
grad = param.grad
if grad is None:
return
cuda.elementwise(
'T grad, T lr, T momentum',
'T param, T v',
'''v = momentum * v - lr * grad;
param += v;''',
'momentum_sgd')(
grad, self.hyperparam.lr, self.hyperparam.momentum,
param.data, self.state['v'])
class MomentumSGD(optimizer.GradientMethod):
"""Momentum SGD optimizer.
Args:
lr (float): Learning rate.
momentum (float): Exponential decay rate of the first order moment.
"""
def __init__(self, lr=_default_hyperparam.lr,
momentum=_default_hyperparam.momentum):
super(MomentumSGD, self).__init__()
self.hyperparam.lr = lr
self.hyperparam.momentum = momentum
lr = optimizer.HyperparameterProxy('lr')
momentum = optimizer.HyperparameterProxy('momentum')
def create_update_rule(self):
return MomentumSGDRule(self.hyperparam)
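# A minimal sketch of the update rule in isolation (v = momentum * v - lr * grad;
# param += v) using plain numpy, independent of Chainer. The values are made up.
def _demo_momentum_update():
    import numpy as np
    lr, momentum = 0.01, 0.9
    param = np.array([1.0, 2.0])
    grad = np.array([0.5, -0.5])
    v = np.zeros_like(param)
    v = momentum * v - lr * grad   # first step: v == [-0.005, 0.005]
    param += v                     # param == [0.995, 2.005]
    return param, v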
|
{
"content_hash": "3572a779f095dbfd761771d5fbedbdc0",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 78,
"avg_line_length": 31.119565217391305,
"alnum_prop": 0.5990220048899756,
"repo_name": "aonotas/chainer",
"id": "b1b298fd767713e2df49ab932bf38914527a9790",
"size": "2863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chainer/optimizers/momentum_sgd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "3357320"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 1667a11b3a01
Revises: None
Create Date: 2015-12-09 14:42:40.773000
"""
# revision identifiers, used by Alembic.
revision = '1667a11b3a01'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('subscribers',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('phone_number', sa.String(), nullable=False),
sa.Column('subscribed', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('subscribers')
### end Alembic commands ###
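# Typical invocation (a sketch; assumes a configured alembic.ini):
#
#   alembic upgrade head     # applies this migration, creating 'subscribers'
#   alembic downgrade base   # reverts it, dropping the table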
|
{
"content_hash": "816bbac6a9f042cdbdcdb9b672b31713",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 24.129032258064516,
"alnum_prop": 0.6764705882352942,
"repo_name": "devopsec/threatdetectionservice",
"id": "b81090c8cc74f05b770025646d2610a7da2026ca",
"size": "748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "RESOURCES/marketing-notifications-flask-master/migrations/versions/1667a11b3a01_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "73"
},
{
"name": "CSS",
"bytes": "60463"
},
{
"name": "HTML",
"bytes": "73698"
},
{
"name": "JavaScript",
"bytes": "6500"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "166187"
},
{
"name": "Shell",
"bytes": "24573"
}
],
"symlink_target": ""
}
|
from itertools import count
from collections import defaultdict
import logging
from typing import List, Optional
import networkx
import ailment
from ailment import Block
from ...utils.graph import dfs_back_edges, subgraph_between_nodes, dominates, shallow_reverse
from .. import Analysis, register_analysis
from ..cfg.cfg_utils import CFGUtils
from .structuring.structurer_nodes import MultiNode, ConditionNode
from .graph_region import GraphRegion
from .condition_processor import ConditionProcessor
from .utils import replace_last_statement
l = logging.getLogger(name=__name__)
# an ever-incrementing counter
CONDITIONNODE_ADDR = count(0xff000000)
class RegionIdentifier(Analysis):
"""
Identifies regions within a function.
"""
def __init__(self, func, cond_proc=None, graph=None, largest_successor_tree_outside_loop=True,
force_loop_single_exit=True):
self.function = func
self.cond_proc = cond_proc if cond_proc is not None else ConditionProcessor(
self.project.arch if self.project is not None else None # it's only None in test cases
)
self._graph = graph if graph is not None else self.function.graph
self.region = None
self._start_node = None
self._loop_headers: Optional[List] = None
self.regions_by_block_addrs = []
self._largest_successor_tree_outside_loop = largest_successor_tree_outside_loop
self._force_loop_single_exit = force_loop_single_exit
self._analyze()
@staticmethod
def slice_graph(graph, node, frontier, include_frontier=False):
"""
Generate a slice of the graph from the head node to the given frontier.
:param networkx.DiGraph graph: The graph to work on.
:param node: The starting node in the graph.
:param frontier: A list of frontier nodes.
:param bool include_frontier: Whether the frontier nodes are included in the slice or not.
:return: A subgraph.
:rtype: networkx.DiGraph
"""
subgraph = subgraph_between_nodes(graph, node, frontier, include_frontier=include_frontier)
if not list(subgraph.nodes):
# HACK: FIXME: for infinite loop nodes, this would return an empty set, so we include the loop body itself
# Make sure this makes sense (EDG thinks it does)
if (node, node) in graph.edges:
subgraph.add_edge(node, node)
return subgraph
def _analyze(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
self._start_node = self._get_start_node(graph)
# preprocess: find loop headers
self._loop_headers = self._find_loop_headers(graph)
self.region = self._make_regions(graph)
# make regions into block address lists
self.regions_by_block_addrs = self._make_regions_by_block_addrs()
def _make_regions_by_block_addrs(self) -> List[List[int]]:
"""
        Creates a list of addr lists representing each region without recursion. A single region is defined
        as a set of blocks only (no graphs containing nested regions). The list contains the address of each
        block in the region, including the head of each nested region.
@return: List of addr lists
"""
work_list = [self.region]
block_only_regions = []
seen_regions = set()
while work_list:
children_regions = []
for region in work_list:
children_blocks = []
for node in region.graph.nodes:
if isinstance(node, Block):
children_blocks.append(node.addr)
elif isinstance(node, MultiNode):
children_blocks += [n.addr for n in node.nodes]
elif isinstance(node, GraphRegion):
if node not in seen_regions:
children_regions.append(node)
children_blocks.append(node.head.addr)
seen_regions.add(node)
else:
continue
if children_blocks:
block_only_regions.append(children_blocks)
work_list = children_regions
return block_only_regions
def _get_start_node(self, graph: networkx.DiGraph):
try:
return next(n for n in graph.nodes() if graph.in_degree(n) == 0)
except StopIteration:
pass
try:
return next(n for n in graph.nodes() if n.addr == self.function.addr)
except StopIteration as ex:
raise RuntimeError("Cannot find the start node from the graph!") from ex
def _test_reducibility(self):
# make a copy of the graph
graph = networkx.DiGraph(self._graph)
# preprocess: make it a super graph
self._make_supergraph(graph)
while True:
changed = False
# find a node with a back-edge, remove the edge (deleting the loop), and replace it with a MultiNode
changed |= self._remove_self_loop(graph)
# find a node that has only one predecessor, and merge it with its predecessor (replace them with a
# MultiNode)
changed |= self._merge_single_entry_node(graph)
if not changed:
# a fixed-point is reached
break
# Flow graph reducibility, Hecht and Ullman
if len(graph.nodes) == 1:
return True
return False
def _make_supergraph(self, graph: networkx.DiGraph):
while True:
for src, dst, data in graph.edges(data=True):
type_ = data.get('type', None)
if type_ == 'fake_return':
if len(list(graph.successors(src))) == 1 and len(list(graph.predecessors(dst))) == 1:
self._merge_nodes(graph, src, dst, force_multinode=True)
break
elif type_ == 'call':
graph.remove_node(dst)
break
else:
break
def _find_loop_headers(self, graph: networkx.DiGraph) -> List:
heads = { t for _,t in dfs_back_edges(graph, self._start_node) }
return CFGUtils.quasi_topological_sort_nodes(graph, heads)
def _find_initial_loop_nodes(self, graph: networkx.DiGraph, head):
# TODO optimize
latching_nodes = { s for s,t in dfs_back_edges(graph, self._start_node) if t == head }
loop_subgraph = self.slice_graph(graph, head, latching_nodes, include_frontier=True)
nodes = set(loop_subgraph.nodes())
return nodes
def _refine_loop(self, graph: networkx.DiGraph, head, initial_loop_nodes, initial_exit_nodes):
if len(initial_exit_nodes) <= 1:
return initial_loop_nodes, initial_exit_nodes
refined_loop_nodes = initial_loop_nodes.copy()
refined_exit_nodes = initial_exit_nodes.copy()
idom = networkx.immediate_dominators(graph, head)
new_exit_nodes = refined_exit_nodes
# a graph with only initial exit nodes and new loop nodes that are reachable from at least one initial exit
# node.
subgraph = networkx.DiGraph()
while len(refined_exit_nodes) > 1 and new_exit_nodes:
new_exit_nodes = set()
for n in list(sorted(refined_exit_nodes,
key=lambda nn: (nn.addr, nn.idx if isinstance(nn, ailment.Block) else None))):
if all((pred is n or pred in refined_loop_nodes) for pred in graph.predecessors(n)) \
and dominates(idom, head, n):
refined_loop_nodes.add(n)
refined_exit_nodes.remove(n)
to_add = set(graph.successors(n)) - refined_loop_nodes
new_exit_nodes |= to_add
for succ in to_add:
subgraph.add_edge(n, succ)
refined_exit_nodes |= new_exit_nodes
refined_loop_nodes = refined_loop_nodes - refined_exit_nodes
if self._largest_successor_tree_outside_loop and not refined_exit_nodes:
# figure out the new successor tree with the highest number of nodes
initial_exit_to_newnodes = defaultdict(set)
newnode_to_initial_exits = defaultdict(set)
for initial_exit in initial_exit_nodes:
if initial_exit in subgraph:
for _, succs in networkx.bfs_successors(subgraph, initial_exit):
initial_exit_to_newnodes[initial_exit] |= set(succs)
for succ in succs:
newnode_to_initial_exits[succ].add(initial_exit)
for newnode, exits in newnode_to_initial_exits.items():
for exit_ in exits:
initial_exit_to_newnodes[exit_].add(newnode)
if initial_exit_to_newnodes:
tree_sizes = dict((exit_, len(initial_exit_to_newnodes[exit_])) for exit_ in initial_exit_to_newnodes)
max_tree_size = max(tree_sizes.values())
if list(tree_sizes.values()).count(max_tree_size) == 1:
tree_size_to_exit = dict((v, k) for k, v in tree_sizes.items())
max_size_exit = tree_size_to_exit[max_tree_size]
if all(len(newnode_to_initial_exits[nn]) == 1 for nn in initial_exit_to_newnodes[max_size_exit]):
refined_loop_nodes = refined_loop_nodes - \
initial_exit_to_newnodes[max_size_exit] - {max_size_exit}
refined_exit_nodes.add(max_size_exit)
return refined_loop_nodes, refined_exit_nodes
def _remove_self_loop(self, graph: networkx.DiGraph):
r = False
while True:
for node in graph.nodes():
if node in graph[node]:
# found a self loop
self._remove_node(graph, node)
r = True
break
else:
break
return r
def _merge_single_entry_node(self, graph: networkx.DiGraph):
r = False
while True:
for node in networkx.dfs_postorder_nodes(graph):
                # materialize the iterator: predecessors() returns a generator
                # in networkx 2.x, which supports neither len() nor indexing
                preds = list(graph.predecessors(node))
if len(preds) == 1:
# merge the two nodes
self._absorb_node(graph, preds[0], node)
r = True
break
else:
break
return r
def _make_regions(self, graph: networkx.DiGraph):
structured_loop_headers = set()
new_regions = [ ]
# FIXME: _get_start_node() will fail if the graph is just a loop
# Find all loops
while True:
restart = False
self._start_node = self._get_start_node(graph)
# Start from loops
for node in list(reversed(self._loop_headers)):
if node in structured_loop_headers:
continue
if node not in graph:
continue
region = self._make_cyclic_region(node, graph)
if region is None:
                    # failed to structure the loop region - remove the header node from loop headers
l.debug("Failed to structure a loop region starting at %#x. Remove it from loop headers.",
node.addr)
self._loop_headers.remove(node)
else:
l.debug("Structured a loop region %r.", region)
new_regions.append(region)
structured_loop_headers.add(node)
restart = True
break
if restart:
continue
break
new_regions.append(GraphRegion(self._get_start_node(graph), graph, None, None, False, None))
l.debug("Identified %d loop regions.", len(structured_loop_headers))
l.debug("No more loops left. Start structuring acyclic regions.")
# No more loops left. Structure acyclic regions.
while new_regions:
region = new_regions.pop(0)
head = region.head
subgraph = region.graph
failed_region_attempts = set()
while self._make_acyclic_region(head, subgraph, region.graph_with_successors, failed_region_attempts,
region.cyclic):
if head not in subgraph:
# update head
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
head = next(iter(n for n in subgraph.nodes() if n.addr == head.addr))
region.head = head
if len(graph.nodes()) == 1 and isinstance(list(graph.nodes())[0], GraphRegion):
return list(graph.nodes())[0]
# create a large graph region
new_head = self._get_start_node(graph)
region = GraphRegion(new_head, graph, None, None, False, None)
return region
#
# Cyclic regions
#
def _make_cyclic_region(self, head, graph: networkx.DiGraph):
l.debug("Found cyclic region at %#08x", head.addr)
initial_loop_nodes = self._find_initial_loop_nodes(graph, head)
l.debug("Initial loop nodes %s", self._dbg_block_list(initial_loop_nodes))
# Make sure no other loops are contained in the current loop
if {n for n in initial_loop_nodes if n.addr != head.addr}.intersection(self._loop_headers):
return None
normal_entries = {n for n in graph.predecessors(head) if n not in initial_loop_nodes}
abnormal_entries = set()
for n in initial_loop_nodes:
if n == head:
continue
preds = set(graph.predecessors(n))
abnormal_entries |= (preds - initial_loop_nodes)
l.debug("Normal entries %s", self._dbg_block_list(normal_entries))
l.debug("Abnormal entries %s", self._dbg_block_list(abnormal_entries))
initial_exit_nodes = set()
for n in initial_loop_nodes:
succs = set(graph.successors(n))
initial_exit_nodes |= (succs - initial_loop_nodes)
l.debug("Initial exit nodes %s", self._dbg_block_list(initial_exit_nodes))
refined_loop_nodes, refined_exit_nodes = self._refine_loop(graph, head, initial_loop_nodes,
initial_exit_nodes)
l.debug("Refined loop nodes %s", self._dbg_block_list(refined_loop_nodes))
l.debug("Refined exit nodes %s", self._dbg_block_list(refined_exit_nodes))
if len(refined_exit_nodes) > 1:
# self._get_start_node(graph)
node_post_order = list(networkx.dfs_postorder_nodes(graph, head))
sorted_exit_nodes = sorted(list(refined_exit_nodes), key=node_post_order.index)
normal_exit_node = sorted_exit_nodes[0]
abnormal_exit_nodes = set(sorted_exit_nodes[1:])
else:
normal_exit_node = next(iter(refined_exit_nodes)) if len(refined_exit_nodes) > 0 else None
abnormal_exit_nodes = set()
region = self._abstract_cyclic_region(graph, refined_loop_nodes, head, normal_entries, abnormal_entries,
normal_exit_node, abnormal_exit_nodes)
if len(region.successors) > 1 and self._force_loop_single_exit:
# multi-successor region. refinement is required
self._refine_loop_successors(region, graph)
return region
def _refine_loop_successors(self, region, graph: networkx.DiGraph):
"""
If there are multiple successors of a loop, convert them into conditional gotos. Eventually there should be
only one loop successor.
:param GraphRegion region: The cyclic region to refine.
:param networkx.DiGraph graph: The current graph that is being structured.
:return: None
"""
if len(region.successors) <= 1:
return
# recover reaching conditions
self.cond_proc.recover_reaching_conditions(region, with_successors=True)
successors = list(region.successors)
condnode_addr = next(CONDITIONNODE_ADDR)
# create a new successor
cond = ConditionNode(
condnode_addr,
None,
self.cond_proc.reaching_conditions[successors[0]],
successors[0],
false_node=None,
)
for succ in successors[1:]:
cond = ConditionNode(condnode_addr,
None,
self.cond_proc.reaching_conditions[succ],
succ,
false_node=cond,
)
g = region.graph_with_successors
# modify region in place
region.successors = {cond}
for succ in successors:
for src, _, data in list(g.in_edges(succ, data=True)):
removed_edges = [ ]
for src2src, _, data_ in list(g.in_edges(src, data=True)):
removed_edges.append((src2src, src, data_))
g.remove_edge(src2src, src)
g.remove_edge(src, succ)
# TODO: rewrite the conditional jumps in src so that it goes to cond-node instead.
# modify the last statement of src so that it jumps to cond
replaced_any_stmt = False
last_stmts = self.cond_proc.get_last_statements(src)
for last_stmt in last_stmts:
if isinstance(last_stmt, ailment.Stmt.ConditionalJump):
if isinstance(last_stmt.true_target, ailment.Expr.Const) \
and last_stmt.true_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
last_stmt.false_target,
ins_addr=last_stmt.ins_addr,
)
elif isinstance(last_stmt.false_target, ailment.Expr.Const) \
and last_stmt.false_target.value == succ.addr:
new_last_stmt = ailment.Stmt.ConditionalJump(
last_stmt.idx,
last_stmt.condition,
last_stmt.true_target,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
else:
# none of the two branches is jumping out of the loop
continue
elif isinstance(last_stmt, ailment.Stmt.Jump):
if isinstance(last_stmt.target, ailment.Expr.Const):
new_last_stmt = ailment.Stmt.Jump(
last_stmt.idx,
ailment.Expr.Const(None, None, condnode_addr, self.project.arch.bits),
ins_addr=last_stmt.ins_addr,
)
else:
# an indirect jump - might be a jump table. ignore it
continue
else:
l.error("Unexpected last_stmt type %s. Ignore.", type(last_stmt))
continue
replace_last_statement(src, last_stmt, new_last_stmt)
replaced_any_stmt = True
if not replaced_any_stmt:
l.warning("No statement was replaced. Is there anything wrong?")
# raise Exception()
# add src back
for src2src, _, data_ in removed_edges:
g.add_edge(src2src, src, **data_)
g.add_edge(src, cond, **data)
# modify graph
graph.add_edge(region, cond)
for succ in successors:
edge_data = graph.get_edge_data(region, succ)
graph.remove_edge(region, succ)
graph.add_edge(cond, succ, **edge_data)
#
# Acyclic regions
#
def _make_acyclic_region(self, head, graph: networkx.DiGraph, secondary_graph, failed_region_attempts, cyclic):
# pre-processing
# we need to create a copy of the original graph if
# - there are in edges to the head node, or
# - there are more than one end nodes
head_inedges = list(graph.in_edges(head))
if head_inedges:
# we need a copy of the graph to remove edges coming into the head
graph_copy = networkx.DiGraph(graph)
# remove any in-edge to the head node
for src, _ in head_inedges:
graph_copy.remove_edge(src, head)
else:
graph_copy = graph
endnodes = [node for node in graph_copy.nodes() if graph_copy.out_degree(node) == 0]
if len(endnodes) == 0:
# sanity check: there should be at least one end node
l.critical("No end node is found in a supposedly acyclic graph. Is it really acyclic?")
return False
if len(endnodes) > 1:
# we need a copy of the graph!
graph_copy = networkx.DiGraph(graph_copy)
# if this graph has multiple end nodes: create a single end node
dummy_endnode = None
if len(endnodes) > 1:
dummy_endnode = "DUMMY_ENDNODE"
for endnode in endnodes:
graph_copy.add_edge(endnode, dummy_endnode)
endnodes = [ dummy_endnode ]
else:
dummy_endnode = None
# compute dominator tree
doms = networkx.immediate_dominators(graph_copy, head)
# compute post-dominator tree
inverted_graph = shallow_reverse(graph_copy)
postdoms = networkx.immediate_dominators(inverted_graph, endnodes[0])
# dominance frontiers
df = networkx.algorithms.dominance_frontiers(graph_copy, head)
# visit the nodes in post-order
for node in networkx.dfs_postorder_nodes(graph_copy, source=head):
if node is dummy_endnode:
# skip the dummy endnode
continue
if cyclic and node is head:
continue
out_degree = graph_copy.out_degree[node]
if out_degree == 0:
# the root element of the region hierarchy should always be a GraphRegion,
# so we transform it into one, if necessary
if graph_copy.in_degree(node) == 0 and not isinstance(node, GraphRegion):
subgraph = networkx.DiGraph()
subgraph.add_node(node)
self._abstract_acyclic_region(graph,
GraphRegion(node, subgraph, None, None, False, None),
[],
secondary_graph=secondary_graph)
continue
# test if this node is an entry to a single-entry, single-successor region
levels = 0
postdom_node = postdoms.get(node, None)
while postdom_node is not None:
if (node, postdom_node) not in failed_region_attempts:
if self._check_region(graph_copy, node, postdom_node, doms, df):
frontier = [ postdom_node ]
region = self._compute_region(graph_copy, node, frontier, dummy_endnode=dummy_endnode)
if region is not None:
# update region.graph_with_successors
if secondary_graph is not None:
for nn in list(region.graph_with_successors.nodes):
original_successors = secondary_graph.successors(nn)
for succ in original_successors:
if succ not in graph_copy:
region.graph_with_successors.add_edge(nn, succ)
# l.debug("Walked back %d levels in postdom tree.", levels)
l.debug("Node %r, frontier %r.", node, frontier)
# l.debug("Identified an acyclic region %s.", self._dbg_block_list(region.graph.nodes()))
self._abstract_acyclic_region(graph, region, frontier, dummy_endnode=dummy_endnode,
secondary_graph=secondary_graph)
# assert dummy_endnode not in graph
return True
failed_region_attempts.add((node, postdom_node))
if not dominates(doms, node, postdom_node):
break
if postdom_node is postdoms.get(postdom_node, None):
break
postdom_node = postdoms.get(postdom_node, None)
levels += 1
# l.debug("Walked back %d levels in postdom tree and did not find anything for %r. Next.", levels, node)
return False
@staticmethod
def _check_region(graph, start_node, end_node, doms, df):
"""
:param graph:
:param start_node:
:param end_node:
:param doms:
:param df:
:return:
"""
# if the exit node is the header of a loop that contains the start node, the dominance frontier should only
# contain the exit node.
if not dominates(doms, start_node, end_node):
frontier = df.get(start_node, set())
for node in frontier:
if node is not start_node and node is not end_node:
return False
# no edges should enter the region.
for node in df.get(end_node, set()):
if dominates(doms, start_node, node) and node is not end_node:
return False
# no edges should leave the region.
for node in df.get(start_node, set()):
if node is start_node or node is end_node:
continue
if node not in df.get(end_node, set()):
return False
for pred in graph.predecessors(node):
if dominates(doms, start_node, pred) and not dominates(doms, end_node, pred):
return False
return True
@staticmethod
def _compute_region(graph, node, frontier, include_frontier=False, dummy_endnode=None):
subgraph = networkx.DiGraph()
frontier_edges = [ ]
queue = [ node ]
traversed = set()
while queue:
node_ = queue.pop()
if node_ in frontier:
continue
traversed.add(node_)
subgraph.add_node(node_)
for succ in graph.successors(node_):
edge_data = graph.get_edge_data(node_, succ)
if node_ in frontier and succ in traversed:
if include_frontier:
# if frontier nodes are included, do not keep traversing their successors
# however, if it has an edge to an already traversed node, we should add that edge
subgraph.add_edge(node_, succ, **edge_data)
else:
frontier_edges.append((node_, succ, edge_data))
continue
if succ is dummy_endnode:
continue
if succ in frontier:
if not include_frontier:
# skip all frontier nodes
frontier_edges.append((node_, succ, edge_data))
continue
subgraph.add_edge(node_, succ, **edge_data)
if succ in traversed:
continue
queue.append(succ)
if dummy_endnode is not None:
frontier = { n for n in frontier if n is not dummy_endnode }
if subgraph.number_of_nodes() > 1:
subgraph_with_frontier = networkx.DiGraph(subgraph)
for src, dst, edge_data in frontier_edges:
if dst is not dummy_endnode:
subgraph_with_frontier.add_edge(src, dst, **edge_data)
# assert dummy_endnode not in frontier
# assert dummy_endnode not in subgraph_with_frontier
return GraphRegion(node, subgraph, frontier, subgraph_with_frontier, False, None)
else:
return None
def _abstract_acyclic_region(self,
graph: networkx.DiGraph,
region,
frontier,
dummy_endnode=None,
secondary_graph=None):
in_edges = self._region_in_edges(graph, region, data=True)
out_edges = self._region_out_edges(graph, region, data=True)
nodes_set = set()
for node_ in list(region.graph.nodes()):
nodes_set.add(node_)
if node_ is not dummy_endnode:
graph.remove_node(node_)
graph.add_node(region)
for src, _, data in in_edges:
if src not in nodes_set:
graph.add_edge(src, region, **data)
for _, dst, data in out_edges:
if dst not in nodes_set:
graph.add_edge(region, dst, **data)
if frontier:
for frontier_node in frontier:
if frontier_node is not dummy_endnode:
graph.add_edge(region, frontier_node)
if secondary_graph is not None:
self._abstract_acyclic_region(secondary_graph, region, { })
@staticmethod
def _abstract_cyclic_region(graph: networkx.DiGraph, loop_nodes, head, normal_entries, abnormal_entries,
normal_exit_node,
abnormal_exit_nodes):
region = GraphRegion(head, None, None, None, True, None)
subgraph = networkx.DiGraph()
region_outedges = [ ]
delayed_edges = [ ]
full_graph = networkx.DiGraph()
for node in loop_nodes:
subgraph.add_node(node)
in_edges = list(graph.in_edges(node, data=True))
out_edges = list(graph.out_edges(node, data=True))
for src, dst, data in in_edges:
full_graph.add_edge(src, dst, **data)
if src in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif src is region:
subgraph.add_edge(head, dst, **data)
elif src in normal_entries:
# graph.add_edge(src, region, **data)
delayed_edges.append((src, region, data))
elif src in abnormal_entries:
data['region_dst_node'] = dst
# graph.add_edge(src, region, **data)
delayed_edges.append((src, region, data))
else:
assert 0
for src, dst, data in out_edges:
full_graph.add_edge(src, dst, **data)
if dst in loop_nodes:
subgraph.add_edge(src, dst, **data)
elif dst is region:
subgraph.add_edge(src, head, **data)
elif dst is normal_exit_node:
region_outedges.append((node, dst))
# graph.add_edge(region, dst, **data)
delayed_edges.append((region, dst, data))
elif dst in abnormal_exit_nodes:
region_outedges.append((node, dst))
# data['region_src_node'] = src
# graph.add_edge(region, dst, **data)
delayed_edges.append((region, dst, data))
else:
assert 0
subgraph_with_exits = networkx.DiGraph(subgraph)
for src, dst in region_outedges:
subgraph_with_exits.add_edge(src, dst)
region.graph = subgraph
region.graph_with_successors = subgraph_with_exits
if normal_exit_node is not None:
region.successors = [normal_exit_node]
else:
region.successors = [ ]
region.successors += list(abnormal_exit_nodes)
for node in loop_nodes:
graph.remove_node(node)
# add delayed edges
graph.add_node(region)
for src, dst, data in delayed_edges:
graph.add_edge(src, dst, **data)
region.full_graph = full_graph
return region
@staticmethod
def _region_in_edges(graph, region, data=False):
return list(graph.in_edges(region.head, data=data))
@staticmethod
def _region_out_edges(graph, region, data=False):
out_edges = [ ]
for node in region.graph.nodes():
out_ = graph.out_edges(node, data=data)
for _, dst, data_ in out_:
if dst in region.graph:
continue
out_edges.append((region, dst, data_))
return out_edges
def _remove_node(self, graph: networkx.DiGraph, node): # pylint:disable=no-self-use
        in_edges = [ (src, dst, data) for (src, dst, data) in graph.in_edges(node, data=True) if src is not node ]
        out_edges = [ (src, dst, data) for (src, dst, data) in graph.out_edges(node, data=True) if dst is not node ]
if len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node ])
graph.remove_node(node)
if new_node is not None:
for src, _, data in in_edges:
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
graph.add_edge(new_node, dst, **data)
def _merge_nodes(self, graph: networkx.DiGraph, node_a, node_b, force_multinode=False): # pylint:disable=no-self-use
in_edges = list(graph.in_edges(node_a, data=True))
out_edges = list(graph.out_edges(node_b, data=True))
if not force_multinode and len(in_edges) <= 1 and len(out_edges) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([ node_a, node_b ])
graph.remove_node(node_a)
graph.remove_node(node_b)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges:
if src is node_b:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges:
if dst is node_a:
dst = new_node
graph.add_edge(new_node, dst, **data)
        assert node_a not in graph
        assert node_b not in graph
def _absorb_node(self, graph: networkx.DiGraph, node_mommy, node_kiddie, force_multinode=False): # pylint:disable=no-self-use
in_edges_mommy = graph.in_edges(node_mommy, data=True)
out_edges_mommy = graph.out_edges(node_mommy, data=True)
out_edges_kiddie = graph.out_edges(node_kiddie, data=True)
if not force_multinode and len(in_edges_mommy) <= 1 and len(out_edges_kiddie) <= 1:
# it forms a region by itself :-)
new_node = None
else:
new_node = MultiNode([node_mommy, node_kiddie])
graph.remove_node(node_mommy)
graph.remove_node(node_kiddie)
if new_node is not None:
graph.add_node(new_node)
for src, _, data in in_edges_mommy:
if src == node_kiddie:
src = new_node
graph.add_edge(src, new_node, **data)
for _, dst, data in out_edges_mommy:
if dst == node_kiddie:
continue
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
for _, dst, data in out_edges_kiddie:
if dst == node_mommy:
dst = new_node
graph.add_edge(new_node, dst, **data)
        assert node_mommy not in graph
        assert node_kiddie not in graph
@staticmethod
def _dbg_block_list(blocks):
return [(hex(b.addr) if hasattr(b, 'addr') else repr(b)) for b in blocks]
register_analysis(RegionIdentifier, 'RegionIdentifier')
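# Usage sketch (illustrative; the binary path and function name are made-up
# assumptions, and RegionIdentifier is normally driven indirectly by the
# decompiler). register_analysis() above exposes the class on the project's
# analyses hub:
#
#   import angr
#   proj = angr.Project('/bin/true', auto_load_libs=False)
#   cfg = proj.analyses.CFGFast(normalize=True)
#   func = proj.kb.functions['main']
#   ri = proj.analyses.RegionIdentifier(func)
#   print(ri.regions_by_block_addrs)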
|
{
"content_hash": "4a22e88012c90fe7d57001a06c6af30b",
"timestamp": "",
"source": "github",
"line_count": 924,
"max_line_length": 130,
"avg_line_length": 40.416666666666664,
"alnum_prop": 0.5391886464051413,
"repo_name": "angr/angr",
"id": "7749d5dc93fdba9f105ea19e89173414f77a75fa",
"size": "37345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/decompiler/region_identifier.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
}
|
import json
import sys
import requests
# tested with python 3.6 and requests 2.13.0
if len(sys.argv) != 2:
sys.stderr.write('usage: program <milestone-number>\n')
sys.stderr.write('Provide the github milestone number, not name. (e.g., 19 instead of 0.10.1)\n')
sys.exit(1)
milestone_num = sys.argv[1]
done = False
page_counter = 1
contributors = set()
# Get all users who created a closed issue or merged PR for a given milestone
while not done:
resp = requests.get("https://api.github.com/repos/apache/incubator-druid/issues?milestone=%s&state=closed&page=%s" % (milestone_num, page_counter))
    # the last page has no "next" relation in the Link header, and the header
    # may be missing entirely when the results fit on a single page
    pagination_link = resp.headers.get("Link", "")
if "rel=\"next\"" not in pagination_link:
done = True
else:
page_counter += 1
issues = json.loads(resp.text)
for issue in issues:
contributor_name = issue["user"]["login"]
contributors.add(contributor_name)
# doesn't work as-is for python2, the contributor names are "unicode" instead of "str" in python2
contributors = sorted(contributors, key=str.lower)
for contributor_name in contributors:
print("@%s" % contributor_name)
|
{
"content_hash": "020d8a765262bb91bbb98168d8566f80",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 149,
"avg_line_length": 30,
"alnum_prop": 0.7105263157894737,
"repo_name": "dkhwangbo/druid",
"id": "88b4e49dfe24bded27aaba309559d5e3832ffe9d",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/_bin/get-milestone-contributors.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "3345"
},
{
"name": "CSS",
"bytes": "15658"
},
{
"name": "Dockerfile",
"bytes": "4856"
},
{
"name": "HTML",
"bytes": "19754"
},
{
"name": "Java",
"bytes": "21183046"
},
{
"name": "JavaScript",
"bytes": "304058"
},
{
"name": "Makefile",
"bytes": "659"
},
{
"name": "PostScript",
"bytes": "5"
},
{
"name": "R",
"bytes": "17002"
},
{
"name": "Roff",
"bytes": "3617"
},
{
"name": "Shell",
"bytes": "28297"
},
{
"name": "TeX",
"bytes": "399508"
},
{
"name": "Thrift",
"bytes": "207"
}
],
"symlink_target": ""
}
|
'''
Created on 22 avr. 2016
@author: aidturith
'''
# https://docs.python.org/3/howto/logging.html
import logging
logging.basicConfig(format='[%(asctime)s] - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
level=logging.DEBUG,
                    filename=r'temp\example.log')  # raw string: avoids accidental escape sequences
if __name__ == '__main__':
    logging.fatal('oups')
    logging.error('error')
    logging.warning('warn')
    logging.info('info')
    logging.debug('debu')
    logging.debug('%s before you %s', 'Look', 'leap!')
    logging.info('TODO: next step is to look into logging config and handlers (socket, smtp...)')
    logging.debug('debug')
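# A sketch of the follow-up mentioned in the TODO above: configuring handlers
# declaratively via dictConfig. The names, format, and levels here are
# illustrative assumptions, not a prescribed setup.
import logging.config
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'brief': {'format': '[%(asctime)s] - %(levelname)s - %(message)s'},
    },
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'formatter': 'brief'},
    },
    'root': {'level': 'DEBUG', 'handlers': ['console']},
}
# logging.config.dictConfig(LOGGING_CONFIG)  # left commented so it does not
# override the basicConfig() call above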
|
{
"content_hash": "79ce79e9be0bcacf93eeac31162fd18d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 97,
"avg_line_length": 25.178571428571427,
"alnum_prop": 0.5829787234042553,
"repo_name": "Aidturith/python-training",
"id": "c75ab4f1b08a3c959eac9d78c3b54388fb69ec7f",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "other/logs/logs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21159"
}
],
"symlink_target": ""
}
|
__author__ = 'matthieu'
import argparse
from cStringIO import StringIO
from docker import Client
dcl = Client()
parser = argparse.ArgumentParser(description='PGXL startpack')
parser.add_argument('--ip', dest="ip", action="store_true", help="print container IP")
parser.add_argument('--ncontainers', dest="ncontainers", action="store", type=int, default=4, help="N containers")
parser.add_argument('--conf', dest='conf', action="store", type=str, help="generate configuration file")
parser.add_argument('--static', dest='static', action="store", type=str, default=None, help="generate configuration file")
parser.add_argument('--local', dest='local', action="store", type=int, default=0, help="local mode")
parser.add_argument('--gtmproxy', dest='gtmproxy', action="store_true", help="enable gtm proxies (distributed mode)")
args = parser.parse_args()
def get_containers(dcl):
containers = dcl.containers()
cts = []
for c in containers:
info = dcl.inspect_container(c)
name = info['Name']
ip = info['NetworkSettings']['IPAddress']
cts.append({"name": name, "ip": ip})
return cts
def get_conf(ips, local_mode=False, gtmproxy=False):
servers = ["PGXL%d" %i for i in range(len(ips))]
servers_ip = ips
datanodes = servers
datanodes_ip = servers_ip
nnodes = len(datanodes)
gtm_server = servers_ip[0]
gtm_port = 20001
#coord_server = datanodes_ip[0]
conf = StringIO()
conf.write("""
pgxcOwner=$USER
pgxcUser=$pgxcOwner
tmpDir=/tmp
localTmpDir=$tmpDir
configBackup=n
""")
conf.write("""
gtmName=gtm
gtmMasterServer={0}
gtmMasterPort={1}
gtmMasterDir=$HOME/pgxc/nodes/gtm
gtmExtraConfig=none
gtmMasterSpecificExtraConfig=none
""".format(gtm_server, gtm_port))
if gtmproxy:
conf.write("""
gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
gtmProxy=y
gtmProxyNames=({0})
gtmProxyServers=({1})
gtmProxyPorts=({2})
gtmProxyDirs=({3})
gtmPxyExtraConfig=none
gtmPxySpecificExtraConfig=({4})
""".format(' '.join(["PROXY%d" %i for i in range(nnodes)]),
' '.join(datanodes_ip),
' '.join(["20001"] * nnodes),
' '.join(["$gtmProxyDir"] * nnodes),
' '.join(["none"] * nnodes)))
if local_mode:
conf.write("""
coordMasterDir=$HOME/pgxc/nodes/coord
coordSlaveDir=$HOME/pgxc/nodes/coord_slave
coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
coordNames=(COORD)
coordPorts=(5432)
poolerPorts=(20010)
coordPgHbaEntries=(all)
coordMasterServers=(127.0.0.1)
coordMasterDirs=($coordMasterDir)
coordMaxWALSenders=(5)
coordSlave=n
coordExtraConfig=coordExtraConfig
cat > $coordExtraConfig <<EOF
#================================================
# Added to all the coordinator postgresql.conf
# Original: $coordExtraConfig
log_destination = 'stderr'
logging_collector = on
log_directory = 'pg_log'
listen_addresses = '*'
max_connections = 100
log_filename = 'coordinator.log'
EOF
coordSpecificExtraConfig=(none)
coordExtraPgHba=none
coordSpecificExtraPgHba=(none)
""")
else:
conf.write("""
coordMasterDir=$HOME/pgxc/nodes/coord
coordSlaveDir=$HOME/pgxc/nodes/coord_slave
coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
coordNames=({0})
coordPorts=({1})
poolerPorts=({2})
coordPgHbaEntries=({3})
coordMasterServers=({4})
coordMasterDirs=({5})
coordMaxWALsender=5
coordMaxWALSenders=({6})
coordSlave=n
coordExtraConfig=coordExtraConfig
cat > $coordExtraConfig <<EOF
#================================================
# Added to all the coordinator postgresql.conf
# Original: $coordExtraConfig
log_destination = 'stderr'
logging_collector = on
log_directory = 'pg_log'
listen_addresses = '*'
max_connections = 100
log_filename = 'coordinator.log'
EOF
coordSpecificExtraConfig=({7})
coordExtraPgHba=none
coordSpecificExtraPgHba=({7})
""".format(' '.join("COORD%d" %node for node in range(nnodes)),
' '.join(["5432"] * nnodes),
' '.join(["20010"] * nnodes),
' '.join(["all"] * nnodes),
' '.join(datanodes_ip),
' '.join("$coordMasterDir/%d" %i for i in range(nnodes)),
' '.join(["$coordMaxWALsender"] * nnodes),
' '.join(["none"] * nnodes)))
conf.write("""
datanodeMasterDir=$HOME/pgxc/nodes/dn_master
datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
primaryDatanode={0}
datanodeNames=({1})
datanodePorts=({2})
datanodePoolerPorts=({3})
datanodePgHbaEntries=(all)
""".format(datanodes[0], ' '.join(datanodes),
' '.join(map(str, range(21000, 21000 + nnodes))),
' '.join(map(str, range(22000, 22000 + nnodes)))))
conf.write("""
datanodeMasterServers=({0})
datanodeMasterDirs=({1})
datanodeMaxWalSender=5
datanodeMaxWALSenders=({3})
datanodeSlave=n
datanodeExtraConfig=node.conf
datanodeSpecificExtraConfig=({2})
datanodeExtraPgHba=none
datanodeSpecificExtraPgHba=({2})
datanodeAdditionalSlaves=n
""".format(' '.join(ip for ip in datanodes_ip), ' '.join("$datanodeMasterDir/%d" %i for i in range(nnodes)),
' '.join(["none"] * nnodes),
' '.join(["$datanodeMaxWalSender"] * nnodes)))
return conf
def get_haproxy(ips):
conf = StringIO()
conf.write("""
global
log 127.0.0.1 local2 debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
defaults
timeout client 1m
timeout server 1m
timeout connect 10s
listen stats 0.0.0.0:9000
mode http
stats uri /stats
listen PGSQL 0.0.0.0:5432
mode tcp
option tcplog
balance static-rr
""")
for i in range(1, len(ips)):
conf.write(""" server s%d %s:5432 check\n""" %(i, ips[i]))
return conf
if args.ip:
print get_containers(dcl)
if args.conf:
local_mode = False
gtmproxy = args.gtmproxy
if args.local > 0:
ips = ["127.0.0.1"] * args.local
local_mode = True
gtmproxy = False
    elif args.static is None:
        # dcl was already constructed at module import time
        ctn = get_containers(dcl)
ips = [c["ip"] for c in ctn]
else:
ips = args.static.split(",")
conf = get_conf(ips, local_mode, gtmproxy)
with open("%s/pgxc_ctl.conf" %args.conf, "w+") as fp:
fp.write(conf.getvalue())
with open("%s/haproxy.cfg" %args.conf, "w+") as fp:
fp.write(get_haproxy(ips).getvalue())
conf.close()
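# Typical invocations (a sketch; the output directory and addresses are
# made-up examples):
#
#   python run.py --ip                                    # print container IPs
#   python run.py --conf ./out                            # conf from running containers
#   python run.py --conf ./out --static 10.0.0.1,10.0.0.2 --gtmproxy
#   python run.py --conf ./out --local 4                  # 4 nodes on 127.0.0.1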
|
{
"content_hash": "b543124370e335bded8a223c2a1ff1de",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 122,
"avg_line_length": 28.513274336283185,
"alnum_prop": 0.6579764121663563,
"repo_name": "postmind-net/pgxl-docker",
"id": "d693d2c79f34b5bf3d0d0a0ebfa138abe96013bd",
"size": "6444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6444"
},
{
"name": "Shell",
"bytes": "19993"
}
],
"symlink_target": ""
}
|
import bcrypt
def generate_password(password, rounds=10, encoding='utf-8'):
"""Generate a new password based on a random salt.
Args:
password (string): The password to generate the hash off
rounds (int): The complexity of the hashing
Returns:
mixed: The generated password and the salt used
"""
salt = bcrypt.gensalt(rounds)
hashed_password = bcrypt.hashpw(password.encode(encoding), salt)
return hashed_password.decode(encoding), salt.decode(encoding)
def check_password(password, existing_password, salt, encoding='utf-8'):
"""Validate a password against an existing password and the salt used to
generate it.
Args:
password (string): The password to validate
existing_password (string): The password to validate against
salt (string): The salt used to generate the existing_password
Returns:
boolean: True/False if valid or invalid
"""
if isinstance(salt, str):
salt = salt.encode(encoding)
if isinstance(existing_password, str):
existing_password = existing_password.encode(encoding)
return bcrypt.hashpw(
password.encode(encoding), salt) == existing_password
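# Round-trip sketch (the plaintext below is a made-up example): hashing with
# the returned salt must reproduce the stored hash.
def _demo_round_trip():
    hashed, salt = generate_password('s3cret')
    assert check_password('s3cret', hashed, salt)
    assert not check_password('wrong-password', hashed, salt)
    return hashed, salt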
|
{
"content_hash": "67e587282cd7a539453d5b077aacdeab",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.6872937293729373,
"repo_name": "watsonpy/watson-auth",
"id": "cee21d8d21cf411255137426fe71d6518125c2dd",
"size": "1236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watson/auth/crypto.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4119"
},
{
"name": "Python",
"bytes": "79050"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.conf import settings
from oauth_tokens.api import ApiAbstractBase, Singleton
from oauth_tokens.models import AccessToken
from tweepy import TweepError as TwitterError
import tweepy
__all__ = ['api_call', 'TwitterError']
TWITTER_CLIENT_ID = getattr(settings, 'OAUTH_TOKENS_TWITTER_CLIENT_ID', None)
TWITTER_CLIENT_SECRET = getattr(settings, 'OAUTH_TOKENS_TWITTER_CLIENT_SECRET', None)
class TwitterApi(ApiAbstractBase):
__metaclass__ = Singleton
provider = 'twitter'
error_class = TwitterError
sleep_repeat_error_messages = [
'Failed to send request:'
]
def get_consistent_token(self):
return getattr(settings, 'TWITTER_API_ACCESS_TOKEN', None)
def get_api(self, token):
delimeter = AccessToken.objects.get_token_class(self.provider).delimeter
auth = tweepy.OAuthHandler(TWITTER_CLIENT_ID, TWITTER_CLIENT_SECRET)
auth.set_access_token(*token.split(delimeter))
return tweepy.API(auth, wait_on_rate_limit=True, retry_count=3, retry_delay=1, retry_errors=set([401, 404, 500, 503]))
def get_api_response(self, *args, **kwargs):
return getattr(self.api, self.method)(*args, **kwargs)
def get_error_code(self, e):
return e[0][0]['code'] if 'code' in e[0][0] else 0
def handle_error_no_active_tokens(self, e, *args, **kwargs):
if self.used_access_tokens and self.api:
# check if all tokens are blocked by rate limits response
try:
rate_limit_status = self.api.rate_limit_status()
except self.error_class, e:
# handle rate limit on rate_limit_status request -> wait 15 min and repeat main request
if self.get_error_code(e) == 88:
self.used_access_tokens = []
return self.sleep_repeat_call(seconds=60 * 15, *args, **kwargs)
else:
raise
            # TODO: wrong logic, the path is sometimes completely different
method = '/%s' % self.method.replace('_', '/')
status = [methods for methods in rate_limit_status['resources'].values() if method in methods][0][method]
if status['remaining'] == 0:
secs = (datetime.fromtimestamp(status['reset']) - datetime.now()).seconds
self.used_access_tokens = []
return self.sleep_repeat_call(seconds=secs, *args, **kwargs)
else:
return self.repeat_call(*args, **kwargs)
else:
return super(TwitterApi, self).handle_error_no_active_tokens(e, *args, **kwargs)
def handle_error_code_88(self, e, *args, **kwargs):
# Rate limit exceeded
self.logger.warning("Rate limit exceeded: %s, method: %s recursion count: %d" %
(e, self.method, self.recursion_count))
token = AccessToken.objects.get_token_class(self.provider).delimeter.join(
[self.api.auth.access_token, self.api.auth.access_token_secret])
self.used_access_tokens += [token]
return self.repeat_call(*args, **kwargs)
# def handle_error_code_63(self, e, *args, **kwargs):
# # User has been suspended.
# self.refresh_tokens()
# return self.repeat_call(*args, **kwargs)
def api_call(*args, **kwargs):
api = TwitterApi()
return api.call(*args, **kwargs)
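# Usage sketch (hypothetical arguments; the exact signature of
# ApiAbstractBase.call is defined in oauth_tokens, outside this file):
#
#   from twitter_api.api import api_call
#   user = api_call('get_user', screen_name='twitter')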
|
{
"content_hash": "aeaecaa4398b309844eadf2e2354af57",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 126,
"avg_line_length": 41.036144578313255,
"alnum_prop": 0.6212566059894304,
"repo_name": "manoranjanp/django-twitter-api",
"id": "b514e7368fbb39b5fea2229938e2b494308b4881",
"size": "3406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter_api/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93666"
}
],
"symlink_target": ""
}
|
from PySide import QtGui
import sys
class DialogGUIBox(QtGui.QWidget):
def __init__(self, build_plot_display):
super(DialogGUIBox, self).__init__()
# Callback for bgsub plotting function
self.build_plot_display = build_plot_display
QtGui.QToolTip.setFont(QtGui.QFont('SansSerif', 10))
self.setToolTip('This is a <b>QWidget</b> widget')
# Folder Browser
        lbBrowser = QtGui.QLabel('Directory:', self)
        lbBrowser.move(15, 40)
# Initialize radio buttons for selecting between line or area scan
self.check_line = QtGui.QRadioButton("Line Scan", self)
self.check_area = QtGui.QRadioButton("Area Scan", self)
self.check_line.move(15, 90)
self.check_area.move(110, 90)
self.etBrowser = QtGui.QLineEdit('', self)
self.etBrowser.resize(210,20)
self.etBrowser.move(90, 37)
self.etBrowser.setEnabled(0)
        self.etBrowser.setReadOnly(False)
self.selected_directory = None
# initialize browse, open, close buttons
btnBrowse = QtGui.QPushButton('Browse...', self)
btnOpen = QtGui.QPushButton('Open', self)
btnClose = QtGui.QPushButton('Close', self)
btnBrowse.setToolTip('Select directory for project location.')
btnBrowse.resize(70,20)
btnBrowse.move(305, 37)
btnOpen.resize(70, 20)
btnOpen.move(305, 90)
btnClose.resize(70, 20)
btnClose.move(235, 90)
# Connects the action of clicking the buttons to calling methods
btnBrowse.clicked.connect(self.selectDirectory)
btnOpen.clicked.connect(self.run_bgsub)
btnClose.clicked.connect(self.close)
self.setWindowTitle('Folder Utility')
self.show()
def center(self):
qr = self.frameGeometry()
cp = QtGui.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def selectDirectory(self):
selected_directory = QtGui.QFileDialog.getExistingDirectory(self)
self.etBrowser.setText(selected_directory)
self.selected_directory = str(selected_directory)
def run_bgsub(self):
# default set to area scan
linescan = False
# if linescan is checked, change linescan bool to true
if self.check_line.isChecked():
linescan = True
self.close()
self.build_plot_display(self.selected_directory, linescan)
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
ex = DialogGUIBox(None)
sys.exit(app.exec_())
|
{
"content_hash": "7c1f8d94791bfe69d8a2550797cb3ac9",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 74,
"avg_line_length": 33.675324675324674,
"alnum_prop": 0.6378711916698805,
"repo_name": "danushkana/pyspectrum",
"id": "d0c84c45d52ed30ec971242b0de25105fbac6d71",
"size": "2593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directory_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31710"
}
],
"symlink_target": ""
}
|
import os
import numpy.testing as npt
import numpy as np
import pytest
from scipy import stats
from statsmodels.distributions.mixture_rvs import mixture_rvs
from statsmodels.nonparametric.kde import KDEUnivariate as KDE
import statsmodels.sandbox.nonparametric.kernels as kernels
import statsmodels.nonparametric.bandwidths as bandwidths
# get results from Stata
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir, 'results', 'results_kde.csv')
# print rfname
KDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
rfname = os.path.join(curdir, 'results', 'results_kde_univ_weights.csv')
KDEWResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# get results from R
curdir = os.path.dirname(os.path.abspath(__file__))
rfname = os.path.join(curdir, 'results', 'results_kcde.csv')
# print rfname
KCDEResults = np.genfromtxt(open(rfname, 'rb'), delimiter=",", names=True)
# setup test data
np.random.seed(12345)
Xi = mixture_rvs([.25, .75], size=200, dist=[stats.norm, stats.norm],
kwargs=(dict(loc=-1, scale=.5), dict(loc=1, scale=.5)))
class TestKDEExceptions(object):
@classmethod
def setup_class(cls):
cls.kde = KDE(Xi)
cls.weights_200 = np.linspace(1, 100, 200)
cls.weights_100 = np.linspace(1, 100, 100)
def test_check_is_fit_exception(self):
with pytest.raises(ValueError):
self.kde.evaluate(0)
def test_non_weighted_fft_exception(self):
with pytest.raises(NotImplementedError):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_200,
fft=True, bw="silverman")
def test_wrong_weight_length_exception(self):
with pytest.raises(ValueError):
self.kde.fit(kernel="gau", gridsize=50, weights=self.weights_100,
fft=False, bw="silverman")
def test_non_gaussian_fft_exception(self):
with pytest.raises(NotImplementedError):
self.kde.fit(kernel="epa", gridsize=50, fft=True, bw="silverman")
class CheckKDE(object):
decimal_density = 7
def test_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
# disable test
# fails for Epan, Triangular and Biweight, only Gaussian is correct
# added it as test method to TestKDEGauss below
# inDomain is not vectorized
# kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [np.squeeze(self.res1.evaluate(xi)) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) # kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGauss(CheckKDE):
@classmethod
def setup_class(cls):
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["gau_d"]
def test_evaluate(self):
# kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) # kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
# The following tests are regression tests
# Values have been checked to be very close to R 'ks' package (Dec 2013)
def test_support_gridded(self):
kde = self.res1
support = KCDEResults['gau_support']
npt.assert_allclose(support, kde.support)
def test_cdf_gridded(self):
kde = self.res1
cdf = KCDEResults['gau_cdf']
npt.assert_allclose(cdf, kde.cdf)
def test_sf_gridded(self):
kde = self.res1
sf = KCDEResults['gau_sf']
npt.assert_allclose(sf, kde.sf)
def test_icdf_gridded(self):
kde = self.res1
icdf = KCDEResults['gau_icdf']
npt.assert_allclose(icdf, kde.icdf)
class TestKDEEpanechnikov(CheckKDE):
@classmethod
def setup_class(cls):
res1 = KDE(Xi)
res1.fit(kernel="epa", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["epa2_d"]
class TestKDETriangular(CheckKDE):
@classmethod
def setup_class(cls):
res1 = KDE(Xi)
res1.fit(kernel="tri", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["tri_d"]
class TestKDEBiweight(CheckKDE):
@classmethod
def setup_class(cls):
res1 = KDE(Xi)
res1.fit(kernel="biw", fft=False, bw="silverman")
cls.res1 = res1
cls.res_density = KDEResults["biw_d"]
# FIXME: enable/xfail/skip or delete
# NOTE: This is a knownfailure due to a definitional difference of Cosine kernel
# class TestKDECosine(CheckKDE):
# @classmethod
# def setup_class(cls):
# res1 = KDE(Xi)
# res1.fit(kernel="cos", fft=False, bw="silverman")
# cls.res1 = res1
# cls.res_density = KDEResults["cos_d"]
# weighted estimates taken from matlab so we can allow len(weights) != gridsize
class TestKdeWeights(CheckKDE):
@classmethod
def setup_class(cls):
res1 = KDE(Xi)
weights = np.linspace(1, 100, 200)
res1.fit(kernel="gau", gridsize=50, weights=weights, fft=False,
bw="silverman")
cls.res1 = res1
fname = os.path.join(curdir, 'results', 'results_kde_weights.csv')
cls.res_density = np.genfromtxt(open(fname, 'rb'), skip_header=1)
def test_evaluate(self):
# kde_vals = self.res1.evaluate(self.res1.support)
kde_vals = [self.res1.evaluate(xi) for xi in self.res1.support]
kde_vals = np.squeeze(kde_vals) # kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
class TestKDEGaussFFT(CheckKDE):
@classmethod
def setup_class(cls):
cls.decimal_density = 2 # low accuracy because binning is different
res1 = KDE(Xi)
res1.fit(kernel="gau", fft=True, bw="silverman")
cls.res1 = res1
rfname2 = os.path.join(curdir, 'results', 'results_kde_fft.csv')
cls.res_density = np.genfromtxt(open(rfname2, 'rb'))
class CheckKDEWeights(object):
@classmethod
def setup_class(cls):
cls.x = x = KDEWResults['x']
weights = KDEWResults['weights']
res1 = KDE(x)
# default kernel was scott when reference values computed
res1.fit(kernel=cls.kernel_name, weights=weights, fft=False, bw="scott")
cls.res1 = res1
cls.res_density = KDEWResults[cls.res_kernel_name]
decimal_density = 7
@pytest.mark.xfail(reason="Not almost equal to 7 decimals",
raises=AssertionError, strict=True)
def test_density(self):
npt.assert_almost_equal(self.res1.density, self.res_density,
self.decimal_density)
def test_evaluate(self):
if self.kernel_name == 'cos':
pytest.skip("Cosine kernel fails against Stata")
kde_vals = [self.res1.evaluate(xi) for xi in self.x]
kde_vals = np.squeeze(kde_vals) # kde_vals is a "column_list"
npt.assert_almost_equal(kde_vals, self.res_density,
self.decimal_density)
def test_compare(self):
xx = self.res1.support
kde_vals = [np.squeeze(self.res1.evaluate(xi)) for xi in xx]
kde_vals = np.squeeze(kde_vals) # kde_vals is a "column_list"
mask_valid = np.isfinite(kde_vals)
# TODO: nans at the boundaries
kde_vals[~mask_valid] = 0
npt.assert_almost_equal(self.res1.density, kde_vals,
self.decimal_density)
# regression test, not compared to another package
nobs = len(self.res1.endog)
kern = self.res1.kernel
v = kern.density_var(kde_vals, nobs)
v_direct = kde_vals * kern.L2Norm / kern.h / nobs
npt.assert_allclose(v, v_direct, rtol=1e-10)
ci = kern.density_confint(kde_vals, nobs)
crit = 1.9599639845400545 # stats.norm.isf(0.05 / 2)
hw = kde_vals - ci[:, 0]
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
hw = ci[:, 1] - kde_vals
npt.assert_allclose(hw, crit * np.sqrt(v), rtol=1e-10)
def test_kernel_constants(self):
kern = self.res1.kernel
nc = kern.norm_const
# trigger numerical integration
kern._norm_const = None
nc2 = kern.norm_const
npt.assert_allclose(nc, nc2, rtol=1e-10)
l2n = kern.L2Norm
# trigger numerical integration
kern._L2Norm = None
l2n2 = kern.L2Norm
npt.assert_allclose(l2n, l2n2, rtol=1e-10)
v = kern.kernel_var
# trigger numerical integration
kern._kernel_var = None
v2 = kern.kernel_var
npt.assert_allclose(v, v2, rtol=1e-10)
class TestKDEWGauss(CheckKDEWeights):
kernel_name = "gau"
res_kernel_name = "x_gau_wd"
class TestKDEWEpa(CheckKDEWeights):
kernel_name = "epa"
res_kernel_name = "x_epan2_wd"
class TestKDEWTri(CheckKDEWeights):
kernel_name = "tri"
res_kernel_name = "x_" + kernel_name + "_wd"
class TestKDEWBiw(CheckKDEWeights):
kernel_name = "biw"
res_kernel_name = "x_bi_wd"
class TestKDEWCos(CheckKDEWeights):
kernel_name = "cos"
res_kernel_name = "x_cos_wd"
class TestKDEWCos2(CheckKDEWeights):
kernel_name = "cos2"
res_kernel_name = "x_cos_wd"
class _TestKDEWRect(CheckKDEWeights):
# TODO in docstring but not in kernel_switch
kernel_name = "rect"
res_kernel_name = "x_rec_wd"
class _TestKDEWPar(CheckKDEWeights):
# TODO in docstring but not implemented in kernels
kernel_name = "par"
res_kernel_name = "x_par_wd"
class TestKdeRefit:
np.random.seed(12345)
data1 = np.random.randn(100) * 100
pdf = KDE(data1)
pdf.fit()
data2 = np.random.randn(100) * 100
pdf2 = KDE(data2)
pdf2.fit()
for attr in ['icdf', 'cdf', 'sf']:
npt.assert_(not np.allclose(getattr(pdf, attr)[:10],
getattr(pdf2, attr)[:10]))
class TestNormConstant:
def test_norm_constant_calculation(self):
custom_gauss = kernels.CustomKernel(lambda x: np.exp(-x ** 2 / 2.0))
gauss_true_const = 0.3989422804014327
npt.assert_almost_equal(gauss_true_const, custom_gauss.norm_const)
def test_kde_bw_positive():
# GH 6679
x = np.array([4.59511985, 4.59511985, 4.59511985, 4.59511985, 4.59511985,
4.59511985, 4.59511985, 4.59511985, 4.59511985, 4.59511985,
5.67332327, 6.19847872, 7.43189192])
kde = KDE(x)
kde.fit()
assert kde.bw > 0
def test_fit_self(reset_randomstate):
x = np.random.standard_normal(100)
kde = KDE(x)
assert isinstance(kde, KDE)
assert isinstance(kde.fit(), KDE)
class TestKDECustomBandwidth(object):
decimal_density = 7
@classmethod
def setup_class(cls):
cls.kde = KDE(Xi)
cls.weights_200 = np.linspace(1, 100, 200)
cls.weights_100 = np.linspace(1, 100, 100)
def test_check_is_fit_ok_with_custom_bandwidth(self):
def custom_bw(X, kern):
return np.std(X) * len(X)
kde = self.kde.fit(bw=custom_bw)
assert isinstance(kde, KDE)
def test_check_is_fit_ok_with_standard_custom_bandwidth(self):
# Note, we are passing the function, not the string - this is intended
kde = self.kde.fit(bw=bandwidths.bw_silverman)
s1 = kde.support.copy()
d1 = kde.density.copy()
kde = self.kde.fit(bw='silverman')
npt.assert_almost_equal(s1, kde.support, self.decimal_density)
npt.assert_almost_equal(d1, kde.density, self.decimal_density)
@pytest.mark.parametrize("fft", [True, False])
def test_check_is_fit_ok_with_float_bandwidth(self, fft):
# Note, we are passing the function, not the string - this is intended
kde = self.kde.fit(bw=bandwidths.bw_silverman, fft=fft)
s1 = kde.support.copy()
d1 = kde.density.copy()
kde = self.kde.fit(bw=kde.bw, fft=fft)
npt.assert_almost_equal(s1, kde.support, self.decimal_density)
npt.assert_almost_equal(d1, kde.density, self.decimal_density)
|
{
"content_hash": "04c49f89ca379b0effe7c4fde690a6e0",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 83,
"avg_line_length": 32.88974358974359,
"alnum_prop": 0.6214235596788025,
"repo_name": "jseabold/statsmodels",
"id": "fc1e31676640c4df1e27e3bc2f5f03f2a04fefd4",
"size": "12827",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statsmodels/nonparametric/tests/test_kde.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "Batchfile",
"bytes": "351"
},
{
"name": "C",
"bytes": "12088"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "Matlab",
"bytes": "1383"
},
{
"name": "Python",
"bytes": "8609450"
},
{
"name": "R",
"bytes": "34228"
},
{
"name": "Stata",
"bytes": "41179"
}
],
"symlink_target": ""
}
|
class Target:
def __init__(self,tName,tEffectiveness,tRat):
self.theName = tName
self.theEffectiveness = tEffectiveness
self.theRationale = tRat
def name(self): return self.theName
def effectiveness(self): return self.theEffectiveness
def rationale(self): return self.theRationale
def __getitem__(self,idx):
if (idx == 0):
return self.theName
elif (idx == 1):
return self.theEffectiveness
else:
return self.theRationale
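# Usage sketch (illustrative values): a Target is readable both through its
# accessors and by integer index thanks to __getitem__:
#
#   t = Target('webserver', 'High', 'primary attack surface')
#   t.name() == t[0]            # True
#   t.effectiveness() == t[1]   # True
#   t.rationale() == t[2]       # True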
|
{
"content_hash": "d09668de5110a3cdcfa370b2fb751a8b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 28.058823529411764,
"alnum_prop": 0.6855345911949685,
"repo_name": "RobinQuetin/CAIRIS-web",
"id": "463e6c81e5dab953778dde528cf1949b6ecdef4a",
"size": "1276",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cairis/cairis/Target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "11265"
},
{
"name": "Mako",
"bytes": "13226"
},
{
"name": "Python",
"bytes": "3313365"
},
{
"name": "Shell",
"bytes": "19461"
},
{
"name": "XSLT",
"bytes": "35522"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import transaction
from django.http import Http404, HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views import static
from django.contrib.auth.decorators import login_required
from .models import Page, File
from .forms import PageForm, FileUploadForm
def can_edit(page, user):
if page and page.is_community:
return True
else:
return user.has_perm("cms.change_page")
def can_upload(user):
    return user.is_staff or user.is_superuser
def page(request, path):
try:
page = Page.published.get(path=path)
except Page.DoesNotExist:
page = None
editable = can_edit(page, request.user)
if page is None:
if editable:
return redirect("cms_page_edit", path=path)
else:
raise Http404
return render(request, "cms/page_detail.html", {
"page": page,
"editable": editable,
})
@login_required
def page_edit(request, path):
try:
page = Page.published.get(path=path)
except Page.DoesNotExist:
page = None
if not can_edit(page, request.user):
raise Http404
if request.method == "POST":
form = PageForm(request.POST, instance=page)
if form.is_valid():
page = form.save(commit=False)
page.path = path
page.save()
return redirect(page)
else:
            print(form.errors)
else:
form = PageForm(instance=page, initial={"path": path})
return render(request, "cms/page_edit.html", {
"path": path,
"form": form
})
def file_index(request):
if not can_upload(request.user):
raise Http404
ctx = {
"files": File.objects.all(),
}
return render(request, "cms/file_index.html", ctx)
def file_create(request):
if not can_upload(request.user):
raise Http404
if request.method == "POST":
form = FileUploadForm(request.POST, request.FILES)
if form.is_valid():
with transaction.commit_on_success():
kwargs = {
"file": form.cleaned_data["file"],
}
File.objects.create(**kwargs)
return redirect("file_index")
else:
form = FileUploadForm()
ctx = {
"form": form,
}
return render(request, "cms/file_create.html", ctx)
def file_download(request, pk, *args):
file = get_object_or_404(File, pk=pk)
if getattr(settings, "USE_X_ACCEL_REDIRECT", False):
response = HttpResponse()
response["X-Accel-Redirect"] = file.file.url
# delete content-type to allow Gondor to determine the filetype and
# we definitely don't want Django's default :-)
del response["content-type"]
else:
response = static.serve(request, file.file.name, document_root=settings.MEDIA_ROOT)
return response
def file_delete(request, pk):
if not can_upload(request.user):
raise Http404
file = get_object_or_404(File, pk=pk)
if request.method == "POST":
file.delete()
# @@@ message
return redirect("file_index")
|
{
"content_hash": "494e3f0d8439e36c15b2702efc661df4",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 91,
"avg_line_length": 24.96923076923077,
"alnum_prop": 0.6050523721503389,
"repo_name": "euroscipy/symposion",
"id": "2f58381a077c219122d207515cd6c4d8e113bc5a",
"size": "3246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "symposion/cms/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13131"
},
{
"name": "JavaScript",
"bytes": "79954"
},
{
"name": "Python",
"bytes": "159974"
}
],
"symlink_target": ""
}
|
import sys
PY2 = sys.version_info[0] == 2
if PY2:
from urllib import quote, unquote, urlencode
from urllib2 import URLError, urlopen
from urlparse import ParseResult, urlparse, parse_qsl
bytes_ = str
str_ = unicode
cmp = cmp
else:
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import ParseResult, urlparse, parse_qsl, quote, unquote, urlencode
bytes_ = bytes
str_ = str
def cmp(a, b):
return (a > b) - (a < b)
try:
from collections.abc import Hashable
except ImportError:
# deprecated since python 3.3
from collections import Hashable
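# Minimal sanity check of the shims above; this block runs unchanged on both
# Python 2 and Python 3:
if __name__ == '__main__':
    parts = urlparse('http://example.com/path?a=1&b=2')
    assert parse_qsl(parts.query) == [('a', '1'), ('b', '2')]
    assert cmp(1, 2) == -1 and cmp(2, 1) == 1 and cmp(1, 1) == 0
    assert quote('a b') == 'a%20b' and unquote('a%20b') == 'a b'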
|
{
"content_hash": "2cde2e9af632da01db7e52e1dfe2c906",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 88,
"avg_line_length": 22.448275862068964,
"alnum_prop": 0.6774193548387096,
"repo_name": "elliterate/capybara.py",
"id": "a70948cbe4a29a05f4a573a0843135b81b50468b",
"size": "651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capybara/compat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38254"
},
{
"name": "JavaScript",
"bytes": "5225"
},
{
"name": "Python",
"bytes": "573480"
}
],
"symlink_target": ""
}
|
import logging
from typing import List, Optional
from sqlalchemy.exc import SQLAlchemyError
from superset.dao.base import BaseDAO
from superset.dao.exceptions import DAODeleteFailedError
from superset.extensions import db
from superset.models.sql_lab import SavedQuery
from superset.queries.saved_queries.filters import SavedQueryFilter
logger = logging.getLogger(__name__)
class SavedQueryDAO(BaseDAO):
model_cls = SavedQuery
base_filter = SavedQueryFilter
@staticmethod
def bulk_delete(models: Optional[List[SavedQuery]], commit: bool = True) -> None:
item_ids = [model.id for model in models] if models else []
try:
db.session.query(SavedQuery).filter(SavedQuery.id.in_(item_ids)).delete(
synchronize_session="fetch"
)
if commit:
db.session.commit()
except SQLAlchemyError:
if commit:
db.session.rollback()
raise DAODeleteFailedError()
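# Usage sketch (illustrative; assumes an initialised Superset application
# context so that db.session is bound):
#
#   doomed = db.session.query(SavedQuery).filter(
#       SavedQuery.id.in_([1, 2, 3])).all()
#   SavedQueryDAO.bulk_delete(doomed)  # deletes the rows and commits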
|
{
"content_hash": "259d2a12d84241f6fc78d746687ef652",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 32.03225806451613,
"alnum_prop": 0.6817724068479355,
"repo_name": "airbnb/superset",
"id": "cd20fe60de583fb2aef461fa44e84b1464c2da6f",
"size": "1778",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "superset/queries/saved_queries/dao.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62654"
},
{
"name": "HTML",
"bytes": "99610"
},
{
"name": "JavaScript",
"bytes": "585557"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "715013"
},
{
"name": "Shell",
"bytes": "1033"
}
],
"symlink_target": ""
}
|
import sys
import os.path
import unittest
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))
from gnomeread import AboutGnomereadDialog
class TestExample(unittest.TestCase):
def setUp(self):
self.AboutGnomereadDialog_members = [
'AboutDialog', 'AboutGnomereadDialog', 'gettext', 'logger', 'logging']
def test_AboutGnomereadDialog_members(self):
all_members = dir(AboutGnomereadDialog)
public_members = [x for x in all_members if not x.startswith('_')]
public_members.sort()
self.assertEqual(self.AboutGnomereadDialog_members, public_members)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "2ca7678d483c91b3985707089fd1c1b3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 83,
"avg_line_length": 34.3,
"alnum_prop": 0.685131195335277,
"repo_name": "vrutkovs/LightRead",
"id": "7cf97fbb4521291ecd64d7e382e89ee9470c9a32",
"size": "2528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "339817"
},
{
"name": "Python",
"bytes": "51097"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from returntoclinic.views import ReturnToClinicView
urlpatterns = [
url(r'^$', ReturnToClinicView.as_view()),
url(r'^([0-9]+)/$', ReturnToClinicView.as_view()),
]
|
{
"content_hash": "ea008dffaf10abc5369a87ee8454330f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 54,
"avg_line_length": 29.285714285714285,
"alnum_prop": 0.697560975609756,
"repo_name": "slogan621/tscharts",
"id": "89597c2c28143a26e84573ecb7b5a07e03263f18",
"size": "816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "returntoclinic/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "763"
},
{
"name": "Python",
"bytes": "1690774"
},
{
"name": "Shell",
"bytes": "2706"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from typing import Union
from ..base import ParametrizedValue
class Logger(ParametrizedValue):
args_joiner = ','
def __init__(self, alias, *args):
self.alias = alias or ''
super().__init__(*args)
class LoggerFile(Logger):
"""Allows logging into files."""
name = 'file'
plugin = 'logfile'
def __init__(self, filepath: Union[str, Path], *, alias=None):
"""
:param str filepath: File path.
:param str alias: Logger alias.
"""
super().__init__(alias, str(filepath))
class LoggerFileDescriptor(Logger):
"""Allows logging using file descriptor."""
name = 'fd'
plugin = 'logfile'
def __init__(self, fd: int, *, alias=None):
"""
        :param int fd: File descriptor.
:param str alias: Logger alias.
"""
super().__init__(alias, fd)
class LoggerStdIO(Logger):
"""Allows logging stdio."""
name = 'stdio'
plugin = 'logfile'
def __init__(self, *, alias=None):
"""
:param str alias: Logger alias.
"""
super().__init__(alias)
class LoggerSocket(Logger):
"""Allows logging into UNIX and UDP sockets."""
name = 'socket'
plugin = 'logsocket'
def __init__(self, addr_or_path: Union[str, Path], *, alias=None):
"""
:param str addr_or_path: Remote address or filepath.
Examples:
* /tmp/uwsgi.logsock
* 192.168.173.19:5050
:param str alias: Logger alias.
"""
super().__init__(alias, str(addr_or_path))
class LoggerSyslog(Logger):
"""Allows logging into Unix standard syslog."""
name = 'syslog'
plugin = 'syslog'
def __init__(self, *, app_name=None, facility=None, alias=None):
"""
:param str app_name:
:param str facility:
* https://en.wikipedia.org/wiki/Syslog#Facility
:param str alias: Logger alias.
"""
super().__init__(alias, app_name, facility)
class LoggerRsyslog(LoggerSyslog):
"""Allows logging into Unix standard syslog or a remote syslog."""
name = 'rsyslog'
plugin = 'rsyslog'
def __init__(self, *, app_name=None, host=None, facility=None, split=None, packet_size=None, alias=None):
"""
:param str app_name:
:param str host: Address (host and port) or UNIX socket path.
:param str facility:
* https://en.wikipedia.org/wiki/Syslog#Facility
:param bool split: Split big messages into multiple chunks if they are bigger
than allowed packet size. Default: ``False``.
:param int packet_size: Set maximum packet size for syslog messages. Default: 1024.
.. warning:: using packets > 1024 breaks RFC 3164 (#4.1)
:param str alias: Logger alias.
"""
super().__init__(app_name=app_name, facility=facility, alias=alias)
self.args.insert(0, host)
self._set('rsyslog-packet-size', packet_size)
self._set('rsyslog-split-messages', split, cast=bool)
class LoggerRedis(Logger):
"""Allows logging into Redis.
.. note:: Consider using ``dedicate_thread`` param.
"""
name = 'redislog'
plugin = 'redislog'
def __init__(self, *, host=None, command=None, prefix=None, alias=None):
"""
:param str host: Default: 127.0.0.1:6379
:param str command: Command to be used. Default: publish uwsgi
Examples:
* publish foobar
* rpush foo
:param str prefix: Default: <empty>
:param str alias: Logger alias.
"""
super().__init__(alias, host, command, prefix)
class LoggerMongo(Logger):
"""Allows logging into Mongo DB.
.. note:: Consider using ``dedicate_thread`` param.
"""
name = 'mongodblog'
plugin = 'mongodblog'
def __init__(self, *, host=None, collection=None, node=None, alias=None):
"""
:param str host: Default: 127.0.0.1:27017
        :param str collection: Collection to be used. Default: uwsgi.logs
        :param str node: An identification string for the instance
            sending logs. Default: <server hostname>
:param str alias: Logger alias.
"""
super().__init__(alias, host, collection, node)
class LoggerZeroMq(Logger):
"""Allows logging into ZeroMQ sockets."""
name = 'zeromq'
plugin = 'logzmq'
def __init__(self, connection_str, *, alias=None):
"""
:param str connection_str:
Examples:
* tcp://192.168.173.18:9191
:param str alias: Logger alias.
"""
super().__init__(alias, connection_str)
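# Usage sketch (illustrative values): constructing a couple of loggers. How
# the resulting option strings are attached to a configuration is handled by
# uwsgiconf's Section API, outside this module:
#
#   file_log = LoggerFile('/var/log/uwsgi/app.log', alias='applog')
#   remote = LoggerRsyslog(app_name='myapp', host='192.168.173.18:514',
#                          split=True)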
|
{
"content_hash": "66aed3f2ef8e7ec0d771d4f8fb4d3bc1",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 109,
"avg_line_length": 22.410377358490567,
"alnum_prop": 0.5704062302673122,
"repo_name": "idlesign/uwsgiconf",
"id": "207d330b029ec5116621f5606cef9a80872b5bbc",
"size": "4751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uwsgiconf/options/logging_loggers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "5422"
},
{
"name": "Python",
"bytes": "522381"
}
],
"symlink_target": ""
}
|
import logging
try:
import Queue as queue
except ImportError:
import queue
from .templates import AttributeDict
logger = logging.getLogger('itchat')
class Queue(queue.Queue):
def put(self, message):
queue.Queue.put(self, Message(message))
class Message(AttributeDict):
def download(self, fileName):
if hasattr(self.text, '__call__'):
return self.text(fileName)
else:
return b''
def __getitem__(self, value):
if value in ('isAdmin', 'isAt'):
v = value[0].upper() + value[1:] # ''[1:] == ''
            logger.debug('%s is deprecated since 1.3.0, use %s instead.' % (value, v))
value = v
return super(Message, self).__getitem__(value)
def __str__(self):
return '{%s}' % ', '.join(
['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__.split('.')[-1],
self.__str__())
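# Usage sketch (illustrative): items put into this Queue come back wrapped as
# Message objects, which are dicts with attribute-style access:
#
#   q = Queue()
#   q.put({'Text': 'hello', 'Type': 'Text'})
#   msg = q.get()  # a Message instance
#   msg['Text']    # 'hello'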
|
{
"content_hash": "49db1dbec8f76788e1d9d828fa49e350",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 80,
"avg_line_length": 31.8125,
"alnum_prop": 0.5176817288801572,
"repo_name": "tongxindao/shiyanlou",
"id": "db814b906f3b0f9940ca13c0213f3b10b5e3ca8a",
"size": "1018",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "shiyanlou_cs885/demo/wechat_robot/itchat/storage/messagequeue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "265212"
},
{
"name": "C++",
"bytes": "686"
},
{
"name": "CSS",
"bytes": "261341"
},
{
"name": "HTML",
"bytes": "945024"
},
{
"name": "Java",
"bytes": "115"
},
{
"name": "JavaScript",
"bytes": "475129"
},
{
"name": "Makefile",
"bytes": "750"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "529824"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
}
|
class Solution:
# @param root, a tree node
# @return a list of integers
def inorderTraversal(self, root):
ret = []
if root:
self.traversal(root, ret)
return ret
def traversal(self, root, ret):
if root is None:
return
self.traversal(root.left, ret)
ret.append(root.val)
self.traversal(root.right, ret)
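# Usage sketch with a minimal stand-in node class (LeetCode normally supplies
# TreeNode; this one exists only so the example runs standalone):
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = TreeNode(2, TreeNode(1), TreeNode(3))
    assert Solution().inorderTraversal(root) == [1, 2, 3]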
|
{
"content_hash": "de2de8d946978674d149972c26448f16",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 26.533333333333335,
"alnum_prop": 0.5603015075376885,
"repo_name": "Fity/2code",
"id": "5025a6a5afd1f8353ce4a9e56a1ff1fb415af299",
"size": "585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leetcode/BinaryTreeInorderTraversal/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6950"
},
{
"name": "CSS",
"bytes": "919"
},
{
"name": "HTML",
"bytes": "1769"
},
{
"name": "Java",
"bytes": "20811"
},
{
"name": "JavaScript",
"bytes": "384"
},
{
"name": "Makefile",
"bytes": "237"
},
{
"name": "Python",
"bytes": "56988"
},
{
"name": "Shell",
"bytes": "5893"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/food/foraged/shared_edible_jar_berries.iff"
result.attribute_template_id = 5
result.stfName("food_name","edible_berries")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "6d57febdae2509a8b47c4df6f4f5c18f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 24.076923076923077,
"alnum_prop": 0.7028753993610224,
"repo_name": "obi-two/Rebelion",
"id": "ca3b2b1954c6bf6bfe90303c9e3692242eb05054",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/food/foraged/shared_edible_jar_berries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from haystack import indexes
from test_haystack.discovery.models import Foo, Bar
class FooIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, model_attr='body')
def get_model(self):
return Foo
class BarIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
def get_model(self):
return Bar
|
{
"content_hash": "fdef1998039c4fecc88c5d23e7e2e6d7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 24.625,
"alnum_prop": 0.7284263959390863,
"repo_name": "kybi/django-haystack",
"id": "498884b20a81ecfc5e48eecc42709d75848f49ea",
"size": "394",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_haystack/discovery/search_indexes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1325"
},
{
"name": "Makefile",
"bytes": "2492"
},
{
"name": "Python",
"bytes": "718755"
},
{
"name": "Shell",
"bytes": "1305"
}
],
"symlink_target": ""
}
|
"""
A library for working with BackendInfoExternal records, describing backends
configured for an application. Supports loading the records from backend.yaml.
"""
# WARNING: This file is externally viewable by our users. All comments from
# this file will be stripped. The docstrings will NOT. Do not put sensitive
# information in docstrings. If you must communicate internal information in
# this source file, please place them in comments only.
import os
import yaml
from yaml import representer
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
else:
from yaml_conversion.lib.google.appengine.api import validation
from yaml_conversion.lib.google.appengine.api import yaml_builder
from yaml_conversion.lib.google.appengine.api import yaml_listener
from yaml_conversion.lib.google.appengine.api import yaml_object
NAME_REGEX = r'(?!-)[a-z\d\-]{1,100}'
FILE_REGEX = r'(?!\^).*(?!\$).{1,256}'
CLASS_REGEX = r'^[bB](1|2|4|8|4_1G)$'
OPTIONS_REGEX = r'^[a-z, ]*$'
STATE_REGEX = r'^(START|STOP|DISABLED)$'
# A list of BackendEntries.
BACKENDS = 'backends'
# Properties of a BackendEntry.
NAME = 'name'
CLASS = 'class'
INSTANCES = 'instances'
OPTIONS = 'options'
PUBLIC = 'public'
DYNAMIC = 'dynamic'
FAILFAST = 'failfast'
MAX_CONCURRENT_REQUESTS = 'max_concurrent_requests'
START = 'start'
VALID_OPTIONS = frozenset([PUBLIC, DYNAMIC, FAILFAST])
# Read-only.
STATE = 'state'
class BadConfig(Exception):
"""An invalid configuration was provided."""
class ListWithoutSort(list):
def sort(self):
pass
class SortedDict(dict):
def __init__(self, keys, data):
super(SortedDict, self).__init__()
self.keys = keys
self.update(data)
def items(self):
result = ListWithoutSort()
for key in self.keys:
      if self.get(key) is not None:
result.append((key, self.get(key)))
return result
representer.SafeRepresenter.add_representer(
SortedDict, representer.SafeRepresenter.represent_dict)
class BackendEntry(validation.Validated):
"""A backend entry describes a single backend."""
ATTRIBUTES = {
NAME: NAME_REGEX,
CLASS: validation.Optional(CLASS_REGEX),
INSTANCES: validation.Optional(validation.TYPE_INT),
MAX_CONCURRENT_REQUESTS: validation.Optional(validation.TYPE_INT),
      # TODO(user): Consider using a YAML list for this.
OPTIONS: validation.Optional(OPTIONS_REGEX),
PUBLIC: validation.Optional(validation.TYPE_BOOL),
DYNAMIC: validation.Optional(validation.TYPE_BOOL),
FAILFAST: validation.Optional(validation.TYPE_BOOL),
START: validation.Optional(FILE_REGEX),
# Read-only property describing whether the backend is started or stopped.
STATE: validation.Optional(STATE_REGEX),
}
def __init__(self, *args, **kwargs):
super(BackendEntry, self).__init__(*args, **kwargs)
self.Init()
def Init(self):
if self.public:
raise BadConfig("Illegal field: 'public'")
if self.dynamic:
raise BadConfig("Illegal field: 'dynamic'")
if self.failfast:
raise BadConfig("Illegal field: 'failfast'")
self.ParseOptions()
return self
def set_class(self, Class):
"""Setter for 'class', since an attribute reference is an error."""
self.Set(CLASS, Class)
def get_class(self):
"""Accessor for 'class', since an attribute reference is an error."""
return self.Get(CLASS)
def ToDict(self):
"""Returns a sorted dictionary representing the backend entry."""
self.ParseOptions().WriteOptions()
result = super(BackendEntry, self).ToDict()
return SortedDict([NAME,
CLASS,
INSTANCES,
START,
OPTIONS,
MAX_CONCURRENT_REQUESTS,
STATE],
result)
def ParseOptions(self):
"""Parses the 'options' field and sets appropriate fields."""
if self.options:
options = [option.strip() for option in self.options.split(',')]
else:
options = []
for option in options:
if option not in VALID_OPTIONS:
        raise BadConfig('Unrecognized option: %s' % option)
self.public = PUBLIC in options
self.dynamic = DYNAMIC in options
self.failfast = FAILFAST in options
return self
def WriteOptions(self):
"""Writes the 'options' field based on other settings."""
options = []
if self.public:
options.append('public')
if self.dynamic:
options.append('dynamic')
if self.failfast:
options.append('failfast')
if options:
self.options = ', '.join(options)
else:
self.options = None
return self
def LoadBackendEntry(backend_entry):
"""Parses a BackendEntry object from a string.
Args:
backend_entry: a backend entry, as a string
Returns:
A BackendEntry object.
"""
builder = yaml_object.ObjectBuilder(BackendEntry)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(backend_entry)
entries = handler.GetResults()
if len(entries) < 1:
raise BadConfig('Empty backend configuration.')
if len(entries) > 1:
raise BadConfig('Multiple backend entries were found in configuration.')
return entries[0].Init()
class BackendInfoExternal(validation.Validated):
"""BackendInfoExternal describes all backend entries for an application."""
ATTRIBUTES = {
BACKENDS: validation.Optional(validation.Repeated(BackendEntry)),
}
def LoadBackendInfo(backend_info, open_fn=None):
"""Parses a BackendInfoExternal object from a string.
Args:
backend_info: a backends stanza (list of backends) as a string
open_fn: Function for opening files. Unused.
Returns:
A BackendInfoExternal object.
"""
builder = yaml_object.ObjectBuilder(BackendInfoExternal)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(backend_info)
backend_info = handler.GetResults()
if len(backend_info) < 1:
return BackendInfoExternal(backends=[])
if len(backend_info) > 1:
raise BadConfig("Only one 'backends' clause is allowed.")
info = backend_info[0]
if not info.backends: # The 'backends' clause was empty.
return BackendInfoExternal(backends=[])
for backend in info.backends:
backend.Init()
return info
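# Usage sketch (illustrative stanza): parsing a minimal backends clause.
#
#   info = LoadBackendInfo('backends:\n'
#                          '- name: crawler\n'
#                          '  instances: 2\n')
#   info.backends[0].name       # 'crawler'
#   info.backends[0].instances  # 2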
|
{
"content_hash": "d6a2da1148757206e144f20a5c802da5",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 80,
"avg_line_length": 29.91780821917808,
"alnum_prop": 0.6874236874236874,
"repo_name": "GoogleCloudPlatform/appengine-config-transformer",
"id": "1e8cfea20a0e73ee11c44fdde4968bd70cefe879",
"size": "7224",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yaml_conversion/lib/google/appengine/api/backendinfo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "215481"
}
],
"symlink_target": ""
}
|
"""
STK500v2 protocol implementation for programming AVR chips.
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import os, struct, sys, time
from serial import Serial
from serial import SerialException
from serial import SerialTimeoutException
import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
def __init__(self):
self.serial = None
self.seq = 1
self.lastAddr = -1
self.progressCallback = None
def connect(self, port = '/dev/ttyMFD1', speed = 115200):
if self.serial is not None:
self.close()
try:
self.serial = Serial(str(port), speed, timeout=1, write_timeout=10000)
except SerialException as e:
raise ispBase.IspError("Failed to open serial port")
		except Exception:
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
self.seq = 1
#Reset the controller
import mraa
ResetPin = mraa.Gpio(36)
ResetPin.dir(mraa.DIR_OUT)
ResetPin.write(0)
time.sleep(0.1)
ResetPin.write(1)
time.sleep(0.1)
self.serial.flushInput()
self.serial.flushOutput()
if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
self.close()
raise ispBase.IspError("Failed to enter programming mode")
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
if self.sendMessage([0xEE])[1] == 0x00:
self._has_checksum = True
else:
self._has_checksum = False
self.serial.timeout = 5
def close(self):
if self.serial is not None:
self.serial.close()
self.serial = None
	# leaveISP does not close the serial port; it only resets the device and returns
	# the serial port object after disconnecting it from the programming interface.
	# This allows you to reuse the serial port without opening it again.
def leaveISP(self):
if self.serial is not None:
if self.sendMessage([0x11]) != [0x11, 0x00]:
raise ispBase.IspError("Failed to leave programming mode")
ret = self.serial
self.serial = None
return ret
return None
def isConnected(self):
return self.serial is not None
def hasChecksumFunction(self):
return self._has_checksum
def sendISP(self, data):
recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
return recv[2:6]
def writeFlash(self, flashData):
		# Set load address to 0; if we have more than 64k of flash we need to enable the address extension
pageSize = self.chip['pageSize'] * 2
flashSize = pageSize * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + pageSize - 1) / pageSize
# step = loadCount/20
for i in xrange(0, loadCount):
recv = self.sendMessage([0x13, pageSize >> 8, pageSize & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flashData[(i * pageSize):(i * pageSize + pageSize)])
if self.progressCallback is not None:
if self._has_checksum:
self.progressCallback(i + 1, loadCount)
else:
self.progressCallback(i + 1, loadCount*2)
# if i % step ==0:
# print '#'
def verifyFlash(self, flashData):
if self._has_checksum:
self.sendMessage([0x06, 0x00, (len(flashData) >> 17) & 0xFF, (len(flashData) >> 9) & 0xFF, (len(flashData) >> 1) & 0xFF])
res = self.sendMessage([0xEE])
checksum_recv = res[2] | (res[3] << 8)
checksum = 0
for d in flashData:
checksum += d
checksum &= 0xFFFF
if hex(checksum) != hex(checksum_recv):
raise ispBase.IspError('Verify checksum mismatch: 0x%x != 0x%x' % (checksum & 0xFFFF, checksum_recv))
else:
			# Set load address to 0; if we have more than 64k of flash we need to enable the address extension
flashSize = self.chip['pageSize'] * 2 * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + 0xFF) / 0x100
step = loadCount/20
for i in xrange(0, loadCount):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progressCallback is not None:
self.progressCallback(loadCount + i + 1, loadCount*2)
for j in xrange(0, 0x100):
if i * 0x100 + j < len(flashData) and flashData[i * 0x100 + j] != recv[j]:
raise ispBase.IspError('Verify error at: 0x%x' % (i * 0x100 + j))
				if step and i % step == 0:
					print '#'
def sendMessage(self, data):
# print "*",
message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
for c in data:
message += struct.pack(">B", c)
checksum = 0
for c in message:
checksum ^= ord(c)
message += struct.pack(">B", checksum)
try:
self.serial.write(message)
self.serial.flush()
except SerialTimeoutException:
raise ispBase.IspError('Serial send timeout')
self.seq = (self.seq + 1) & 0xFF
return self.recvMessage()
def recvMessage(self):
state = 'Start'
checksum = 0
while True:
s = self.serial.read()
if len(s) < 1:
raise ispBase.IspError("Timeout")
b = struct.unpack(">B", s)[0]
checksum ^= b
#print(hex(b))
if state == 'Start':
if b == 0x1B:
state = 'GetSeq'
checksum = 0x1B
elif state == 'GetSeq':
state = 'MsgSize1'
elif state == 'MsgSize1':
msgSize = b << 8
state = 'MsgSize2'
elif state == 'MsgSize2':
msgSize |= b
state = 'Token'
elif state == 'Token':
if b != 0x0E:
state = 'Start'
else:
state = 'Data'
data = []
elif state == 'Data':
data.append(b)
if len(data) == msgSize:
state = 'Checksum'
elif state == 'Checksum':
if checksum != 0:
state = 'Start'
else:
return data
def runProgrammer(port, filename):
""" Run an STK500v2 program on serial port 'port' and write 'filename' into flash. """
programmer = Stk500v2()
programmer.connect(port = port)
programmer.programChip(intelHex.readHex(filename))
programmer.close()
def main():
""" Entry point to call the stk500v2 programmer from the commandline. """
import threading
port = '/dev/ttyMFD1'
threading.Thread(target=runProgrammer, args=(port,sys.argv[1])).start()
#threading.Thread(target=runProgrammer, args=(port,"./MarlinWitbox.hex")).start()
# programmer = Stk500v2()
# programmer.connect(port = sys.argv[1])
# programmer.programChip(intelHex.readHex(sys.argv[2]))
# sys.exit(1)
if __name__ == '__main__':
main()
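# Wire-format note for sendMessage()/recvMessage(): a frame consists of
#   0x1B <seq> <size_hi> <size_lo> 0x0E <data...> <xor checksum of all bytes>
# For example, data [0x11] (leave programming mode) with seq == 1 goes out as
#   1B 01 00 01 0E 11 04    (0x04 == 0x1B ^ 0x01 ^ 0x00 ^ 0x01 ^ 0x0E ^ 0x11)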
|
{
"content_hash": "6cab2ff9d32711d0a7d063e3bdbb027c",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 162,
"avg_line_length": 31.326923076923077,
"alnum_prop": 0.6629834254143646,
"repo_name": "0x000000FF/yocto-edison-meta",
"id": "5f8969cbc83754e7c3bd8b6910dc4f66b7c361ee",
"size": "6538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "meta-intel-edison-distro/recipes-mostfun/mostfun-pro/files/avr_isp/stk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "21819"
},
{
"name": "BitBake",
"bytes": "103064"
},
{
"name": "C",
"bytes": "9879649"
},
{
"name": "C++",
"bytes": "25403"
},
{
"name": "CMake",
"bytes": "2014"
},
{
"name": "CSS",
"bytes": "7561"
},
{
"name": "Groff",
"bytes": "3358"
},
{
"name": "HTML",
"bytes": "44640"
},
{
"name": "JavaScript",
"bytes": "12566"
},
{
"name": "Limbo",
"bytes": "1306"
},
{
"name": "Makefile",
"bytes": "83529"
},
{
"name": "PHP",
"bytes": "18385"
},
{
"name": "Perl",
"bytes": "3164"
},
{
"name": "Python",
"bytes": "51079"
},
{
"name": "Shell",
"bytes": "78688"
},
{
"name": "SourcePawn",
"bytes": "10234"
}
],
"symlink_target": ""
}
|
BOT_NAME = 'datacollecter'
SPIDER_MODULES = ['datacollecter.spiders']
NEWSPIDER_MODULE = 'datacollecter.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'datacollecter (+http://www.yourdomain.com)'
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS=32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY=3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN=16
#CONCURRENT_REQUESTS_PER_IP=16
# Disable cookies (enabled by default)
COOKIES_ENABLED=True
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED=False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'datacollecter.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'datacollecter.middlewares.MyCustomDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'datacollecter.pipelines.DatacollecterPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
# NOTE: AutoThrottle will honour the standard settings for concurrency and delay
#AUTOTHROTTLE_ENABLED=True
# The initial download delay
#AUTOTHROTTLE_START_DELAY=5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY=60
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG=False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED=True
#HTTPCACHE_EXPIRATION_SECS=0
#HTTPCACHE_DIR='httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES=[]
#HTTPCACHE_STORAGE='scrapy.extensions.httpcache.FilesystemCacheStorage'
# Mongo DB Settings
MONGO_URI = "mongodb://localhost:27017"
MONGO_DB = "text_illustrator"
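# The pipeline registered in ITEM_PIPELINES above would typically pick these
# Mongo settings up via crawler.settings; a sketch (not the project's actual
# pipeline code):
#
#   class DatacollecterPipeline(object):
#       @classmethod
#       def from_crawler(cls, crawler):
#           return cls(crawler.settings.get('MONGO_URI'),
#                      crawler.settings.get('MONGO_DB'))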
|
{
"content_hash": "7f861085f27490ba2d7ff596dc4f2b1e",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 109,
"avg_line_length": 34.62820512820513,
"alnum_prop": 0.7804516845612736,
"repo_name": "Comp4710AprioriTextIllustrator/TextIllustrator",
"id": "1e6060f9c8bca618875ece72bd01cd656665694f",
"size": "3139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datacollecter/datacollecter/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48110"
},
{
"name": "TeX",
"bytes": "22978"
}
],
"symlink_target": ""
}
|
import pandas as pd
from bs4 import BeautifulSoup
import requests
# base URL for incomebyzipcode.com lookups
path = 'https://www.incomebyzipcode.com/'
# zip_code list
# zip_codes = pd.read_csv('geo_data_location.csv')
zip_codes = pd.read_csv('zip_info.csv')
cols = [c for c in zip_codes.columns if c.lower()[:4] != 'unna']
zip_codes = zip_codes[cols]
# Only needed on the very first run; comment these lines out afterwards
# zip_codes["Average_Household_Income"] = None
# zip_codes['Per_capita_Income'] = None
# zip_codes['High_Income_Households'] = None
# for zip in zip_codes.ZIP:
for zip in zip_codes.zipcode:
val_check = zip_codes[zip_codes.zipcode == zip]['Average_Household_Income'].reset_index(drop=True)
if pd.isnull(val_check[0]):
        state = zip_codes[zip_codes.zipcode == zip]['state'].reset_index(drop=True)[0]
        url = path + str(state) + '/' + str(zip)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
try:
div = soup.find_all('div', class_="data-block")[1]
avg_income = div.find_all('td', class_="hilite")[0]
div = soup.find_all('div', class_="data-block")[2]
per_capita = div.find_all('td', class_="hilite")[0]
div = soup.find_all('div', class_="data-block")[3]
high_income = div.find_all('td', class_="hilite")[0]
            zip_codes.loc[zip_codes.zipcode == zip, 'Average_Household_Income'] = avg_income.text
            zip_codes.loc[zip_codes.zipcode == zip, 'Per_capita_Income'] = per_capita.text
            zip_codes.loc[zip_codes.zipcode == zip, 'High_Income_Households'] = high_income.text
zip_codes.to_csv("zip_info.csv")
print(zip)
except IndexError:
            zip_codes.loc[zip_codes.zipcode == zip, 'Average_Household_Income'] = "No Income found for "+str(zip)
zip_codes.to_csv("zip_info.csv")
print("No Income found for "+str(zip))
# zip_codes.to_csv("zip_info.csv")
|
{
"content_hash": "86070358fe70687e1698da36cfd4317f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 112,
"avg_line_length": 33.186440677966104,
"alnum_prop": 0.6128702757916241,
"repo_name": "dgvisnadi/us_income_map_and_scrap",
"id": "4ba1c49606e5515c78bbed2ee772f86423821d19",
"size": "1978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "income_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "121989"
},
{
"name": "Jupyter Notebook",
"bytes": "177496"
},
{
"name": "Python",
"bytes": "1978"
}
],
"symlink_target": ""
}
|
import datetime
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
_testing as tm,
)
from pandas.tests.io.pytables.common import ensure_clean_store
pytestmark = pytest.mark.single_cpu
def test_store_datetime_fractional_secs(setup_path):
with ensure_clean_store(setup_path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store["a"] = series
assert store["a"].index[0] == dt
def test_tseries_indices_series(setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store["a"] = ser
result = store["a"]
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(setup_path):
with ensure_clean_store(setup_path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store["a"] = df
result = store["a"]
tm.assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index, obj="dataframe index")
|
{
"content_hash": "085440a8b90f89e285e6f93084f3c25e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 76,
"avg_line_length": 29.575757575757574,
"alnum_prop": 0.6219262295081968,
"repo_name": "datapythonista/pandas",
"id": "6625984961c116917aab7a15a91f062dc5b56832",
"size": "1952",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pandas/tests/io/pytables/test_time_series.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "355524"
},
{
"name": "CSS",
"bytes": "1662"
},
{
"name": "Cython",
"bytes": "1178139"
},
{
"name": "Dockerfile",
"bytes": "1933"
},
{
"name": "HTML",
"bytes": "456449"
},
{
"name": "Makefile",
"bytes": "505"
},
{
"name": "Python",
"bytes": "19048364"
},
{
"name": "Shell",
"bytes": "10511"
},
{
"name": "Smarty",
"bytes": "8486"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
}
|
"""Support for Broadlink switches."""
from __future__ import annotations
from abc import ABC, abstractmethod
import logging
from typing import Any
from broadlink.exceptions import BroadlinkException
import voluptuous as vol
from homeassistant.components.switch import (
PLATFORM_SCHEMA,
SwitchDeviceClass,
SwitchEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_SWITCHES,
CONF_TIMEOUT,
CONF_TYPE,
STATE_ON,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import BroadlinkDevice
from .const import DOMAIN
from .entity import BroadlinkEntity
from .helpers import data_packet, import_device, mac_address
_LOGGER = logging.getLogger(__name__)
CONF_SLOTS = "slots"
SWITCH_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_COMMAND_OFF): data_packet,
vol.Optional(CONF_COMMAND_ON): data_packet,
}
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_SLOTS),
cv.deprecated(CONF_TIMEOUT),
cv.deprecated(CONF_TYPE),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): mac_address,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SWITCHES, default=[]): vol.All(
cv.ensure_list,
[SWITCH_SCHEMA],
),
}
),
)
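# Illustrative only: a config dict equivalent to the YAML this schema accepts.
# The MAC address and base64 command packets below are hypothetical
# placeholders, not working codes.
# PLATFORM_SCHEMA(
#     {
#         "platform": "broadlink",
#         "mac": "34:ea:34:b4:3b:5a",
#         "switches": [
#             {
#                 "name": "TV",
#                 "command_on": "JgBGAJKT...",
#                 "command_off": "JgBGAJKT...",
#             }
#         ],
#     }
# )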
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Import the device and set up custom switches.
This is for backward compatibility.
Do not use this method.
"""
mac_addr = config[CONF_MAC]
host = config.get(CONF_HOST)
if switches := config.get(CONF_SWITCHES):
platform_data = hass.data[DOMAIN].platforms.get(Platform.SWITCH, {})
async_add_entities_config_entry: AddEntitiesCallback
device: BroadlinkDevice
async_add_entities_config_entry, device = platform_data.get(
mac_addr, (None, None)
)
if not async_add_entities_config_entry:
raise PlatformNotReady
async_add_entities_config_entry(
BroadlinkRMSwitch(device, config) for config in switches
)
else:
_LOGGER.warning(
"The switch platform is deprecated, except for custom IR/RF "
"switches. Please refer to the Broadlink documentation to "
"catch up"
)
if host:
import_device(hass, host)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Broadlink switch."""
device = hass.data[DOMAIN].devices[config_entry.entry_id]
switches: list[BroadlinkSwitch] = []
if device.api.type in {"RM4MINI", "RM4PRO", "RMMINI", "RMMINIB", "RMPRO"}:
platform_data = hass.data[DOMAIN].platforms.setdefault(Platform.SWITCH, {})
platform_data[device.api.mac] = async_add_entities, device
elif device.api.type == "SP1":
switches.append(BroadlinkSP1Switch(device))
elif device.api.type in {"SP2", "SP2S", "SP3", "SP3S", "SP4", "SP4B"}:
switches.append(BroadlinkSP2Switch(device))
elif device.api.type == "BG1":
switches.extend(BroadlinkBG1Slot(device, slot) for slot in range(1, 3))
elif device.api.type == "MP1":
switches.extend(BroadlinkMP1Slot(device, slot) for slot in range(1, 5))
async_add_entities(switches)
class BroadlinkSwitch(BroadlinkEntity, SwitchEntity, RestoreEntity, ABC):
"""Representation of a Broadlink switch."""
_attr_assumed_state = True
_attr_device_class = SwitchDeviceClass.SWITCH
def __init__(self, device, command_on, command_off):
"""Initialize the switch."""
super().__init__(device)
self._command_on = command_on
self._command_off = command_off
async def async_added_to_hass(self) -> None:
"""Call when the switch is added to hass."""
state = await self.async_get_last_state()
self._attr_is_on = state is not None and state.state == STATE_ON
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the switch."""
if await self._async_send_packet(self._command_on):
self._attr_is_on = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the switch."""
if await self._async_send_packet(self._command_off):
self._attr_is_on = False
self.async_write_ha_state()
@abstractmethod
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
class BroadlinkRMSwitch(BroadlinkSwitch):
"""Representation of a Broadlink RM switch."""
def __init__(self, device, config):
"""Initialize the switch."""
super().__init__(
device, config.get(CONF_COMMAND_ON), config.get(CONF_COMMAND_OFF)
)
self._attr_name = config[CONF_NAME]
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
device = self._device
if packet is None:
return True
try:
await device.async_request(device.api.send_data, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP1Switch(BroadlinkSwitch):
"""Representation of a Broadlink SP1 switch."""
_attr_has_entity_name = True
def __init__(self, device):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._attr_unique_id = self._device.unique_id
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
device = self._device
try:
await device.async_request(device.api.set_power, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP2Switch(BroadlinkSP1Switch):
"""Representation of a Broadlink SP2 switch."""
_attr_assumed_state = False
_attr_has_entity_name = True
def __init__(self, device, *args, **kwargs):
"""Initialize the switch."""
super().__init__(device, *args, **kwargs)
self._attr_is_on = self._coordinator.data["pwr"]
def _update_state(self, data):
"""Update the state of the entity."""
self._attr_is_on = data["pwr"]
class BroadlinkMP1Slot(BroadlinkSwitch):
"""Representation of a Broadlink MP1 slot."""
_attr_assumed_state = False
_attr_has_entity_name = True
def __init__(self, device, slot):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._slot = slot
self._attr_is_on = self._coordinator.data[f"s{slot}"]
self._attr_name = f"S{slot}"
self._attr_unique_id = f"{device.unique_id}-s{slot}"
def _update_state(self, data):
"""Update the state of the entity."""
self._attr_is_on = data[f"s{self._slot}"]
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
device = self._device
try:
await device.async_request(device.api.set_power, self._slot, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkBG1Slot(BroadlinkSwitch):
"""Representation of a Broadlink BG1 slot."""
_attr_assumed_state = False
_attr_has_entity_name = True
def __init__(self, device, slot):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._slot = slot
self._attr_is_on = self._coordinator.data[f"pwr{slot}"]
self._attr_name = f"S{slot}"
self._attr_device_class = SwitchDeviceClass.OUTLET
self._attr_unique_id = f"{device.unique_id}-s{slot}"
def _update_state(self, data):
"""Update the state of the entity."""
self._attr_is_on = data[f"pwr{self._slot}"]
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
device = self._device
state = {f"pwr{self._slot}": packet}
try:
await device.async_request(device.api.set_state, **state)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
|
{
"content_hash": "25b2d5baf0b6f0abbdfbfcfa6d4cb57f",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 83,
"avg_line_length": 31.040677966101693,
"alnum_prop": 0.6271704706781697,
"repo_name": "w1ll1am23/home-assistant",
"id": "009536a9adb1b5e2acc90ebe49efde750745f612",
"size": "9157",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/broadlink/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from django.db import models
class Blog(models.Model):
message = models.CharField(max_length=200)
update_time = models.DateField(auto_now=True)
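# Usage sketch (illustrative, not part of the original file): because of
# auto_now=True, update_time is refreshed to the current date on every save().
# entry = Blog.objects.create(message="hello")
# entry.message = "edited"
# entry.save()  # update_time is set to today's date again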
|
{
"content_hash": "679ca7a2ba705d8f4e254dbae7190395",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 49,
"avg_line_length": 30.4,
"alnum_prop": 0.75,
"repo_name": "mattjmorrison/django-appengine-playground",
"id": "ce01cd13b9bc29734b945ea78040a874fa4c9033",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "86962"
},
{
"name": "Python",
"bytes": "9322"
}
],
"symlink_target": ""
}
|
"""empty message
Revision ID: 3b4fa74ecad
Revises: 2afb44ff17b
Create Date: 2015-06-10 03:07:17.438247
"""
# revision identifiers, used by Alembic.
revision = '3b4fa74ecad'
down_revision = '2afb44ff17b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('author',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_author_name'), 'author', ['name'], unique=False)
op.create_table('publisher',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_publisher_name'), 'publisher', ['name'], unique=False)
op.create_table('tag',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('value', sa.String(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('podcast',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('language', sa.Enum('en', name='languages'), nullable=True),
sa.Column('subtitle', sa.String(length=80), nullable=True),
sa.Column('summary', sa.Text(), nullable=True),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('rights', sa.Text(), nullable=True),
sa.Column('link', sa.String(length=2048), nullable=True),
sa.Column('publisher_id', postgresql.UUID(), nullable=True),
sa.Column('author_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['publisher_id'], ['publisher.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('profile',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('username', sa.String(length=80), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('display_name', sa.String(length=80), nullable=True),
sa.Column('first_name', sa.String(length=80), nullable=True),
sa.Column('last_name', sa.String(length=80), nullable=True),
sa.Column('user_id', postgresql.UUID(), nullable=True),
sa.Column('author_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['author.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_profile_display_name'), 'profile', ['display_name'], unique=True)
op.create_index(op.f('ix_profile_email'), 'profile', ['email'], unique=True)
op.create_index(op.f('ix_profile_first_name'), 'profile', ['first_name'], unique=False)
op.create_index(op.f('ix_profile_last_name'), 'profile', ['last_name'], unique=False)
op.create_index(op.f('ix_profile_username'), 'profile', ['username'], unique=True)
op.create_table('image',
sa.Column('id', postgresql.UUID(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('title', sa.String(length=80), nullable=True),
sa.Column('href', sa.String(length=2048), nullable=True),
sa.Column('podcast_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['podcast_id'], ['podcast.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('tags',
sa.Column('tag_id', postgresql.UUID(), nullable=True),
sa.Column('podcast_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['podcast_id'], ['podcast.id'], ),
sa.ForeignKeyConstraint(['tag_id'], ['tag.id'], )
)
op.drop_table('base')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('base',
sa.Column('id', postgresql.UUID(), autoincrement=False, nullable=False),
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='base_pkey')
)
op.drop_table('tags')
op.drop_table('image')
op.drop_index(op.f('ix_profile_username'), table_name='profile')
op.drop_index(op.f('ix_profile_last_name'), table_name='profile')
op.drop_index(op.f('ix_profile_first_name'), table_name='profile')
op.drop_index(op.f('ix_profile_email'), table_name='profile')
op.drop_index(op.f('ix_profile_display_name'), table_name='profile')
op.drop_table('profile')
op.drop_table('podcast')
op.drop_table('user')
op.drop_table('tag')
op.drop_index(op.f('ix_publisher_name'), table_name='publisher')
op.drop_table('publisher')
op.drop_index(op.f('ix_author_name'), table_name='author')
op.drop_table('author')
### end Alembic commands ###
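# Typical invocation sketch (illustrative): from the directory containing
# alembic.ini, `alembic upgrade 3b4fa74ecad` applies this revision and
# `alembic downgrade 2afb44ff17b` reverts it.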
|
{
"content_hash": "edeb3290e7a6ae204a703645c5beacfe",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 94,
"avg_line_length": 45.645669291338585,
"alnum_prop": 0.6612040710712438,
"repo_name": "podhub-io/website",
"id": "609d93fa68dda97946468089a75a2f8a812e387e",
"size": "5797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/3b4fa74ecad_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "16147"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from indico.core.db import db
from indico.core.plugins import plugin_engine
from indico.modules.events.payment import PaymentPluginMixin
from indico.modules.events.payment.models.transactions import PaymentTransaction, TransactionStatus
from indico.modules.events.registration.notifications import notify_registration_state_update
remove_prefix_re = re.compile('^payment_')
def get_payment_plugins():
"""Returns a dict containing the available payment plugins."""
return {remove_prefix_re.sub('', p.name): p for p in plugin_engine.get_active_plugins().itervalues()
if isinstance(p, PaymentPluginMixin)}
def get_active_payment_plugins(event):
"""Returns a dict containing the active payment plugins of an event."""
return {name: plugin for name, plugin in get_payment_plugins().iteritems()
if plugin.event_settings.get(event, 'enabled')}
def register_transaction(registration, amount, currency, action, provider=None, data=None):
"""Creates a new transaction for a certain transaction action.
:param registration: the `Registration` associated to the transaction
:param amount: the (strictly positive) amount of the transaction
:param currency: the currency used for the transaction
:param action: the `TransactionAction` of the transaction
:param provider: the payment method name of the transaction,
or '_manual' if no payment method has been used
:param data: arbitrary JSON-serializable data specific to the
transaction's provider
"""
new_transaction = PaymentTransaction.create_next(registration=registration, action=action,
amount=amount, currency=currency,
provider=provider, data=data)
if new_transaction:
db.session.flush()
if new_transaction.status == TransactionStatus.successful:
registration.update_state(paid=True)
elif new_transaction.status == TransactionStatus.cancelled:
registration.update_state(paid=False)
notify_registration_state_update(registration)
return new_transaction
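# Hypothetical call sketch (TransactionAction is not imported in this module;
# the registration object, amount, provider and data below are assumptions):
# register_transaction(registration, amount=25.0, currency='EUR',
#                      action=TransactionAction.complete,
#                      provider='paypal', data={'txn_id': '...'})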
|
{
"content_hash": "e21ec660088a0c48c175c642d38b7ede",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 104,
"avg_line_length": 45.816326530612244,
"alnum_prop": 0.7006681514476615,
"repo_name": "mic4ael/indico",
"id": "65d7dcd76f0e19f95693ad1f68330cf15701eba4",
"size": "2459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/payment/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "553825"
},
{
"name": "HTML",
"bytes": "1375160"
},
{
"name": "JavaScript",
"bytes": "1852830"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4612709"
},
{
"name": "Shell",
"bytes": "2665"
},
{
"name": "TeX",
"bytes": "23292"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
}
|
from ctypes import *
from comtypes import IUnknown, COMMETHOD, GUID, HRESULT, dispid
_GUID = GUID
class tagCONNECTDATA(Structure):
_fields_ = [
('pUnk', POINTER(IUnknown)),
('dwCookie', c_ulong),
]
CONNECTDATA = tagCONNECTDATA
################################################################
class IConnectionPointContainer(IUnknown):
_iid_ = GUID('{B196B284-BAB4-101A-B69C-00AA00341D07}')
_idlflags_ = []
class IConnectionPoint(IUnknown):
_iid_ = GUID('{B196B286-BAB4-101A-B69C-00AA00341D07}')
_idlflags_ = []
class IEnumConnections(IUnknown):
_iid_ = GUID('{B196B287-BAB4-101A-B69C-00AA00341D07}')
_idlflags_ = []
def __iter__(self):
return self
def next(self):
cp, fetched = self.Next(1)
if fetched == 0:
raise StopIteration
        return cp
    __next__ = next  # Python 3 iterator protocol
class IEnumConnectionPoints(IUnknown):
_iid_ = GUID('{B196B285-BAB4-101A-B69C-00AA00341D07}')
_idlflags_ = []
def __iter__(self):
return self
def next(self):
cp, fetched = self.Next(1)
if fetched == 0:
raise StopIteration
        return cp
    __next__ = next  # Python 3 iterator protocol
################################################################
IConnectionPointContainer._methods_ = [
COMMETHOD([], HRESULT, 'EnumConnectionPoints',
( ['out'], POINTER(POINTER(IEnumConnectionPoints)), 'ppEnum' )),
COMMETHOD([], HRESULT, 'FindConnectionPoint',
( ['in'], POINTER(_GUID), 'riid' ),
( ['out'], POINTER(POINTER(IConnectionPoint)), 'ppCP' )),
]
IConnectionPoint._methods_ = [
COMMETHOD([], HRESULT, 'GetConnectionInterface',
( ['out'], POINTER(_GUID), 'pIID' )),
COMMETHOD([], HRESULT, 'GetConnectionPointContainer',
( ['out'], POINTER(POINTER(IConnectionPointContainer)), 'ppCPC' )),
COMMETHOD([], HRESULT, 'Advise',
( ['in'], POINTER(IUnknown), 'pUnkSink' ),
( ['out'], POINTER(c_ulong), 'pdwCookie' )),
COMMETHOD([], HRESULT, 'Unadvise',
( ['in'], c_ulong, 'dwCookie' )),
COMMETHOD([], HRESULT, 'EnumConnections',
( ['out'], POINTER(POINTER(IEnumConnections)), 'ppEnum' )),
]
IEnumConnections._methods_ = [
COMMETHOD([], HRESULT, 'Next',
( ['in'], c_ulong, 'cConnections' ),
( ['out'], POINTER(tagCONNECTDATA), 'rgcd' ),
( ['out'], POINTER(c_ulong), 'pcFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'cConnections' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumConnections)), 'ppEnum' )),
]
IEnumConnectionPoints._methods_ = [
COMMETHOD([], HRESULT, 'Next',
( ['in'], c_ulong, 'cConnections' ),
( ['out'], POINTER(POINTER(IConnectionPoint)), 'ppCP' ),
( ['out'], POINTER(c_ulong), 'pcFetched' )),
COMMETHOD([], HRESULT, 'Skip',
( ['in'], c_ulong, 'cConnections' )),
COMMETHOD([], HRESULT, 'Reset'),
COMMETHOD([], HRESULT, 'Clone',
( ['out'], POINTER(POINTER(IEnumConnectionPoints)), 'ppEnum' )),
]
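# Typical advise/unadvise sketch (illustrative; `source` is assumed to be a
# COM pointer whose server implements IConnectionPointContainer, `iid` the
# GUID of the outgoing interface, and `sink` an IUnknown implementing it):
# container = source.QueryInterface(IConnectionPointContainer)
# cp = container.FindConnectionPoint(byref(iid))
# cookie = cp.Advise(sink)
# ...event callbacks arrive on the sink...
# cp.Unadvise(cookie)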
|
{
"content_hash": "73973b5e624baa83b2977d284359fc4a",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 81,
"avg_line_length": 34.58510638297872,
"alnum_prop": 0.523838818824977,
"repo_name": "ezarko/cfn-init",
"id": "2108231350ed2f178ddbeaf114b3d7ebbc7741c3",
"size": "3251",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "comtypes/connectionpoints.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "539671"
}
],
"symlink_target": ""
}
|
'''
Defines objects for managing communication between Clique and an underlying GUI.
@author: Peter Parente <parente@cs.unc.edu>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
import pyAA
import Mixin
# add the StabilityMixin to the AccessibleObject class
modifiers = ['DoDefaultAction', 'Select', 'SetFocus', 'SendKeys']
init = ['__init__']
unsafe = ['ChildFromPath', 'FindOneChild', 'FindAllChildren', 'GetChildren']
mix = Mixin.StabilityMixin(pyAA.AccessibleObject)
mix.StirInto(include=['SendKeys'])
mix.WrapMethods(mix.CheckWrapper, include=unsafe)
mix.WrapMethods(mix.DisturbWrapper, include=modifiers)
mix.WrapMethods(mix.InitializeWrapper, include=init)
mix.StirInto(exclude=['SendKeys'])
from Macro import *
from Watcher import *
import Adapters
from pyAA import Constants
|
{
"content_hash": "16d39087b2a583bb23782cf925bffe4a",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 33.58064516129032,
"alnum_prop": 0.7857829010566763,
"repo_name": "parente/clique",
"id": "5d902237dbb5b3ca7a1dd86eee4ee29ca51b9cbf",
"size": "1041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UIA/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "624768"
}
],
"symlink_target": ""
}
|
import configparser
import json
import logging
import os
import signal
import collections
from .requesthandlers.vt_request import make_request_to_vt
from . import urls
from . import config
from .scheduler import Scheduler
from .dispatcher import Dispatcher
from .receptionist import Receptionist
class Server:
def try_serve(self):
try:
self.serve()
except OSError as error:
logging.error("Probably port is already in use, please change port in config.ini. " + str(error))
def serve(self):
self._initialize_config()
self._initialize_logs()
self._initialize_objects()
self._initialize_default_job()
self._initialize_signals()
self._start_receptionist_and_scheduler()
# self._wait_for_sigint()
def handle_sig(self, signum, frame):
self.exit_gracefully()
def exit_gracefully(self):
self._dump_deque_to_file()
self.receptionist.stop()
def _initialize_logs(self):
logging.basicConfig(filename=config.log_filename,
level=logging.DEBUG,
format='%(asctime)s %(levelname)s: %(name)s %(message)s')
def _initialize_config(self):
cfg = configparser.ConfigParser()
cfg.read('../config.ini')
config.hostname = cfg['connection']['Host']
config.port = int(cfg['connection']['Port'])
config.connection_no = int(cfg['connection']['Number'])
config.html_dir = cfg['paths']['Html']
config.log_filename = cfg['paths']['Logs']
config.dump_filename = cfg["paths"]["Dump"]
config.vt_delay = cfg['vt']['Delay']
def _initialize_objects(self):
self.deque = collections.deque(self._get_dump())
self.scheduler = Scheduler().apscheduler
self.dispatcher = Dispatcher(scheduler=self.scheduler,
deque=self.deque,
urls=urls.URLS)
self.receptionist = Receptionist(dispatcher=self.dispatcher,
hostname=config.hostname,
port=config.port,
connection_no=config.connection_no)
def _get_dump(self):
        if os.path.isfile(config.dump_filename):
with open(config.dump_filename) as file:
return json.loads(file.read())
else:
return []
def _dump_deque_to_file(self):
with open(config.dump_filename, 'w+') as file:
file.write(json.dumps(list(self.deque)))
def _initialize_signals(self):
signal.signal(signal.SIGINT, self.handle_sig)
signal.signal(signal.SIGTERM, self.handle_sig)
def _start_receptionist_and_scheduler(self):
self.scheduler.start()
self.receptionist.start()
def _wait_for_sigint(self):
signal.pause()
def _initialize_default_job(self):
cron = {"second": "*/" + config.vt_delay}
self.scheduler.add_job(func=lambda: self.check_deque_and_make_request_to_vt(),
trigger='cron',
replace_existing=True, **cron)
def check_deque_and_make_request_to_vt(self):
try:
sha256 = self.deque.popleft()
make_request_to_vt(sha256)
except IndexError:
logging.info("Deque is empty")
|
{
"content_hash": "ebe4d88699fb470a902fc85de68c0b27",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 109,
"avg_line_length": 34,
"alnum_prop": 0.5803727431566686,
"repo_name": "mwalercz/virus-total-helper",
"id": "18748767babfbdab3dca4e08f50f45cfa3b521c0",
"size": "3434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "207338"
},
{
"name": "Python",
"bytes": "57420"
},
{
"name": "Shell",
"bytes": "449"
}
],
"symlink_target": ""
}
|
"""Dummy no-op test to be used in the webdriver test."""
import logging
import sys
import unittest
class DummyTest(unittest.TestCase):
def test_dummy(self):
logging.info('Dummy output.')
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
del sys.argv[1:]
unittest.main()
|
{
"content_hash": "2f0980d4222e08e03b94a70afec1b95b",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 18.11764705882353,
"alnum_prop": 0.685064935064935,
"repo_name": "theskyinflames/bpulse-go-client",
"id": "9f779ed4ca4a5ba608584dd508026dd0868f08e3",
"size": "330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vendor/github.com/youtube/vitess/test/cluster/keytar/dummy_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "133726"
},
{
"name": "Shell",
"bytes": "2415"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='typogrify-hyde',
version='1.0.1a',
packages=find_packages(),
author='Christian Metts',
author_email='xian@mintchaos.com',
license='BSD',
description='Typography related template filters for Django & Jinja2 applications',
url='https://github.com/mintchaos/typogrify',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Topic :: Utilities'
],
install_requires=['smartypants>=1.6']
)
|
{
"content_hash": "4326fc4d021f92cd55f7b676e9b56ad1",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 87,
"avg_line_length": 30.6,
"alnum_prop": 0.6352941176470588,
"repo_name": "hyde/typogrify",
"id": "d2b21a1f7e3a520f1dda0647ccfcb0492e86d830",
"size": "765",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "27081"
}
],
"symlink_target": ""
}
|
__author__ = 'olesya'
import api_request
import logging
import json
import datetime
import time
class EventbriteApi(api_request.ApiRequest):
def __init__(self):
self.source = "eventbrite"
self.settings = self.get_settings(self.source)
def request_events(self, city=None, category=None, date_and_time=None):
events_list = []
request = {'token': self.settings['token'], "venue.country": "IL", "expand": "venue"}
if category is not None:
catg = self.get_category(category.strip(), self.source) #find in dictionary
if catg != 401:
request.update({"categories": catg})
else:
return 401
if date_and_time is not None:
d_t = {
"1d": "tomorrow",
"1w": "this_week",
"1m": "this_month"
}[date_and_time]
request.update({"start_date.keyword": d_t})
if city is not None:
try:
city = self.possible_cities(self.source)[city]
logging.info("city exist")
request.update({"venue.city": city})
except:
logging.info("The city is not exist")
logging.info("Starting connection to eventbrite.api")
response, status_code = self.http_request_using_urlfetch(self.settings['URL_PATTERN'], request)
logging.info("eventbrite actual response {}".format(response))
response = json.loads(response)
if status_code == 200:
for res in response['events']:
event = {}
self.check_valid(event, res, 'name', 'name', 'text')
self.check_valid(event, res, 'description', 'description', 'text')
self.check_valid(event, res, 'id', 'id')
self.check_valid(event, res, 'date', 'start', 'local') #Get start date and time
                date_object = datetime.datetime.strptime(event['date'], '%Y-%m-%dT%H:%M:%S')  # convert to a datetime object
                millisec = time.mktime(date_object.timetuple()) * 1000  # convert to milliseconds
                event['date'] = int(millisec)  # save the new date and time in milliseconds
try:
c_id = res['category']['id']
category = self.get_category_by_id(c_id, self.source).pop()
                except (KeyError, TypeError, IndexError):
                    # the event has no category (or the lookup failed);
                    # keep the category passed in by the caller
                    pass
self.check_valid(event, res, 'city', 'venue', 'address', 'city')
c = self.check_city(event['city'])
if c is not None:
event['city'] = c
self.check_valid(event, res, 'address', 'venue', 'address', 'address_1')
self.check_valid(event, res, 'event_url', 'url')
self.check_valid(event, res, 'host', 'organizer', 'name')
event['attendees'] = 0
try:
if all(r['free'] for r in res['ticket_classes']):
event['price'] = "free"
else:
event['price'] = event['event_url']
except KeyError:
event['price'] = event['event_url']
events_list.append(event)
self.save_in_db(event, self.source, category)
event_json = json.dumps(events_list)
return event_json
|
{
"content_hash": "437f772609de8d49bdd3f3803acadfff",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 121,
"avg_line_length": 43.2375,
"alnum_prop": 0.5059265683723619,
"repo_name": "denbedilov/ATTENDER",
"id": "e5f1b6badde19e0004cbf11c5b3dc42af9fae476",
"size": "3459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/attender-mobile/engine/eventbrite_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2476"
},
{
"name": "HTML",
"bytes": "1842"
},
{
"name": "Java",
"bytes": "31945"
},
{
"name": "Python",
"bytes": "828347"
}
],
"symlink_target": ""
}
|
import random
class Chromosome:
def __init__(self, chain):
self.chain = chain
self.length = len(chain)
@classmethod
def cross(cls, parent1, parent2, mutation_rate):
switch_point = random.choice(range(parent1.length))
chains = [parent1.chain[:switch_point] + parent2.chain[switch_point:], parent2.chain[:switch_point] + parent1.chain[switch_point:]]
chain = random.choice(chains)
i = 0
while i < parent1.length:
if random.random() <= mutation_rate:
                chain[i] = (chain[i] + 1) % 2  # flip the bit
i = i + 1
return cls(chain)
@classmethod
def random(cls, length):
return cls([random.choice([0,1]) for i in range(length)])
@staticmethod
def decode(chain):
base = 1
x = 0
for i in chain:
x = x + (base*i)
base = base*2
return x
class Pool:
def __init__(self, chromosome_length, population_size, crossover_rate, mutation_rate, fitness_function):
self.population_size = population_size
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.fitness_function = fitness_function
self.max_fitness = float("-inf")
self.best_chromosome = None
self.population = []
while population_size > 0:
chromosome = Chromosome.random(chromosome_length)
fitness = self.fitness_function(chromosome)
while fitness <= 0:
chromosome = Chromosome.random(chromosome_length)
fitness = self.fitness_function(chromosome)
if fitness > self.max_fitness:
self.max_fitness = fitness
self.best_chromosome = chromosome
self.population = self.population + [chromosome]*fitness
population_size = population_size - 1
def new_generation(self):
i = 0
new_population = []
parent1 = random.choice(self.population)
parent2 = random.choice(self.population)
while i < self.population_size:
if random.random() <= self.crossover_rate:
child = Chromosome.cross(parent1, parent2, self.mutation_rate)
fitness = self.fitness_function(child)
while fitness <= 0:
parent2 = random.choice(self.population)
child = Chromosome.cross(parent1, parent2, self.mutation_rate)
fitness = self.fitness_function(child)
if fitness > self.max_fitness:
self.max_fitness = fitness
self.best_chromosome = child
new_population = new_population + [child]*fitness
# if fitness < 0:
# new_population.append(child)
i = i+1
parent1 = parent2
parent2 = random.choice(self.population)
self.population = new_population
def evolve(self, iterations):
i = 0
while i < iterations:
self.new_generation()
i = i + 1
def pick(self):
return self.best_chromosome #random.choice(self.population)
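# Usage sketch (illustrative, not part of the original module): evolve 8-bit
# chromosomes toward the largest decoded integer. The fitness function must
# return a positive int, because Pool replicates each chromosome `fitness`
# times in its selection population.
if __name__ == '__main__':
    pool = Pool(chromosome_length=8,
                population_size=30,
                crossover_rate=0.7,
                mutation_rate=0.01,
                fitness_function=lambda c: Chromosome.decode(c.chain) + 1)
    pool.evolve(iterations=50)
    best = pool.pick()
    print("best=%s decoded=%d" % (best.chain, Chromosome.decode(best.chain)))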
|
{
"content_hash": "ad28e125256a8ca958613cd0e12dd348",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 133,
"avg_line_length": 29.011111111111113,
"alnum_prop": 0.6924549980850249,
"repo_name": "aleknaui/py-genetic",
"id": "6d9ba12d59e1fcf09e71f0e48cbf1e308f35225b",
"size": "2657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "genetic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3223"
}
],
"symlink_target": ""
}
|
"""Provides device automations for Media player."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import (
DEVICE_TRIGGER_BASE_SCHEMA,
entity,
)
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_FOR,
CONF_PLATFORM,
CONF_TYPE,
STATE_BUFFERING,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from .const import DOMAIN
TRIGGER_TYPES = {"turned_on", "turned_off", "buffering", "idle", "paused", "playing"}
MEDIA_PLAYER_TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
TRIGGER_SCHEMA = vol.All(
vol.Any(
MEDIA_PLAYER_TRIGGER_SCHEMA,
entity.TRIGGER_SCHEMA,
),
vol.Schema({vol.Required(CONF_DOMAIN): DOMAIN}, extra=vol.ALLOW_EXTRA),
)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device triggers for Media player entities."""
registry = entity_registry.async_get(hass)
triggers = await entity.async_get_triggers(hass, device_id, DOMAIN)
# Get all the integration entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add triggers for each entity that belongs to this integration
triggers += [
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: trigger,
}
for trigger in TRIGGER_TYPES
]
return triggers
async def async_get_trigger_capabilities(
hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
"""List trigger capabilities."""
if config[CONF_TYPE] not in TRIGGER_TYPES:
return await entity.async_get_trigger_capabilities(hass, config)
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
if config[CONF_TYPE] not in TRIGGER_TYPES:
return await entity.async_attach_trigger(hass, config, action, automation_info)
if config[CONF_TYPE] == "buffering":
to_state = STATE_BUFFERING
elif config[CONF_TYPE] == "idle":
to_state = STATE_IDLE
elif config[CONF_TYPE] == "turned_off":
to_state = STATE_OFF
elif config[CONF_TYPE] == "turned_on":
to_state = STATE_ON
elif config[CONF_TYPE] == "paused":
to_state = STATE_PAUSED
else: # "playing"
to_state = STATE_PLAYING
state_config = {
CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_TO: to_state,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = await state_trigger.async_validate_trigger_config(hass, state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
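# Illustrative only: a device trigger configuration this module accepts
# (the device and entity ids below are hypothetical placeholders):
# {
#     "platform": "device",
#     "domain": "media_player",
#     "device_id": "abc123",
#     "entity_id": "media_player.living_room",
#     "type": "playing",
#     "for": {"seconds": 10},
# }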
|
{
"content_hash": "5c2b5bd6691ecaba5e351d5b161fc904",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 88,
"avg_line_length": 30.76984126984127,
"alnum_prop": 0.6572091823574929,
"repo_name": "toddeye/home-assistant",
"id": "e0c88489841dfc5119cd77d5c41a53d6bb20ab8d",
"size": "3877",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import pika
connection = pika.BlockingConnection(pika.ConnectionParameters(
host='localhost'))
channel = connection.channel()
channel.queue_declare(queue='hello')
print(' [*] Waiting for messages. To exit press CTRL+C')
def callback(ch, method, properties, body):
    print(" [x] Received %r" % (body,))
channel.basic_consume(callback,
queue='hello',
no_ack=True)
channel.start_consuming()
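# Companion publisher sketch (assumed; the tutorial pairs this consumer with
# a send script along these lines, using the same 'hello' queue):
# import pika
# connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
# channel = connection.channel()
# channel.queue_declare(queue='hello')
# channel.basic_publish(exchange='', routing_key='hello', body='Hello World!')
# connection.close()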
|
{
"content_hash": "492e5ab38a67487635faf88ae8fe47c0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 63,
"avg_line_length": 22.15,
"alnum_prop": 0.654627539503386,
"repo_name": "theofilis/tutorial-rabbitMQ",
"id": "ffa099186e00016eac269097d9ddb547a4dcdaf7",
"size": "465",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/lesson1/receive.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9588"
}
],
"symlink_target": ""
}
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 0);
|
{
"content_hash": "622d36a1d0da318fb7b7406e6c2b76e6",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 167,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.7078651685393258,
"repo_name": "antoinecarme/pyaf",
"id": "3f8213b3bb10f4e8a4d44b4410e2e07c5450ba1f",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_LinearTrend/cycle_0/ar_/test_artificial_32_Quantization_LinearTrend_0__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from keywords.forms import KeywordCreateForm
class KeywordCreateFormTest(TestCase):
def test_KeywordCreateForm_allows_blank_items(self):
form = KeywordCreateForm(data={"keyword": ""})
self.assertTrue(form.is_valid())
|
{
"content_hash": "3e9f7a844433c48f00e9484d94d95e33",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 27.4,
"alnum_prop": 0.7408759124087592,
"repo_name": "patcurry/WebGIS",
"id": "303f6e4d00bd07786657a480e35976085d70acdb",
"size": "274",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unit_tests/keywords/test_forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3688"
},
{
"name": "HTML",
"bytes": "36261"
},
{
"name": "JavaScript",
"bytes": "664345"
},
{
"name": "Python",
"bytes": "131809"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
}
|
"""
The GPO Reference Aggregate Manager v2, showing how to implement
the GENI AM API version 2. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
import base64
import datetime
import dateutil.parser
import dateutil.tz
import logging
import os
import uuid
import xml.dom.minidom as minidom
import zlib
import geni
from geni.util.urn_util import publicid_to_urn
from geni.SecureXMLRPCServer import SecureXMLRPCServer
from resource import Resource
from aggregate import Aggregate
from fakevm import FakeVM
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = 'renewsliver'
CREATESLIVERPRIV = 'createsliver'
DELETESLIVERPRIV = 'deleteslice'
SLIVERSTATUSPRIV = 'getsliceresources'
SHUTDOWNSLIVERPRIV = 'shutdown'
# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = 'geni//gpo//gcf'
REFAM_MAXLEASE_DAYS = 365
class Slice(object):
"""A slice has a URN, a list of resources, and an expiration time in UTC."""
def __init__(self, urn, expiration):
self.id = str(uuid.uuid4())
self.urn = urn
self.expiration = expiration
self.resources = dict()
def status(self, resources):
"""Determine the status of the sliver by examining the status
of each resource in the sliver.
"""
# If any resource is 'shutdown', the sliver is 'shutdown'
# Else if any resource is 'failed', the sliver is 'failed'
# Else if any resource is 'configuring', the sliver is 'configuring'
# Else if all resources are 'ready', the sliver is 'ready'
# Else the sliver is 'unknown'
rstat = [res.status for res in resources]
if Resource.STATUS_SHUTDOWN in rstat:
return Resource.STATUS_SHUTDOWN
elif Resource.STATUS_FAILED in rstat:
return Resource.STATUS_FAILED
elif Resource.STATUS_CONFIGURING in rstat:
return Resource.STATUS_CONFIGURING
        elif all(status == Resource.STATUS_READY for status in rstat):
# All resources report status of ready
return Resource.STATUS_READY
else:
return Resource.STATUS_UNKNOWN
class ReferenceAggregateManager(object):
'''A reference Aggregate Manager that manages fake resources.'''
# root_cert is a single cert or dir of multiple certs
# that are trusted to sign credentials
def __init__(self, root_cert, urn_authority, url):
self._url = url
self._api_version = 2
self._slices = dict()
self._agg = Aggregate()
self._agg.add_resources([FakeVM(self._agg) for _ in range(3)])
self._cred_verifier = geni.CredentialVerifier(root_cert)
self._urn_authority = urn_authority
self._my_urn = publicid_to_urn("%s %s %s" % (self._urn_authority, 'authority', 'am'))
self.max_lease = datetime.timedelta(days=REFAM_MAXLEASE_DAYS)
self.logger = logging.getLogger('gcf.am2')
def GetVersion(self, options):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
self.logger.info("Called GetVersion")
reqver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/request.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
adver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/ad.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
api_versions = dict()
api_versions[str(self._api_version)] = self._url
versions = dict(geni_api=2,
geni_api_versions=api_versions,
geni_request_rspec_versions=reqver,
geni_ad_rspec_versions=adver)
return dict(geni_api=versions['geni_api'],
code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=versions,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
self.logger.info('ListResources(%r)' % (options))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
# could require list or listnodes?
privileges = ()
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
None,
privileges)
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if 'geni_rspec_version' not in options:
# This is a required option, so error out with bad arguments.
self.logger.error('No geni_rspec_version supplied to ListResources.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version was not supplied.')
if 'type' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a type field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a type field.')
if 'version' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a version field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a version field.')
# Look to see what RSpec version the client requested
# Error-check that the input value is supported.
rspec_type = options['geni_rspec_version']['type']
if isinstance(rspec_type, str):
rspec_type = rspec_type.lower().strip()
rspec_version = options['geni_rspec_version']['version']
if rspec_type != 'geni':
self.logger.error('ListResources: Unknown RSpec type %s requested', rspec_type)
return self.errorResult(4, 'Bad Version: requested RSpec type %s is not a valid option.' % (rspec_type))
if rspec_version != '3':
self.logger.error('ListResources: Unknown RSpec version %s requested', rspec_version)
            return self.errorResult(4, 'Bad Version: requested RSpec version %s is not a valid option.' % (rspec_version))
self.logger.info("ListResources requested RSpec %s (%s)", rspec_type, rspec_version)
if 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
if slice_urn in self._slices:
result = self.manifest_rspec(slice_urn)
else:
# return an empty rspec
return self._no_such_slice(slice_urn)
else:
all_resources = self._agg.catalog(None)
available = 'geni_available' in options and options['geni_available']
resource_xml = ""
for r in all_resources:
if available and not r.available:
continue
resource_xml = resource_xml + self.advert_resource(r)
result = self.advert_header() + resource_xml + self.advert_footer()
self.logger.debug("Result is now \"%s\"", result)
# Optionally compress the result
if 'geni_compressed' in options and options['geni_compressed']:
try:
result = base64.b64encode(zlib.compress(result))
            except Exception as exc:
import traceback
self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
raise Exception("Server error compressing resource list", exc)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
self.logger.info('CreateSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (CREATESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
self.logger.error('Slice %s already exists.', slice_urn)
return self.errorResult(17, 'Slice %s already exists' % (slice_urn))
rspec_dom = None
try:
rspec_dom = minidom.parseString(rspec)
        except Exception as exc:
            self.logger.error("Can't create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
return self.errorResult(1, 'Bad Args: RSpec is unparseable')
# Look at the version of the input request RSpec
# Make sure it is supported
# Then make sure that you return an RSpec in the same format
# EG if both V1 and V2 are supported, and the user gives V2 request,
# then you must return a V2 request and not V1
allresources = self._agg.catalog()
allrdict = dict()
for r in allresources:
if r.available:
allrdict[r.id] = r
# Note: This only handles unbound nodes. Any attempt by the client
# to specify a node is ignored.
resources = dict()
unbound = list()
for elem in rspec_dom.documentElement.getElementsByTagName('node'):
unbound.append(elem)
for elem in unbound:
client_id = elem.getAttribute('client_id')
keys = allrdict.keys()
if keys:
rid = keys[0]
resources[client_id] = allrdict[rid]
del allrdict[rid]
else:
return self.errorResult(6, 'Too Big: insufficient resources to fulfill request')
# determine max expiration time from credentials
# do not create a sliver that will outlive the slice!
expiration = datetime.datetime.utcnow() + self.max_lease
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
if credexp < expiration:
expiration = credexp
newslice = Slice(slice_urn, expiration)
self._agg.allocate(slice_urn, resources.values())
for cid, r in resources.items():
newslice.resources[cid] = r.id
r.status = Resource.STATUS_READY
self._slices[slice_urn] = newslice
self.logger.info("Created new slice %s" % slice_urn)
result = self.manifest_rspec(slice_urn)
self.logger.debug('Result = %s', result)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def DeleteSliver(self, slice_urn, credentials, options):
'''Stop and completely delete the named sliver, and return True.'''
self.logger.info('DeleteSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (DELETESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
sliver = self._slices[slice_urn]
resources = self._agg.catalog(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not deleted because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
self._agg.deallocate(slice_urn, None)
for r in resources:
r.status = Resource.STATUS_UNKNOWN
del self._slices[slice_urn]
self.logger.info("Sliver %r deleted" % slice_urn)
return self.successResult(True)
else:
return self._no_such_slice(slice_urn)
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.
Return a dict of sliver urn, status, and a list of dicts resource
statuses.'''
# Loop over the resources in a sliver gathering status.
self.logger.info('SliverStatus(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (SLIVERSTATUSPRIV,)
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
if slice_urn in self._slices:
theSlice = self._slices[slice_urn]
# Now calculate the status of the sliver
res_status = list()
resources = self._agg.catalog(slice_urn)
for res in resources:
self.logger.debug('Resource = %s', str(res))
# Gather the status of all the resources
# in the sliver. This could be actually
# communicating with the resources, or simply
# reporting the state of initialized, started, stopped, ...
res_status.append(dict(geni_urn=self.resource_urn(res),
geni_status=res.status,
geni_error=''))
self.logger.info("Calculated and returning slice %s status", slice_urn)
result = dict(geni_urn=slice_urn,
geni_status=theSlice.status(resources),
geni_resources=res_status)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
else:
return self._no_such_slice(slice_urn)
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
'''Renew the local sliver that is part of the named Slice
until the given expiration time (in UTC with a TZ per RFC3339).
Requires at least one credential that is valid until then.
Return False on any error, True on success.'''
self.logger.info('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
privileges = (RENEWSLIVERPRIV,)
creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
# All the credentials we just got are valid
if slice_urn in self._slices:
# If any credential will still be valid at the newly
# requested time, then we can do this.
resources = self._agg.catalog(slice_urn)
sliver = self._slices.get(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not renewed because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
requested = dateutil.parser.parse(str(expiration_time))
# Per the AM API, the input time should be TZ-aware
# But since the slice cred may not (per ISO8601), convert
# it to naiveUTC for comparison
requested = self._naiveUTC(requested)
maxexp = datetime.datetime.min
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
                if credexp > maxexp:
                    maxexp = credexp
if credexp >= requested:
sliver.expiration = requested
self.logger.info("Sliver %r now expires on %r", slice_urn, expiration_time)
return self.successResult(True)
else:
self.logger.debug("Valid cred %r expires at %r before %r", cred, credexp, requested)
# Fell through then no credential expires at or after
# newly requested expiration time
self.logger.info("Can't renew sliver %r until %r because none of %d credential(s) valid until then (latest expires at %r)", slice_urn, expiration_time, len(creds), maxexp)
# FIXME: raise an exception so the client knows what
# really went wrong?
return self.errorResult(19, "Out of range: Expiration %s is out of range (past last credential expiration of %s)." % (expiration_time, maxexp))
else:
return self._no_such_slice(slice_urn)
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
self.logger.info('Shutdown(%r)' % (slice_urn))
privileges = (SHUTDOWNSLIVERPRIV,)
self._cred_verifier.verify_from_strings(self._server.pem_cert,
credentials,
slice_urn,
privileges)
if slice_urn in self._slices:
resources = self._agg.catalog(slice_urn)
for resource in resources:
resource.status = Resource.STATUS_SHUTDOWN
self.logger.info("Sliver %r shut down" % slice_urn)
return self.successResult(True)
else:
self.logger.info("Shutdown: No such slice: %s.", slice_urn)
return self._no_such_slice(slice_urn)
def successResult(self, value):
code_dict = dict(geni_code=0,
am_type="gcf2",
am_code=0)
return dict(code=code_dict,
value=value,
output="")
def _no_such_slice(self, slice_urn):
return self.errorResult(12, 'Search Failed: no slice "%s" found' % (slice_urn))
def errorResult(self, code, output, am_code=None):
code_dict = dict(geni_code=code, am_type="gcf2")
if am_code is not None:
code_dict['am_code'] = am_code
return dict(code=code_dict,
value="",
output=output)
def _naiveUTC(self, dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
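# Illustrative example (not from the original source): a TZ-aware input
# such as 2021-06-01T12:00:00-04:00 comes back as the naive UTC datetime
# 2021-06-01 16:00:00, while a datetime with no tzinfo is returned
# unchanged.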
def advert_resource(self, resource):
tmpl = ''' <node component_manager_id="%s"
component_name="%s"
component_id="%s"
exclusive="%s">
<available now="%s"/>
</node>
'''
resource_id = str(resource.id)
resource_exclusive = str(False).lower()
resource_available = str(resource.available).lower()
resource_urn = self.resource_urn(resource)
return tmpl % (self._my_urn,
resource_id,
resource_urn,
resource_exclusive,
resource_available)
def advert_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd"
type="advertisement">'''
return header
def advert_footer(self):
return '</rspec>'
def manifest_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd"
type="manifest">'''
return header
def manifest_slice(self, slice_urn):
tmpl = '<node client_id="%s"/>'
result = ""
for cid in self._slices[slice_urn].resources.keys():
result = result + tmpl % (cid)
return result
def manifest_footer(self):
return '</rspec>'
def manifest_rspec(self, slice_urn):
return self.manifest_header() + self.manifest_slice(slice_urn) + self.manifest_footer()
def resource_urn(self, resource):
urn = publicid_to_urn("%s %s %s" % (self._urn_authority,
str(resource.type),
str(resource.id)))
return urn
class AggregateManager(object):
"""The public API for a GENI Aggregate Manager. This class provides the
XMLRPC interface and invokes a delegate for all the operations.
"""
def __init__(self, delegate):
self._delegate = delegate
self.logger = logging.getLogger('gcf.am2')
def _exception_result(self, exception):
output = str(exception)
self.logger.warning(output)
# XXX Code for no slice here?
return dict(code=dict(geni_code=102,
am_type="gcf2",
am_code=0),
value="",
output=output)
def GetVersion(self, options=dict()):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
try:
return self._delegate.GetVersion(options)
except Exception as e:
self.logger.exception("Error in GetVersion:")
return self._exception_result(e)
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
try:
return self._delegate.ListResources(credentials, options)
except Exception as e:
self.logger.exception("Error in ListResources:")
return self._exception_result(e)
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
try:
return self._delegate.CreateSliver(slice_urn, credentials, rspec,
users, options)
except Exception as e:
self.logger.exception("Error in CreateSliver:")
return self._exception_result(e)
def DeleteSliver(self, slice_urn, credentials, options):
"""Delete the given sliver. Return true on success."""
try:
return self._delegate.DeleteSliver(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in DeleteSliver:")
return self._exception_result(e)
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.'''
try:
return self._delegate.SliverStatus(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in SliverStatus:")
return self._exception_result(e)
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
"""Extend the life of the given sliver until the given
expiration time. Return False on error."""
try:
return self._delegate.RenewSliver(slice_urn, credentials,
expiration_time, options)
except Exception as e:
self.logger.exception("Error in RenewSliver:")
return self._exception_result(e)
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
try:
return self._delegate.Shutdown(slice_urn, credentials, options)
except Exception as e:
self.logger.exception("Error in Shutdown:")
return self._exception_result(e)
class AggregateManagerServer(object):
"""An XMLRPC Aggregate Manager Server. Delegates calls to given delegate,
or the default printing AM."""
def __init__(self, addr, keyfile=None, certfile=None,
trust_roots_dir=None,
ca_certs=None, base_name=None):
# ca_certs arg here must be a file of concatenated certs
if ca_certs is None:
raise Exception('Missing CA Certs')
elif not os.path.isfile(os.path.expanduser(ca_certs)):
raise Exception('CA Certs must be an existing file of accepted root certs: %s' % ca_certs)
# Decode the addr into a URL. Is there a pythonic way to do this?
server_url = "https://%s:%d/" % addr
delegate = ReferenceAggregateManager(trust_roots_dir, base_name,
server_url)
# FIXME: set logRequests=true if --debug
self._server = SecureXMLRPCServer(addr, keyfile=keyfile,
certfile=certfile, ca_certs=ca_certs)
self._server.register_instance(AggregateManager(delegate))
# Set the server on the delegate so it can access the
# client certificate.
delegate._server = self._server
if base_name is not None:
global RESOURCE_NAMESPACE
RESOURCE_NAMESPACE = base_name
def serve_forever(self):
self._server.serve_forever()
def register_instance(self, instance):
# Pass the AM instance to the generic XMLRPC server,
# which lets it know what XMLRPC methods to expose
self._server.register_instance(instance)
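# Usage sketch (illustrative only; the address, file names and base name
# below are hypothetical placeholders, not values from this module):
#
#   server = AggregateManagerServer(('localhost', 8001),
#                                   keyfile='am-key.pem',
#                                   certfile='am-cert.pem',
#                                   trust_roots_dir='trusted_roots',
#                                   ca_certs='ca-certs.pem',
#                                   base_name='geni//gpo//gcf')
#   server.serve_forever()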
|
{
"content_hash": "02d6d17cb2c109e53b53148281f088ba",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 183,
"avg_line_length": 45.83259259259259,
"alnum_prop": 0.5897145812457575,
"repo_name": "EICT/C-BAS",
"id": "33a4fe598edf5f5be85b6b9ffeb440d814c0e974",
"size": "32149",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/vendor/geniv3rpc/ext/geni/am/am2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "151353"
},
{
"name": "Python",
"bytes": "412241"
},
{
"name": "Shell",
"bytes": "5982"
}
],
"symlink_target": ""
}
|
activate_this_file = "{{ os_loganalyze_venv_path }}/bin/activate_this.py"
execfile(activate_this_file, dict(__file__=activate_this_file))
import threading
from os_loganalyze import wsgi
ROOT_PATH = '{{ os_loganalyze_root_dir }}'
WSGI_CONFIG = '/etc/os_loganalyze/wsgi.conf'
def create_application():
def application(environ, start_response):
return wsgi.application(environ,
start_response,
root_path=ROOT_PATH,
wsgi_config=WSGI_CONFIG)
return application
application = create_application()
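# Minimal local smoke test (illustrative, not part of the template; note
# that execfile() above implies this file targets Python 2):
#
#   from wsgiref.simple_server import make_server
#   make_server('127.0.0.1', 8080, application).serve_forever()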
|
{
"content_hash": "4dc099c94bfd46fdd1fb41c0900c303c",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 26.608695652173914,
"alnum_prop": 0.6176470588235294,
"repo_name": "BonnyCI/hoist",
"id": "3aacb2a3a77b3d64db63fa1d2806de97a865a8ea",
"size": "1202",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "roles/os-loganalyze/templates/etc/os_loganalyze/wsgi/os-loganalyze.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47243"
},
{
"name": "Shell",
"bytes": "21603"
}
],
"symlink_target": ""
}
|
class StencilDataDataListener(object):
"""Listen to any changes of the data of the StencilData object.
"""
def data_changed(self):
"""Data has changed.
"""
raise NotImplementedError
class StencilDataStencilListListener(object):
"""Listen to changes in the stencil list of the StencilData object.
"""
def remove_all_stencils(self):
"""All stencils have been removed.
"""
raise NotImplementedError
def add_stencil(self, stencil):
"""Add a stencil.
:param stencil: Stencil to add
:type stencil: str
"""
raise NotImplementedError
def remove_stencil(self, stencil):
"""Remove a stencil.
:param stencil: Stencil to remove (expected to exist)
:type stencil: str
"""
raise NotImplementedError
class StencilDataFieldListListener(object):
"""Listen to changes in the field list of the StencilData object.
"""
def remove_all_fields(self):
"""Remove all registered fields.
"""
raise NotImplementedError
def add_field(self, name, idx=None):
"""Append field `name` at the end, or insert it at position `idx`.
:param name: Name of the field
:type name: str
:param idx: Position to insert the field. If idx is ``None``, the field will be appended at
the end
:type idx: int
"""
raise NotImplementedError
def remove_field(self, name):
"""Remove the field `name`.
:param name: Name of the field
:type name: str
"""
raise NotImplementedError
def set_field_enabled(self, name_or_idx, enable):
"""Enable or disable field.
:param name_or_idx: Name or index of the field
:type name_or_idx: str, int
:param enable: Enable (``True``) or disable (``False``) field
:type enable: bool
"""
raise NotImplementedError
def move_field(self, name_or_idx, idx):
"""Move the field, given by `name_or_idx`, to position `idx` in the field list.
:param name_or_idx: Name or index of the field
:type name_or_idx: str, int
:param idx: Position to move the field to
:type idx: int
"""
raise NotImplementedError
def num_fields(self):
"""Get number of fields in the list.
:return: Number of fields in the list
:rtype: int
"""
raise NotImplementedError
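# Sketch of a concrete listener (illustrative, not part of the original
# module): subclasses override only the callbacks they care about, e.g.
#
#   class PrintingDataListener(StencilDataDataListener):
#       def data_changed(self):
#           print("stencil data changed")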
|
{
"content_hash": "51b10f8d3e8de840789aec0caa9be38e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 99,
"avg_line_length": 27.560439560439562,
"alnum_prop": 0.5921052631578947,
"repo_name": "havogt/serialbox2",
"id": "3133b43f928c71c453fdf6fe9b35ebc147808a9d",
"size": "2916",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/serialbox-python/sdb/sdbcore/stencildatalistener.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "939426"
},
{
"name": "CMake",
"bytes": "81189"
},
{
"name": "Dockerfile",
"bytes": "676"
},
{
"name": "Fortran",
"bytes": "435079"
},
{
"name": "Python",
"bytes": "394767"
},
{
"name": "Shell",
"bytes": "31387"
}
],
"symlink_target": ""
}
|
"""Test multisig RPCs"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
assert_equal,
)
from test_framework.key import ECPubKey
import binascii
import decimal
import itertools
class RpcCreateMultiSigTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def get_keys(self):
node0, node1, node2 = self.nodes
add = [node1.getnewaddress() for _ in range(self.nkeys)]
self.pub = [node1.getaddressinfo(a)["pubkey"] for a in add]
self.priv = [node1.dumpprivkey(a) for a in add]
self.final = node2.getnewaddress()
def run_test(self):
node0, node1, node2 = self.nodes
self.check_addmultisigaddress_errors()
self.log.info('Generating blocks ...')
node0.generate(149)
self.sync_all()
self.moved = 0
for self.nkeys in [3, 5]:
for self.nsigs in [2, 3]:
for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
self.get_keys()
self.do_multisig()
self.checkbalances()
# Test mixed compressed and uncompressed pubkeys
self.log.info('Mixed compressed and uncompressed multisigs are not allowed')
pk0 = node0.getaddressinfo(node0.getnewaddress())['pubkey']
pk1 = node1.getaddressinfo(node1.getnewaddress())['pubkey']
pk2 = node2.getaddressinfo(node2.getnewaddress())['pubkey']
# decompress pk2
pk_obj = ECPubKey()
pk_obj.set(binascii.unhexlify(pk2))
pk_obj.compressed = False
pk2 = binascii.hexlify(pk_obj.get_bytes()).decode()
# Check all permutations of keys because order matters apparently
for keys in itertools.permutations([pk0, pk1, pk2]):
# Results should be the same as this legacy one
legacy_addr = node0.createmultisig(2, keys, 'legacy')['address']
assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'legacy')['address'])
# Generate addresses with the segwit types. These should all make legacy addresses
assert_equal(legacy_addr, node0.createmultisig(2, keys, 'bech32')['address'])
assert_equal(legacy_addr, node0.createmultisig(2, keys, 'p2sh-segwit')['address'])
assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'bech32')['address'])
assert_equal(legacy_addr, node0.addmultisigaddress(2, keys, '', 'p2sh-segwit')['address'])
def check_addmultisigaddress_errors(self):
self.log.info('Check that addmultisigaddress fails when the private keys are missing')
addresses = [self.nodes[1].getnewaddress(address_type='legacy') for _ in range(2)]
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
for a in addresses:
# Importing all addresses should not change the result
self.nodes[0].importaddress(a)
assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses))
def checkbalances(self):
node0, node1, node2 = self.nodes
node0.generate(100)
self.sync_all()
bal0 = node0.getbalance()
bal1 = node1.getbalance()
bal2 = node2.getbalance()
height = node0.getblockchaininfo()["blocks"]
assert 150 < height < 350
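# As the arithmetic below implies, this test chain is assumed to pay a
# 50-coin subsidy for the first 149 blocks and 25 coins thereafter
# (a halving at height 150), with the most recent 100 coinbases still
# immature and therefore excluded from spendable balances.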
total = 149 * 50 + (height - 149 - 100) * 25
assert bal1 == 0
assert bal2 == self.moved
assert bal0 + bal1 + bal2 == total
def do_multisig(self):
node0, node1, node2 = self.nodes
msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
madd = msig["address"]
mredeem = msig["redeemScript"]
if self.output_type == 'bech32':
assert madd[0:4] == "rltc" # actually a bech32 address
# compare against addmultisigaddress
msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
maddw = msigw["address"]
mredeemw = msigw["redeemScript"]
# addmultisigaddress and createmultisig work the same
assert maddw == madd
assert mredeemw == mredeem
txid = node0.sendtoaddress(madd, 40)
tx = node0.getrawtransaction(txid, True)
vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses", [])]
assert len(vout) == 1
vout = vout[0]
scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
value = tx["vout"][vout]["value"]
prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
node0.generate(1)
outval = value - decimal.Decimal("0.00001000")
rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
prevtx_err = dict(prevtxs[0])
del prevtx_err["redeemScript"]
assert_raises_rpc_error(-8, "Missing redeemScript/witnessScript", node2.signrawtransactionwithkey, rawtx, self.priv[0:self.nsigs-1], [prevtx_err])
rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs - 1], prevtxs)
rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
self.moved += outval
tx = node0.sendrawtransaction(rawtx3["hex"], True)
blk = node0.generate(1)[0]
assert tx in node0.getblock(blk)["tx"]
txinfo = node0.getrawtransaction(tx, True, blk)
self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
|
{
"content_hash": "fe82082a589b1148412c5cbc21bda5a2",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 160,
"avg_line_length": 40.91095890410959,
"alnum_prop": 0.6289971538590323,
"repo_name": "ledvina/zencoin",
"id": "c561f72b42a8c669bad403750e3066894ea2e05e",
"size": "6187",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/functional/rpc_createmultisig.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "92222"
},
{
"name": "C++",
"bytes": "2523842"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "37264"
},
{
"name": "Shell",
"bytes": "2527"
},
{
"name": "TypeScript",
"bytes": "5234892"
}
],
"symlink_target": ""
}
|
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile(r"\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self, value):
return setattr(self, attr, value)
return alias
class NamespacedAttribute(str):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = str.__new__(cls, prefix)
else:
obj = str.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
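# Illustrative example (not from the original source):
# NamespacedAttribute('xlink', 'href', 'http://www.w3.org/1999/xlink')
# behaves as the plain string 'xlink:href' while exposing .prefix,
# .name and .namespace.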
class AttributeValueWithCharsetSubstitution(str):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile(r"((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return str.__new__(str, original_value)
obj = str.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution
FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
@classmethod
def format_string(cls, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not isinstance(formatter, collections.Callable):
formatter = cls.FORMATTERS.get(
formatter, EntitySubstitution.substitute_xml)
if formatter is None:
output = s
else:
output = formatter(s)
return output
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self):
"Finds the last element beneath this object to be parsed."
last_child = self
while hasattr(last_child, 'contents') and last_child.contents:
last_child = last_child.contents[-1]
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, str)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant()
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant()
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
elif text is None and not limit and not attrs and not kwargs:
# Optimization to find all tags.
if name is True or name is None:
return [element for element in generator
if isinstance(element, Tag)]
# Optimization to find all tags with a given name.
elif isinstance(name, str):
return [element for element in generator
if isinstance(element, Tag) and element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
else:
# Build a SoupStrainer
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
# \---/ \---/\-------------/ \-------/
# | | | |
# | | | The value
# | | ~,|,^,$,* or =
# | Attribute
# Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value = " ".join(value)
return value
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
def select(self, selector):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
for index, token in enumerate(tokens):
if tokens[index - 1] == '>':
# already found direct descendants in last step. skip this
# step.
continue
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag, attribute, operator, value = m.groups()
if not tag:
tag = True
checker = self._attribute_checker(operator, attribute, value)
found = []
for context in current_context:
found.extend(
[el for el in context.find_all(tag) if checker(el)])
current_context = found
continue
if '#' in token:
# ID selector
tag, id = token.split('#', 1)
if tag == "":
tag = True
el = current_context[0].find(tag, {'id': id})
if el is None:
return [] # No match
current_context = [el]
continue
if '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
if not tag_name:
tag_name = True
classes = set(klass.split('.'))
found = []
def classes_match(tag):
if tag_name is not True and tag.name != tag_name:
return False
if not tag.has_attr('class'):
return False
return classes.issubset(tag['class'])
for context in current_context:
found.extend(context.find_all(classes_match))
current_context = found
continue
if token == '*':
# Star selector
found = []
for context in current_context:
found.extend(context.findAll(True))
current_context = found
continue
if token == '>':
# Child selector
tag = tokens[index + 1]
if not tag:
tag = True
found = []
for context in current_context:
found.extend(context.find_all(tag, recursive=False))
current_context = found
continue
# Here we should just have a regular tag
if not self.tag_name_re.match(token):
return []
found = []
for context in current_context:
found.extend(context.findAll(token))
current_context = found
return current_context
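# Illustrative selectors this method handles (examples, not from the
# original source): tag names ('a'), IDs ('div#main' or '#main'),
# classes ('p.warning'), attribute tests ('a[href^="http"]'), direct
# children ('ul > li'), and the star selector ('div *').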
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
class NavigableString(str, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, str):
return str.__new__(cls, value)
return str.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (str(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = '<![CDATA['
SUFFIX = ']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = '<?'
SUFFIX = '?>'
class Comment(PreformattedString):
PREFIX = '<!--'
SUFFIX = '-->'
class Declaration(PreformattedString):
PREFIX = '<!'
SUFFIX = '!>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = '<!DOCTYPE '
SUFFIX = '>\n'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False):
"""Yield all child strings, possibly stripping them."""
for descendant in self.descendants:
if not isinstance(descendant, NavigableString):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __bool__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not equal to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, str):
val = str(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
str(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = (indent_level is not None)
if pretty_print:
space = (' ' * (indent_level - 1))
indent_contents = indent_level + 1
else:
space = ''
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if pretty_print:
s.append(space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if pretty_print and closeTag and self.next_sibling:
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level:
text = text.strip()
if text:
if pretty_print:
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print:
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
# This was kind of misleading because has_key() (attributes) was
# different from __in__ (contents). has_key() is gone in Python 3,
# anyway.
has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in list(attrs.items()):
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, str) or isinstance(value, collections.Callable) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, str)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return str(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, str)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, str):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, str)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, str):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__(self)
self.source = source
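# Usage sketch (illustrative; assumes the bs4 package this module is
# part of): a SoupStrainer restricts parsing and searching to matching
# elements.
#
#   from bs4 import BeautifulSoup, SoupStrainer
#   only_links = SoupStrainer('a', href=True)
#   soup = BeautifulSoup('<a href="/x">x</a><p>y</p>',
#                        parse_only=only_links)
#   print(soup.find_all('a'))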
|
{
"content_hash": "d103b25da70dc5f9feb8d6f66e9f497a",
"timestamp": "",
"source": "github",
"line_count": 1355,
"max_line_length": 104,
"avg_line_length": 36.6649446494465,
"alnum_prop": 0.5646826754694954,
"repo_name": "kevlar1818/mipster",
"id": "88e7460f9172fa852cdcf8834b7fa2173211035e",
"size": "49681",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bs4/element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "252600"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.datastructures import SortedDict
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
if "notification" in settings.INSTALLED_APPS:
from notification import models as notification
else:
notification = None
from projects.forms import ProjectForm, ProjectUpdateForm, AddUserForm
from projects.models import Project, ProjectMember
TOPIC_COUNT_SQL = """
SELECT COUNT(*)
FROM topics_topic
WHERE
topics_topic.object_id = projects_project.id AND
topics_topic.content_type_id = %s
"""
MEMBER_COUNT_SQL = """
SELECT COUNT(*)
FROM projects_projectmember
WHERE projects_projectmember.project_id = projects_project.id
"""
@login_required
def create(request, form_class=ProjectForm, template_name="projects/create.html"):
project_form = form_class(request.POST or None)
if project_form.is_valid():
project = project_form.save(commit=False)
project.creator = request.user
project.save()
project_member = ProjectMember(project=project, user=request.user)
project.members.add(project_member)
project_member.save()
if notification:
# @@@ might be worth having a shortcut for sending to all users
notification.send(User.objects.all(), "projects_new_project",
{"project": project}, queue=True)
return HttpResponseRedirect(project.get_absolute_url())
return render_to_response(template_name, {
"project_form": project_form,
}, context_instance=RequestContext(request))
def projects(request, template_name="projects/projects.html"):
projects = Project.objects.all()
search_terms = request.GET.get("search", "")
if search_terms:
projects = (projects.filter(name__icontains=search_terms) |
projects.filter(description__icontains=search_terms))
content_type = ContentType.objects.get_for_model(Project)
projects = projects.extra(select=SortedDict([
("member_count", MEMBER_COUNT_SQL),
("topic_count", TOPIC_COUNT_SQL),
]), select_params=(content_type.id,))
return render_to_response(template_name, {
"projects": projects,
"search_terms": search_terms,
}, context_instance=RequestContext(request))
def delete(request, group_slug=None, redirect_url=None):
project = get_object_or_404(Project, slug=group_slug)
if not redirect_url:
redirect_url = reverse("project_list")
    # @@@ eventually, we'll remove the restriction that project.creator can't leave the project, but we'll still require project.members.all().count() == 1
if (request.user.is_authenticated() and request.method == "POST" and
request.user == project.creator and project.members.all().count() == 1):
project.delete()
messages.add_message(request, messages.SUCCESS,
ugettext("Project %(project_name)s deleted.") % {
"project_name": project.name
}
)
# no notification required as the deleter must be the only member
return HttpResponseRedirect(redirect_url)
@login_required
def your_projects(request, template_name="projects/your_projects.html"):
projects = Project.objects.filter(member_users=request.user).order_by("name")
content_type = ContentType.objects.get_for_model(Project)
projects = projects.extra(select=SortedDict([
("member_count", MEMBER_COUNT_SQL),
("topic_count", TOPIC_COUNT_SQL),
]), select_params=(content_type.id,))
return render_to_response(template_name, {
"projects": projects,
}, context_instance=RequestContext(request))
def project(request, group_slug=None, form_class=ProjectUpdateForm, adduser_form_class=AddUserForm,
template_name="projects/project.html"):
project = get_object_or_404(Project, slug=group_slug)
if not request.user.is_authenticated():
is_member = False
else:
is_member = project.user_is_member(request.user)
action = request.POST.get("action")
if request.user == project.creator and action == "update":
project_form = form_class(request.POST, instance=project)
if project_form.is_valid():
project = project_form.save()
else:
project_form = form_class(instance=project)
if request.user == project.creator and action == "add":
adduser_form = adduser_form_class(request.POST, project=project)
if adduser_form.is_valid():
project_member = adduser_form.save(request.user)
messages.add_message(request, messages.SUCCESS,
ugettext("added %(user)s to project") % {
"user": project_member.user
}
)
adduser_form = adduser_form_class(project=project) # clear form
else:
adduser_form = adduser_form_class(project=project)
return render_to_response(template_name, {
"project_form": project_form,
"adduser_form": adduser_form,
"project": project,
"group": project, # @@@ this should be the only context var for the project
"is_member": is_member,
}, context_instance=RequestContext(request))
|
{
"content_hash": "c44980ac5d676d73401ed8ae0e0f57d5",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 146,
"avg_line_length": 36.75974025974026,
"alnum_prop": 0.6740858505564388,
"repo_name": "alex/pinax",
"id": "1248dd0f780b5084278e1093e36d1b10bfbe2658",
"size": "5661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinax/apps/projects/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "85360"
},
{
"name": "Python",
"bytes": "528515"
},
{
"name": "Shell",
"bytes": "1963"
}
],
"symlink_target": ""
}
|
'''
Created on 2013-1-10
@author: sinlangxmu@gmail.com
@version: 1.0
'''
import sys, logging, util.ApiUtil as util, xml.dom as dom, xml.dom.minidom as minidom
import traceback, base64
from .util.ApiUtil import BaseResult
X_CNC_REQUEST_ID = 'x-cnc-request-id'
X_CNC_DATE = 'x-cnc-date'
X_CNC_LOCATION = 'location'
X_CNC_CNAME = 'cname'
class DomainApi(object):
    ''' Domain management API '''
HOST = 'https://cloudcdn.chinanetcenter.com'
#HOST = 'http://192.168.27.161:8080/cloudcdn'
#HOST = 'http://localhost:8080/cloud-cdn'
    ''' API service address '''
def __init__(self, user, apiKey):
        '''
        Initialize a DomainApi instance for domain-management calls.
        @type user: str
        @param user: user name
        @type apiKey: str
        @param apiKey: the user's API key
        @rtype: DomainApi
        @return: instance of DomainApi
        '''
self.user = user
self.apiKey = apiKey
self.headers = {
'Accept': 'application/xml',
'Content-Type': 'application/xml'
}
def add(self, domain):
        ''' Create an accelerated (CDN) domain.
        @param domain: a Domain instance describing the new accelerated domain
        @rtype: ProcessResult
        @return: ProcessResult.getLocation() gives the URL of the new domain
        '''
url = self.HOST + "/api/domain"
try:
post = domainToXml(domain)
#print post
ret = util.httpReqeust(url, post, self.makeHeaders(url), "POST")
if ret.status == 202:
return xmlToSuccess(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def listAll(self):
        ''' List all accelerated domains.
        @rtype: ProcessResult
        @return: ProcessResult.getDomainSummarys() gives a list of DomainSummary instances
        '''
url = self.HOST + "/api/domain"
try:
post = ''
ret = util.httpReqeust(url, post, self.makeHeaders(url), "GET")
if ret.status == 200:
return xmlToDomainList(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def find(self, domainId):
        ''' Get the configuration of an accelerated domain.
        @type domainId: str
        @param domainId: ID of the domain to look up
        @rtype: ProcessResult
        @return: ProcessResult.getDomain() gives a Domain instance with the domain's configuration
        '''
url = self.HOST + "/api/domain/" + str(domainId)
try:
post = ''
ret = util.httpReqeust(url, post, self.makeHeaders(url), "GET")
if ret.status == 200:
return xmlToDomain(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def modify(self, domain):
        ''' Modify the configuration of an accelerated domain.
        @type domain: Domain
        @param domain: Domain instance carrying the changes; its domainId field must be set
        @rtype: ProcessResult
        @return: a ProcessResult instance
        '''
if domain.domainId is None:
            raise ValueError('the domainId field must be set')
url = self.HOST + "/api/domain/" + str(domain.domainId)
try:
post = domainToXml(domain)
#print post
ret = util.httpReqeust(url, post, self.makeHeaders(url), "PUT")
if ret.status == 202:
return xmlToSuccess(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def delete(self, domainId):
        ''' Delete an accelerated domain.
        @param domainId: ID of the domain to delete
        @rtype: ProcessResult
        @return: a ProcessResult instance
        '''
url = self.HOST + "/api/domain/" + str(domainId)
try:
post = ''
ret = util.httpReqeust(url, post, self.makeHeaders(url), "DELETE")
if ret.status == 202:
return xmlToSuccess(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def disable(self, domainId):
        ''' Disable an accelerated domain.
        @param domainId: ID of the domain to disable
        @rtype: ProcessResult
        @return: a ProcessResult instance
        '''
url = self.HOST + "/api/domain/" + str(domainId)
try:
post = ''
ret = util.httpReqeust(url, post, self.makeHeaders(url), "DISABLE")
if ret.status == 202:
return xmlToSuccess(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def enable(self, domainId):
        ''' Enable an accelerated domain.
        @param domainId: ID of the domain to enable
        @rtype: ProcessResult
        @return: a ProcessResult instance
        '''
url = self.HOST + "/api/domain/" + str(domainId)
try:
post = ''
ret = util.httpReqeust(url, post, self.makeHeaders(url), "ENABLE")
if ret.status == 202:
return xmlToSuccess(ret)
else:
return xmlToFailure(ret)
except Exception, e:
traceback.print_exc(file=sys.stdout)
return ProcessResult(-1, str(e))
def makeHeaders(self, uri):
        ''' Assemble the request headers '''
global X_CNC_DATE
headers = self.headers.copy()
headers[X_CNC_DATE] = util.getRFCTime()
key = util.hashPassword(headers[X_CNC_DATE], self.apiKey)
headers['Authorization'] = "Basic " + base64.standard_b64encode(
self.user + ':' + key)
return headers
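# A minimal usage sketch (hypothetical credentials and domain name; the
# ProcessResult accessors are defined further down in this module):
#
#   api = DomainApi('myuser', 'my-api-key')
#   domain = Domain(domainName='cdn.example.com',
#                   originConfig=OriginConfig(originIps=['1.2.3.4']))
#   result = api.add(domain)          # POST /api/domain
#   print result.getLocation()        # URL of the newly created domain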
class Domain(object):
    '''Represents a domain object'''
def __init__(self,
domainName=None,
serviceType=None,
domainId=None,
comment=None,
serviceAreas=None,
status=None,
enabled=None,
cname=None,
originConfig=None,
queryStringSettings=None,
cacheBehaviors=None,
visitControlRules=None,
videoDrags=None):
        ''' Initialize a domain object.
        @param domainName: the domain name
        @param serviceType: service type; defaults to web
        @param domainId: domain id, used when modifying a domain
        @param comment: comment
        @param serviceAreas: acceleration regions
        @param cname: the domain's CNAME; traffic only flows into the ChinaNetCenter CDN once the domain's DNS is CNAMEd to this address
        @param status: the domain's deployment status
        @param enabled: whether the domain is enabled
        @type originConfig: OriginConfig
        @param originConfig: origin (back-to-source) configuration
        @type cacheBehaviors: list of CacheBehavior
        @param cacheBehaviors: list of cache rules, as CacheBehavior instances
        @type visitControlRules: list of VisitControlRule
        @param visitControlRules: list of visitor access-control rules, as VisitControlRule instances
        @rtype: Domain
        '''
self.domainName = domainName
self.serviceType = serviceType
self.domainId = domainId
self.comment = comment
self.serviceAreas = serviceAreas
self.status = status
self.enabled = enabled
self.cname = cname
self.originConfig = originConfig
self.queryStringSettings = queryStringSettings
self.cacheBehaviors = cacheBehaviors
self.visitControlRules = visitControlRules
self.videoDrags = videoDrags
class QueryStringSetting(object):
    '''Query-string handling rule'''
def __init__(self, pathPattern=None, ignoreQueryString=None):
        '''Initialize a query-string handling rule.
        @param pathPattern: file types to match; separate multiple types with ","
        @type ignoreQueryString: True or False
        '''
self.pathPattern = pathPattern
self.ignoreQueryString = ignoreQueryString
class VisitControlRule(object):
    '''Visitor access-control rule'''
def __init__(self,
pathPattern=None,
allowNullReferer=None,
validReferers=None,
invalidReferers=None,
forbiddenIps=None):
        '''Initialize a visitor access-control rule.
        @param pathPattern: file types to match; separate multiple types with ","
        @type allowNullReferer: True or False
        @param allowNullReferer: allow requests with an empty referer?
        @type validReferers: list of str
        @param validReferers: referer whitelist; wildcard domains supported (e.g. .chinanetcenter.com)
        @type invalidReferers: list of str
        @param invalidReferers: referer blacklist; wildcard domains supported (e.g. .chinanetcenter.com)
        @type forbiddenIps: list of str
        @param forbiddenIps: IP blacklist
        '''
self.pathPattern = pathPattern
self.allowNullReferer = allowNullReferer
self.validReferers = validReferers
self.invalidReferers = invalidReferers
self.forbiddenIps = forbiddenIps
class OriginConfig(object):
    ''' Origin (back-to-source) configuration'''
def __init__(self,
originIps=None,
originDomainName=None,
advOriginConfigs=None):
        ''' Initialize the origin configuration.
        @type originIps: list of str
        @param originIps: list of origin IPs; the platform supports multiple origin IPs
        @param originDomainName: origin domain name; the platform can pull from origin IPs or an origin domain, but not both at once
        @type advOriginConfigs: list of AdvOriginConfig
        @param advOriginConfigs: list of advanced origin rules
        '''
self.originIps = originIps
self.originDomainName = originDomainName
self.advOriginConfigs = advOriginConfigs
class AdvOriginConfig(object):
    '''Advanced origin rule'''
def __init__(self,
isps=None,
masterIps=None,
backupIps=None,
detectUrl=None,
detectPeriod=None):
        ''' Initialize an advanced origin rule.
        @type isps: list of str
        @param isps: ISP names; multiple carriers may be set: dx ("China Telecom"), wt ("China Unicom"), yidong ("China Mobile"), tt ("China Tietong"), jyw ("CERNET"), changkuan ("Great Wall Broadband"), gd ("China Broadcasting"), qita ("other"), all ("all")
        @type masterIps: list of str
        @param masterIps: one or more primary origin IPs
        @type backupIps: list of str
        @param backupIps: one or more backup origin IPs, used only when the primary IPs are unavailable
        @param detectUrl: health-check URL used to decide whether an origin host is available
        @param detectPeriod: origin health-check interval, in seconds
        '''
self.isps = isps
self.masterIps = masterIps
self.backupIps = backupIps
self.detectUrl = detectUrl
self.detectPeriod = detectPeriod
class CacheBehavior(object):
    ''' Cache behavior '''
def __init__(self,
pathPattern=None,
ignoreCacheControl=None,
cacheTtl=None):
        '''
        @param pathPattern: path pattern to match; supports the * wildcard and regex characters such as | and (). Examples: all jpg files: *.jpg; all jpg or gif files: *.(jpg|gif); everything under a/b/c: a/b/c/; all jpg or gif files under a/b/c: a/b/c/*.(jpg|gif)
        @type ignoreCacheControl: boolean
        @param ignoreCacheControl: whether to ignore the cache-control HTTP header
        @param cacheTtl: cache TTL, in seconds
        '''
self.pathPattern = pathPattern
self.ignoreCacheControl = ignoreCacheControl
self.cacheTtl = cacheTtl
class VideoDrags(object):
    ''' Video drag (seek) settings '''
def __init__(self,
pathPattern=None,
dragMode=None,
startFlag=None,
endFlag=None):
self.pathPattern = pathPattern
self.dragMode = dragMode
self.startFlag = startFlag
self.endFlag = endFlag
def parseAdvOriginConfigList(nodeList):
advOriginConfigList = []
for advOriginConfigNode in nodeList:
ispsList = util.getChildNodeText(advOriginConfigNode, 'isp')
masterIpsList = util.getChildNodeText(advOriginConfigNode,
'master-ips')
backupIpsList = util.getChildNodeText(advOriginConfigNode,
'backup-ips')
detectUrl = util.getChildNodeText(advOriginConfigNode, 'detect-url')
detectPeriod = util.getChildNodeText(advOriginConfigNode,
'detect-period')
isps = splitStr(ispsList)
masterIps = splitStr(masterIpsList)
backupIps = splitStr(backupIpsList)
advOriginConfig = AdvOriginConfig(isps=isps,
masterIps=masterIps,
backupIps=backupIps,
detectUrl=detectUrl,
detectPeriod=detectPeriod)
advOriginConfigList.append(advOriginConfig)
return advOriginConfigList
def parseQueryStringSettingListNode(nodeList):
queryStringSettingList = []
for queryStringSetting in nodeList:
pathPattern = util.getChildNodeText(queryStringSetting, 'path-pattern')
ignoreQueryStringStr = util.getChildNodeText(queryStringSetting,
'ignore-query-string')
if ignoreQueryStringStr == "false":
ignoreQueryString = False
else:
ignoreQueryString = True
queryStringSetting = QueryStringSetting(pathPattern, ignoreQueryString)
queryStringSettingList.append(queryStringSetting)
return queryStringSettingList
def parseCacheBehaviorList(nodeList):
cacheBehaviorList = []
for cacheBehavior in nodeList:
pathPattern = util.getChildNodeText(cacheBehavior, 'path-pattern')
priority = util.getChildNodeText(cacheBehavior, 'priority')
ignoreCacheControlStr = util.getChildNodeText(cacheBehavior,
'ignore-cache-control')
if ignoreCacheControlStr == "false":
ignoreCacheControl = False
else:
ignoreCacheControl = True
cacheTTL = util.getChildNodeText(cacheBehavior, 'cache-ttl')
cacheBehavior = CacheBehavior(pathPattern, ignoreCacheControl,
cacheTTL)
cacheBehaviorList.append(cacheBehavior)
return cacheBehaviorList
def parseVisitControlRulesList(nodeList):
    visitControlRulesList = []
    for node in nodeList:
        pathPattern = util.getChildNodeText(node, 'path-pattern')
        allowNullReferer = util.getChildNodeText(node, 'allownullreferer')
validReferRootNode = util.getChildNode(node, "valid-referers")
validRNode = util.getChildNodeList(validReferRootNode, 'referer')
validRefers = []
for ref in validRNode:
validRefers.append(util.getChildNodeText(ref, "referer"))
invalidReferRootNode = util.getChildNode(node, "invalid-referers")
invalidRNode = util.getChildNodeList(invalidReferRootNode, 'referer')
invalidRefers = []
for ref in invalidRNode:
invalidRefers.append(util.getChildNodeText(ref, "referer"))
forbiddenIps = splitStr(util.getChildNodeText(node, 'forbidden-ips'))
        visitControlRule = VisitControlRule(pathPattern, allowNullReferer,
                                            validRefers, invalidRefers,
                                            forbiddenIps)
        visitControlRulesList.append(visitControlRule)
    return visitControlRulesList
def splitStr(data):
list1 = data.split(";")
res = []
for item in list1:
res = item.split(",") + res
return res
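# Note the ordering: each ";"-separated group is split on "," and *prepended*,
# so later groups come out first, e.g. splitStr("a,b;c,d") -> ['c', 'd', 'a', 'b'].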
def xmlToDomain(ret):
    ''' Convert the response XML into a ProcessResult carrying a Domain object; used when querying domain details '''
global X_CNC_REQUEST_ID, X_CNC_LOCATION, logging
requestId = ret.getheader(X_CNC_REQUEST_ID)
xmlString = ret.read().decode("utf-8")
logging.debug("response:" + xmlString)
doc = minidom.parseString(xmlString)
domainNode = util.getChildNode(doc, 'domain')
domainName = util.getChildNodeText(domainNode, 'domain-name')
domainId = util.getChildNodeText(domainNode, 'domain-id')
serviceType = util.getChildNodeText(domainNode, 'service-type')
comment = util.getChildNodeText(domainNode, 'comment')
serviceAreas = util.getChildNodeText(domainNode, 'service-areas')
enabled = util.getChildNodeText(domainNode, 'enabled')
cname = util.getChildNodeText(domainNode, 'cname')
status = util.getChildNodeText(domainNode, 'status')
domain = Domain(domainName=domainName,
serviceType=serviceType,
domainId=domainId,
comment=comment,
serviceAreas=serviceAreas,
enabled=enabled,
cname=cname,
status=status)
originConfigNode = util.getChildNode(domainNode, 'origin-config')
if originConfigNode is not None:
originIpsStr = util.getChildNodeText(originConfigNode, 'origin-ips')
originIps = splitStr(originIpsStr)
originDomainName = util.getChildNodeText(originConfigNode,
'origin-domain-name')
advOriginConfigListRootNode = util.getChildNode(originConfigNode,
'adv-origin-configs')
if advOriginConfigListRootNode is not None:
advOriginConfigListNode = util.getChildNodeList(
advOriginConfigListRootNode, 'adv-origin-config')
advOriginConfigs = []
if advOriginConfigListNode is not None:
advOriginConfigs = parseAdvOriginConfigList(
advOriginConfigListNode)
originConfig = OriginConfig(originIps, originDomainName,
advOriginConfigs)
domain.originConfig = originConfig
else:
originConfig = OriginConfig(originIps, originDomainName)
domain.originConfig = originConfig
queryStringSettingListRootNode = util.getChildNode(domainNode,
'query-string-settings')
if queryStringSettingListRootNode is not None:
queryStringSettingListNode = util.getChildNodeList(
queryStringSettingListRootNode, 'query-string-setting')
if queryStringSettingListNode is not None:
queryStringSettingList = parseQueryStringSettingListNode(
queryStringSettingListNode)
domain.queryStringSettings = queryStringSettingList
cacheBehaviorListRootNode = util.getChildNode(domainNode,
'cache-behaviors')
if cacheBehaviorListRootNode is not None:
cacheBehaviorListNode = util.getChildNodeList(
cacheBehaviorListRootNode, 'cache-behavior')
if cacheBehaviorListNode is not None:
cacheBehaviorList = parseCacheBehaviorList(cacheBehaviorListNode)
domain.cacheBehaviors = cacheBehaviorList
visitControlRulesListRootNode = util.getChildNode(domainNode,
'visit-control-rules')
if visitControlRulesListRootNode is not None:
visitControlRulesListNode = util.getChildNodeList(
visitControlRulesListRootNode, 'visit-control-rule')
if visitControlRulesListNode is not None:
visitControlRulesList = parseVisitControlRulesList(
visitControlRulesListNode)
domain.visitControlRules = visitControlRulesList
videoDragsNode = util.getChildNode(domainNode, 'videodrags')
if videoDragsNode is not None:
pathPattern = util.getChildNodeText(videoDragsNode, 'path-pattern')
dragMode = util.getChildNodeText(videoDragsNode, 'drag-mode')
startFlag = util.getChildNodeText(videoDragsNode, 'start-flag')
endFlag = util.getChildNodeText(videoDragsNode, 'end-flag')
videoDrags = VideoDrags(pathPattern=pathPattern,
dragMode=dragMode,
startFlag=startFlag,
endFlag=endFlag)
domain.videoDrags = videoDrags
return ProcessResult(0, 'OK', xCncRequestId=requestId, domain=domain)
def domainToXml(domain):
    ''' Serialize a Domain object to XML '''
doc = dom.getDOMImplementation().createDocument('', 'domain', '')
domainNode = util.getChildNode(doc, 'domain')
util.addElement(doc, domainNode, 'version', "1.0.0")
if domain.domainName is not None:
util.addElement(doc, domainNode, 'domain-name', domain.domainName)
if domain.serviceType is not None:
util.addElement(doc, domainNode, 'service-type', domain.serviceType)
if domain.comment is not None:
util.addElement(doc, domainNode, 'comment', domain.comment)
if domain.serviceAreas is not None:
util.addElement(doc, domainNode, 'service-areas', domain.serviceAreas)
else:
util.addElement(doc, domainNode, 'service-areas', 'cn')
if domain.originConfig is not None:
originConfigNode = util.addElement(doc, domainNode, 'origin-config')
if domain.originConfig.originIps is not None:
originIps = domain.originConfig.originIps
util.addElement(doc, originConfigNode, 'origin-ips',
';'.join(originIps))
if domain.originConfig.originDomainName is not None:
util.addElement(doc, originConfigNode, 'origin-domain-name',
domain.originConfig.originDomainName)
if domain.originConfig.advOriginConfigs is not None:
advOriginConfigsNode = util.addElement(doc, originConfigNode,
'adv-origin-configs')
for advOriginConfig in domain.originConfig.advOriginConfigs:
isps = advOriginConfig.isps
advOriginConfigNode = util.addElement(
doc, advOriginConfigsNode, 'adv-origin-config')
util.addElement(doc, advOriginConfigNode, 'isp',
';'.join(isps))
util.addElement(doc, advOriginConfigNode, 'master-ips',
';'.join(advOriginConfig.masterIps))
util.addElement(doc, advOriginConfigNode, 'backup-ips',
';'.join(advOriginConfig.backupIps))
util.addElement(doc, advOriginConfigNode, 'detect-url',
advOriginConfig.detectUrl)
util.addElement(doc, advOriginConfigNode, 'detect-period',
advOriginConfig.detectPeriod)
if domain.queryStringSettings is not None:
queryStringSettingsNode = util.addElement(doc, domainNode,
'query-string-settings')
for queryStringSetting in domain.queryStringSettings:
queryStringSettingNode = util.addElement(
doc, queryStringSettingsNode, 'query-string-setting')
util.addElement(doc, queryStringSettingNode, 'path-pattern',
queryStringSetting.pathPattern)
if queryStringSetting.ignoreQueryString == False:
util.addElement(doc, queryStringSettingNode,
'ignore-query-string', "false")
else:
util.addElement(doc, queryStringSettingNode,
'ignore-query-string', "true")
if domain.cacheBehaviors is not None:
cacheBehaviorsNode = util.addElement(doc, domainNode,
'cache-behaviors')
for cacheBehavior in domain.cacheBehaviors:
cacheBehaviorNode = util.addElement(doc, cacheBehaviorsNode,
'cache-behavior')
util.addElement(doc, cacheBehaviorNode, 'path-pattern',
cacheBehavior.pathPattern)
if cacheBehavior.ignoreCacheControl == False:
util.addElement(doc, cacheBehaviorNode, 'ignore-cache-control',
"false")
else:
util.addElement(doc, cacheBehaviorNode, 'ignore-cache-control',
"true")
util.addElement(doc, cacheBehaviorNode, 'cache-ttl',
cacheBehavior.cacheTtl)
if domain.visitControlRules is not None:
visitControlRulesNode = util.addElement(doc, domainNode,
'visit-control-rules')
for visitControl in domain.visitControlRules:
visitControlNode = util.addElement(doc, visitControlRulesNode,
"visit-control-rule")
if visitControl.allowNullReferer == True:
util.addElement(doc, visitControlNode, 'allownullreferer',
"true")
elif visitControl.allowNullReferer == False:
util.addElement(doc, visitControlNode, 'allownullreferer',
"false")
util.addElement(doc, visitControlNode, 'path-pattern',
visitControl.pathPattern)
validRNode = util.addElement(doc, visitControlNode,
'valid-referers')
validReferers = visitControl.validReferers
if validReferers is not None and len(validReferers) > 0:
for referer in validReferers:
util.addElement(doc, validRNode, 'referer', referer)
invalidRNode = util.addElement(doc, visitControlNode,
'invalid-referers')
invalidReferers = visitControl.invalidReferers
if invalidReferers is not None and len(invalidReferers) > 0:
for referer in invalidReferers:
util.addElement(doc, invalidRNode, 'referer', referer)
util.addElement(doc, visitControlNode, 'forbidden-ips', ';'.join(
visitControl.forbiddenIps))
if domain.videoDrags is not None:
videoDragsNode = util.addElement(doc, domainNode, 'videodrags')
if domain.videoDrags.pathPattern is not None:
util.addElement(doc, videoDragsNode, 'path-pattern',
domain.videoDrags.pathPattern)
if domain.videoDrags.dragMode is not None:
util.addElement(doc, videoDragsNode, 'drag-mode',
domain.videoDrags.dragMode)
if domain.videoDrags.startFlag is not None:
util.addElement(doc, videoDragsNode, 'start-flag',
domain.videoDrags.startFlag)
if domain.videoDrags.endFlag is not None:
util.addElement(doc, videoDragsNode, 'end-flag',
domain.videoDrags.endFlag)
return doc.toprettyxml(indent="", newl="", encoding='utf-8')
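# For reference, a Domain with only domainName set serializes to roughly the
# following single line (toprettyxml is called with empty indent/newl, and the
# domain name here is illustrative):
#
#   <?xml version="1.0" encoding="utf-8"?><domain><version>1.0.0</version>
#   <domain-name>cdn.example.com</domain-name><service-areas>cn</service-areas></domain>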
def xmlToDomainList(ret):
    ''' Convert the response XML into a ProcessResult carrying a list of DomainSummary objects; used when listing all domains under an account '''
global X_CNC_REQUEST_ID, X_CNC_LOCATION
requestId = ret.getheader(X_CNC_REQUEST_ID)
xmlString = ret.read().decode("utf-8")
logging.debug("response:" + xmlString)
doc = minidom.parseString(xmlString)
domainListNode = util.getChildNode(doc, 'domain-list')
domainList = []
domainSummaryList = util.getChildNodeList(domainListNode, 'domain-summary')
for domainNode in domainSummaryList:
domainId = util.getChildNodeText(domainNode, 'domain-id')
cname = util.getChildNodeText(domainNode, 'cname')
domainName = util.getChildNodeText(domainNode, 'domain-name')
status = util.getChildNodeText(domainNode, 'status')
serviceType = util.getChildNodeText(domainNode, "service-type")
enabled = util.getChildNodeText(domainNode, 'enabled') == 'true'
cdnServiceStatus = util.getChildNodeText(
domainNode, 'cdn-service-status') == 'true'
domainSummary = DomainSummary(domainId, domainName, cname, status,
enabled, serviceType, cdnServiceStatus)
domainList.append(domainSummary)
return ProcessResult(0, 'OK',
xCncRequestId=requestId,
domainSummarys=domainList)
class DomainSummary(object):
    ''' Information about a single domain within the result of a list-domains query '''
def __init__(self,
domainId=None,
domainName=None,
cname=None,
status=None,
enabled=None,
serviceType=None,
cdnServiceStatus=None):
        '''
        @param domainName: the domain name
        @param serviceType: service type
        @param domainId: the domain id
        @param cname: the domain's CNAME; traffic only flows into the ChinaNetCenter CDN once the domain's DNS is CNAMEd to this address
        @param cdnServiceStatus: the domain's CDN service status
        @param status: the domain's deployment status
        @param enabled: whether the domain is enabled
        '''
self.domainId = domainId
self.domainName = domainName
self.cname = cname
self.status = status
self.enabled = enabled
self.serviceType = serviceType
self.cdnServiceStatus = cdnServiceStatus
class ProcessResult(BaseResult):
    '''Represents the result of a request'''
def __init__(self, ret, msg,
xCncRequestId=None,
domain=None,
domainSummarys=None,
location=None,
cname=None):
        '''
        @param ret: HTTP response status code
        @param msg: response message
        @param xCncRequestId: the unique id assigned to each request
        @param domain: the Domain instance returned by a domain query
        @type domainSummarys: list of DomainSummary
        @param domainSummarys: list of basic domain info returned by a list-domains query
        @param location: URL of the new domain; only present when a domain is created
        @param cname: CNAME of the new domain
        '''
super(ProcessResult, self).__init__(ret, msg, xCncRequestId)
self.domainSummarys = domainSummarys
self.location = location
self.domain = domain
self.cname = cname
def getDomainSummarys(self):
        ''' Call this when the result carries information about multiple domains'''
return self.domainSummarys
def getDomain(self):
        ''' Call this when the result carries information about a single domain'''
return self.domain
def getLocation(self):
        ''' Return the location of the new domain; only present when a domain is created'''
return self.location
def getCname(self):
        ''' Return the domain's CNAME'''
return self.cname
def xmlToSuccess(ret):
    ''' Convert the response XML into a ProcessResult for a successful request'''
global X_CNC_REQUEST_ID, X_CNC_LOCATION, X_CNC_CNAME
requestId = ret.getheader(X_CNC_REQUEST_ID)
location = ret.getheader(X_CNC_LOCATION)
cname = ret.getheader(X_CNC_CNAME)
msg = util.getReturnXmlMsg(ret)
return ProcessResult(ret.status, msg,
xCncRequestId=requestId,
location=location,
cname=cname)
def xmlToFailure(ret):
msg = util.getReturnXmlMsg(ret)
return ProcessResult(ret.status, ret.reason + ":" + msg)
|
{
"content_hash": "9d2dc3572e8a8b6a357c1ef660436783",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 158,
"avg_line_length": 38.80530973451327,
"alnum_prop": 0.5923440299723082,
"repo_name": "kendazheng/wizcloud2",
"id": "f7d3449f167ca05cc81e3ec175a28a66f2010169",
"size": "32797",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "wizcloud/wizresource/cdnwssdk/domainApi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "368897"
},
{
"name": "HTML",
"bytes": "779419"
},
{
"name": "JavaScript",
"bytes": "3010901"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "591646"
},
{
"name": "Shell",
"bytes": "592"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import boto3
import boto
import boto.s3
import boto.s3.key
from botocore.exceptions import ClientError
from moto import mock_cloudformation, mock_s3, mock_sqs
import json
import sure # noqa
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
import random
dummy_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"EC2Instance1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-d3adb33f",
"KeyName": "dummy",
"InstanceType": "t2.micro",
"Tags": [
{
"Key": "Description",
"Value": "Test tag"
},
{
"Key": "Name",
"Value": "Name tag for tests"
}
]
}
}
}
}
dummy_template_yaml = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-d3adb33f
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_short_form_func = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-d3adb33f
KeyName: !Join [ ":", [ du, m, my ] ]
InstanceType: t2.micro
Tags:
- Key: Description
Value: Test tag
- Key: Name
Value: Name tag for tests
"""
dummy_template_yaml_with_ref = """---
AWSTemplateFormatVersion: 2010-09-09
Description: Stack1 with yaml template
Parameters:
TagDescription:
Type: String
TagName:
Type: String
Resources:
EC2Instance1:
Type: AWS::EC2::Instance
Properties:
ImageId: ami-d3adb33f
KeyName: dummy
InstanceType: t2.micro
Tags:
- Key: Description
Value:
Ref: TagDescription
- Key: Name
Value: !Ref TagName
"""
dummy_update_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Parameters": {
"KeyName": {
"Description": "Name of an existing EC2 KeyPair",
"Type": "AWS::EC2::KeyPair::KeyName",
"ConstraintDescription": "must be the name of an existing EC2 KeyPair."
}
},
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-08111162"
}
}
}
}
dummy_output_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {
"Instance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": "ami-08111162"
}
}
},
"Outputs" : {
"StackVPC" : {
"Description" : "The ID of the VPC",
"Value" : "VPCID",
"Export" : {
"Name" : "My VPC ID"
}
}
}
}
dummy_import_template = {
"AWSTemplateFormatVersion": "2010-09-09",
"Resources": {
"Queue": {
"Type": "AWS::SQS::Queue",
"Properties": {
"QueueName": {"Fn::ImportValue": 'My VPC ID'},
"VisibilityTimeout": 60,
}
}
}
}
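# dummy_output_template exports "My VPC ID" with the value "VPCID";
# dummy_import_template consumes that export via Fn::ImportValue, so a stack
# created from it ends up with an SQS queue literally named "VPCID"
# (exercised in test_stack_with_imports below).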
dummy_template_json = json.dumps(dummy_template)
dummy_update_template_json = json.dumps(dummy_update_template)
dummy_output_template_json = json.dumps(dummy_output_template)
dummy_import_template_json = json.dumps(dummy_import_template)
@mock_cloudformation
def test_boto3_create_stack():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
dummy_template)
@mock_cloudformation
def test_boto3_create_stack_with_yaml():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_yaml,
)
cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
dummy_template_yaml)
@mock_cloudformation
def test_boto3_create_stack_with_short_form_func_yaml():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_yaml_with_short_form_func,
)
cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
dummy_template_yaml_with_short_form_func)
@mock_cloudformation
def test_boto3_create_stack_with_ref_yaml():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
params = [
{'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'},
{'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'},
]
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_yaml_with_ref,
Parameters=params
)
cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal(
dummy_template_yaml_with_ref)
@mock_cloudformation
def test_creating_stacks_across_regions():
west1_cf = boto3.resource('cloudformation', region_name='us-west-1')
west2_cf = boto3.resource('cloudformation', region_name='us-west-2')
west1_cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
west2_cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
list(west1_cf.stacks.all()).should.have.length_of(1)
list(west2_cf.stacks.all()).should.have.length_of(1)
@mock_cloudformation
def test_create_stack_with_notification_arn():
cf = boto3.resource('cloudformation', region_name='us-east-1')
cf.create_stack(
StackName="test_stack_with_notifications",
TemplateBody=dummy_template_json,
NotificationARNs=['arn:aws:sns:us-east-1:123456789012:fake-queue'],
)
stack = list(cf.stacks.all())[0]
stack.notification_arns.should.contain(
'arn:aws:sns:us-east-1:123456789012:fake-queue')
@mock_cloudformation
def test_create_stack_with_role_arn():
cf = boto3.resource('cloudformation', region_name='us-east-1')
cf.create_stack(
StackName="test_stack_with_notifications",
TemplateBody=dummy_template_json,
RoleARN='arn:aws:iam::123456789012:role/moto',
)
stack = list(cf.stacks.all())[0]
stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto')
@mock_cloudformation
@mock_s3
def test_create_stack_from_s3_url():
s3 = boto3.client('s3')
s3_conn = boto3.resource('s3')
bucket = s3_conn.create_bucket(Bucket="foobar")
key = s3_conn.Object(
'foobar', 'template-key').put(Body=dummy_template_json)
key_url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'foobar',
'Key': 'template-key'
}
)
cf_conn = boto3.client('cloudformation', region_name='us-west-1')
cf_conn.create_stack(
StackName='stack_from_url',
TemplateURL=key_url,
)
cf_conn.get_template(StackName="stack_from_url")[
'TemplateBody'].should.equal(dummy_template)
@mock_cloudformation
def test_describe_stack_pagination():
conn = boto3.client('cloudformation', region_name='us-east-1')
for i in range(100):
conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
resp = conn.describe_stacks()
stacks = resp['Stacks']
stacks.should.have.length_of(50)
next_token = resp['NextToken']
next_token.should_not.be.none
resp2 = conn.describe_stacks(NextToken=next_token)
stacks.extend(resp2['Stacks'])
stacks.should.have.length_of(100)
assert 'NextToken' not in resp2.keys()
@mock_cloudformation
def test_describe_stack_resources():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
response = cf_conn.describe_stack_resources(StackName=stack['StackName'])
resource = response['StackResources'][0]
resource['LogicalResourceId'].should.equal('EC2Instance1')
resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
resource['ResourceType'].should.equal('AWS::EC2::Instance')
resource['StackId'].should.equal(stack['StackId'])
@mock_cloudformation
def test_describe_stack_by_name():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
stack['StackName'].should.equal('test_stack')
@mock_cloudformation
def test_describe_stack_by_stack_id():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
stack_by_id = cf_conn.describe_stacks(StackName=stack['StackId'])['Stacks'][
0]
stack_by_id['StackId'].should.equal(stack['StackId'])
stack_by_id['StackName'].should.equal("test_stack")
@mock_cloudformation
def test_list_stacks():
cf = boto3.resource('cloudformation', region_name='us-east-1')
cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
cf.create_stack(
StackName="test_stack2",
TemplateBody=dummy_template_json,
)
stacks = list(cf.stacks.all())
stacks.should.have.length_of(2)
stack_names = [stack.stack_name for stack in stacks]
stack_names.should.contain("test_stack")
stack_names.should.contain("test_stack2")
@mock_cloudformation
def test_delete_stack_from_resource():
cf = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
list(cf.stacks.all()).should.have.length_of(1)
stack.delete()
list(cf.stacks.all()).should.have.length_of(0)
@mock_cloudformation
def test_delete_stack_by_name():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
cf_conn.describe_stacks()['Stacks'].should.have.length_of(1)
cf_conn.delete_stack(StackName="test_stack")
cf_conn.describe_stacks()['Stacks'].should.have.length_of(0)
@mock_cloudformation
def test_describe_deleted_stack():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
stack_id = stack['StackId']
cf_conn.delete_stack(StackName=stack['StackId'])
stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
stack_by_id['StackId'].should.equal(stack['StackId'])
stack_by_id['StackName'].should.equal("test_stack")
stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE")
@mock_cloudformation
def test_describe_updated_stack():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
cf_conn.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
Tags=[{'Key': 'foo', 'Value': 'bar'}],
)
cf_conn.update_stack(
StackName="test_stack",
RoleARN='arn:aws:iam::123456789012:role/moto',
TemplateBody=dummy_update_template_json,
Tags=[{'Key': 'foo', 'Value': 'baz'}],
)
stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
stack_id = stack['StackId']
stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0]
stack_by_id['StackId'].should.equal(stack['StackId'])
stack_by_id['StackName'].should.equal("test_stack")
stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE")
stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto')
stack_by_id['Tags'].should.equal([{'Key': 'foo', 'Value': 'baz'}])
@mock_cloudformation
def test_bad_describe_stack():
cf_conn = boto3.client('cloudformation', region_name='us-east-1')
with assert_raises(ClientError):
cf_conn.describe_stacks(StackName="non_existent_stack")
@mock_cloudformation()
def test_cloudformation_params():
dummy_template_with_params = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Stack 1",
"Resources": {},
"Parameters": {
"APPNAME": {
"Default": "app-name",
"Description": "The name of the app",
"Type": "String"
}
}
}
dummy_template_with_params_json = json.dumps(dummy_template_with_params)
cf = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName='test_stack',
TemplateBody=dummy_template_with_params_json,
Parameters=[{
"ParameterKey": "APPNAME",
"ParameterValue": "testing123",
}],
)
stack.parameters.should.have.length_of(1)
param = stack.parameters[0]
param['ParameterKey'].should.equal('APPNAME')
param['ParameterValue'].should.equal('testing123')
@mock_cloudformation
def test_stack_tags():
tags = [
{
"Key": "foo",
"Value": "bar"
},
{
"Key": "baz",
"Value": "bleh"
}
]
cf = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
Tags=tags,
)
observed_tag_items = set(
item for items in [tag.items() for tag in stack.tags] for item in items)
expected_tag_items = set(
item for items in [tag.items() for tag in tags] for item in items)
observed_tag_items.should.equal(expected_tag_items)
@mock_cloudformation
def test_stack_events():
cf = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_template_json,
)
stack.update(TemplateBody=dummy_update_template_json)
stack = cf.Stack(stack.stack_id)
stack.delete()
# assert begins and ends with stack events
events = list(stack.events.all())
events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")
# testing ordering of stack events without assuming resource events will not exist
# the AWS API returns events in reverse chronological order
stack_events_to_look_for = iter([
("DELETE_COMPLETE", None),
("DELETE_IN_PROGRESS", "User Initiated"),
("UPDATE_COMPLETE", None),
("UPDATE_IN_PROGRESS", "User Initiated"),
("CREATE_COMPLETE", None),
("CREATE_IN_PROGRESS", "User Initiated"),
])
try:
for event in events:
event.stack_id.should.equal(stack.stack_id)
event.stack_name.should.equal("test_stack")
event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")
if event.resource_type == "AWS::CloudFormation::Stack":
event.logical_resource_id.should.equal("test_stack")
event.physical_resource_id.should.equal(stack.stack_id)
status_to_look_for, reason_to_look_for = next(
stack_events_to_look_for)
event.resource_status.should.equal(status_to_look_for)
if reason_to_look_for is not None:
event.resource_status_reason.should.equal(
reason_to_look_for)
except StopIteration:
assert False, "Too many stack events"
list(stack_events_to_look_for).should.be.empty
@mock_cloudformation
def test_list_exports():
cf_client = boto3.client('cloudformation', region_name='us-east-1')
cf_resource = boto3.resource('cloudformation', region_name='us-east-1')
stack = cf_resource.create_stack(
StackName="test_stack",
TemplateBody=dummy_output_template_json,
)
output_value = 'VPCID'
exports = cf_client.list_exports()['Exports']
stack.outputs.should.have.length_of(1)
stack.outputs[0]['OutputValue'].should.equal(output_value)
exports.should.have.length_of(1)
exports[0]['ExportingStackId'].should.equal(stack.stack_id)
exports[0]['Name'].should.equal('My VPC ID')
exports[0]['Value'].should.equal(output_value)
@mock_cloudformation
def test_list_exports_with_token():
cf = boto3.client('cloudformation', region_name='us-east-1')
for i in range(101):
# Add index to ensure name is unique
dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i)
cf.create_stack(
StackName="test_stack",
TemplateBody=json.dumps(dummy_output_template),
)
exports = cf.list_exports()
exports['Exports'].should.have.length_of(100)
exports.get('NextToken').should_not.be.none
more_exports = cf.list_exports(NextToken=exports['NextToken'])
more_exports['Exports'].should.have.length_of(1)
more_exports.get('NextToken').should.be.none
@mock_cloudformation
def test_delete_stack_with_export():
cf = boto3.client('cloudformation', region_name='us-east-1')
stack = cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_output_template_json,
)
stack_id = stack['StackId']
exports = cf.list_exports()['Exports']
exports.should.have.length_of(1)
cf.delete_stack(StackName=stack_id)
cf.list_exports()['Exports'].should.have.length_of(0)
@mock_cloudformation
def test_export_names_must_be_unique():
cf = boto3.resource('cloudformation', region_name='us-east-1')
first_stack = cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_output_template_json,
)
with assert_raises(ClientError):
cf.create_stack(
StackName="test_stack",
TemplateBody=dummy_output_template_json,
)
@mock_sqs
@mock_cloudformation
def test_stack_with_imports():
cf = boto3.resource('cloudformation', region_name='us-east-1')
    sqs_resource = boto3.resource('sqs', region_name='us-east-1')
output_stack = cf.create_stack(
StackName="test_stack1",
TemplateBody=dummy_output_template_json,
)
import_stack = cf.create_stack(
StackName="test_stack2",
TemplateBody=dummy_import_template_json
)
output_stack.outputs.should.have.length_of(1)
output = output_stack.outputs[0]['OutputValue']
    queue = sqs_resource.get_queue_by_name(QueueName=output)
queue.should_not.be.none
|
{
"content_hash": "5b94797ccbfa723c006b464521f507e4",
"timestamp": "",
"source": "github",
"line_count": 638,
"max_line_length": 86,
"avg_line_length": 30.684952978056426,
"alnum_prop": 0.6243040302395668,
"repo_name": "kefo/moto",
"id": "ed2ee833738554d3e6427e1e2a70778bdcea30e8",
"size": "19577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "Python",
"bytes": "2996908"
},
{
"name": "Ruby",
"bytes": "188"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.core.validators import RegexValidator
from django.utils.functional import lazy
# Constants
REGION_CHOICE = (
    ('South', 'South'),
    ('North', 'North'),
)
class NorthPlaceChoice(models.Model):
place = models.CharField(max_length=30)
def __unicode__(self):
return self.place
class SouthPlaceChoice(models.Model):
place = models.CharField(max_length=30)
def __unicode__(self):
return self.place
class User(models.Model):
"""
User model for employees.
"""
    name = models.CharField(verbose_name='Full Name', max_length=150, blank=False, null=False)
    signum = models.CharField(max_length=10, blank=False, null=False)
    email = models.EmailField(verbose_name='Email (Ericsson mail address)', max_length=150, blank=False, null=False, unique=True)
    phone_regex = RegexValidator(regex=r'^\d{10}$', message="Phone number must be entered in the format: '9012345678'. Up to 10 digits allowed.")
    phone_number = models.CharField(max_length=10, validators=[phone_regex], blank=False, null=False)  # validators should be a list
    region = models.CharField(max_length=10, blank=False, null=False, choices=REGION_CHOICE)
    northPickUpPlace = models.ForeignKey(NorthPlaceChoice, verbose_name='PickUpPlace', related_name='PickUpPlace')
    southPickUpPlace = models.ForeignKey(SouthPlaceChoice, verbose_name='PickUpPlace')
    comments = models.TextField(verbose_name='Comments (Optional)', blank=True, null=True)
def __unicode__(self):
return self.name
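# A minimal validation sketch (hypothetical values; the pick-up place foreign
# keys are omitted for brevity, and full_clean() runs the RegexValidator
# declared above):
#
#   user = User(name='Jane Doe', signum='jdoe', email='jane@example.com',
#               phone_number='90123', region='South')
#   user.full_clean()  # raises ValidationError: phone_number fails the
#                      # 10-digit regex (the missing FKs are reported too)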
|
{
"content_hash": "086bac63d598fe6825cae20ca841b11e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 142,
"avg_line_length": 38.76315789473684,
"alnum_prop": 0.7576374745417516,
"repo_name": "triump0870/outing",
"id": "a73e7aebd9a28cd8d0805ffff126c360999c4490",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aug/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7832"
},
{
"name": "JavaScript",
"bytes": "39989"
},
{
"name": "Python",
"bytes": "7528"
}
],
"symlink_target": ""
}
|
from flask import json
from jinja2 import escape
from wtforms.widgets import HTMLString, html_params
from flask_admin._compat import as_unicode
from flask_admin.babel import gettext
from flask_admin.helpers import get_url
from flask_admin.form import RenderTemplateWidget
class InlineFieldListWidget(RenderTemplateWidget):
def __init__(self):
super(InlineFieldListWidget, self).__init__('admin/model/inline_field_list.html')
class InlineFormWidget(RenderTemplateWidget):
def __init__(self):
super(InlineFormWidget, self).__init__('admin/model/inline_form.html')
def __call__(self, field, **kwargs):
kwargs.setdefault('form_opts', getattr(field, 'form_opts', None))
return super(InlineFormWidget, self).__call__(field, **kwargs)
class AjaxSelect2Widget(object):
def __init__(self, multiple=False):
self.multiple = multiple
def __call__(self, field, **kwargs):
kwargs.setdefault('data-role', 'select2-ajax')
kwargs.setdefault('data-url', get_url('.ajax_lookup', name=field.loader.name))
allow_blank = getattr(field, 'allow_blank', False)
if allow_blank and not self.multiple:
kwargs['data-allow-blank'] = u'1'
kwargs.setdefault('id', field.id)
kwargs.setdefault('type', 'hidden')
if self.multiple:
result = []
ids = []
for value in field.data:
data = field.loader.format(value)
result.append(data)
ids.append(as_unicode(data[0]))
separator = getattr(field, 'separator', ',')
kwargs['value'] = separator.join(ids)
kwargs['data-json'] = json.dumps(result)
kwargs['data-multiple'] = u'1'
else:
data = field.loader.format(field.data)
if data:
kwargs['value'] = data[0]
kwargs['data-json'] = json.dumps(data)
placeholder = gettext(field.loader.options.get('placeholder', 'Please select model'))
kwargs.setdefault('data-placeholder', placeholder)
return HTMLString('<input %s>' % html_params(name=field.name, **kwargs))
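# For a single-select field the widget renders roughly (illustrative values;
# the exact attributes depend on the field and its loader):
#
#   <input data-role="select2-ajax" data-url=".../ajax/lookup/?name=user"
#          data-placeholder="Please select model" type="hidden" id="user"
#          value="1" data-json="[1, &quot;Admin&quot;]" name="user">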
class XEditableWidget(object):
"""
WTForms widget that provides in-line editing for the list view.
Determines how to display the x-editable/ajax form based on the
field inside of the FieldList (StringField, IntegerField, etc).
"""
def __call__(self, field, **kwargs):
kwargs.setdefault('data-value', kwargs.pop('value', ''))
kwargs.setdefault('data-role', 'x-editable')
kwargs.setdefault('data-url', './ajax/update/')
kwargs.setdefault('id', field.id)
kwargs.setdefault('name', field.name)
kwargs.setdefault('href', '#')
if not kwargs.get('pk'):
raise Exception('pk required')
kwargs['data-pk'] = str(kwargs.pop("pk"))
kwargs['data-csrf'] = kwargs.pop("csrf", "")
# subfield is the first entry (subfield) from FieldList (field)
subfield = field.entries[0]
kwargs = self.get_kwargs(subfield, kwargs)
return HTMLString(
'<a %s>%s</a>' % (html_params(**kwargs),
escape(kwargs['data-value']))
)
def get_kwargs(self, subfield, kwargs):
"""
Return extra kwargs based on the subfield type.
"""
if subfield.type == 'StringField':
kwargs['data-type'] = 'text'
elif subfield.type == 'TextAreaField':
kwargs['data-type'] = 'textarea'
kwargs['data-rows'] = '5'
elif subfield.type == 'BooleanField':
kwargs['data-type'] = 'select'
# data-source = dropdown options
kwargs['data-source'] = {'': 'False', '1': 'True'}
kwargs['data-role'] = 'x-editable-boolean'
elif subfield.type == 'Select2Field':
kwargs['data-type'] = 'select'
kwargs['data-source'] = dict(subfield.choices)
elif subfield.type == 'DateField':
kwargs['data-type'] = 'combodate'
kwargs['data-format'] = 'YYYY-MM-DD'
kwargs['data-template'] = 'YYYY-MM-DD'
elif subfield.type == 'DateTimeField':
kwargs['data-type'] = 'combodate'
kwargs['data-format'] = 'YYYY-MM-DD HH:mm:ss'
kwargs['data-template'] = 'YYYY-MM-DD HH:mm:ss'
# x-editable-combodate uses 1 minute increments
kwargs['data-role'] = 'x-editable-combodate'
elif subfield.type == 'TimeField':
kwargs['data-type'] = 'combodate'
kwargs['data-format'] = 'HH:mm:ss'
kwargs['data-template'] = 'HH:mm:ss'
kwargs['data-role'] = 'x-editable-combodate'
elif subfield.type == 'IntegerField':
kwargs['data-type'] = 'number'
elif subfield.type in ['FloatField', 'DecimalField']:
kwargs['data-type'] = 'number'
kwargs['data-step'] = 'any'
elif subfield.type in ['QuerySelectField', 'ModelSelectField']:
kwargs['data-type'] = 'select'
choices = {}
for choice in subfield:
try:
choices[str(choice._value())] = str(choice.label.text)
except TypeError:
choices[str(choice._value())] = ""
kwargs['data-source'] = choices
else:
raise Exception('Unsupported field type: %s' % (type(subfield),))
# for Select2, QuerySelectField, and ModelSelectField
if getattr(subfield, 'allow_blank', False):
kwargs['data-source']['__None'] = ""
return kwargs
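# Example: for a FieldList whose first entry is an IntegerField, the anchor
# comes out roughly as (attribute order may vary; pk/value are illustrative):
#
#   <a data-value="42" data-role="x-editable" data-url="./ajax/update/"
#      id="..." name="..." href="#" data-pk="1" data-csrf=""
#      data-type="number">42</a>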
|
{
"content_hash": "bec1075308e60ecc56ab72f17ac44fff",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 93,
"avg_line_length": 37.032467532467535,
"alnum_prop": 0.5732070839908819,
"repo_name": "janusnic/flask-admin",
"id": "e2cd1544bcf9452db72f03ffa1e8c2220b75a675",
"size": "5703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_admin/model/widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "120"
},
{
"name": "HTML",
"bytes": "94717"
},
{
"name": "JavaScript",
"bytes": "30692"
},
{
"name": "Makefile",
"bytes": "5587"
},
{
"name": "Python",
"bytes": "593614"
},
{
"name": "Shell",
"bytes": "1316"
}
],
"symlink_target": ""
}
|
"""
githubimporter
~~~~~~~~~~~~~~
Imports code directly from github.
:copyright: (c) Copyright 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import imp
import urllib
import urlparse
class GithubImporter(object):
url_template = 'https://raw.github.com/%(user)s/%(project)s/master/%(file)s'
def __init__(self, path):
url = urlparse.urlparse(path)
if url.scheme != 'github':
raise ImportError()
self.user = url.netloc
self.project = url.path.strip('/')
if '/' in self.project:
self.project, self.path = self.project.split('/', 1)
else:
self.path = ''
self._cache = {}
def get_source_and_filename(self, name):
rv = self._cache.get(name)
if rv is not None:
return rv
url_name = name.replace('.', '/')
for filename in url_name + '.py', url_name + '/__init__.py':
try:
url = self.url_template % dict(
user=self.user,
project=self.project,
file=urlparse.urljoin(self.path, filename)
)
resp = urllib.urlopen(url)
if resp.code == 404:
continue
rv = resp.read(), 'github://%s/%s' % (
self.user,
filename
)
self._cache[name] = rv
return rv
except IOError:
continue
raise ImportError(name)
def get_source(self, name):
return self.get_source_and_filename(name)[0]
def get_filename(self, name):
return self.get_source_and_filename(name)[1]
def find_module(self, name, path=None):
try:
self.get_source_and_filename(name)
except ImportError:
return None
return self
def load_module(self, name):
source, filename = self.get_source_and_filename(name)
sys.modules[name] = mod = imp.new_module(name)
mod.__loader__ = self
mod.__file__ = filename
if filename.endswith('/__init__.py'):
mod.__path__ = [filename.rsplit('/', 1)[0]]
exec source in mod.__dict__
return mod
def install_hook():
sys.path_hooks.append(GithubImporter)
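# sys.path_hooks entries are tried against every sys.path item; GithubImporter
# raises ImportError from __init__ for anything that is not a github:// URL,
# which tells the import machinery to skip this hook for that entry. Appending
# a 'github://user/project' path (as below) is what activates it.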
if __name__ == '__main__':
install_hook()
sys.path.append('github://mitsuhiko/markupsafe')
import markupsafe
print markupsafe.__file__
print markupsafe.Markup.escape('<foo>')
|
{
"content_hash": "915e3eb92946dbb6d944bdb3f3411f3e",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 80,
"avg_line_length": 28.533333333333335,
"alnum_prop": 0.5253115264797508,
"repo_name": "mitsuhiko/badideas",
"id": "c3ecdb33f216677d978f73648996b2a2637e6560",
"size": "2592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "githubimporter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13705"
}
],
"symlink_target": ""
}
|
from typing import Dict, List
from joueur.base_game import BaseGame
# import game objects
from games.stardash.body import Body
from games.stardash.game_object import GameObject
from games.stardash.job import Job
from games.stardash.player import Player
from games.stardash.projectile import Projectile
from games.stardash.unit import Unit
# <<-- Creer-Merge: imports -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# you can add additional import(s) here
# <<-- /Creer-Merge: imports -->>
class Game(BaseGame):
"""The class representing the Game in the Stardash game.
    Collect the most of the rarest mineral orbiting the sun and out-compete your competitor.
"""
def __init__(self):
"""Initializes a Game with basic logic as provided by the Creer code generator.
"""
BaseGame.__init__(self)
# private attributes to hold the properties so they appear read only
self._bodies = []
self._current_player = None
self._current_turn = 0
self._dash_cost = 0
self._dash_distance = 0
self._game_objects = {}
self._genarium_value = 0
self._jobs = []
self._legendarium_value = 0
self._max_asteroid = 0
self._max_turns = 100
self._min_asteroid = 0
self._mining_speed = 0
self._mythicite_amount = 0
self._orbits_protected = 0
self._ore_rarity_genarium = 0
self._ore_rarity_legendarium = 0
self._ore_rarity_rarium = 0
self._planet_energy_cap = 0
self._planet_recharge_rate = 0
self._players = []
self._projectile_radius = 0
self._projectile_speed = 0
self._projectiles = []
self._rarium_value = 0
self._regenerate_rate = 0
self._session = ""
self._ship_radius = 0
self._size_x = 0
self._size_y = 0
self._time_added_per_turn = 0
self._turns_to_orbit = 0
self._units = []
self.name = "Stardash"
self._game_object_classes = {
'Body': Body,
'GameObject': GameObject,
'Job': Job,
'Player': Player,
'Projectile': Projectile,
'Unit': Unit
}
@property
def bodies(self) -> List['games.stardash.body.Body']:
"""list[games.stardash.body.Body]: All the celestial bodies in the game. The first two are planets and the third is the sun. The fourth is the VP asteroid. Everything else is normal asteroids.
"""
return self._bodies
@property
def current_player(self) -> 'games.stardash.player.Player':
"""games.stardash.player.Player: The player whose turn it is currently. That player can send commands. Other players cannot.
"""
return self._current_player
@property
def current_turn(self) -> int:
"""int: The current turn number, starting at 0 for the first player's turn.
"""
return self._current_turn
@property
def dash_cost(self) -> int:
"""int: The cost of dashing.
"""
return self._dash_cost
@property
def dash_distance(self) -> int:
"""int: The distance traveled each turn by dashing.
"""
return self._dash_distance
@property
def game_objects(self) -> Dict[str, 'games.stardash.game_object.GameObject']:
"""dict[str, games.stardash.game_object.GameObject]: A mapping of every game object's ID to the actual game object. Primarily used by the server and client to easily refer to the game objects via ID.
"""
return self._game_objects
@property
def genarium_value(self) -> float:
"""float: The value of every unit of genarium.
"""
return self._genarium_value
@property
def jobs(self) -> List['games.stardash.job.Job']:
"""list[games.stardash.job.Job]: A list of all jobs. The first element is corvette, second is missileboat, third is martyr, fourth is transport, and fifth is miner.
"""
return self._jobs
@property
def legendarium_value(self) -> float:
"""float: The value of every unit of legendarium.
"""
return self._legendarium_value
@property
def max_asteroid(self) -> int:
"""int: The highest amount of material, that can be in a asteroid.
"""
return self._max_asteroid
@property
def max_turns(self) -> int:
"""int: The maximum number of turns before the game will automatically end.
"""
return self._max_turns
@property
def min_asteroid(self) -> int:
"""int: The smallest amount of material, that can be in a asteroid.
"""
return self._min_asteroid
@property
def mining_speed(self) -> int:
"""int: The rate at which miners grab minerals from asteroids.
"""
return self._mining_speed
@property
def mythicite_amount(self) -> float:
"""float: The amount of mythicite that spawns at the start of the game.
"""
return self._mythicite_amount
@property
def orbits_protected(self) -> int:
"""int: The number of orbit updates you cannot mine the mithicite asteroid.
"""
return self._orbits_protected
@property
def ore_rarity_genarium(self) -> float:
"""float: The rarity modifier of the most common ore. This controls how much spawns.
"""
return self._ore_rarity_genarium
@property
def ore_rarity_legendarium(self) -> float:
"""float: The rarity modifier of the rarest ore. This controls how much spawns.
"""
return self._ore_rarity_legendarium
@property
def ore_rarity_rarium(self) -> float:
"""float: The rarity modifier of the second rarest ore. This controls how much spawns.
"""
return self._ore_rarity_rarium
@property
def planet_energy_cap(self) -> int:
"""int: The amount of energy a planet can hold at once.
"""
return self._planet_energy_cap
@property
def planet_recharge_rate(self) -> int:
"""int: The amount of energy the planets restore each round.
"""
return self._planet_recharge_rate
@property
def players(self) -> List['games.stardash.player.Player']:
"""list[games.stardash.player.Player]: List of all the players in the game.
"""
return self._players
@property
def projectile_radius(self) -> int:
"""int: The standard size of ships.
"""
return self._projectile_radius
@property
def projectile_speed(self) -> int:
"""int: The amount of distance missiles travel through space.
"""
return self._projectile_speed
@property
def projectiles(self) -> List['games.stardash.projectile.Projectile']:
"""list[games.stardash.projectile.Projectile]: Every projectile in the game.
"""
return self._projectiles
@property
def rarium_value(self) -> float:
"""float: The value of every unit of rarium.
"""
return self._rarium_value
@property
def regenerate_rate(self) -> float:
"""float: The regeneration rate of asteroids.
"""
return self._regenerate_rate
@property
def session(self) -> str:
"""str: A unique identifier for the game instance that is being played.
"""
return self._session
@property
def ship_radius(self) -> int:
"""int: The standard size of ships.
"""
return self._ship_radius
@property
def size_x(self) -> int:
"""int: The size of the map in the X direction.
"""
return self._size_x
@property
def size_y(self) -> int:
"""int: The size of the map in the Y direction.
"""
return self._size_y
@property
def time_added_per_turn(self) -> int:
"""int: The amount of time (in nano-seconds) added after each player performs a turn.
"""
return self._time_added_per_turn
@property
def turns_to_orbit(self) -> int:
"""int: The number of turns it takes for a asteroid to orbit the sun. (Asteroids move after each players turn).
"""
return self._turns_to_orbit
@property
def units(self) -> List['games.stardash.unit.Unit']:
"""list[games.stardash.unit.Unit]: Every Unit in the game.
"""
return self._units
# <<-- Creer-Merge: functions -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.
# if you want to add any client side logic (such as state checking functions) this is where you can add them
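    # For example, a minimal state-checking helper might look like this
    # (an illustrative sketch only, not generated code; it assumes Unit
    # exposes an `owner` property, which is not shown in this file):
    #
    # def my_unit_count(self):
    #     """Return how many units the current player owns."""
    #     return sum(1 for unit in self.units if unit.owner == self.current_player)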
# <<-- /Creer-Merge: functions -->>
|
{
"content_hash": "a2d567d322c68756630d240652b325ec",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 207,
"avg_line_length": 32.395604395604394,
"alnum_prop": 0.6073043871551335,
"repo_name": "JacobFischer/Joueur.py",
"id": "11c7ef892fa4325bb4467c75406fd52241f82e43",
"size": "9148",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "games/stardash/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "80"
},
{
"name": "Python",
"bytes": "91770"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
}
|
"""
.. codeauthor:: Cédric Dumay <cedric.dumay@gmail.com>
"""
from cdumay_result import Result
from kafka import KafkaProducer
from kser.controller import BaseProducer
class Producer(BaseProducer):
"""Mother class for producers"""
def __init__(self, config):
""" Create new Producer instance using provided configuration dict.
:param dict config: configuration
"""
self.client = KafkaProducer(**config)
def bulk_send(self, topic, kmsgs, timeout=60):
""" Send a batch of messages
:param str topic: a kafka topic
        :param list(kser.transport.Message) kmsgs: Messages to serialize
:param int timeout: Timeout in seconds
:return: Execution result
        :rtype: cdumay_result.Result
"""
try:
for kmsg in kmsgs:
self.client.send(
topic, self._onmessage(kmsg).dumps().encode("UTF-8")
)
self.client.flush(timeout=timeout)
return Result(stdout="{} message(s) sent".format(len(kmsgs)))
except Exception as exc:
return Result.from_exception(exc)
# noinspection PyUnusedLocal
def _send(self, topic, kmsg, timeout=60):
""" Send the message into the given topic
:param str topic: a kafka topic
        :param kser.transport.Message kmsg: Message to serialize
:param int timeout: Timeout in seconds (not used in proto producer)
:return: Execution result
        :rtype: cdumay_result.Result
"""
result = Result(uuid=kmsg.uuid)
try:
self.client.send(
topic, self._onmessage(kmsg).dumps().encode("UTF-8")
)
result.stdout = "Message {}[{}] sent in {}".format(
kmsg.entrypoint, kmsg.uuid, topic
)
self.client.flush()
except Exception as exc:
result = Result.from_exception(exc, kmsg.uuid)
finally:
if result.retcode < 300:
return self._onsuccess(kmsg=kmsg, result=result)
else:
return self._onerror(kmsg=kmsg, result=result)
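# Usage sketch (illustration only, not part of the original module; the broker
# address and topic are assumptions, and building `kmsg` objects depends on
# the kser transport in use):
#
#     producer = Producer({"bootstrap_servers": "localhost:9092"})
#     result = producer.bulk_send("my-topic", kmsgs)
#     print(result.stdout)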
|
{
"content_hash": "a22535ad320b7662118f344f459691be",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 75,
"avg_line_length": 30.814285714285713,
"alnum_prop": 0.5836810384793695,
"repo_name": "cdumay/kser",
"id": "b0926161d22566198158bc108c69a7d2b287e8b3",
"size": "2205",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/kser/python_kafka/producer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48357"
}
],
"symlink_target": ""
}
|
from . import base
class Section(base.SectionBase):
def gc(self, *, return_result=True, **kwargs):
"""Removes stored objects that are not pinned from the repo.
.. code-block:: python
>>> client.repo.gc()
[{'Key': 'QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQuwaHG2mpW2'},
{'Key': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'},
{'Key': 'QmRVBnxUCsD57ic5FksKYadtyUbMsyo9KYQKKELajqAp4q'},
…
{'Key': 'QmYp4TeCurXrhsxnzt5wqLqqUz8ZRg5zsc7GuUrUSDtwzP'}]
Performs a garbage collection sweep of the local set of
		stored objects and removes ones that are not pinned in order
to reclaim hard disk space. Returns the hashes of all collected
objects.
Parameters
----------
return_result : bool
Passing False will return None and avoid downloading
the list of removed objects.
Returns
-------
dict
List of IPFS objects that have been removed
"""
kwargs["return_result"] = return_result
if "use_http_head_for_no_result" not in self._client.workarounds:
# go-ipfs 0.4.22- does not support the quiet option yet
kwargs.setdefault("opts", {})["quiet"] = not return_result
return self._client.request('/repo/gc', decoder='json', **kwargs)
@base.returns_single_item
def stat(self, **kwargs):
"""Displays the repo's status.
Returns the number of objects in the repo and the repo's size,
version, and path.
.. code-block:: python
>>> client.repo.stat()
{'NumObjects': 354,
'RepoPath': '…/.local/share/ipfs',
'Version': 'fs-repo@4',
'RepoSize': 13789310}
Returns
-------
dict
General information about the IPFS file repository
+------------+-------------------------------------------------+
| NumObjects | Number of objects in the local repo. |
+------------+-------------------------------------------------+
| RepoPath | The path to the repo being currently used. |
+------------+-------------------------------------------------+
| RepoSize | Size in bytes that the repo is currently using. |
+------------+-------------------------------------------------+
| Version | The repo version. |
+------------+-------------------------------------------------+
"""
return self._client.request('/repo/stat', decoder='json', **kwargs)
#TODO: `version()`
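	# A sketch of what that method might look like, assuming the daemon
	# exposes a '/repo/version' endpoint as go-ipfs does (illustration only,
	# not part of the original module):
	#
	# @base.returns_single_item
	# def version(self, **kwargs):
	# 	return self._client.request('/repo/version', decoder='json', **kwargs)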
|
{
"content_hash": "ed3de5da8d4a003380f62b2a0dbcff21",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 69,
"avg_line_length": 31.84931506849315,
"alnum_prop": 0.5690322580645162,
"repo_name": "alexander255/py-ipfs-api",
"id": "65f4a8391dbdfdbdd3a03bf6a400bfd00b00714d",
"size": "2329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ipfshttpclient/client/repo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "222072"
},
{
"name": "Shell",
"bytes": "452"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unittest
from django.test import TestCase
from testfixtures import LogCapture
import frame_logging.log as log
class Order(object):
def __init__(self):
self.customer = {'id': 1}
class FrameLoggingTest(TestCase):
def test_default_info(self):
with LogCapture() as l:
log.info('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'INFO', 'Renewed contract test'))
def test_info_with_task(self):
class Task(object):
def __init__(self):
self.id = 1
with LogCapture() as l:
log.info('Renewed contract %s', 'test', task=Task())
l.check(('tests.test_frame_logging', 'INFO', 'Renewed contract test - task_id=1'))
def test_default_debug(self):
with LogCapture() as l:
log.debug('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'DEBUG', 'Renewed contract test'))
def test_default_critical(self):
with LogCapture() as l:
log.critical('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'CRITICAL', 'Renewed contract test'))
def test_default_warning(self):
with LogCapture() as l:
log.warning('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'WARNING', 'Renewed contract test'))
def test_default_error(self):
with LogCapture() as l:
log.error('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'ERROR', 'Renewed contract test'))
def test_default_exception(self):
with LogCapture() as l:
try:
raise ValueError("This is a test")
except ValueError:
log.exception('Renewed contract %s', 'test')
l.check(('tests.test_frame_logging', 'ERROR', 'Renewed contract test'))
def test_custom_format(self):
class TestObj(object):
def __init__(self):
self.t_attr = 't_val'
def __str__(self):
return self.t_attr
with LogCapture() as l:
test_obj = TestObj()
test_obj.tn_attr = 'tn_val'
log.error('Renewed contract test', obj=test_obj)
l.check(('tests.test_frame_logging',
'ERROR', 'Renewed contract test - Impacted object is TestObj(t_val):'
' tn_attr=tn_val, t_attr=t_val'))
def test_transform(self):
order = Order()
with LogCapture() as l:
#import pdb; pdb.set_trace()
log.info('Renewed contract %s', 'test', order=order)
l.check(('tests.test_frame_logging', 'INFO', 'Renewed contract test - customer_id=1'))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7139faf48cab4afb9aea8a519c529b06",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 98,
"avg_line_length": 35.45,
"alnum_prop": 0.561706629055007,
"repo_name": "ItsfBisounours/frame_logging",
"id": "a9ad3362171fc8ffa4165e3ea4a8612f7111906f",
"size": "2860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_frame_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "14838"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
# We're adding a Meta option of 'mario' to the defaults so we can set it on these models;
# the MarioRouter can then point these specific models at a separate database.
import django.db.models.options as options
options.DEFAULT_NAMES = options.DEFAULT_NAMES + ('mario',)
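# A minimal sketch of the router described above (illustration only; the
# project's actual MarioRouter lives elsewhere and may differ). Any model
# whose Meta sets 'mario = True' is routed to a database alias assumed here
# to be named 'mario':
#
# class MarioRouter(object):
#     def db_for_read(self, model, **hints):
#         return 'mario' if getattr(model._meta, 'mario', False) else None
#     def db_for_write(self, model, **hints):
#         return 'mario' if getattr(model._meta, 'mario', False) else None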
class Colors(models.Model):
colorid = models.IntegerField(db_column='colorId', primary_key=True, blank=True) # Field name made lowercase.
# profileid = models.IntegerField(db_column='profileId', blank=True, null=True) # Field name made lowercase.
profileid = models.ForeignKey('Profiles', db_column='profileId', blank=True, null=True)
idx = models.IntegerField(blank=True, null=True)
r = models.FloatField(blank=True, null=True)
g = models.FloatField(blank=True, null=True)
b = models.FloatField(blank=True, null=True)
class Meta:
mario = True
managed = False
db_table = 'colors'
class Features(models.Model):
# profileid = models.IntegerField(db_column='profileId', primary_key=True, blank=True) # Field name made lowercase.
profileid = models.ForeignKey('Profiles', db_column='profileId', primary_key=True, blank=True) # Field name made lowercase.
grad1_energy = models.FloatField(blank=True, null=True)
grad1_entropy = models.FloatField(blank=True, null=True)
grad1_correlation = models.FloatField(blank=True, null=True)
grad1_inversedifferencemoment = models.FloatField(db_column='grad1_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
grad1_inertia = models.FloatField(blank=True, null=True)
grad1_clustershade = models.FloatField(db_column='grad1_clusterShade', blank=True, null=True) # Field name made lowercase.
grad1_clusterprominence = models.FloatField(db_column='grad1_clusterProminence', blank=True, null=True) # Field name made lowercase.
grad2_energy = models.FloatField(blank=True, null=True)
grad2_entropy = models.FloatField(blank=True, null=True)
grad2_correlation = models.FloatField(blank=True, null=True)
grad2_inversedifferencemoment = models.FloatField(db_column='grad2_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
grad2_inertia = models.FloatField(blank=True, null=True)
grad2_clustershade = models.FloatField(db_column='grad2_clusterShade', blank=True, null=True) # Field name made lowercase.
grad2_clusterprominence = models.FloatField(db_column='grad2_clusterProminence', blank=True, null=True) # Field name made lowercase.
energy = models.FloatField(blank=True, null=True)
entropy = models.FloatField(blank=True, null=True)
correlation = models.FloatField(blank=True, null=True)
inversedifferencemoment = models.FloatField(db_column='inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
inertia = models.FloatField(blank=True, null=True)
clustershade = models.FloatField(db_column='clusterShade', blank=True, null=True) # Field name made lowercase.
clusterprominence = models.FloatField(db_column='clusterProminence', blank=True, null=True) # Field name made lowercase.
laplace_energy = models.FloatField(blank=True, null=True)
laplace_entropy = models.FloatField(blank=True, null=True)
laplace_correlation = models.FloatField(blank=True, null=True)
laplace_inversedifferencemoment = models.FloatField(db_column='laplace_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
laplace_inertia = models.FloatField(blank=True, null=True)
laplace_clustershade = models.FloatField(db_column='laplace_clusterShade', blank=True, null=True) # Field name made lowercase.
laplace_clusterprominence = models.FloatField(db_column='laplace_clusterProminence', blank=True, null=True) # Field name made lowercase.
segment_energy = models.FloatField(blank=True, null=True)
segment_entropy = models.FloatField(blank=True, null=True)
segment_correlation = models.FloatField(blank=True, null=True)
segment_inversedifferencemoment = models.FloatField(db_column='segment_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
segment_inertia = models.FloatField(blank=True, null=True)
segment_clustershade = models.FloatField(db_column='segment_clusterShade', blank=True, null=True) # Field name made lowercase.
segment_clusterprominence = models.FloatField(db_column='segment_clusterProminence', blank=True, null=True) # Field name made lowercase.
segmentdistances_energy = models.FloatField(db_column='segmentDistances_energy', blank=True, null=True) # Field name made lowercase.
segmentdistances_entropy = models.FloatField(db_column='segmentDistances_entropy', blank=True, null=True) # Field name made lowercase.
segmentdistances_correlation = models.FloatField(db_column='segmentDistances_correlation', blank=True, null=True) # Field name made lowercase.
segmentdistances_inversedifferencemoment = models.FloatField(db_column='segmentDistances_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
segmentdistances_inertia = models.FloatField(db_column='segmentDistances_inertia', blank=True, null=True) # Field name made lowercase.
segmentdistances_clustershade = models.FloatField(db_column='segmentDistances_clusterShade', blank=True, null=True) # Field name made lowercase.
segmentdistances_clusterprominence = models.FloatField(db_column='segmentDistances_clusterProminence', blank=True, null=True) # Field name made lowercase.
threshold_y_q0 = models.FloatField(db_column='threshold_Y_Q0', blank=True, null=True) # Field name made lowercase.
threshold_u_q0 = models.FloatField(db_column='threshold_U_Q0', blank=True, null=True) # Field name made lowercase.
threshold_v_q0 = models.FloatField(db_column='threshold_V_Q0', blank=True, null=True) # Field name made lowercase.
threshold_y_q1 = models.FloatField(db_column='threshold_Y_Q1', blank=True, null=True) # Field name made lowercase.
threshold_u_q1 = models.FloatField(db_column='threshold_U_Q1', blank=True, null=True) # Field name made lowercase.
threshold_v_q1 = models.FloatField(db_column='threshold_V_Q1', blank=True, null=True) # Field name made lowercase.
threshold_y_q2 = models.FloatField(db_column='threshold_Y_Q2', blank=True, null=True) # Field name made lowercase.
threshold_u_q2 = models.FloatField(db_column='threshold_U_Q2', blank=True, null=True) # Field name made lowercase.
threshold_v_q2 = models.FloatField(db_column='threshold_V_Q2', blank=True, null=True) # Field name made lowercase.
threshold_y_q3 = models.FloatField(db_column='threshold_Y_Q3', blank=True, null=True) # Field name made lowercase.
threshold_u_q3 = models.FloatField(db_column='threshold_U_Q3', blank=True, null=True) # Field name made lowercase.
threshold_v_q3 = models.FloatField(db_column='threshold_V_Q3', blank=True, null=True) # Field name made lowercase.
threshold_y_q4 = models.FloatField(db_column='threshold_Y_Q4', blank=True, null=True) # Field name made lowercase.
threshold_u_q4 = models.FloatField(db_column='threshold_U_Q4', blank=True, null=True) # Field name made lowercase.
threshold_v_q4 = models.FloatField(db_column='threshold_V_Q4', blank=True, null=True) # Field name made lowercase.
thresholdotsu_y = models.FloatField(db_column='thresholdOtsu_Y', blank=True, null=True) # Field name made lowercase.
thresholdotsu_u = models.FloatField(db_column='thresholdOtsu_U', blank=True, null=True) # Field name made lowercase.
thresholdotsu_v = models.FloatField(db_column='thresholdOtsu_V', blank=True, null=True) # Field name made lowercase.
median_y = models.FloatField(db_column='median_Y', blank=True, null=True) # Field name made lowercase.
median_u = models.FloatField(db_column='median_U', blank=True, null=True) # Field name made lowercase.
median_v = models.FloatField(db_column='median_V', blank=True, null=True) # Field name made lowercase.
variance_y = models.FloatField(db_column='variance_Y', blank=True, null=True) # Field name made lowercase.
standarddeviation_y = models.FloatField(db_column='standardDeviation_Y', blank=True, null=True) # Field name made lowercase.
skewness_y = models.FloatField(db_column='skewness_Y', blank=True, null=True) # Field name made lowercase.
kurtosis_y = models.FloatField(db_column='kurtosis_Y', blank=True, null=True) # Field name made lowercase.
erroronaverage_y = models.FloatField(db_column='errorOnAverage_Y', blank=True, null=True) # Field name made lowercase.
variance_u = models.FloatField(db_column='variance_U', blank=True, null=True) # Field name made lowercase.
standarddeviation_u = models.FloatField(db_column='standardDeviation_U', blank=True, null=True) # Field name made lowercase.
skewness_u = models.FloatField(db_column='skewness_U', blank=True, null=True) # Field name made lowercase.
kurtosis_u = models.FloatField(db_column='kurtosis_U', blank=True, null=True) # Field name made lowercase.
erroronaverage_u = models.FloatField(db_column='errorOnAverage_U', blank=True, null=True) # Field name made lowercase.
variance_v = models.FloatField(db_column='variance_V', blank=True, null=True) # Field name made lowercase.
standarddeviation_v = models.FloatField(db_column='standardDeviation_V', blank=True, null=True) # Field name made lowercase.
skewness_v = models.FloatField(db_column='skewness_V', blank=True, null=True) # Field name made lowercase.
kurtosis_v = models.FloatField(db_column='kurtosis_V', blank=True, null=True) # Field name made lowercase.
erroronaverage_v = models.FloatField(db_column='errorOnAverage_V', blank=True, null=True) # Field name made lowercase.
variance_segmentsize = models.FloatField(db_column='variance_SegmentSize', blank=True, null=True) # Field name made lowercase.
standarddeviation_segmentsize = models.FloatField(db_column='standardDeviation_SegmentSize', blank=True, null=True) # Field name made lowercase.
skewness_segmentsize = models.FloatField(db_column='skewness_SegmentSize', blank=True, null=True) # Field name made lowercase.
kurtosis_segmentsize = models.FloatField(db_column='kurtosis_SegmentSize', blank=True, null=True) # Field name made lowercase.
erroronaverage_segmentsize = models.FloatField(db_column='errorOnAverage_SegmentSize', blank=True, null=True) # Field name made lowercase.
thresholdotsu_segmentsize = models.FloatField(db_column='thresholdOtsu_SegmentSize', blank=True, null=True) # Field name made lowercase.
median_segmentsize = models.FloatField(db_column='median_SegmentSize', blank=True, null=True) # Field name made lowercase.
variance_segmentdistances = models.FloatField(db_column='variance_SegmentDistances', blank=True, null=True) # Field name made lowercase.
standarddeviation_segmentdistances = models.FloatField(db_column='standardDeviation_SegmentDistances', blank=True, null=True) # Field name made lowercase.
skewness_segmentdistances = models.FloatField(db_column='skewness_SegmentDistances', blank=True, null=True) # Field name made lowercase.
kurtosis_segmentdistances = models.FloatField(db_column='kurtosis_SegmentDistances', blank=True, null=True) # Field name made lowercase.
erroronaverage_segmentdistances = models.FloatField(db_column='errorOnAverage_SegmentDistances', blank=True, null=True) # Field name made lowercase.
thresholdotsu_segmentdistances = models.FloatField(db_column='thresholdOtsu_SegmentDistances', blank=True, null=True) # Field name made lowercase.
median_segmentdistances = models.FloatField(db_column='median_SegmentDistances', blank=True, null=True) # Field name made lowercase.
originalwidth = models.FloatField(db_column='originalWidth', blank=True, null=True) # Field name made lowercase.
originalheight = models.FloatField(db_column='originalHeight', blank=True, null=True) # Field name made lowercase.
proportion = models.FloatField(blank=True, null=True)
orientation = models.FloatField(blank=True, null=True)
segmentcount = models.FloatField(db_column='segmentCount', blank=True, null=True) # Field name made lowercase.
jpegcompressionratio = models.FloatField(db_column='jpegCompressionRatio', blank=True, null=True) # Field name made lowercase.
pngcompressionratio = models.FloatField(db_column='pngCompressionRatio', blank=True, null=True) # Field name made lowercase.
variance_grad1 = models.FloatField(blank=True, null=True)
standarddeviation_grad1 = models.FloatField(db_column='standardDeviation_grad1', blank=True, null=True) # Field name made lowercase.
skewness_grad1 = models.FloatField(blank=True, null=True)
kurtosis_grad1 = models.FloatField(blank=True, null=True)
erroronaverage_grad1 = models.FloatField(db_column='errorOnAverage_grad1', blank=True, null=True) # Field name made lowercase.
thresholdotsu_grad1 = models.FloatField(db_column='thresholdOtsu_grad1', blank=True, null=True) # Field name made lowercase.
median_grad1 = models.FloatField(blank=True, null=True)
variance_grad2 = models.FloatField(blank=True, null=True)
standarddeviation_grad2 = models.FloatField(db_column='standardDeviation_grad2', blank=True, null=True) # Field name made lowercase.
skewness_grad2 = models.FloatField(blank=True, null=True)
kurtosis_grad2 = models.FloatField(blank=True, null=True)
erroronaverage_grad2 = models.FloatField(db_column='errorOnAverage_grad2', blank=True, null=True) # Field name made lowercase.
thresholdotsu_grad2 = models.FloatField(db_column='thresholdOtsu_grad2', blank=True, null=True) # Field name made lowercase.
median_grad2 = models.FloatField(blank=True, null=True)
variance_hough = models.FloatField(blank=True, null=True)
standarddeviation_hough = models.FloatField(db_column='standardDeviation_hough', blank=True, null=True) # Field name made lowercase.
skewness_hough = models.FloatField(blank=True, null=True)
kurtosis_hough = models.FloatField(blank=True, null=True)
erroronaverage_hough = models.FloatField(db_column='errorOnAverage_hough', blank=True, null=True) # Field name made lowercase.
thresholdotsu_hough = models.FloatField(db_column='thresholdOtsu_hough', blank=True, null=True) # Field name made lowercase.
median_hough = models.FloatField(blank=True, null=True)
hough_energy = models.FloatField(blank=True, null=True)
hough_entropy = models.FloatField(blank=True, null=True)
hough_correlation = models.FloatField(blank=True, null=True)
hough_inversedifferencemoment = models.FloatField(db_column='hough_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
hough_inertia = models.FloatField(blank=True, null=True)
hough_clustershade = models.FloatField(db_column='hough_clusterShade', blank=True, null=True) # Field name made lowercase.
hough_clusterprominence = models.FloatField(db_column='hough_clusterProminence', blank=True, null=True) # Field name made lowercase.
class Meta:
mario = True
managed = False
db_table = 'features'
class Normalized(models.Model):
# profileid = models.IntegerField(db_column='profileId', primary_key=True, blank=True) # Field name made lowercase.
profileid = models.ForeignKey('Profiles', db_column='profileId', primary_key=True, blank=True) # Field name made lowercase.
grad1_energy = models.FloatField(blank=True, null=True)
grad1_entropy = models.FloatField(blank=True, null=True)
grad1_correlation = models.FloatField(blank=True, null=True)
grad1_inversedifferencemoment = models.FloatField(db_column='grad1_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
grad1_inertia = models.FloatField(blank=True, null=True)
grad1_clustershade = models.FloatField(db_column='grad1_clusterShade', blank=True, null=True) # Field name made lowercase.
grad1_clusterprominence = models.FloatField(db_column='grad1_clusterProminence', blank=True, null=True) # Field name made lowercase.
grad2_energy = models.FloatField(blank=True, null=True)
grad2_entropy = models.FloatField(blank=True, null=True)
grad2_correlation = models.FloatField(blank=True, null=True)
grad2_inversedifferencemoment = models.FloatField(db_column='grad2_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
grad2_inertia = models.FloatField(blank=True, null=True)
grad2_clustershade = models.FloatField(db_column='grad2_clusterShade', blank=True, null=True) # Field name made lowercase.
grad2_clusterprominence = models.FloatField(db_column='grad2_clusterProminence', blank=True, null=True) # Field name made lowercase.
energy = models.FloatField(blank=True, null=True)
entropy = models.FloatField(blank=True, null=True)
correlation = models.FloatField(blank=True, null=True)
inversedifferencemoment = models.FloatField(db_column='inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
inertia = models.FloatField(blank=True, null=True)
clustershade = models.FloatField(db_column='clusterShade', blank=True, null=True) # Field name made lowercase.
clusterprominence = models.FloatField(db_column='clusterProminence', blank=True, null=True) # Field name made lowercase.
laplace_energy = models.FloatField(blank=True, null=True)
laplace_entropy = models.FloatField(blank=True, null=True)
laplace_correlation = models.FloatField(blank=True, null=True)
laplace_inversedifferencemoment = models.FloatField(db_column='laplace_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
laplace_inertia = models.FloatField(blank=True, null=True)
laplace_clustershade = models.FloatField(db_column='laplace_clusterShade', blank=True, null=True) # Field name made lowercase.
laplace_clusterprominence = models.FloatField(db_column='laplace_clusterProminence', blank=True, null=True) # Field name made lowercase.
segment_energy = models.FloatField(blank=True, null=True)
segment_entropy = models.FloatField(blank=True, null=True)
segment_correlation = models.FloatField(blank=True, null=True)
segment_inversedifferencemoment = models.FloatField(db_column='segment_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
segment_inertia = models.FloatField(blank=True, null=True)
segment_clustershade = models.FloatField(db_column='segment_clusterShade', blank=True, null=True) # Field name made lowercase.
segment_clusterprominence = models.FloatField(db_column='segment_clusterProminence', blank=True, null=True) # Field name made lowercase.
segmentdistances_energy = models.FloatField(db_column='segmentDistances_energy', blank=True, null=True) # Field name made lowercase.
segmentdistances_entropy = models.FloatField(db_column='segmentDistances_entropy', blank=True, null=True) # Field name made lowercase.
segmentdistances_correlation = models.FloatField(db_column='segmentDistances_correlation', blank=True, null=True) # Field name made lowercase.
segmentdistances_inversedifferencemoment = models.FloatField(db_column='segmentDistances_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
segmentdistances_inertia = models.FloatField(db_column='segmentDistances_inertia', blank=True, null=True) # Field name made lowercase.
segmentdistances_clustershade = models.FloatField(db_column='segmentDistances_clusterShade', blank=True, null=True) # Field name made lowercase.
segmentdistances_clusterprominence = models.FloatField(db_column='segmentDistances_clusterProminence', blank=True, null=True) # Field name made lowercase.
threshold_y_q0 = models.FloatField(db_column='threshold_Y_Q0', blank=True, null=True) # Field name made lowercase.
threshold_u_q0 = models.FloatField(db_column='threshold_U_Q0', blank=True, null=True) # Field name made lowercase.
threshold_v_q0 = models.FloatField(db_column='threshold_V_Q0', blank=True, null=True) # Field name made lowercase.
threshold_y_q1 = models.FloatField(db_column='threshold_Y_Q1', blank=True, null=True) # Field name made lowercase.
threshold_u_q1 = models.FloatField(db_column='threshold_U_Q1', blank=True, null=True) # Field name made lowercase.
threshold_v_q1 = models.FloatField(db_column='threshold_V_Q1', blank=True, null=True) # Field name made lowercase.
threshold_y_q2 = models.FloatField(db_column='threshold_Y_Q2', blank=True, null=True) # Field name made lowercase.
threshold_u_q2 = models.FloatField(db_column='threshold_U_Q2', blank=True, null=True) # Field name made lowercase.
threshold_v_q2 = models.FloatField(db_column='threshold_V_Q2', blank=True, null=True) # Field name made lowercase.
threshold_y_q3 = models.FloatField(db_column='threshold_Y_Q3', blank=True, null=True) # Field name made lowercase.
threshold_u_q3 = models.FloatField(db_column='threshold_U_Q3', blank=True, null=True) # Field name made lowercase.
threshold_v_q3 = models.FloatField(db_column='threshold_V_Q3', blank=True, null=True) # Field name made lowercase.
threshold_y_q4 = models.FloatField(db_column='threshold_Y_Q4', blank=True, null=True) # Field name made lowercase.
threshold_u_q4 = models.FloatField(db_column='threshold_U_Q4', blank=True, null=True) # Field name made lowercase.
threshold_v_q4 = models.FloatField(db_column='threshold_V_Q4', blank=True, null=True) # Field name made lowercase.
thresholdotsu_y = models.FloatField(db_column='thresholdOtsu_Y', blank=True, null=True) # Field name made lowercase.
thresholdotsu_u = models.FloatField(db_column='thresholdOtsu_U', blank=True, null=True) # Field name made lowercase.
thresholdotsu_v = models.FloatField(db_column='thresholdOtsu_V', blank=True, null=True) # Field name made lowercase.
median_y = models.FloatField(db_column='median_Y', blank=True, null=True) # Field name made lowercase.
median_u = models.FloatField(db_column='median_U', blank=True, null=True) # Field name made lowercase.
median_v = models.FloatField(db_column='median_V', blank=True, null=True) # Field name made lowercase.
variance_y = models.FloatField(db_column='variance_Y', blank=True, null=True) # Field name made lowercase.
standarddeviation_y = models.FloatField(db_column='standardDeviation_Y', blank=True, null=True) # Field name made lowercase.
skewness_y = models.FloatField(db_column='skewness_Y', blank=True, null=True) # Field name made lowercase.
kurtosis_y = models.FloatField(db_column='kurtosis_Y', blank=True, null=True) # Field name made lowercase.
erroronaverage_y = models.FloatField(db_column='errorOnAverage_Y', blank=True, null=True) # Field name made lowercase.
variance_u = models.FloatField(db_column='variance_U', blank=True, null=True) # Field name made lowercase.
standarddeviation_u = models.FloatField(db_column='standardDeviation_U', blank=True, null=True) # Field name made lowercase.
skewness_u = models.FloatField(db_column='skewness_U', blank=True, null=True) # Field name made lowercase.
kurtosis_u = models.FloatField(db_column='kurtosis_U', blank=True, null=True) # Field name made lowercase.
erroronaverage_u = models.FloatField(db_column='errorOnAverage_U', blank=True, null=True) # Field name made lowercase.
variance_v = models.FloatField(db_column='variance_V', blank=True, null=True) # Field name made lowercase.
standarddeviation_v = models.FloatField(db_column='standardDeviation_V', blank=True, null=True) # Field name made lowercase.
skewness_v = models.FloatField(db_column='skewness_V', blank=True, null=True) # Field name made lowercase.
kurtosis_v = models.FloatField(db_column='kurtosis_V', blank=True, null=True) # Field name made lowercase.
erroronaverage_v = models.FloatField(db_column='errorOnAverage_V', blank=True, null=True) # Field name made lowercase.
variance_segmentsize = models.FloatField(db_column='variance_SegmentSize', blank=True, null=True) # Field name made lowercase.
standarddeviation_segmentsize = models.FloatField(db_column='standardDeviation_SegmentSize', blank=True, null=True) # Field name made lowercase.
skewness_segmentsize = models.FloatField(db_column='skewness_SegmentSize', blank=True, null=True) # Field name made lowercase.
kurtosis_segmentsize = models.FloatField(db_column='kurtosis_SegmentSize', blank=True, null=True) # Field name made lowercase.
erroronaverage_segmentsize = models.FloatField(db_column='errorOnAverage_SegmentSize', blank=True, null=True) # Field name made lowercase.
thresholdotsu_segmentsize = models.FloatField(db_column='thresholdOtsu_SegmentSize', blank=True, null=True) # Field name made lowercase.
median_segmentsize = models.FloatField(db_column='median_SegmentSize', blank=True, null=True) # Field name made lowercase.
variance_segmentdistances = models.FloatField(db_column='variance_SegmentDistances', blank=True, null=True) # Field name made lowercase.
standarddeviation_segmentdistances = models.FloatField(db_column='standardDeviation_SegmentDistances', blank=True, null=True) # Field name made lowercase.
skewness_segmentdistances = models.FloatField(db_column='skewness_SegmentDistances', blank=True, null=True) # Field name made lowercase.
kurtosis_segmentdistances = models.FloatField(db_column='kurtosis_SegmentDistances', blank=True, null=True) # Field name made lowercase.
erroronaverage_segmentdistances = models.FloatField(db_column='errorOnAverage_SegmentDistances', blank=True, null=True) # Field name made lowercase.
thresholdotsu_segmentdistances = models.FloatField(db_column='thresholdOtsu_SegmentDistances', blank=True, null=True) # Field name made lowercase.
median_segmentdistances = models.FloatField(db_column='median_SegmentDistances', blank=True, null=True) # Field name made lowercase.
originalwidth = models.FloatField(db_column='originalWidth', blank=True, null=True) # Field name made lowercase.
originalheight = models.FloatField(db_column='originalHeight', blank=True, null=True) # Field name made lowercase.
proportion = models.FloatField(blank=True, null=True)
orientation = models.FloatField(blank=True, null=True)
segmentcount = models.FloatField(db_column='segmentCount', blank=True, null=True) # Field name made lowercase.
jpegcompressionratio = models.FloatField(db_column='jpegCompressionRatio', blank=True, null=True) # Field name made lowercase.
pngcompressionratio = models.FloatField(db_column='pngCompressionRatio', blank=True, null=True) # Field name made lowercase.
variance_grad1 = models.FloatField(blank=True, null=True)
standarddeviation_grad1 = models.FloatField(db_column='standardDeviation_grad1', blank=True, null=True) # Field name made lowercase.
skewness_grad1 = models.FloatField(blank=True, null=True)
kurtosis_grad1 = models.FloatField(blank=True, null=True)
erroronaverage_grad1 = models.FloatField(db_column='errorOnAverage_grad1', blank=True, null=True) # Field name made lowercase.
thresholdotsu_grad1 = models.FloatField(db_column='thresholdOtsu_grad1', blank=True, null=True) # Field name made lowercase.
median_grad1 = models.FloatField(blank=True, null=True)
variance_grad2 = models.FloatField(blank=True, null=True)
standarddeviation_grad2 = models.FloatField(db_column='standardDeviation_grad2', blank=True, null=True) # Field name made lowercase.
skewness_grad2 = models.FloatField(blank=True, null=True)
kurtosis_grad2 = models.FloatField(blank=True, null=True)
erroronaverage_grad2 = models.FloatField(db_column='errorOnAverage_grad2', blank=True, null=True) # Field name made lowercase.
thresholdotsu_grad2 = models.FloatField(db_column='thresholdOtsu_grad2', blank=True, null=True) # Field name made lowercase.
median_grad2 = models.FloatField(blank=True, null=True)
variance_hough = models.FloatField(blank=True, null=True)
standarddeviation_hough = models.FloatField(db_column='standardDeviation_hough', blank=True, null=True) # Field name made lowercase.
skewness_hough = models.FloatField(blank=True, null=True)
kurtosis_hough = models.FloatField(blank=True, null=True)
erroronaverage_hough = models.FloatField(db_column='errorOnAverage_hough', blank=True, null=True) # Field name made lowercase.
thresholdotsu_hough = models.FloatField(db_column='thresholdOtsu_hough', blank=True, null=True) # Field name made lowercase.
median_hough = models.FloatField(blank=True, null=True)
hough_energy = models.FloatField(blank=True, null=True)
hough_entropy = models.FloatField(blank=True, null=True)
hough_correlation = models.FloatField(blank=True, null=True)
hough_inversedifferencemoment = models.FloatField(db_column='hough_inverseDifferenceMoment', blank=True, null=True) # Field name made lowercase.
hough_inertia = models.FloatField(blank=True, null=True)
hough_clustershade = models.FloatField(db_column='hough_clusterShade', blank=True, null=True) # Field name made lowercase.
hough_clusterprominence = models.FloatField(db_column='hough_clusterProminence', blank=True, null=True) # Field name made lowercase.
class Meta:
mario = True
managed = False
db_table = 'normalized'
class Profiles(models.Model):
id = models.IntegerField(primary_key=True, blank=True)
fileid = models.TextField(db_column='fileId', unique=True, blank=True) # Field name made lowercase.
version = models.IntegerField(blank=True, null=True)
class Meta:
mario = True
managed = False
db_table = 'profiles'
class Tag2Profile(models.Model):
id = models.IntegerField(primary_key=True, blank=True)
# profileid = models.IntegerField(db_column='profileId', blank=True, null=True) # Field name made lowercase.
profileid = models.ForeignKey('Profiles', db_column='profileId', blank=True) # Field name made lowercase.
tagid = models.IntegerField(db_column='tagId', blank=True, null=True) # Field name made lowercase.
# tagid = models.ForeignKey('Tags', db_column='tagId', blank=True) # Field name made lowercase.
class Meta:
mario = True
managed = False
db_table = 'tag2profile'
class Tags(models.Model):
tagid = models.IntegerField(db_column='tagId', primary_key=True, blank=True) # Field name made lowercase.
# tagid = models.ManyToManyField('Profiles', through='Tag2Profile', db_column='tagId') # Field name made lowercase.
# tagid = models.ForeignKey('Tag2Profile', to_field='tagid', db_column='tagId', primary_key=True) # Field name made lowercase.
tag = models.TextField(unique=True, blank=True)
class Meta:
mario = True
managed = False
db_table = 'tags'
|
{
"content_hash": "8ddf56fdfb3d09cb2056914344f89c21",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 170,
"avg_line_length": 92.5119760479042,
"alnum_prop": 0.755331887763358,
"repo_name": "CSCSI/Lost-Visions",
"id": "c8f973af2837529c5f025ae91e496e012cce4a7f",
"size": "31383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lost_visions/mario_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "341754"
},
{
"name": "HTML",
"bytes": "785300"
},
{
"name": "JavaScript",
"bytes": "608226"
},
{
"name": "Python",
"bytes": "383528"
},
{
"name": "Ruby",
"bytes": "58036"
},
{
"name": "Shell",
"bytes": "93"
}
],
"symlink_target": ""
}
|
import argparse
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.lib.io.tf_record import TFRecordCompressionType
def dataset_to_file(feature_iter, filename):
writer = tf.python_io.TFRecordWriter(
filename,
options=tf.python_io.TFRecordOptions(
compression_type=TFRecordCompressionType.GZIP))
with writer:
for feature in feature_iter:
writer.write(tf.train.Example(features=tf.train.Features(
feature=feature
)).SerializeToString())
def mnist_feature_fn(dataset):
for image, label in zip(dataset.images.tolist(), dataset.labels.tolist()):
assert len(image) == 784
yield {
'labels': tf.train.Feature(
int64_list=tf.train.Int64List(value=[label])),
'images': tf.train.Feature(
float_list=tf.train.FloatList(value=image)),
}
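def read_examples(filename):
    """Illustrative helper (not in the original script): iterate the
    GZIP-compressed records written by dataset_to_file and parse each one
    back into a tf.train.Example, using the same TF 1.x APIs as above."""
    options = tf.python_io.TFRecordOptions(
        compression_type=TFRecordCompressionType.GZIP)
    for record in tf.python_io.tf_record_iterator(filename, options=options):
        yield tf.train.Example.FromString(record)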
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('output_dir', type=os.path.abspath)
args = parser.parse_args()
mnist = input_data.read_data_sets("data",
one_hot=False,
validation_size=0)
train_path = os.path.join(args.output_dir, 'train.pb2')
print('Writing train data to: {}'.format(train_path))
dataset_to_file(mnist_feature_fn(mnist.train), train_path)
eval_path = os.path.join(args.output_dir, 'eval.pb2')
print('Writing eval data to: {}'.format(eval_path))
dataset_to_file(mnist_feature_fn(mnist.test), eval_path)
|
{
"content_hash": "3befccb51d3cc76cb57f7a838269ea13",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 76,
"avg_line_length": 35.95348837209303,
"alnum_prop": 0.6623544631306598,
"repo_name": "fluxcapacitor/source.ml",
"id": "67040a866933cdda3869f7cdc9297a407e431495",
"size": "1546",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/GoogleTraining/workshop_sections/mnist_series/cloudml/prepare_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "21795"
},
{
"name": "C",
"bytes": "1759"
},
{
"name": "C++",
"bytes": "50538"
},
{
"name": "CSS",
"bytes": "404634"
},
{
"name": "Cuda",
"bytes": "8671"
},
{
"name": "HTML",
"bytes": "63259"
},
{
"name": "Java",
"bytes": "92972"
},
{
"name": "JavaScript",
"bytes": "483783"
},
{
"name": "Jupyter Notebook",
"bytes": "18119406"
},
{
"name": "Protocol Buffer",
"bytes": "141238"
},
{
"name": "Python",
"bytes": "595541"
},
{
"name": "R",
"bytes": "21"
},
{
"name": "Scala",
"bytes": "122185"
},
{
"name": "Shell",
"bytes": "21139"
}
],
"symlink_target": ""
}
|
import time
import numpy.random as npr
from hypergrad.odyssey import omap
objects = [1, (3,4), 1.04, "hello", "it's \n a hard string \\\''\to parse", ((1,'a'), 3)]
def identity(x):
time.sleep(npr.randint(5))
print x
return x
def check_omap():
# This won't work with nosetest. Needs to be run from the same directory as the file.
ans = omap(identity, objects)
for x, y in zip(ans, objects):
assert x == y, "Failed on {0}".format(y)
print "test ok"
if __name__ == "__main__":
check_omap()
|
{
"content_hash": "f12f186aade7ad7fcc3338928cd6d3fa",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 89,
"avg_line_length": 26.55,
"alnum_prop": 0.608286252354049,
"repo_name": "bigaidream-projects/drmad",
"id": "777df46e71f064e14502129e46a5ac1f325eb06b",
"size": "531",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cpu_ver/tests/test_odyssey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "386831"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
import collections
import json
import re
import os
import sys
# TODO: Replace commented-out debugging statements
# with debug or trace level log statements.
# TODO: Handle cases like CollectionObject (Cataloging)
# where there may be multiple, discrepant record type names.
# (E.g. csc-object v. csc-collection-object, as well as
# some items that have neither prefix.)
# TODO: Handle cases where field name isn't a match for
# CSS selector; e.g. csc-loanOut-loanOutConditions
# selector pertains to specialConditionsOfLoan field
# TODO: Improve choice of CSS selectors for term list fields in
# authority term records. Currently, selector names for these are
# generated inaccurately. For instance, for Concept records, the
# messagekeys for term list fields begin with 'preferredCA-...'
# and that's how their selectors are currently rendered, but their
# actual selectors are of the form 'csc-conceptAuthority-...'
# TODO: Handle structured dates. These will be present within
# lists containing key 'func', value 'cspace.structuredDate',
# and have the form '${fields.groupname.fieldname}', while
# their associated text labels in the message bundle file
# will have the form 'structuredDate-fieldname'.
# Remove a substring from the right of a string.
# From Jack Kelly
# http://stackoverflow.com/a/3663505
def rchop(thestring, ending):
if thestring.endswith(ending):
return thestring[:-len(ending)]
return thestring
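# For example: rchop('acquisition-ownerLabel', 'Label') -> 'acquisition-owner'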
# Extract the name of a field from an entry for a field or row (the
# latter for repeatable fields) in a CollectionSpace 'uispec' file.
FIELD_PATTERN = re.compile(r"^\$\{fields\.(\w+)\}$", re.IGNORECASE)
ROW_PATTERN = re.compile(r"^\$\{\{row\}\.(\w+)\}$", re.IGNORECASE)
def get_field_name(value):
# 'basestring' -> 'str' here in Python 3
if isinstance(value, basestring):
match = FIELD_PATTERN.match(value) or ROW_PATTERN.match(value)
if match is not None:
return str(match.group(1))
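# For example: get_field_name('${fields.accessionDate}') -> 'accessionDate',
# and get_field_name('${{row}.accessionDate}') -> 'accessionDate'.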
MESSAGEKEY_KEY = 'messagekey'
def get_messagekey_from_item(item):
if not isinstance(item, dict):
return None
else:
        messagekey = item.get(MESSAGEKEY_KEY, None)
        return messagekey
# Recursively walk a nested collection and return
# lists representing each of its parts
# From Bryukhanov Valentin
# http://stackoverflow.com/a/12507546
# Adapted slightly to remove prefix argument.
def get_messagekeys_generator(indict, key=MESSAGEKEY_KEY):
if isinstance(indict, dict):
for key, value in indict.items():
if isinstance(value, dict):
for d in get_messagekeys_generator(value, [key]):
yield d
elif isinstance(value, list) or isinstance(value, tuple):
for v in value:
for d in get_messagekeys_generator(v, [key]):
yield d
else:
yield [key, value]
else:
yield indict
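# For example, for {'messagekey': 'acquisition-ownerLabel'} the generator
# yields ['messagekey', 'acquisition-ownerLabel'].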
# Get the selector prefix for the current record type (e.g. "acquisition-").
# Once acquired, cache it within a global variable and return the cached
# value to all subsequent queries.
RECORD_TYPE_PREFIX = None
SELECTOR_PATTERN = re.compile(r"^\.csc-(\w+\-)?.*$", re.IGNORECASE)
def get_record_type_selector_prefix(selector):
global RECORD_TYPE_PREFIX
if RECORD_TYPE_PREFIX is not None:
return RECORD_TYPE_PREFIX
else:
match = SELECTOR_PATTERN.match(selector)
if match is not None:
RECORD_TYPE_PREFIX = str(match.group(1))
return RECORD_TYPE_PREFIX
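# For example, the first field selector encountered in an Acquisition uispec,
# such as '.csc-acquisition-acquisitionReferenceNumber', caches and returns
# the prefix 'acquisition-'.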
# Exclude messagekeys for generic metadata fields like 'tenant ID'
# and 'workflow state'. These typically aren't displayed with
# text labels, and most don't even appear in a user-visible context
# within the CollectionSpace UI.
def in_messagekey_stoplist(messagekey):
global RECORD_TYPE_PREFIX
stoplist = ['coreInformationLabel', 'createdAtLabel', 'createdByLabel', 'csidLabel', 'deprecatedLabel',
'deprecatedRefNameLabel', 'domaindataLabel', 'inAuthorityLabel', 'numberLabel', 'otherInformationLabel',
'proposedLabel', 'refNameLabel', 'revLabel', 'sasLabel', 'shortIdentifierLabel', 'summaryLabel', 'tenantIdLabel',
'updatedAtLabel', 'updatedByLabel', 'uriLabel', 'workflowLabel']
in_stoplist = False
for stop_item in stoplist:
if messagekey == RECORD_TYPE_PREFIX + stop_item:
in_stoplist = True
return in_stoplist
# Load the contents of a Java-style properties file.
# E.g. with per-line entries in the form 'key: value'.
# From Roberto
# http://stackoverflow.com/a/31852401
# Adapted slightly as commented below.
def load_properties(filepath, sep=':', comment_char='#'):
props = {}
with open(filepath, "rt") as f:
for line in f:
l = line.strip()
# Added check that each line to be processed also contains the separator
if l and not l.startswith(comment_char) and sep in l:
key_value = l.split(sep)
props[key_value[0].strip()] = key_value[1].strip('" \t')
return props
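# For example, a bundle line such as
#   acquisition-ownerLabel: Owner
# yields props['acquisition-ownerLabel'] == 'Owner'.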
if __name__ == '__main__':
parser = ArgumentParser(description='Generates pairs for the text labels that appear next to fields and the CSS selectors for those fields. Used, in part, for CSpace QA Automation via Cucumber.')
parser.add_argument('-b', '--bundle_file',
help='file with text labels (defaults to \'core-messages.properties\' in current directory)', default = 'core-messages.properties')
parser.add_argument('-u', '--uispec_file',
help='file with data used to generate page (defaults to \'uispec\' in current directory)', default = 'uispec')
args = parser.parse_args()
# ##################################################
# Open and read the message bundle (text label) file
# ##################################################
bundle_path = args.bundle_file.strip()
if not os.path.isfile(bundle_path):
sys.exit("Could not find file '%s'.\n(Re-run script with '-h' option to view help instructions.)" % bundle_path)
text_labels = load_properties(bundle_path)
text_labels_lowercase = {k.lower():v for k,v in text_labels.items()}
# For debugging
# for key,value in text_labels_lowercase.items():
# # Change the following hard-coded record type value as needed, for debugging
# if key.startswith('acquisition-'):
# print "%s: %s" % (key, str(value))
# ##################################################
# Open and read the uispec file
# ##################################################
uispec_path = args.uispec_file.strip()
if not os.path.isfile(uispec_path):
sys.exit("Could not find file '%s'.\n(Re-run script with '-h' option to view help instructions.)" % uispec_path)
with open(uispec_path) as uispec_file:
uispec = json.load(uispec_file)
# From the uispec file ...
# Check for presence of the expected top-level item
# for the record editor
TOP_LEVEL_KEY='recordEditor'
try:
recordeditor_items = uispec[TOP_LEVEL_KEY]
except KeyError, e:
sys.exit("Could not find expected top level item \'%s\' in uispec file." % TOP_LEVEL_KEY)
# Verify that at least one item is present in the list of items
# below the top level item
if not recordeditor_items:
sys.exit("Could not find expected Record Editor items in uispec file")
# Create a new dict to hold all relevant uispec items
uispec_items = {}
uispec_items.update(recordeditor_items)
# Merge in hierarchy items, if any were present in this record type,
# alongside record editor items
HIERARCHY_SECTION_KEY='hierarchy'
hierarchy_items = uispec.get(HIERARCHY_SECTION_KEY, None)
if hierarchy_items is not None and isinstance(hierarchy_items, dict):
uispec_items.update(hierarchy_items)
# ##################################################
# Get the prefix for this record type
# (used in selectors)
# ##################################################
# TODO: As noted above, also need to handle instances of irregular
# use of this prefix, as in CollectionObjects/Cataloging records
for selector, value in uispec_items.iteritems():
# For debugging
# print "%s %s\n" % (selector, value)
# On encountering the first '${fields}.fieldname' item,
# set the record type prefix from its selector
if RECORD_TYPE_PREFIX is None:
field_name = get_field_name(value)
if field_name is not None:
prefix = get_record_type_selector_prefix(selector)
break
# ##################################################
# Iterate through the list of selectors in the
# uispec file and find those that have messagekeys.
# These represent text labels that are, in many
# cases, associated with fields. Store these selectors
# and their text labels for further use below ...
# ##################################################
# ##################################################
# Get messagekeys and their associated selectors
# ##################################################
CSC_PREFIX = 'csc-'
CSC_RECORD_TYPE_PREFIX = CSC_PREFIX + RECORD_TYPE_PREFIX
LABEL_CAMELCASE_SUFFIX = "Label"
LABEL_SUFFIX = '-label'
mkeys = {}
for selector, value in uispec_items.iteritems():
# For debugging
# print "%s %s" % (selector, value)
mkey = get_messagekey_from_item(value)
# 'basestring' -> 'str' here in Python 3
if isinstance(mkey, basestring):
mkeys[selector] = mkey
# Can use collections.abc abstract classes here in Python 3.3 and higher
if mkey is None and isinstance(value, (dict, list, set, tuple)):
for item in get_messagekeys_generator(value):
# For debugging
# print item
if isinstance(item, list):
if str(item[0]) == MESSAGEKEY_KEY:
# This block is an outright hack
selector = str(item[1])
mkey_selector = '.' + CSC_PREFIX + selector
if mkey_selector.endswith(LABEL_CAMELCASE_SUFFIX):
mkey_selector= mkey_selector.replace(LABEL_CAMELCASE_SUFFIX, LABEL_SUFFIX)
mkeys[mkey_selector] = selector
# ##################################################
# For each messagekey, get its text label (if any)
# ##################################################
text_labels_not_found_msgs = []
messagekeys = {}
for selector, messagekey in mkeys.iteritems():
# For debugging
# print "selector messagekey = %s %s\n" % (selector, messagekey)
if messagekey is not None:
if in_messagekey_stoplist(messagekey):
continue
text_label = text_labels_lowercase.get(messagekey.lower(), None)
if text_label is None or text_label.strip() is None:
text_labels_not_found_msgs.append("// Not found: text label for message key '%s'" % messagekey)
else:
            # Strip leading '.' from selector
            selector = selector.lstrip('.')
messagekeys[selector] = text_label
# For debugging
# for key, value in messagekeys.iteritems():
# print 'fieldSelectorByLabel.put("%s", "%s");' % (value, key)
# ##################################################
# Do one last cleanup pass on messagekeys:
# * Remove the 'Label' suffix from selectors
# * Add placeholders for missing text labels
# ##################################################
ADD_ME_VALUE = "ADD_ME"
fields = {}
for key, value in messagekeys.iteritems():
messagekey_fieldname = rchop(key, LABEL_SUFFIX)
if messagekey_fieldname.startswith(CSC_PREFIX):
        # Use a placeholder for missing or empty text labels
        fields[messagekey_fieldname] = value if value not in (None, '') else ADD_ME_VALUE
# ##################################################
# Generate output suitable for pasting
# ##################################################
print '// ----- Start of entries generated by an automated script -----'
print '//'
print '// (Note: These require review by a human.)'
print '// (Note: Entries for structured date fields are not yet generated.)'
print "\n"
# ##################################################
# Output regarding text label-selector associations
# ##################################################
# Print associations between text labels and field selectors
# Sort case-independently on the text label (then on the selector)
for key, value in sorted(fields.iteritems(), key=lambda (k, v): (v.lower(), k)):
print 'fieldSelectorByLabel.put("%s", "%s");' % (value, key)
# ##################################################
# Output regarding errors
# ##################################################
# Print various potential errors as Java comments, for a human to look at/sort out
# Duplicate text labels: instances where the same text label is
# associated with two or more fields, or there is some other
# discrepancy in text label-to-field associations.
#
# From user2357112
# http://stackoverflow.com/a/20463090
value_occurrences = collections.Counter(fields.values())
if value_occurrences:
print "\n"
print "// Entries above with duplicate text labels, to be checked by a human."
print "//"
print "// Some may represent labels for headers above repeatable fields/groups."
for key, value in sorted(value_occurrences.iteritems()):
if value > 1:
print "// Duplicate text label: %s (appears %d times)" % (key, value)
# Messagekeys in the 'uispec' file without associated text labels in the
# message bundle file (e.g. 'core-messages.properties').
#
# (Example: messagekey 'acquisition-ownerLabel' is present in the uispec
# for the Acquisition record, but isn't found in the message bundle file;
# only 'acquisition-ownersLabel' is present there.)
if len(text_labels_not_found_msgs) > 0:
print "\n"
print "// Messagekeys in the 'uispec' file not matched by text labels"
print "// in the message bundles file (e.g. 'core-messages.properties')."
print "//"
print "// Some of these may be record metadata that is never displayed"
print "// in the UI. If so, they can be added to the script's stoplist."
print "//"
print "// In other instances, these may represent messagekeys for section"
print "// headers in the record, rather than for fields."
print "//"
print "// Finally, these may represent sub-records (e.g. Contact in"
print "// Person and Organization) or other sub-data structures."
print "//"
for msg in sorted(text_labels_not_found_msgs):
print msg
print "\n"
print '// ----- End of entries generated by an automated script -----'
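# Example of the emitted output (illustrative, hypothetical values only;
# selectors follow the 'csc-<recordtype>-<fieldname>' pattern used above):
#
#   // ----- Start of entries generated by an automated script -----
#   fieldSelectorByLabel.put("Object number", "csc-myrecord-objectNumber");
#   // Duplicate text label: Title (appears 2 times)
#   // ----- End of entries generated by an automated script -----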
|
{
"content_hash": "bd998104290ac8a8555d841fc6eaa929",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 199,
"avg_line_length": 43.58988764044944,
"alnum_prop": 0.5971774713236242,
"repo_name": "aronr/Tools",
"id": "1a1ee9389ae2c31c557986489514ae8f87bcc9eb",
"size": "15541",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/populate-field-label-selector-pairs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "425353"
},
{
"name": "HTML",
"bytes": "1230950"
},
{
"name": "Java",
"bytes": "109813"
},
{
"name": "JavaScript",
"bytes": "3456906"
},
{
"name": "Perl",
"bytes": "33563"
},
{
"name": "Python",
"bytes": "23862"
},
{
"name": "Ruby",
"bytes": "3402"
},
{
"name": "SQLPL",
"bytes": "4703"
},
{
"name": "Shell",
"bytes": "38795"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope
from test_imperative_resnet import ResNet
from paddle.fluid.framework import _test_eager_guard
batch_size = 8
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": batch_size,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001],
},
"batch_size": batch_size,
"lr": 0.1,
"total_images": 1281164,
}
def optimizer_setting(params, parameter_list=None):
ls = params["learning_strategy"]
if ls["name"] == "piecewise_decay":
if "total_images" not in params:
total_images = 1281167
else:
total_images = params["total_images"]
batch_size = ls["batch_size"]
step = int(total_images / batch_size + 1)
bd = [step * e for e in ls["epochs"]]
base_lr = params["lr"]
        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
if fluid._non_static_mode():
optimizer = fluid.optimizer.SGD(
learning_rate=0.01, parameter_list=parameter_list
)
else:
optimizer = fluid.optimizer.SGD(learning_rate=0.01)
# TODO(minqiyang): Add learning rate scheduler support to dygraph mode
# optimizer = fluid.optimizer.Momentum(
# learning_rate=params["lr"],
# learning_rate=fluid.layers.piecewise_decay(
# boundaries=bd, values=lr),
# momentum=0.9,
# regularization=fluid.regularizer.L2Decay(1e-4))
return optimizer
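# Illustration of the (currently unused) piecewise schedule computed above,
# assuming the module-level train_parameters: total_images=1281164 and
# batch_size=8 give step = int(1281164 / 8 + 1) = 160146, so the epoch
# boundaries bd become [4804380, 9608760, 14413140] steps and the candidate
# learning rates lr are [0.1, 0.01, 0.001, 0.0001]. As the TODO above notes,
# dygraph mode currently falls back to a fixed SGD learning rate of 0.01.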
class TestDygraphResnetSortGradient(unittest.TestCase):
def func_test_resnet_sort_gradient_float32(self):
seed = 90
batch_size = train_parameters["batch_size"]
batch_num = 10
with fluid.dygraph.guard():
fluid.set_flags({'FLAGS_sort_sum_gradient': True})
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
resnet = ResNet()
optimizer = optimizer_setting(
train_parameters, parameter_list=resnet.parameters()
)
np.random.seed(seed)
            import random
            random.seed(seed)
train_reader = paddle.batch(
paddle.dataset.flowers.train(use_xmap=False),
batch_size=batch_size,
)
dy_param_init_value = {}
for param in resnet.parameters():
dy_param_init_value[param.name] = param.numpy()
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
break
dy_x_data = np.array(
[x[0].reshape(3, 224, 224) for x in data]
).astype('float32')
y_data = (
np.array([x[1] for x in data])
.astype('int64')
.reshape(batch_size, 1)
)
img = to_variable(dy_x_data)
label = to_variable(y_data)
label.stop_gradient = True
out = resnet(img)
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = paddle.mean(x=loss)
dy_out = avg_loss.numpy()
if batch_id == 0:
for param in resnet.parameters():
if param.name not in dy_param_init_value:
dy_param_init_value[param.name] = param.numpy()
avg_loss.backward()
dy_grad_value = {}
for param in resnet.parameters():
if param.trainable:
np_array = np.array(
param._grad_ivar().value().get_tensor()
)
dy_grad_value[
param.name + core.grad_var_suffix()
] = np_array
optimizer.minimize(avg_loss)
resnet.clear_gradients()
dy_param_value = {}
for param in resnet.parameters():
dy_param_value[param.name] = param.numpy()
with new_program_scope():
paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed)
exe = fluid.Executor(
fluid.CPUPlace()
if not core.is_compiled_with_cuda()
else fluid.CUDAPlace(0)
)
resnet = ResNet()
optimizer = optimizer_setting(train_parameters)
np.random.seed(seed)
            import random
            random.seed(seed)
train_reader = paddle.batch(
paddle.dataset.flowers.train(use_xmap=False),
batch_size=batch_size,
)
img = fluid.layers.data(
name='pixel', shape=[3, 224, 224], dtype='float32'
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = resnet(img)
loss = fluid.layers.cross_entropy(input=out, label=label)
avg_loss = paddle.mean(x=loss)
optimizer.minimize(avg_loss)
# initialize params and fetch them
static_param_init_value = {}
static_param_name_list = []
static_grad_name_list = []
for param in resnet.parameters():
static_param_name_list.append(param.name)
for param in resnet.parameters():
if param.trainable:
static_grad_name_list.append(
param.name + core.grad_var_suffix()
)
out = exe.run(
fluid.default_startup_program(),
fetch_list=static_param_name_list,
)
for i in range(len(static_param_name_list)):
static_param_init_value[static_param_name_list[i]] = out[i]
for batch_id, data in enumerate(train_reader()):
if batch_id >= batch_num:
break
static_x_data = np.array(
[x[0].reshape(3, 224, 224) for x in data]
).astype('float32')
y_data = (
np.array([x[1] for x in data])
.astype('int64')
.reshape([batch_size, 1])
)
fetch_list = [avg_loss.name]
fetch_list.extend(static_param_name_list)
fetch_list.extend(static_grad_name_list)
out = exe.run(
fluid.default_main_program(),
feed={"pixel": static_x_data, "label": y_data},
fetch_list=fetch_list,
)
static_param_value = {}
static_grad_value = {}
static_out = out[0]
param_start_pos = 1
grad_start_pos = len(static_param_name_list) + param_start_pos
for i in range(
param_start_pos,
len(static_param_name_list) + param_start_pos,
):
static_param_value[
static_param_name_list[i - param_start_pos]
] = out[i]
for i in range(
grad_start_pos, len(static_grad_name_list) + grad_start_pos
):
static_grad_value[
static_grad_name_list[i - grad_start_pos]
] = out[i]
np.testing.assert_allclose(static_out, dy_out, rtol=1e-05)
self.assertEqual(len(dy_param_init_value), len(static_param_init_value))
for key, value in static_param_init_value.items():
np.testing.assert_allclose(
value, dy_param_init_value[key], rtol=1e-05
)
            self.assertTrue(np.isfinite(value).all())
            self.assertFalse(np.isnan(value).any())
self.assertEqual(len(dy_grad_value), len(static_grad_value))
for key, value in static_grad_value.items():
np.testing.assert_allclose(value, dy_grad_value[key], rtol=1e-05)
            self.assertTrue(np.isfinite(value).all())
            self.assertFalse(np.isnan(value).any())
self.assertEqual(len(dy_param_value), len(static_param_value))
for key, value in static_param_value.items():
np.testing.assert_allclose(value, dy_param_value[key], rtol=1e-05)
            self.assertTrue(np.isfinite(value).all())
            self.assertFalse(np.isnan(value).any())
def test_resnet_sort_gradient_float32(self):
with _test_eager_guard():
self.func_test_resnet_sort_gradient_float32()
self.func_test_resnet_sort_gradient_float32()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7e6dfd9d3f62dada53ea4a9f6a6009a5",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 80,
"avg_line_length": 35.63813229571984,
"alnum_prop": 0.5093350802489355,
"repo_name": "luotao1/Paddle",
"id": "9e203092dc77b46a333f9b883d24dfdd8ee1db61",
"size": "9770",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
import carsales_pb2
from common import rand_int, rand_double, rand_bool, from_bytes_helper
from random import choice
MAKES = ["Toyota", "GM", "Ford", "Honda", "Tesla"]
MODELS = ["Camry", "Prius", "Volt", "Accord", "Leaf", "Model S"]
COLORS = ["black", "white", "red", "green", "blue", "cyan", "magenta", "yellow", "silver"]
def random_car(car):
car.make = choice(MAKES)
car.model = choice(MODELS)
car.color = rand_int(len(COLORS))
car.seats = 2 + rand_int(6)
car.doors = 2 + rand_int(3)
for _ in range(4):
wheel = car.wheel.add()
wheel.diameter = 25 + rand_int(15)
wheel.air_pressure = 30 + rand_double(20)
wheel.snow_tires = rand_int(16) == 0
car.length = 170 + rand_int(150)
car.width = 48 + rand_int(36)
car.height = 54 + rand_int(48)
car.weight = car.length * car.width * car.height // 200
engine = car.engine
engine.horsepower = 100 * rand_int(400)
engine.cylinders = 4 + 2 * rand_int(3)
engine.cc = 800 + rand_int(10000)
engine.uses_gas = True
engine.uses_electric = rand_bool()
car.fuel_capacity = 10.0 + rand_double(30.0)
car.fuel_level = rand_double(car.fuel_capacity)
car.has_power_windows = rand_bool()
car.has_power_steering = rand_bool()
car.has_cruise_control = rand_bool()
car.cup_holders = rand_int(12)
car.has_nav_system = rand_bool()
def calc_value(car):
result = 0
result += car.seats * 200
result += car.doors * 350
for wheel in car.wheel:
result += wheel.diameter * wheel.diameter
result += 100 if wheel.snow_tires else 0
result += car.length * car.width * car.height // 50
engine = car.engine
result += engine.horsepower * 40
if engine.uses_electric:
if engine.uses_gas:
result += 5000
else:
result += 3000
result += 100 if car.has_power_windows else 0
result += 200 if car.has_power_steering else 0
result += 400 if car.has_cruise_control else 0
result += 2000 if car.has_nav_system else 0
result += car.cup_holders * 25
return result
class Benchmark:
def __init__(self, compression):
self.Request = carsales_pb2.ParkingLot
self.Response = carsales_pb2.TotalValue
self.from_bytes_request = from_bytes_helper(carsales_pb2.ParkingLot)
self.from_bytes_response = from_bytes_helper(carsales_pb2.TotalValue)
self.to_bytes = lambda x: x.SerializeToString()
def setup(self, request):
result = 0
for _ in range(rand_int(200)):
car = request.car.add()
random_car(car)
result += calc_value(car)
return result
def handle(self, request, response):
result = 0
for car in request.car:
result += calc_value(car)
response.amount = result
def check(self, response, expected):
return response.amount == expected
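# A minimal driving sketch for the Benchmark class above (hypothetical;
# the real runner that supplies `compression` lives elsewhere in the
# benchmark suite, and `from_bytes_helper` is assumed to return a
# bytes -> message parser):
#
#   bench = Benchmark(compression=None)
#   request = bench.Request()
#   expected = bench.setup(request)
#   data = bench.to_bytes(request)
#   response = bench.Response()
#   bench.handle(bench.from_bytes_request(data), response)
#   assert bench.check(response, expected)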
|
{
"content_hash": "ab7fc51a5ba58acbbbb8b5e5aaa289d1",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 90,
"avg_line_length": 30.726315789473684,
"alnum_prop": 0.6132237067488866,
"repo_name": "SymbiFlow/pycapnp",
"id": "c969070122a0b10708d2cbf9705dfd6f0d5ad11e",
"size": "2942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmark/carsales_proto.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "447"
},
{
"name": "C++",
"bytes": "15822"
},
{
"name": "Cap'n Proto",
"bytes": "27443"
},
{
"name": "Python",
"bytes": "368736"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
}
|
_matplotlib_backend = None
def set_backend(backend='agg'):
global _matplotlib_backend
if _matplotlib_backend is None:
_matplotlib_backend = backend
import matplotlib
matplotlib.use(_matplotlib_backend)
return
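# Usage sketch (module path illustrative): call set_backend() before the
# first pyplot import so the non-interactive Agg backend takes effect;
# later calls are no-ops because of the module-level guard above.
#
#   from vis import set_backend
#   set_backend('agg')
#   import matplotlib.pyplot as plt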
|
{
"content_hash": "2083ba2dfc7e915be141eeab27167ba6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 24.7,
"alnum_prop": 0.680161943319838,
"repo_name": "tskisner/pytoast",
"id": "495348e416455f6953718544cad4de8bc4b3434b",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/vis.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "29103"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "228040"
}
],
"symlink_target": ""
}
|
"""Module which impements the sequence class and a few of its subclasses."""
import os
from typing import List
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
from typeguard import check_argument_types
from neuralmonkey.model.model_part import ModelPart, FeedDict, InitializerSpecs
from neuralmonkey.model.stateful import TemporalStateful
from neuralmonkey.vocabulary import Vocabulary
from neuralmonkey.decorators import tensor
from neuralmonkey.dataset import Dataset
from neuralmonkey.tf_utils import get_variable
# pylint: disable=abstract-method
class Sequence(ModelPart, TemporalStateful):
"""Base class for a data sequence.
This abstract class represents a batch of sequences of Tensors of possibly
different lengths.
    Sequence is essentially a temporal stateful object whose states and mask
are fed, or computed from fed values. It is also a ModelPart, and
therefore, it can store variables such as embedding matrices.
"""
def __init__(self,
name: str,
max_length: int = None,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
"""Construct a new `Sequence` object.
Arguments:
name: The name for the `ModelPart` object
max_length: Maximum length of sequences in the object (not checked)
save_checkpoint: The save_checkpoint parameter for `ModelPart`
load_checkpoint: The load_checkpoint parameter for `ModelPart`
"""
ModelPart.__init__(self, name, save_checkpoint, load_checkpoint,
initializers)
self.max_length = max_length
if self.max_length is not None and self.max_length <= 0:
raise ValueError("Max sequence length must be a positive integer.")
# pylint: enable=abstract-method
class EmbeddedFactorSequence(Sequence):
"""A sequence that stores one or more embedded inputs (factors)."""
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
vocabularies: List[Vocabulary],
data_ids: List[str],
embedding_sizes: List[int],
max_length: int = None,
add_start_symbol: bool = False,
add_end_symbol: bool = False,
scale_embeddings_by_depth: bool = False,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
"""Construct a new instance of `EmbeddedFactorSequence`.
        Takes three lists (vocabularies, data series IDs, and embedding
        sizes) and constructs a `Sequence` object. The supplied lists must
        be equal in length, and their indices must correspond to each
        other.
Arguments:
name: The name for the `ModelPart` object
vocabularies: A list of `Vocabulary` objects used for each factor
data_ids: A list of strings identifying the data series used for
each factor
embedding_sizes: A list of integers specifying the size of the
embedding vector for each factor
max_length: The maximum length of the sequences
add_start_symbol: Includes <s> in the sequence
add_end_symbol: Includes </s> in the sequence
scale_embeddings_by_depth: Set to True for T2T import compatibility
save_checkpoint: The save_checkpoint parameter for `ModelPart`
load_checkpoint: The load_checkpoint parameter for `ModelPart`
"""
check_argument_types()
Sequence.__init__(
self, name, max_length, save_checkpoint, load_checkpoint,
initializers)
self.vocabularies = vocabularies
self.vocabulary_sizes = [len(vocab) for vocab in self.vocabularies]
self.data_ids = data_ids
self.embedding_sizes = embedding_sizes
self.add_start_symbol = add_start_symbol
self.add_end_symbol = add_end_symbol
self.scale_embeddings_by_depth = scale_embeddings_by_depth
if not (len(self.data_ids)
== len(self.vocabularies)
== len(self.embedding_sizes)):
raise ValueError("data_ids, vocabularies, and embedding_sizes "
"lists need to have the same length")
if any([esize <= 0 for esize in self.embedding_sizes]):
raise ValueError("Embedding size must be a positive integer.")
with self.use_scope():
self.mask = tf.placeholder(tf.float32, [None, None], "mask")
self.input_factors = [
tf.placeholder(tf.int32, [None, None], "factor_{}".format(did))
for did in self.data_ids]
# pylint: enable=too-many-arguments
# TODO this should be placed into the abstract embedding class
def tb_embedding_visualization(self, logdir: str,
prj: projector):
"""Link embeddings with vocabulary wordlist.
Used for tensorboard visualization.
Arguments:
logdir: directory where model is stored
            prj: TensorBoard projector for storing linking info.
"""
for i in range(len(self.vocabularies)):
            # Overriding is set to True because if the model were not
            # allowed to override the output folder, it would have failed
            # earlier.
            # TODO: when vocabularies get a name parameter, use it here
wordlist = os.path.join(logdir, self.name + "_" + str(i) + ".tsv")
self.vocabularies[i].save_wordlist(wordlist, True, True)
embedding = prj.embeddings.add()
# pylint: disable=unsubscriptable-object
embedding.tensor_name = self.embedding_matrices[i].name
embedding.metadata_path = wordlist
@tensor
def embedding_matrices(self) -> List[tf.Tensor]:
"""Return a list of embedding matrices for each factor."""
# Note: Embedding matrices are numbered rather than named by the data
# id so the data_id string does not need to be the same across
# experiments
return [
get_variable(
name="embedding_matrix_{}".format(i),
shape=[vocab_size, emb_size],
initializer=tf.glorot_uniform_initializer())
for i, (data_id, vocab_size, emb_size) in enumerate(zip(
self.data_ids, self.vocabulary_sizes, self.embedding_sizes))]
@tensor
def temporal_states(self) -> tf.Tensor:
"""Return the embedded factors.
A 3D Tensor of shape (batch, time, dimension),
where dimension is the sum of the embedding sizes supplied to the
constructor.
"""
embedded_factors = []
for (factor, embedding_matrix) in zip(
self.input_factors, self.embedding_matrices):
emb_factor = tf.nn.embedding_lookup(embedding_matrix, factor)
# github.com/tensorflow/tensor2tensor/blob/v1.5.6/tensor2tensor/
# layers/modalities.py#L104
if self.scale_embeddings_by_depth:
emb_size = embedding_matrix.shape.as_list()[-1]
emb_factor *= emb_size**0.5
# We explicitly set paddings to zero-value vectors
            # TODO: remove unnecessary masking in the subsequent modules
emb_factor = emb_factor * tf.expand_dims(self.mask, -1)
embedded_factors.append(emb_factor)
return tf.concat(embedded_factors, 2)
@tensor
def temporal_mask(self) -> tf.Tensor:
return self.mask
def feed_dict(self, dataset: Dataset, train: bool = False) -> FeedDict:
"""Feed the placholders with the data.
Arguments:
dataset: The dataset.
train: A flag whether the train mode is enabled.
Returns:
The constructed feed dictionary that contains the factor data and
the mask.
"""
fd = {} # type: FeedDict
# for checking the lengths of individual factors
arr_strings = []
last_paddings = None
for factor_plc, name, vocabulary in zip(
self.input_factors, self.data_ids, self.vocabularies):
factors = dataset.get_series(name)
vectors, paddings = vocabulary.sentences_to_tensor(
list(factors), self.max_length, pad_to_max_len=False,
train_mode=train, add_start_symbol=self.add_start_symbol,
add_end_symbol=self.add_end_symbol)
fd[factor_plc] = list(zip(*vectors))
arr_strings.append(paddings.tostring())
last_paddings = paddings
if len(set(arr_strings)) > 1:
raise ValueError("The lenghts of factors do not match")
assert last_paddings is not None
fd[self.mask] = list(zip(*last_paddings))
return fd
class EmbeddedSequence(EmbeddedFactorSequence):
"""A sequence of embedded inputs (for a single factor)."""
# pylint: disable=too-many-arguments
def __init__(self,
name: str,
vocabulary: Vocabulary,
data_id: str,
embedding_size: int,
max_length: int = None,
add_start_symbol: bool = False,
add_end_symbol: bool = False,
scale_embeddings_by_depth: bool = False,
save_checkpoint: str = None,
load_checkpoint: str = None,
initializers: InitializerSpecs = None) -> None:
"""Construct a new instance of `EmbeddedSequence`.
Arguments:
name: The name for the `ModelPart` object
vocabulary: A `Vocabulary` object used for the sequence data
data_id: A string that identifies the data series used for
the sequence data
            embedding_size: An integer that specifies the size of the
                embedding vector for the sequence data
max_length: The maximum length of the sequences
add_start_symbol: Includes <s> in the sequence
add_end_symbol: Includes </s> in the sequence
scale_embeddings_by_depth: Set to True for T2T import compatibility
save_checkpoint: The save_checkpoint parameter for `ModelPart`
load_checkpoint: The load_checkpoint parameter for `ModelPart`
"""
EmbeddedFactorSequence.__init__(
self,
name=name,
vocabularies=[vocabulary],
data_ids=[data_id],
embedding_sizes=[embedding_size],
max_length=max_length,
add_start_symbol=add_start_symbol,
add_end_symbol=add_end_symbol,
scale_embeddings_by_depth=scale_embeddings_by_depth,
save_checkpoint=save_checkpoint,
load_checkpoint=load_checkpoint,
initializers=initializers)
# pylint: enable=too-many-arguments
@property
def inputs(self) -> tf.Tensor:
"""Return a 2D placeholder for the sequence inputs."""
return self.input_factors[0]
# pylint: disable=unsubscriptable-object
@property
def embedding_matrix(self) -> tf.Tensor:
"""Return the embedding matrix for the sequence."""
return self.embedding_matrices[0]
# pylint: enable=unsubscriptable-object
@property
def vocabulary(self) -> Vocabulary:
"""Return the input vocabulary."""
return self.vocabularies[0]
@property
def data_id(self) -> str:
"""Return the input data series indentifier."""
return self.data_ids[0]
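# A minimal construction sketch (hypothetical names; ``my_vocabulary``
# stands for a Vocabulary instance built elsewhere). This is equivalent
# to an EmbeddedFactorSequence with single-element lists:
#
#   seq = EmbeddedSequence(name="source_embeddings",
#                          vocabulary=my_vocabulary,
#                          data_id="source",
#                          embedding_size=300,
#                          max_length=50)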
|
{
"content_hash": "a3283eaf1386d8af8249db357af33323",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 79,
"avg_line_length": 40.40816326530612,
"alnum_prop": 0.6111952861952862,
"repo_name": "juliakreutzer/bandit-neuralmonkey",
"id": "40ad7e92f77ef310059fbd55132b9798702b5586",
"size": "11880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/model/sequence.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "723799"
},
{
"name": "Shell",
"bytes": "4358"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from dataclasses import dataclass
from copy import deepcopy
import os.path as op
import re
import numpy as np
from ..defaults import HEAD_SIZE_DEFAULT
from .._freesurfer import get_mni_fiducials
from ..viz import plot_montage
from ..transforms import (apply_trans, get_ras_to_neuromag_trans, _sph_to_cart,
_topo_to_sph, _frame_to_str, Transform,
_verbose_frames, _fit_matched_points,
_quat_to_affine, _ensure_trans)
from ..io._digitization import (_count_points_by_type,
_get_dig_eeg, _make_dig_points, write_dig,
_read_dig_fif, _format_dig_points,
_get_fid_coords, _coord_frame_const,
_get_data_as_dict_from_dig)
from ..io.meas_info import create_info
from ..io.open import fiff_open
from ..io.pick import pick_types, _picks_to_idx, channel_type
from ..io.constants import FIFF, CHANNEL_LOC_ALIASES
from ..utils import (warn, copy_function_doc_to_method_doc, _pl, verbose,
_check_option, _validate_type, _check_fname, _on_missing,
fill_doc, _docdict)
from ._dig_montage_utils import _read_dig_montage_egi
from ._dig_montage_utils import _parse_brainvision_dig_montage
@dataclass
class _BuiltinStandardMontage:
name: str
description: str
_BUILTIN_STANDARD_MONTAGES = [
_BuiltinStandardMontage(
name='standard_1005',
description='Electrodes are named and positioned according to the '
'international 10-05 system (343+3 locations)',
),
_BuiltinStandardMontage(
name='standard_1020',
description='Electrodes are named and positioned according to the '
'international 10-20 system (94+3 locations)',
),
_BuiltinStandardMontage(
name='standard_alphabetic',
description='Electrodes are named with LETTER-NUMBER combinations '
'(A1, B2, F4, …) (65+3 locations)',
),
_BuiltinStandardMontage(
name='standard_postfixed',
description='Electrodes are named according to the international '
'10-20 system using postfixes for intermediate positions '
'(100+3 locations)',
),
_BuiltinStandardMontage(
name='standard_prefixed',
description='Electrodes are named according to the international '
'10-20 system using prefixes for intermediate positions '
'(74+3 locations)',
),
_BuiltinStandardMontage(
name='standard_primed',
description="Electrodes are named according to the international "
"10-20 system using prime marks (' and '') for "
"intermediate positions (100+3 locations)",
),
_BuiltinStandardMontage(
name='biosemi16',
description='BioSemi cap with 16 electrodes (16+3 locations)',
),
_BuiltinStandardMontage(
name='biosemi32',
description='BioSemi cap with 32 electrodes (32+3 locations)',
),
_BuiltinStandardMontage(
name='biosemi64',
description='BioSemi cap with 64 electrodes (64+3 locations)',
),
_BuiltinStandardMontage(
name='biosemi128',
description='BioSemi cap with 128 electrodes (128+3 locations)',
),
_BuiltinStandardMontage(
name='biosemi160',
description='BioSemi cap with 160 electrodes (160+3 locations)',
),
_BuiltinStandardMontage(
name='biosemi256',
description='BioSemi cap with 256 electrodes (256+3 locations)',
),
_BuiltinStandardMontage(
name='easycap-M1',
description='EasyCap with 10-05 electrode names (74 locations)',
),
_BuiltinStandardMontage(
name='easycap-M10',
description='EasyCap with numbered electrodes (61 locations)',
),
_BuiltinStandardMontage(
name='EGI_256',
description='Geodesic Sensor Net (256 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-32',
description='HydroCel Geodesic Sensor Net and Cz (33+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-64_1.0',
description='HydroCel Geodesic Sensor Net (64+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-65_1.0',
description='HydroCel Geodesic Sensor Net and Cz (65+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-128',
description='HydroCel Geodesic Sensor Net (128+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-129',
description='HydroCel Geodesic Sensor Net and Cz (129+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-256',
description='HydroCel Geodesic Sensor Net (256+3 locations)',
),
_BuiltinStandardMontage(
name='GSN-HydroCel-257',
description='HydroCel Geodesic Sensor Net and Cz (257+3 locations)',
),
_BuiltinStandardMontage(
name='mgh60',
description='The (older) 60-channel cap used at MGH (60+3 locations)',
),
_BuiltinStandardMontage(
name='mgh70',
description='The (newer) 70-channel BrainVision cap used at MGH '
'(70+3 locations)',
),
_BuiltinStandardMontage(
name='artinis-octamon',
description='Artinis OctaMon fNIRS (8 sources, 2 detectors)',
),
_BuiltinStandardMontage(
name='artinis-brite23',
description='Artinis Brite23 fNIRS (11 sources, 7 detectors)',
),
_BuiltinStandardMontage(
name='brainproducts-RNP-BA-128',
description='Brain Products with 10-10 electrode names (128 channels)',
)
]
def _check_get_coord_frame(dig):
dig_coord_frames = sorted(set(d['coord_frame'] for d in dig))
if len(dig_coord_frames) != 1:
raise RuntimeError(
'Only a single coordinate frame in dig is supported, got '
f'{dig_coord_frames}')
return _frame_to_str[dig_coord_frames.pop()] if dig_coord_frames else None
def get_builtin_montages(*, descriptions=False):
"""Get a list of all standard montages shipping with MNE-Python.
The names of the montages can be passed to :func:`make_standard_montage`.
Parameters
----------
descriptions : bool
Whether to return not only the montage names, but also their
corresponding descriptions. If ``True``, a list of tuples is returned,
where the first tuple element is the montage name and the second is
the montage description. If ``False`` (default), only the names are
returned.
.. versionadded:: 1.1
Returns
-------
montages : list of str | list of tuple
If ``descriptions=False``, the names of all builtin montages that can
be used by :func:`make_standard_montage`.
If ``descriptions=True``, a list of tuples ``(name, description)``.
"""
if descriptions:
return [
(m.name, m.description) for m in _BUILTIN_STANDARD_MONTAGES
]
else:
return [m.name for m in _BUILTIN_STANDARD_MONTAGES]
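# Usage sketch (first two names taken from the builtin list above):
#
#   >>> get_builtin_montages()[:2]  # doctest: +SKIP
#   ['standard_1005', 'standard_1020']
#   >>> get_builtin_montages(descriptions=True)[0]  # doctest: +SKIP
#   ('standard_1005', 'Electrodes are named and positioned according ...')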
def make_dig_montage(ch_pos=None, nasion=None, lpa=None, rpa=None,
hsp=None, hpi=None, coord_frame='unknown'):
r"""Make montage from arrays.
Parameters
----------
ch_pos : dict | None
Dictionary of channel positions. Keys are channel names and values
are 3D coordinates - array of shape (3,) - in native digitizer space
in m.
nasion : None | array, shape (3,)
The position of the nasion fiducial point.
This point is assumed to be in the native digitizer space in m.
lpa : None | array, shape (3,)
The position of the left periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
rpa : None | array, shape (3,)
The position of the right periauricular fiducial point.
This point is assumed to be in the native digitizer space in m.
hsp : None | array, shape (n_points, 3)
This corresponds to an array of positions of the headshape points in
3d. These points are assumed to be in the native digitizer space in m.
hpi : None | array, shape (n_hpi, 3)
This corresponds to an array of HPI points in the native digitizer
        space. They are only necessary if a device-to-head transform
        (``compute_dev_head_t``) is to be computed.
coord_frame : str
The coordinate frame of the points. Usually this is ``'unknown'``
for native digitizer space.
Other valid values are: ``'head'``, ``'meg'``, ``'mri'``,
``'mri_voxel'``, ``'mri_tal'``, ``'ras'``, ``'fs_tal'``,
``'ctf_head'``, and ``'ctf_meg'``.
.. note::
For custom montages without fiducials, this parameter must be set
to ``'head'``.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_egi
read_dig_fif
read_dig_localite
read_dig_polhemus_isotrak
"""
_validate_type(ch_pos, (dict, None), 'ch_pos')
if ch_pos is None:
ch_names = None
else:
ch_names = list(ch_pos)
dig = _make_dig_points(
nasion=nasion, lpa=lpa, rpa=rpa, hpi=hpi, extra_points=hsp,
dig_ch_pos=ch_pos, coord_frame=coord_frame
)
return DigMontage(dig=dig, ch_names=ch_names)
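# A usage sketch with hypothetical coordinates (meters, native digitizer
# space):
#
#   ch_pos = {'EEG001': [0.01, 0.02, 0.03], 'EEG002': [0.02, 0.01, 0.03]}
#   montage = make_dig_montage(ch_pos=ch_pos,
#                              nasion=[0., 0.1, 0.],
#                              lpa=[-0.08, 0., 0.],
#                              rpa=[0.08, 0., 0.])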
class DigMontage(object):
"""Montage for digitized electrode and headshape position data.
.. warning:: Montages are typically created using one of the helper
functions in the ``See Also`` section below instead of
instantiating this class directly.
Parameters
----------
dig : list of dict
The object containing all the dig points.
ch_names : list of str
The names of the EEG channels.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
.. versionadded:: 0.9.0
"""
def __init__(self, *, dig=None, ch_names=None):
dig = list() if dig is None else dig
_validate_type(item=dig, types=list, item_name='dig')
ch_names = list() if ch_names is None else ch_names
n_eeg = sum([1 for d in dig if d['kind'] == FIFF.FIFFV_POINT_EEG])
if n_eeg != len(ch_names):
raise ValueError(
'The number of EEG channels (%d) does not match the number'
' of channel names provided (%d)' % (n_eeg, len(ch_names))
)
self.dig = dig
self.ch_names = ch_names
def __repr__(self):
"""Return string representation."""
n_points = _count_points_by_type(self.dig)
return ('<DigMontage | {extra:d} extras (headshape), {hpi:d} HPIs,'
' {fid:d} fiducials, {eeg:d} channels>').format(**n_points)
@copy_function_doc_to_method_doc(plot_montage)
def plot(self, scale_factor=20, show_names=True, kind='topomap', show=True,
sphere=None, verbose=None):
return plot_montage(self, scale_factor=scale_factor,
show_names=show_names, kind=kind, show=show,
sphere=sphere)
@fill_doc
def rename_channels(self, mapping, allow_duplicates=False):
"""Rename the channels.
Parameters
----------
%(mapping_rename_channels_duplicates)s
Returns
-------
inst : instance of DigMontage
The instance. Operates in-place.
"""
from .channels import rename_channels
temp_info = create_info(list(self._get_ch_pos()), 1000., 'eeg')
rename_channels(temp_info, mapping, allow_duplicates)
self.ch_names = temp_info['ch_names']
@verbose
def save(self, fname, *, overwrite=False, verbose=None):
"""Save digitization points to FIF.
Parameters
----------
fname : path-like
The filename to use. Should end in .fif or .fif.gz.
%(overwrite)s
%(verbose)s
"""
coord_frame = _check_get_coord_frame(self.dig)
write_dig(fname, self.dig, coord_frame, overwrite=overwrite)
def __iadd__(self, other):
"""Add two DigMontages in place.
Notes
-----
Two DigMontages can only be added if there are no duplicated ch_names
and if fiducials are present they should share the same coordinate
system and location values.
"""
def is_fid_defined(fid):
return not (
fid.nasion is None and fid.lpa is None and fid.rpa is None
)
        # Check that there are no duplicated ch_names
ch_names_intersection = set(self.ch_names).intersection(other.ch_names)
if ch_names_intersection:
raise RuntimeError((
"Cannot add two DigMontage objects if they contain duplicated"
" channel names. Duplicated channel(s) found: {}."
).format(
', '.join(['%r' % v for v in sorted(ch_names_intersection)])
))
# Check for unique matching fiducials
self_fid, self_coord = _get_fid_coords(self.dig)
other_fid, other_coord = _get_fid_coords(other.dig)
if is_fid_defined(self_fid) and is_fid_defined(other_fid):
if self_coord != other_coord:
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations are not in the same '
'coordinate system.')
for kk in self_fid:
if not np.array_equal(self_fid[kk], other_fid[kk]):
raise RuntimeError('Cannot add two DigMontage objects if '
'fiducial locations do not match '
'(%s)' % kk)
# keep self
self.dig = _format_dig_points(
self.dig + [d for d in other.dig
if d['kind'] != FIFF.FIFFV_POINT_CARDINAL]
)
else:
self.dig = _format_dig_points(self.dig + other.dig)
self.ch_names += other.ch_names
return self
def copy(self):
"""Copy the DigMontage object.
Returns
-------
dig : instance of DigMontage
The copied DigMontage instance.
"""
return deepcopy(self)
def __add__(self, other):
"""Add two DigMontages."""
out = self.copy()
out += other
return out
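    # e.g. ``combined = montage_a + montage_b`` concatenates the dig points
    # and channel names, provided the channel names do not overlap and any
    # fiducials present match (see ``__iadd__`` above).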
def __eq__(self, other):
"""Compare different DigMontage objects for equality.
Returns
-------
Boolean output from comparison of .dig
"""
return self.dig == other.dig and self.ch_names == other.ch_names
def _get_ch_pos(self):
pos = [d['r'] for d in _get_dig_eeg(self.dig)]
assert len(self.ch_names) == len(pos)
return OrderedDict(zip(self.ch_names, pos))
def _get_dig_names(self):
NAMED_KIND = (FIFF.FIFFV_POINT_EEG,)
is_eeg = np.array([d['kind'] in NAMED_KIND for d in self.dig])
assert len(self.ch_names) == is_eeg.sum()
dig_names = [None] * len(self.dig)
for ch_name_idx, dig_idx in enumerate(np.where(is_eeg)[0]):
dig_names[dig_idx] = self.ch_names[ch_name_idx]
return dig_names
def get_positions(self):
"""Get all channel and fiducial positions.
Returns
-------
positions : dict
A dictionary of the positions for channels (``ch_pos``),
coordinate frame (``coord_frame``), nasion (``nasion``),
left preauricular point (``lpa``),
right preauricular point (``rpa``),
Head Shape Polhemus (``hsp``), and
                Head Position Indicator (``hpi``).
E.g.::
{
'ch_pos': {'EEG061': [0, 0, 0]},
'nasion': [0, 0, 1],
'coord_frame': 'mni_tal',
'lpa': [0, 1, 0],
'rpa': [1, 0, 0],
'hsp': None,
'hpi': None
}
"""
# get channel positions as dict
ch_pos = self._get_ch_pos()
# get coordframe and fiducial coordinates
montage_bunch = _get_data_as_dict_from_dig(self.dig)
coord_frame = _frame_to_str.get(montage_bunch.coord_frame)
# return dictionary
positions = dict(
ch_pos=ch_pos,
coord_frame=coord_frame,
nasion=montage_bunch.nasion,
lpa=montage_bunch.lpa,
rpa=montage_bunch.rpa,
hsp=montage_bunch.hsp,
hpi=montage_bunch.hpi,
)
return positions
@verbose
def apply_trans(self, trans, verbose=None):
"""Apply a transformation matrix to the montage.
Parameters
----------
trans : instance of mne.transforms.Transform
The transformation matrix to be applied.
%(verbose)s
"""
_validate_type(trans, Transform, 'trans')
coord_frame = self.get_positions()['coord_frame']
trans = _ensure_trans(trans, fro=coord_frame, to=trans['to'])
for d in self.dig:
d['r'] = apply_trans(trans, d['r'])
d['coord_frame'] = trans['to']
@verbose
def add_estimated_fiducials(self, subject, subjects_dir=None,
verbose=None):
"""Estimate fiducials based on FreeSurfer ``fsaverage`` subject.
This takes a montage with the ``mri`` coordinate frame,
corresponding to the FreeSurfer RAS (xyz in the volume) T1w
image of the specific subject. It will call
:func:`mne.coreg.get_mni_fiducials` to estimate LPA, RPA and
Nasion fiducial points.
Parameters
----------
%(subject)s
%(subjects_dir)s
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
See Also
--------
:ref:`tut-source-alignment`
Notes
-----
Since MNE uses the FIF data structure, it relies on the ``head``
coordinate frame. Any coordinate frame can be transformed
to ``head`` if the fiducials (i.e. LPA, RPA and Nasion) are
defined. One can use this function to estimate those fiducials
and then use ``mne.channels.compute_native_head_t(montage)``
to get the head <-> MRI transform.
"""
# get coordframe and fiducial coordinates
montage_bunch = _get_data_as_dict_from_dig(self.dig)
# get the coordinate frame and check that it's MRI
if montage_bunch.coord_frame != FIFF.FIFFV_COORD_MRI:
raise RuntimeError(
f'Montage should be in the "mri" coordinate frame '
f'to use `add_estimated_fiducials`. The current coordinate '
f'frame is {montage_bunch.coord_frame}')
# estimate LPA, nasion, RPA from FreeSurfer fsaverage
fids_mri = list(get_mni_fiducials(subject, subjects_dir))
# add those digpoints to front of montage
self.dig = fids_mri + self.dig
return self
@verbose
def add_mni_fiducials(self, subjects_dir=None, verbose=None):
"""Add fiducials to a montage in MNI space.
Parameters
----------
%(subjects_dir)s
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
Notes
-----
``fsaverage`` is in MNI space and so its fiducials can be
added to a montage in "mni_tal". MNI is an ACPC-aligned
coordinate system (the posterior commissure is the origin)
so since BIDS requires channel locations for ECoG, sEEG and
DBS to be in ACPC space, this function can be used to allow
those coordinate to be transformed to "head" space (origin
between LPA and RPA).
"""
montage_bunch = _get_data_as_dict_from_dig(self.dig)
# get the coordinate frame and check that it's MNI TAL
if montage_bunch.coord_frame != FIFF.FIFFV_MNE_COORD_MNI_TAL:
raise RuntimeError(
f'Montage should be in the "mni_tal" coordinate frame '
                f'to use `add_mni_fiducials`. The current coordinate '
f'frame is {montage_bunch.coord_frame}')
fids_mni = get_mni_fiducials('fsaverage', subjects_dir)
for fid in fids_mni:
# "mri" and "mni_tal" are equivalent for fsaverage
assert fid['coord_frame'] == FIFF.FIFFV_COORD_MRI
fid['coord_frame'] = FIFF.FIFFV_MNE_COORD_MNI_TAL
self.dig = fids_mni + self.dig
return self
@verbose
def remove_fiducials(self, verbose=None):
"""Remove the fiducial points from a montage.
Parameters
----------
%(verbose)s
Returns
-------
inst : instance of DigMontage
The instance, modified in-place.
Notes
-----
MNE will transform a montage to the internal "head" coordinate
frame if the fiducials are present. Under most circumstances, this
is ideal as it standardizes the coordinate frame for things like
plotting. However, in some circumstances, such as saving a ``raw``
with intracranial data to BIDS format, the coordinate frame
should not be changed by removing fiducials.
"""
for d in self.dig.copy():
if d['kind'] == FIFF.FIFFV_POINT_CARDINAL:
self.dig.remove(d)
return self
VALID_SCALES = dict(mm=1e-3, cm=1e-2, m=1)
def _check_unit_and_get_scaling(unit):
_check_option('unit', unit, sorted(VALID_SCALES.keys()))
return VALID_SCALES[unit]
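# e.g. _check_unit_and_get_scaling('mm') returns 1e-3, so positions given
# in millimeters are multiplied by 1e-3 to obtain meters; an unknown unit
# raises via _check_option.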
def transform_to_head(montage):
"""Transform a DigMontage object into head coordinate.
    It requires that the LPA, RPA and nasion fiducial
    points are available and that all fiducial points are in the
    same coordinate frame, e.g. 'unknown'. All points in that
    coordinate frame are then converted to the Neuromag head
    coordinate system.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
montage : instance of DigMontage
The montage after transforming the points to head
coordinate system.
"""
# Get fiducial points and their coord_frame
native_head_t = compute_native_head_t(montage)
montage = montage.copy() # to avoid inplace modification
if native_head_t['from'] != FIFF.FIFFV_COORD_HEAD:
for d in montage.dig:
if d['coord_frame'] == native_head_t['from']:
d['r'] = apply_trans(native_head_t, d['r'])
d['coord_frame'] = FIFF.FIFFV_COORD_HEAD
return montage
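# Usage sketch: ``montage_head = transform_to_head(montage)``; this requires
# LPA, RPA and nasion to be present in ``montage.dig``, and leaves the input
# montage untouched because it operates on a copy.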
def read_dig_dat(fname):
r"""Read electrode positions from a ``*.dat`` file.
.. Warning::
This function was implemented based on ``*.dat`` files available from
`Compumedics <https://compumedicsneuroscan.com/scan-acquire-
configuration-files/>`__ and might not work as expected with novel
files. If it does not read your files correctly please contact the
mne-python developers.
Parameters
----------
fname : path-like
File from which to read electrode locations.
Returns
-------
montage : DigMontage
The montage.
See Also
--------
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
``*.dat`` files are plain text files and can be inspected and amended with
a plain text editor.
"""
from ._standard_montage_utils import _check_dupes_odict
fname = _check_fname(fname, overwrite='read', must_exist=True)
with open(fname, 'r') as fid:
lines = fid.readlines()
ch_names, poss = list(), list()
nasion = lpa = rpa = None
for i, line in enumerate(lines):
items = line.split()
if not items:
continue
elif len(items) != 5:
raise ValueError(
"Error reading %s, line %s has unexpected number of entries:\n"
"%s" % (fname, i, line.rstrip()))
num = items[1]
if num == '67':
continue # centroid
pos = np.array([float(item) for item in items[2:]])
if num == '78':
nasion = pos
elif num == '76':
lpa = pos
elif num == '82':
rpa = pos
else:
ch_names.append(items[0])
poss.append(pos)
electrodes = _check_dupes_odict(ch_names, poss)
return make_dig_montage(electrodes, nasion, lpa, rpa)
def read_dig_fif(fname):
r"""Read digitized points from a .fif file.
Note that electrode names are not present in the .fif file so
they are here defined with the convention from VectorView
systems (EEG001, EEG002, etc.)
Parameters
----------
fname : path-like
FIF file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_captrak
read_dig_polhemus_isotrak
read_dig_hpts
read_dig_localite
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
# Load the dig data
f, tree = fiff_open(fname)[:2]
with f as fid:
dig = _read_dig_fif(fid, tree)
ch_names = []
for d in dig:
if d['kind'] == FIFF.FIFFV_POINT_EEG:
ch_names.append('EEG%03d' % d['ident'])
montage = DigMontage(dig=dig, ch_names=ch_names)
return montage
def read_dig_hpts(fname, unit='mm'):
"""Read historical .hpts mne-c files.
Parameters
----------
fname : path-like
The filepath of .hpts file.
unit : 'm' | 'cm' | 'mm'
Unit of the positions. Defaults to 'mm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
Notes
-----
    The hpts format digitizer data file may contain comment lines starting
with the pound sign (#) and data lines of the form::
<*category*> <*identifier*> <*x/mm*> <*y/mm*> <*z/mm*>
where:
``<*category*>``
defines the type of points. Allowed categories are: ``hpi``,
``cardinal`` (fiducial), ``eeg``, and ``extra`` corresponding to
head-position indicator coil locations, cardinal landmarks, EEG
electrode locations, and additional head surface points,
respectively.
``<*identifier*>``
identifies the point. The identifiers are usually sequential
numbers. For cardinal landmarks, 1 = left auricular point,
2 = nasion, and 3 = right auricular point. For EEG electrodes,
identifier = 0 signifies the reference electrode.
``<*x/mm*> , <*y/mm*> , <*z/mm*>``
Location of the point, usually in the head coordinate system
        in millimeters. If your points are in a different unit (e.g. m),
        the ``unit`` parameter can be changed accordingly.
For example::
cardinal 2 -5.6729 -12.3873 -30.3671
cardinal 1 -37.6782 -10.4957 91.5228
cardinal 3 -131.3127 9.3976 -22.2363
hpi 1 -30.4493 -11.8450 83.3601
hpi 2 -122.5353 9.2232 -28.6828
hpi 3 -6.8518 -47.0697 -37.0829
hpi 4 7.3744 -50.6297 -12.1376
hpi 5 -33.4264 -43.7352 -57.7756
eeg FP1 3.8676 -77.0439 -13.0212
eeg FP2 -31.9297 -70.6852 -57.4881
eeg F7 -6.1042 -68.2969 45.4939
...
"""
from ._standard_montage_utils import _str_names, _str
fname = _check_fname(fname, overwrite='read', must_exist=True)
_scale = _check_unit_and_get_scaling(unit)
out = np.genfromtxt(fname, comments='#',
dtype=(_str, _str, 'f8', 'f8', 'f8'))
kind, label = _str_names(out['f0']), _str_names(out['f1'])
kind = [k.lower() for k in kind]
xyz = np.array([out['f%d' % ii] for ii in range(2, 5)]).T
xyz *= _scale
del _scale
fid_idx_to_label = {'1': 'lpa', '2': 'nasion', '3': 'rpa'}
fid = {fid_idx_to_label[label[ii]]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'cardinal'}
ch_pos = {label[ii]: this_xyz
for ii, this_xyz in enumerate(xyz) if kind[ii] == 'eeg'}
hpi = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'hpi'])
hpi.shape = (-1, 3) # in case it's empty
hsp = np.array([this_xyz for ii, this_xyz in enumerate(xyz)
if kind[ii] == 'extra'])
hsp.shape = (-1, 3) # in case it's empty
return make_dig_montage(ch_pos=ch_pos, **fid, hpi=hpi, hsp=hsp)
def read_dig_egi(fname):
"""Read electrode locations from EGI system.
Parameters
----------
fname : path-like
EGI MFF XML coordinates file from which to read digitization locations.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _read_dig_montage_egi(
fname=fname,
_scaling=1.,
_all_data_kwargs_are_none=True
)
return make_dig_montage(**data)
def read_dig_captrak(fname):
"""Read electrode locations from CapTrak Brain Products system.
Parameters
----------
fname : path-like
BrainVision CapTrak coordinates file from which to read EEG electrode
locations. This is typically in XML format with the .bvct extension.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_localite
read_dig_polhemus_isotrak
make_dig_montage
"""
_check_fname(fname, overwrite='read', must_exist=True)
data = _parse_brainvision_dig_montage(fname, scale=1e-3)
return make_dig_montage(**data)
def read_dig_localite(fname, nasion=None, lpa=None, rpa=None):
"""Read Localite .csv file.
Parameters
----------
fname : path-like
File name.
nasion : str | None
Name of nasion fiducial point.
lpa : str | None
Name of left preauricular fiducial point.
rpa : str | None
Name of right preauricular fiducial point.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_hpts
read_dig_polhemus_isotrak
make_dig_montage
"""
ch_pos = {}
with open(fname) as f:
f.readline() # skip first row
for row in f:
_, name, x, y, z = row.split(",")
ch_pos[name] = np.array((float(x), float(y), float(z))) / 1000
if nasion is not None:
nasion = ch_pos.pop(nasion)
if lpa is not None:
lpa = ch_pos.pop(lpa)
if rpa is not None:
rpa = ch_pos.pop(rpa)
return make_dig_montage(ch_pos, nasion, lpa, rpa)
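# Assumed input layout implied by the parser above (the header row is
# skipped; coordinates are read in mm and divided by 1000; names and
# values are illustrative):
#
#   Index,Name,X,Y,Z
#   0,Nasion,0.0,85.0,-30.0
#   1,LPA,-80.0,0.0,-40.0
#   2,RPA,80.0,0.0,-40.0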
def _get_montage_in_head(montage):
coords = set([d['coord_frame'] for d in montage.dig])
if len(coords) == 1 and coords.pop() == FIFF.FIFFV_COORD_HEAD:
return montage
else:
return transform_to_head(montage.copy())
def _set_montage_fnirs(info, montage):
"""Set the montage for fNIRS data.
This needs to be different to electrodes as each channel has three
coordinates that need to be set. For each channel there is a source optode
location, a detector optode location, and a channel midpoint that must be
stored. This function modifies info['chs'][#]['loc'] and info['dig'] in
place.
"""
from ..preprocessing.nirs import _validate_nirs_info
# Validate that the fNIRS info is correctly formatted
picks = _validate_nirs_info(info)
# Modify info['chs'][#]['loc'] in place
    num_fiducials = len(montage.dig) - len(montage.ch_names)
    for ch_idx in picks:
        ch = info['chs'][ch_idx]['ch_name']
        source, detector = ch.split(' ')[0].split('_')
        source_pos = montage.dig[montage.ch_names.index(source)
                                 + num_fiducials]['r']
        detector_pos = montage.dig[montage.ch_names.index(detector)
                                   + num_fiducials]['r']
info['chs'][ch_idx]['loc'][3:6] = source_pos
info['chs'][ch_idx]['loc'][6:9] = detector_pos
midpoint = (source_pos + detector_pos) / 2
info['chs'][ch_idx]['loc'][:3] = midpoint
info['chs'][ch_idx]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Modify info['dig'] in place
with info._unlock():
info['dig'] = montage.dig
@fill_doc
def _set_montage(info, montage, match_case=True, match_alias=False,
on_missing='raise'):
"""Apply montage to data.
With a DigMontage, this function will replace the digitizer info with
the values specified for the particular montage.
Usually, a montage is expected to contain the positions of all EEG
electrodes and a warning is raised when this is not the case.
Parameters
----------
%(info_not_none)s
%(montage)s
%(match_case)s
%(match_alias)s
%(on_missing_montage)s
Notes
-----
This function will change the info variable in place.
"""
_validate_type(montage, (DigMontage, None, str), 'montage')
if montage is None:
# Next line modifies info['dig'] in place
with info._unlock():
info['dig'] = None
for ch in info['chs']:
# Next line modifies info['chs'][#]['loc'] in place
ch['loc'] = np.full(12, np.nan)
return
if isinstance(montage, str): # load builtin montage
_check_option(
parameter='montage', value=montage,
allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES]
)
montage = make_standard_montage(montage)
mnt_head = _get_montage_in_head(montage)
del montage
def _backcompat_value(pos, ref_pos):
if any(np.isnan(pos)):
return np.full(6, np.nan)
else:
return np.concatenate((pos, ref_pos))
# get the channels in the montage in head
ch_pos = mnt_head._get_ch_pos()
# only get the eeg, seeg, dbs, ecog channels
picks = pick_types(
info, meg=False, eeg=True, seeg=True, dbs=True, ecog=True,
exclude=())
non_picks = np.setdiff1d(np.arange(info['nchan']), picks)
# get the reference position from the loc[3:6]
chs = [info['chs'][ii] for ii in picks]
non_names = [info['chs'][ii]['ch_name'] for ii in non_picks]
del picks
ref_pos = [ch['loc'][3:6] for ch in chs]
# keep reference location from EEG-like channels if they
# already exist and are all the same.
custom_eeg_ref_dig = False
# Note: ref position is an empty list for fieldtrip data
if ref_pos:
if all([np.equal(ref_pos[0], pos).all() for pos in ref_pos]) \
and not np.equal(ref_pos[0], [0, 0, 0]).all():
eeg_ref_pos = ref_pos[0]
# since we have an EEG reference position, we have
# to add it into the info['dig'] as EEG000
custom_eeg_ref_dig = True
if not custom_eeg_ref_dig:
refs = set(ch_pos) & {'EEG000', 'REF'}
assert len(refs) <= 1
eeg_ref_pos = np.zeros(3) if not refs else ch_pos.pop(refs.pop())
# This raises based on info being subset/superset of montage
info_names = [ch['ch_name'] for ch in chs]
dig_names = mnt_head._get_dig_names()
ref_names = [None, 'EEG000', 'REF']
if match_case:
info_names_use = info_names
dig_names_use = dig_names
non_names_use = non_names
else:
ch_pos_use = OrderedDict(
(name.lower(), pos) for name, pos in ch_pos.items())
info_names_use = [name.lower() for name in info_names]
dig_names_use = [name.lower() if name is not None else name
for name in dig_names]
non_names_use = [name.lower() for name in non_names]
ref_names = [name.lower() if name is not None else name
for name in ref_names]
n_dup = len(ch_pos) - len(ch_pos_use)
if n_dup:
raise ValueError('Cannot use match_case=False as %s montage '
'name(s) require case sensitivity' % n_dup)
n_dup = len(info_names_use) - len(set(info_names_use))
if n_dup:
raise ValueError('Cannot use match_case=False as %s channel '
'name(s) require case sensitivity' % n_dup)
ch_pos = ch_pos_use
del ch_pos_use
del dig_names
# use lookup table to match unrecognized channel names to known aliases
if match_alias:
alias_dict = (match_alias if isinstance(match_alias, dict) else
CHANNEL_LOC_ALIASES)
if not match_case:
alias_dict = {
ch_name.lower(): ch_alias.lower()
for ch_name, ch_alias in alias_dict.items()
}
        # exclude aliases whose target is not among the montage dig names,
        # to prevent unnecessary mapping and warning messages based on
        # aliases
        alias_dict = {
            ch_name: ch_alias
            for ch_name, ch_alias in alias_dict.items()
            if ch_alias in dig_names_use
        }
info_names_use = [
alias_dict.get(ch_name, ch_name) for ch_name in info_names_use
]
non_names_use = [
alias_dict.get(ch_name, ch_name) for ch_name in non_names_use
]
# warn user if there is not a full overlap of montage with info_chs
missing = np.where([use not in ch_pos for use in info_names_use])[0]
if len(missing): # DigMontage is subset of info
missing_names = [info_names[ii] for ii in missing]
missing_coord_msg = (
'DigMontage is only a subset of info. There are '
f'{len(missing)} channel position{_pl(missing)} '
'not present in the DigMontage. The required channels are:\n\n'
f'{missing_names}.\n\nConsider using inst.set_channel_types '
'if these are not EEG channels, or use the on_missing '
'parameter if the channel positions are allowed to be unknown '
'in your analyses.'
)
_on_missing(on_missing, missing_coord_msg)
# set ch coordinates and names from digmontage or nan coords
for ii in missing:
ch_pos[info_names_use[ii]] = [np.nan] * 3
del info_names
assert len(non_names_use) == len(non_names)
# There are no issues here with fNIRS being in non_names_use because
# these names are like "D1_S1_760" and the ch_pos for a fNIRS montage
# will have entries "D1" and "S1".
extra = np.where([non in ch_pos for non in non_names_use])[0]
if len(extra):
types = '/'.join(sorted(set(
channel_type(info, non_picks[ii]) for ii in extra)))
names = [non_names[ii] for ii in extra]
warn(f'Not setting position{_pl(extra)} of {len(extra)} {types} '
f'channel{_pl(extra)} found in montage:\n{names}\n'
'Consider setting the channel types to be of '
f'{_docdict["montage_types"]} '
'using inst.set_channel_types before calling inst.set_montage, '
'or omit these channels when creating your montage.')
for ch, use in zip(chs, info_names_use):
# Next line modifies info['chs'][#]['loc'] in place
if use in ch_pos:
ch['loc'][:6] = _backcompat_value(ch_pos[use], eeg_ref_pos)
ch['coord_frame'] = FIFF.FIFFV_COORD_HEAD
del ch_pos
# XXX this is probably wrong as it uses the order from the montage
# rather than the order of our info['ch_names'] ...
digpoints = [
mnt_head.dig[ii] for ii, name in enumerate(dig_names_use)
if name in (info_names_use + ref_names)]
# get a copy of the old dig
if info['dig'] is not None:
old_dig = info['dig'].copy()
else:
old_dig = []
# determine if needed to add an extra EEG REF DigPoint
if custom_eeg_ref_dig:
# ref_name = 'EEG000' if match_case else 'eeg000'
ref_dig_dict = {'kind': FIFF.FIFFV_POINT_EEG,
'r': eeg_ref_pos,
'ident': 0,
'coord_frame': info['dig'].pop()['coord_frame']}
ref_dig_point = _format_dig_points([ref_dig_dict])[0]
# only append the reference dig point if it was already
# in the old dig
if ref_dig_point in old_dig:
digpoints.append(ref_dig_point)
# Next line modifies info['dig'] in place
with info._unlock():
info['dig'] = _format_dig_points(digpoints, enforce_order=True)
# Handle fNIRS with source, detector and channel
fnirs_picks = _picks_to_idx(info, 'fnirs', allow_empty=True)
if len(fnirs_picks) > 0:
_set_montage_fnirs(info, mnt_head)
def _read_isotrak_elp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.elp`` file.
Parameters
----------
fname : str
The filepath of .elp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
value_pattern = r"\-?\d+\.?\d*e?\-?\d*"
coord_pattern = r"({0})\s+({0})\s+({0})\s*$".format(value_pattern)
with open(fname) as fid:
file_str = fid.read()
points_str = [m.groups() for m in re.finditer(coord_pattern, file_str,
re.MULTILINE)]
points = np.array(points_str, dtype=float)
return {
'nasion': points[0], 'lpa': points[1], 'rpa': points[2],
'points': points[3:]
}
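# For reference, the regex above matches one whitespace-separated coordinate
# triple per line (plain or scientific notation). An illustrative file body:
#
#   0.1234  -0.0567  0.0891
#   -0.0712  4.0e-4  0.0810
#   0.0713  0.0002  0.0812
#   0.0021  0.0876  0.0654
#
# The first three matched rows become 'nasion', 'lpa' and 'rpa'; the rest are
# returned under 'points'.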
def _read_isotrak_hsp_points(fname):
"""Read Polhemus Isotrak digitizer data from a ``.hsp`` file.
Parameters
----------
fname : str
The filepath of .hsp Polhemus Isotrak file.
Returns
-------
out : dict of arrays
The dictionary containing locations for 'nasion', 'lpa', 'rpa'
and 'points'.
"""
def get_hsp_fiducial(line):
return np.fromstring(line.replace('%F', ''), dtype=float, sep='\t')
with open(fname) as ff:
for line in ff:
if 'position of fiducials' in line.lower():
break
nasion = get_hsp_fiducial(ff.readline())
lpa = get_hsp_fiducial(ff.readline())
rpa = get_hsp_fiducial(ff.readline())
_ = ff.readline()
line = ff.readline()
if line:
n_points, n_cols = np.fromstring(line, dtype=int, sep='\t')
points = np.fromstring(
string=ff.read(), dtype=float, sep='\t',
).reshape(-1, n_cols)
assert points.shape[0] == n_points
else:
points = np.empty((0, 3))
return {
'nasion': nasion, 'lpa': lpa, 'rpa': rpa, 'points': points
}
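# For reference, the parser above expects an ``.hsp`` layout roughly like the
# following (illustrative): a header line containing "position of fiducials",
# three ``%F``-prefixed fiducial lines (nasion, lpa, rpa), one separator line,
# then a "<n_points>\t<n_cols>" line followed by the tab-separated head-shape
# points themselves.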
def read_dig_polhemus_isotrak(fname, ch_names=None, unit='m'):
"""Read Polhemus digitizer data from a file.
Parameters
----------
fname : path-like
The filepath of Polhemus ISOTrak formatted file.
File extension is expected to be '.hsp', '.elp' or '.eeg'.
ch_names : None | list of str
The names of the points. This will make the points
considered as EEG channels. If None, channels will be assumed
to be HPI if the extension is ``'.elp'``, and extra headshape
points otherwise.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus ISOTrak systems data is usually
exported in meters. Defaults to 'm'.
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
DigMontage
make_dig_montage
read_polhemus_fastscan
read_dig_captrak
read_dig_dat
read_dig_egi
read_dig_fif
read_dig_localite
"""
VALID_FILE_EXT = ('.hsp', '.elp', '.eeg')
fname = _check_fname(fname, overwrite='read', must_exist=True)
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if ext == '.elp':
data = _read_isotrak_elp_points(fname)
else:
        # Default case: read the points as hsp, since that is the most likely scenario
data = _read_isotrak_hsp_points(fname)
if _scale != 1:
data = {key: val * _scale for key, val in data.items()}
else:
pass # noqa
if ch_names is None:
keyword = 'hpi' if ext == '.elp' else 'hsp'
data[keyword] = data.pop('points')
else:
points = data.pop('points')
if points.shape[0] == len(ch_names):
data['ch_pos'] = OrderedDict(zip(ch_names, points))
else:
raise ValueError((
"Length of ``ch_names`` does not match the number of points"
" in {fname}. Expected ``ch_names`` length {n_points:d},"
" given {n_chnames:d}"
).format(
fname=fname, n_points=points.shape[0], n_chnames=len(ch_names)
))
return make_dig_montage(**data)
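# Usage sketch (illustrative, not from the original module): read an Isotrak
# ``.hsp`` file as anonymous head-shape points, or an ``.elp`` file whose
# non-fiducial points should become named EEG channels. File names and channel
# names below are assumptions for illustration only; ``ch_names`` must match
# the number of non-fiducial points or a ValueError is raised.
def _example_read_dig_polhemus_isotrak():
    # head-shape points only; they end up in the montage as 'hsp'
    montage_hsp = read_dig_polhemus_isotrak('digitization.hsp', unit='m')
    # named EEG channels instead of anonymous points
    montage_eeg = read_dig_polhemus_isotrak(
        'digitization.elp', ch_names=['EEG001', 'EEG002', 'EEG003'])
    return montage_hsp, montage_eeg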
def _is_polhemus_fastscan(fname):
header = ''
with open(fname, 'r') as fid:
for line in fid:
if not line.startswith('%'):
break
header += line
return 'FastSCAN' in header
@verbose
def read_polhemus_fastscan(fname, unit='mm', on_header_missing='raise', *,
verbose=None):
"""Read Polhemus FastSCAN digitizer data from a ``.txt`` file.
Parameters
----------
fname : path-like
The path of .txt Polhemus FastSCAN file.
unit : 'm' | 'cm' | 'mm'
Unit of the digitizer file. Polhemus FastSCAN systems data is usually
exported in millimeters. Defaults to 'mm'.
%(on_header_missing)s
%(verbose)s
Returns
-------
points : array, shape (n_points, 3)
The digitization points in digitizer coordinates.
See Also
--------
read_dig_polhemus_isotrak
make_dig_montage
"""
VALID_FILE_EXT = ['.txt']
fname = _check_fname(fname, overwrite='read', must_exist=True)
_scale = _check_unit_and_get_scaling(unit)
_, ext = op.splitext(fname)
_check_option('fname', ext, VALID_FILE_EXT)
if not _is_polhemus_fastscan(fname):
msg = "%s does not contain a valid Polhemus FastSCAN header" % fname
_on_missing(on_header_missing, msg)
points = _scale * np.loadtxt(fname, comments='%', ndmin=2)
_check_dig_shape(points)
return points
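# Usage sketch (illustrative file name): FastSCAN points are typically fed to
# make_dig_montage as extra head-shape points.
def _example_read_polhemus_fastscan():
    hsp = read_polhemus_fastscan('headshape.txt', unit='mm')
    return make_dig_montage(hsp=hsp)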
def _read_eeglab_locations(fname):
ch_names = np.genfromtxt(fname, dtype=str, usecols=3).tolist()
topo = np.loadtxt(fname, dtype=float, usecols=[1, 2])
sph = _topo_to_sph(topo)
pos = _sph_to_cart(sph)
pos[:, [0, 1]] = pos[:, [1, 0]] * [-1, 1]
return ch_names, pos
def read_custom_montage(fname, head_size=HEAD_SIZE_DEFAULT, coord_frame=None):
"""Read a montage from a file.
Parameters
----------
fname : path-like
File extension is expected to be:
'.loc' or '.locs' or '.eloc' (for EEGLAB files),
'.sfp' (BESA/EGI files), '.csd',
        '.elc', '.txt', '.elp' (BESA spherical),
'.bvef' (BrainVision files),
'.csv', '.tsv', '.xyz' (XYZ coordinates).
head_size : float | None
The size of the head (radius, in [m]). If ``None``, returns the values
read from the montage file with no modification. Defaults to 0.095m.
coord_frame : str | None
The coordinate frame of the points. Usually this is "unknown"
for native digitizer space. Defaults to None, which is "unknown" for
most readers but "head" for EEGLAB.
.. versionadded:: 0.20
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
make_dig_montage
make_standard_montage
Notes
-----
    The function is a helper to read electrode positions you may have
    in various formats. Most of these formats are only weakly specified
    in terms of units and coordinate systems, which implies that setting
    a montage using a DigMontage produced by this function may
    be problematic. If you use a standard/template montage (e.g., 10/20,
    10/10 or 10/05), we recommend you use :func:`make_standard_montage`.
    If you have positions in memory, you can also use
    :func:`make_dig_montage`, which takes arrays as input.
"""
from ._standard_montage_utils import (
_read_theta_phi_in_degrees, _read_sfp, _read_csd, _read_elc,
_read_elp_besa, _read_brainvision, _read_xyz
)
SUPPORTED_FILE_EXT = {
'eeglab': ('.loc', '.locs', '.eloc', ),
'hydrocel': ('.sfp', ),
'matlab': ('.csd', ),
'asa electrode': ('.elc', ),
'generic (Theta-phi in degrees)': ('.txt', ),
'standard BESA spherical': ('.elp', ), # NB: not same as polhemus elp
'brainvision': ('.bvef', ),
'xyz': ('.csv', '.tsv', '.xyz'),
}
fname = _check_fname(fname, overwrite='read', must_exist=True)
_, ext = op.splitext(fname)
_check_option('fname', ext, list(sum(SUPPORTED_FILE_EXT.values(), ())))
if ext in SUPPORTED_FILE_EXT['eeglab']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
ch_names, pos = _read_eeglab_locations(fname)
scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
pos *= scale
montage = make_dig_montage(
ch_pos=OrderedDict(zip(ch_names, pos)),
coord_frame='head',
)
elif ext in SUPPORTED_FILE_EXT['hydrocel']:
montage = _read_sfp(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['matlab']:
montage = _read_csd(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['asa electrode']:
montage = _read_elc(fname, head_size=head_size)
elif ext in SUPPORTED_FILE_EXT['generic (Theta-phi in degrees)']:
if head_size is None:
raise ValueError(
"``head_size`` cannot be None for '{}'".format(ext))
montage = _read_theta_phi_in_degrees(fname, head_size=head_size,
fid_names=('Nz', 'LPA', 'RPA'))
elif ext in SUPPORTED_FILE_EXT['standard BESA spherical']:
montage = _read_elp_besa(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['brainvision']:
montage = _read_brainvision(fname, head_size)
elif ext in SUPPORTED_FILE_EXT['xyz']:
montage = _read_xyz(fname)
if coord_frame is not None:
coord_frame = _coord_frame_const(coord_frame)
for d in montage.dig:
d['coord_frame'] = coord_frame
return montage
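# Usage sketch (illustrative file name): read BESA/EGI ``.sfp`` positions,
# keeping the coordinates exactly as stored in the file by passing
# ``head_size=None`` (allowed for this format, as seen above).
def _example_read_custom_montage():
    return read_custom_montage('electrodes.sfp', head_size=None)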
def compute_dev_head_t(montage):
"""Compute device to head transform from a DigMontage.
Parameters
----------
montage : instance of DigMontage
The DigMontage must contain the fiducials in head
coordinate system and hpi points in both head and
meg device coordinate system.
Returns
-------
dev_head_t : instance of Transform
A Device-to-Head transformation matrix.
"""
_, coord_frame = _get_fid_coords(montage.dig)
if coord_frame != FIFF.FIFFV_COORD_HEAD:
        raise ValueError('montage should have been set to the head coordinate '
                         'system with the transform_to_head function.')
hpi_head = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_HEAD)], float)
hpi_dev = np.array(
[d['r'] for d in montage.dig
if (d['kind'] == FIFF.FIFFV_POINT_HPI and
d['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)], float)
if not (len(hpi_head) == len(hpi_dev) and len(hpi_dev) > 0):
raise ValueError((
"To compute Device-to-Head transformation, the same number of HPI"
" points in device and head coordinates is required. (Got {dev}"
" points in device and {head} points in head coordinate systems)"
).format(dev=len(hpi_dev), head=len(hpi_head)))
trans = _quat_to_affine(_fit_matched_points(hpi_dev, hpi_head)[0])
return Transform(fro='meg', to='head', trans=trans)
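# Usage sketch: a montage carrying HPI points in both coordinate frames is
# first brought to head coordinates with ``transform_to_head`` (the function
# referenced in the error message above), then passed in.
def _example_compute_dev_head_t(montage):
    head_montage = transform_to_head(montage)
    return compute_dev_head_t(head_montage)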
def compute_native_head_t(montage):
"""Compute the native-to-head transformation for a montage.
    This uses the fiducials in the native space to compute the
    transform to the head coordinate frame.
Parameters
----------
montage : instance of DigMontage
The montage.
Returns
-------
native_head_t : instance of Transform
A native-to-head transformation matrix.
"""
# Get fiducial points and their coord_frame
fid_coords, coord_frame = _get_fid_coords(montage.dig, raise_error=False)
if coord_frame is None:
coord_frame = FIFF.FIFFV_COORD_UNKNOWN
if coord_frame == FIFF.FIFFV_COORD_HEAD:
native_head_t = np.eye(4)
else:
fid_keys = ('nasion', 'lpa', 'rpa')
for key in fid_keys:
if fid_coords[key] is None:
warn('Fiducial point %s not found, assuming identity %s to '
'head transformation'
% (key, _verbose_frames[coord_frame],))
native_head_t = np.eye(4)
break
else:
native_head_t = get_ras_to_neuromag_trans(
*[fid_coords[key] for key in fid_keys])
return Transform(coord_frame, 'head', native_head_t)
def make_standard_montage(kind, head_size='auto'):
"""Read a generic (built-in) standard montage that ships with MNE-Python.
Parameters
----------
kind : str
The name of the montage to use.
.. note::
You can retrieve the names of all
built-in montages via :func:`mne.channels.get_builtin_montages`.
head_size : float | None | str
The head size (radius, in meters) to use for spherical montages.
Can be None to not scale the read sizes. ``'auto'`` (default) will
use 95mm for all montages except the ``'standard*'``, ``'mgh*'``, and
``'artinis*'``, which are already in fsaverage's MRI coordinates
(same as MNI).
Returns
-------
montage : instance of DigMontage
The montage.
See Also
--------
get_builtin_montages
make_dig_montage
read_custom_montage
Notes
-----
Individualized (digitized) electrode positions should be read in using
:func:`read_dig_captrak`, :func:`read_dig_dat`, :func:`read_dig_egi`,
:func:`read_dig_fif`, :func:`read_dig_polhemus_isotrak`,
:func:`read_dig_hpts`, or manually made with :func:`make_dig_montage`.
.. versionadded:: 0.19.0
"""
from ._standard_montage_utils import standard_montage_look_up_table
_validate_type(kind, str, 'kind')
_check_option(
parameter='kind', value=kind,
allowed_values=[m.name for m in _BUILTIN_STANDARD_MONTAGES]
)
_validate_type(head_size, ('numeric', str, None), 'head_size')
if isinstance(head_size, str):
_check_option('head_size', head_size, ('auto',), extra='when str')
if kind.startswith(('standard', 'mgh', 'artinis')):
head_size = None
else:
head_size = HEAD_SIZE_DEFAULT
return standard_montage_look_up_table[kind](head_size=head_size)
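# Usage sketch: build the 10-20 template montage and attach it to a recording;
# ``raw`` is assumed to be an existing Raw instance.
def _example_make_standard_montage(raw):
    montage = make_standard_montage('standard_1020')
    raw.set_montage(montage)
    return raw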
def _check_dig_shape(pts):
_validate_type(pts, np.ndarray, 'points')
if pts.ndim != 2 or pts.shape[-1] != 3:
raise ValueError(
f'Points must be of shape (n, 3) instead of {pts.shape}')
|
{
"content_hash": "530530434c516ff1545df0fbf512090c",
"timestamp": "",
"source": "github",
"line_count": 1682,
"max_line_length": 79,
"avg_line_length": 33.76516052318668,
"alnum_prop": 0.5824309333896783,
"repo_name": "olafhauk/mne-python",
"id": "80968a7cf0a3a6ff1cd774a972e8e5ffb1e34507",
"size": "57359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/channels/montage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "13067"
},
{
"name": "Makefile",
"bytes": "4528"
},
{
"name": "Python",
"bytes": "10131395"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19587"
}
],
"symlink_target": ""
}
|
from fabric.api import run, sudo
from fabric.decorators import with_settings
from fabric.context_managers import shell_env # , settings
from fabric.operations import put
from fabric.colors import red, green, yellow
USERNAME = 'takamaru'
FULLUSERNAME = 'Shoei Takamaru'
@with_settings(warn_only=True)
def update_apt_pkgs():
    # nginx is used as a sentinel here: the full update/upgrade cycle only
    # runs on a box that has not been provisioned yet
    if sudo('dpkg -s nginx | grep "install ok installed" > /dev/null').failed:
sudo('apt-get update')
sudo('apt-get upgrade -y')
sudo('apt-get dist-upgrade -y')
sudo('apt-get autoclean')
sudo('apt-get autoremove -y')
@with_settings(warn_only=True)
def install_apt_pkgs():
pkgs = [
'apg',
'build-essential',
'colordiff',
'ctags',
'curl',
'git',
'imagemagick',
'libbz2-dev',
'libgd-dev',
'libmysqlclient-dev',
'libpq-dev',
'libreadline-dev',
'libreadline6-dev',
'libsox-fmt-mp3',
'libsqlite3-dev',
'libssl-dev',
'libxml2-dev',
'libxslt-dev',
'mailutils',
'ncurses-term', # xterm-256color
'nkf',
'ntp',
'openssl',
'postfix',
'postgresql',
'silversearcher-ag',
'sox',
'sqlite3',
'sysstat', # sar
'sysv-rc-conf',
'tk-dev',
'traceroute',
'vsftpd',
'yasm',
'zip',
'zlib1g-dev',
]
sudo('apt-get install -y ' + ' '.join(pkgs))
def install_nginx():
sudo('apt-get install -y nginx')
def install_redis():
sudo('apt-get install -y redis-server')
def install_mysql():
# Skip interactive mode
sudo('echo mysql-server mysql-server/root_password password test | sudo debconf-set-selections')
sudo('echo mysql-server mysql-server/root_password_again password test | sudo debconf-set-selections')
sudo('apt-get install -y mysql-server')
@with_settings(warn_only=True)
def install_mosh():
if sudo('dpkg -s mosh | grep "install ok installed" > /dev/null').failed:
sudo('apt-get install -y python-software-properties')
sudo('add-apt-repository -y ppa:keithw/mosh')
sudo('apt-get update')
sudo('apt-get install -y mosh')
@with_settings(warn_only=True)
def install_ffmpeg():
if sudo('dpkg -s ffmpeg | grep "install ok installed" > /dev/null').failed:
sudo('add-apt-repository -y ppa:mc3man/trusty-media')
sudo('apt-get update')
sudo('apt-get install -y ffmpeg')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_ruby():
with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.rbenv/bin:$PATH"):
# Install rbenv
if sudo('test -d ~/.rbenv').failed:
sudo('git clone https://github.com/sstephenson/rbenv.git ~/.rbenv')
print green('"rbenv" installed')
else:
print green('"rbenv" is already installed')
if sudo('grep ".rbenv" ~/.bashrc > /dev/null').failed:
path_str = 'PATH="$HOME\/.rbenv\/bin:$PATH"'
sudo('echo -e "\n# rbenv" >> ~/.bashrc')
sudo('echo "export %s" >> ~/.bashrc')
sudo("sed -i -e 's/%s/" + path_str + "/g' ~/.bashrc")
print green('"rbenv PATH" configured')
else:
print green('"rbenv PATH" is already written')
if sudo('grep "rbenv init" ~/.bashrc > /dev/null').failed:
rb_str = '"$(rbenv init -)"'
sudo('echo "eval %s" >> ~/.bashrc')
sudo("sed -i -e 's/%s/" + rb_str + "/g' ~/.bashrc")
else:
print green('"rbenv" init is already written')
# Install ruby-build
if sudo('test -d ~/.rbenv/plugins/ruby-build').failed:
sudo('git clone https://github.com/sstephenson/ruby-build.git ~/.rbenv/plugins/ruby-build')
else:
print green('"ruby-build" is already installed')
# Install Ruby
ruby_ver = sudo("rbenv install -l | awk '{print $1}' | egrep -v 'preview|dev|rc' | egrep --color=never '^2.1.[0-9](.*)$' | tail -1") # 2.1.x
if sudo('rbenv versions | grep --color=never "' + ruby_ver + '" > /dev/null').failed:
sudo('rbenv install ' + ruby_ver)
else:
print green('"ruby %s" is already installed' % ruby_ver)
sudo('rbenv global ' + ruby_ver)
sudo('rbenv rehash')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_gems():
with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.rbenv/bin:$PATH"):
if sudo('test -f ~/.gemrc && grep "gem:" ~/.gemrc > /dev/null').failed:
sudo('echo "gem: --no-ri --no-rdoc -V" >> ~/.gemrc')
else:
print green('".gemrc" already exists')
gems = [
'bundler',
'pry',
'rails',
'rbenv-rehash',
'rspec',
'spring',
]
not_installed = []
installed = []
for g in gems:
if sudo("eval \"$(rbenv init -)\" && gem list | awk '{print $1}' | egrep '^" + g + "$' > /dev/null").failed:
not_installed.append(g)
else:
installed.append(g)
if len(installed) > 0:
print green('"%s" is already installed') % ', '.join(installed)
if len(not_installed) > 0:
print yellow('"%s" is not installed') % ', '.join(not_installed)
sudo('eval "$(rbenv init -)" && gem install ' + ' '.join(not_installed))
sudo('rbenv rehash')
@with_settings(warn_only=True)
def create_user():
if sudo('grep "' + USERNAME + '" /etc/sudoers > /dev/null').failed:
sudo("sed -i -e '/^root/a " + USERNAME + " ALL=(ALL:ALL) ALL' /etc/sudoers")
else:
print green('"' + USERNAME + '" is already in sudoers')
if run('cat /etc/passwd | grep ' + USERNAME + ' > /dev/null').succeeded:
print green('"' + USERNAME + '" already exists')
return
params = {
'-c': '"' + FULLUSERNAME + '"',
'-d': '/home/' + USERNAME,
'-s': '/bin/bash',
'-m': USERNAME, # -m and create USERNAME user
}
param_array = []
for k, v in params.items():
param_array.append(k + ' ' + v)
sudo('useradd ' + ' '.join(param_array))
@with_settings(warn_only=True, sudo_user=USERNAME)
def put_rc_files():
vimrc = '.vimrc'
if run('test -f /home/' + USERNAME + '/' + vimrc).failed:
put('~/' + vimrc, '~/' + vimrc)
with shell_env(HOME='/home/' + USERNAME):
sudo('cp ~vagrant/.vimrc ~')
run('rm -f ~vagrant/.vimrc')
else:
print green('"%s" already exists' % vimrc)
run('rm -f ~vagrant/.vimrc')
gitconfig = '.gitconfig'
if run('test -f /home/' + USERNAME + '/' + gitconfig).failed:
put('~/' + gitconfig, '~/' + gitconfig)
with shell_env(HOME='/home/' + USERNAME):
sudo('cp ~vagrant/.gitconfig ~')
run('rm -f ~vagrant/.gitconfig')
else:
print green('"%s" already exists' % gitconfig)
run('rm -f ~vagrant/.gitconfig')
inputrc = '.inputrc'
if run('test -f /home/' + USERNAME + '/' + inputrc).failed:
put('~/' + inputrc, '~/' + inputrc)
with shell_env(HOME='/home/' + USERNAME):
sudo('cp ~vagrant/.inputrc ~')
run('rm -f ~vagrant/.inputrc')
else:
print green('"%s" already exists' % inputrc)
run('rm -f ~vagrant/.inputrc')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_neobundle():
with shell_env(HOME='/home/' + USERNAME):
if sudo('test -d ~/.vim/bundle').failed:
sudo('mkdir -p ~/.vim/bundle')
else:
print green('"~/.vim/bundle" already exists')
if sudo('test -d ~/.vim/bundle/neobundle.vim').failed:
sudo('git clone git://github.com/Shougo/neobundle.vim ~/.vim/bundle/neobundle.vim')
sudo('~/.vim/bundle/neobundle.vim/bin/neoinstall')
else:
print green('"neobundle.vim" is already installed')
@with_settings(warn_only=True, sudo_user=USERNAME)
def modify_bashrc():
with shell_env(HOME='/home/' + USERNAME):
if run('grep "GIT_EDITOR=vim" ~/.bashrc > /dev/null').succeeded:
print green('".bashrc" no need to modify')
return
sudo('echo -e "\n# aliases" >> ~/.bashrc')
sudo("echo \"alias ls='ls -F --color'\" >> ~/.bashrc")
sudo("echo \"alias grep='grep --color'\" >> ~/.bashrc")
sudo("echo \"alias egrep='egrep --color'\" >> ~/.bashrc")
sudo("echo \"alias less='less -R'\" >> ~/.bashrc")
sudo('echo -e "\n# git" >> ~/.bashrc')
sudo("echo 'export GIT_EDITOR=vim' >> ~/.bashrc")
sudo("echo 'PS1=%s' >> ~/.bashrc")
sudo("sed -i -e \"s/PS1=%s/PS1='%s'/\" ~/.bashrc")
sudo("sed -i -e 's/%s/\[" + r'\\\\' + "u:" + r'\\\\' + "W$(__git_ps1 \"(%s)\")]$ /' ~/.bashrc")
@with_settings(warn_only=True, sudo_user=USERNAME)
def put_ssh_pubkey():
macbook = 'takamario@Shoeis-MacBook-Air.local'
put('~/.ssh/id_rsa.pub', '~/authorized_keys')
if sudo('test -d ~' + USERNAME + '/.ssh').failed:
sudo('mkdir -m 700 ~' + USERNAME + '/.ssh')
else:
print green('"~' + USERNAME + '/.ssh" already exists')
if sudo('grep "' + macbook + '" ~' + USERNAME + '/.ssh/authorized_keys > /dev/null').failed:
sudo('cat ~vagrant/authorized_keys >> ~' + USERNAME + '/.ssh/authorized_keys')
else:
print green('"ssh_pubkey" is already added')
@with_settings(warn_only=True, sudo_user=USERNAME)
def create_ssh_keys():
if sudo('test -f ~' + USERNAME + '/.ssh/id_rsa').succeeded:
print green('"ssh_seckey" already exists')
else:
if sudo('test -d ~' + USERNAME + '/.ssh').failed:
sudo('mkdir -m 700 ~' + USERNAME + '/.ssh')
else:
print green('"~' + USERNAME + '/.ssh" already exists')
sudo('ssh-keygen -t rsa -N "" -f ~' + USERNAME + '/.ssh/id_rsa')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_nodejs():
with shell_env(HOME='/home/' + USERNAME):
if sudo('test -d ~/.nvm').succeeded:
print green('"nvm" is already installed')
else:
# Install nvm
sudo('curl -s https://raw.github.com/creationix/nvm/master/install.sh | sh')
sudo("echo 'export NVM_HOME=\"$HOME/.nvm\"' >> ~/.profile")
# Install node
if sudo('. $HOME/.nvm/nvm.sh && which node').failed:
current_stable = sudo("curl -s http://nodejs.org | grep -i 'current version' | sed -e 's/\(.*\)current version: \(v[0-9]*\.[0-9]*\.[0-9]*\)\(.*\)/" + '\\' + "2/i'")
sudo('. $HOME/.nvm/nvm.sh && nvm install ' + current_stable)
sudo('. $HOME/.nvm/nvm.sh && nvm use ' + current_stable)
sudo('. $HOME/.nvm/nvm.sh && nvm alias default ' + current_stable)
else:
print green('"node.js" is already installed')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_npms():
with shell_env(HOME='/home/' + USERNAME):
npms = [
'bower',
'coffee-script',
'coffeelint',
'express-generator',
'grunt-cli',
'jasmine-node',
'js-fixtures',
'js2coffee',
'jshint',
'karma',
'karma-coverage',
'karma-expect',
'karma-mocha',
'karma-chai',
'mocha',
'ngmin',
'node-dev',
'phantomjs',
'uglify-js',
'yo',
]
not_installed = []
installed = []
for n in npms:
if sudo(". $HOME/.nvm/nvm.sh && npm list -g --parseable | egrep 'lib/node_modules/" + n + "$' > /dev/null").failed:
not_installed.append(n)
else:
installed.append(n)
if len(installed) > 0:
print green('"%s" is already installed') % ', '.join(installed)
if len(not_installed) > 0:
print yellow('"%s" is not installed') % ', '.join(not_installed)
sudo('. $HOME/.nvm/nvm.sh && npm install -g ' + ' '.join(not_installed))
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_python():
with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.pyenv/bin:$PATH"):
# Install pyenv
if sudo('test -d ~/.pyenv').failed:
sudo('git clone https://github.com/yyuu/pyenv.git ~/.pyenv')
else:
print green('"pyenv" is already installed')
if sudo('grep ".pyenv" ~/.bashrc > /dev/null').failed:
pyenv_root = 'PYENV_ROOT="$HOME\/.pyenv"'
path_str = 'PATH="$PYENV_ROOT\/bin:$PATH"'
sudo('echo -e "\n# pyenv" >> ~/.bashrc')
sudo('echo "export %s" >> ~/.bashrc')
sudo("sed -i -e 's/%s/" + pyenv_root + "/g' ~/.bashrc")
sudo('echo "export %s" >> ~/.bashrc')
sudo("sed -i -e 's/%s/" + path_str + "/g' ~/.bashrc")
else:
print green('"pyenv PATH" is already written')
if sudo('grep "pyenv init" ~/.bashrc > /dev/null').failed:
py_str = '"$(pyenv init -)"'
sudo('echo "eval %s" >> ~/.bashrc')
sudo("sed -i -e 's/%s/" + py_str + "/g' ~/.bashrc")
else:
print green('"pyenv" init is already written')
# Install Python
python_ver = sudo("pyenv install -l | awk '{print $1}' | egrep --color=never '^2\.7\.[0-9.]+' | tail -1") # 2.7.x
if sudo('pyenv versions | grep --color=never "' + python_ver + '" > /dev/null').failed:
sudo('pyenv install ' + python_ver)
else:
print green('"python %s" is already installed' % python_ver)
sudo('pyenv global ' + python_ver)
sudo('pyenv rehash')
@with_settings(warn_only=True, sudo_user=USERNAME)
def install_pip():
with shell_env(HOME='/home/' + USERNAME, PATH="/home/" + USERNAME + "/.pyenv/bin:$PATH"):
pips = [
'flake8',
]
not_installed = []
installed = []
for p in pips:
if sudo("eval \"$(pyenv init -)\" && pip list | awk '{print $1}' | egrep --color=never '^" + p + "$' > /dev/null").failed:
not_installed.append(p)
else:
installed.append(p)
if len(installed) > 0:
print green('"%s" is already installed') % ', '.join(installed)
if len(not_installed) > 0:
print yellow('"%s" is not installed') % ', '.join(not_installed)
sudo('eval "$(pyenv init -)" && pip install ' + ' '.join(not_installed))
@with_settings(warn_only=True)
def install_ja_locale():
if sudo('locale -a | grep "ja_JP"').failed:
sudo('locale-gen ja_JP.UTF-8')
else:
print green('"ja_JP.UTF-8" is already installed')
@with_settings(warn_only=True)
def configure_ntp():
if sudo('grep "mfeed.ad.jp" /etc/ntp.conf > /dev/null').failed:
sudo("sed -i -e 's/^\(server *\)/#" + r'\\' +"1/' /etc/ntp.conf")
sudo("sed -i -e '/^#server ntp.ubuntu.com/a server ntp.jst.mfeed.ad.jp iburst" + r'\\' + "nserver ntp.ring.gr.jp iburst' /etc/ntp.conf")
else:
print green('"ntp" is already configured')
@with_settings(warn_only=True)
def set_utc():
tz_str = 'Etc/UTC'
if sudo('grep "' + tz_str + '" /etc/timezone > /dev/null').failed:
sudo("echo '" + tz_str + "' > /etc/timezone")
sudo('dpkg-reconfigure -f noninteractive tzdata')
else:
print green('"UTC" is already used')
@with_settings(warn_only=True)
def install_heroku_toolbelt():
if run('which heroku > /dev/null').failed:
print yellow('"heroku toolbelt" is not installed')
run('wget -qO- https://toolbelt.heroku.com/install-ubuntu.sh | sh')
else:
print green('"heroku toolbelt" is already installed')
@with_settings(warn_only=True)
def install_go():
if run('which go > /dev/null').failed:
if run('test -d /usr/local/go').failed:
version = 'go1.2.linux-amd64.tar.gz'
print yellow('"Go" is not installed')
run('wget -O /tmp/' + version + ' https://go.googlecode.com/files/' + version)
sudo('tar -C /usr/local -xf /tmp/' + version)
run('rm /tmp/' + version)
if run('grep "/usr/local/go/bin" /home/' + USERNAME + '/.bashrc > /dev/null').failed:
sudo("echo 'export PATH=\"/usr/local/go/bin:$PATH\"' >> /home/" + USERNAME + '/.bashrc')
print green('"Go" is successfully installed')
else:
print green('"Go" is already installed')
def install_middlewares():
update_apt_pkgs()
install_apt_pkgs()
install_nginx()
install_redis()
#install_mysql()
install_mosh()
install_ffmpeg()
create_user()
install_ruby()
install_gems()
install_nodejs()
install_npms()
install_python()
install_pip()
put_rc_files()
modify_bashrc()
install_neobundle()
put_ssh_pubkey()
create_ssh_keys()
install_ja_locale()
configure_ntp()
set_utc()
install_heroku_toolbelt()
install_go()
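# Usage sketch (illustrative host): every function above is a Fabric task,
# so a full provisioning run looks like
#
#   fab -H vagrant@192.168.33.10 install_middlewares
#
# and individual steps can be re-run in isolation, e.g.
#
#   fab -H vagrant@192.168.33.10 install_ruby install_gems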
|
{
"content_hash": "a96093fb4a6099766fdc30c846705a18",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 176,
"avg_line_length": 35.4435318275154,
"alnum_prop": 0.534383871154626,
"repo_name": "takamario/fabric",
"id": "e3fc5cabfd96e6201c33e0396bf1c80f23c82cd0",
"size": "17284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import logging
from urllib import quote
from pylons import tmpl_context as c, app_globals as g
from pylons import request
from tg import expose, redirect
from tg.decorators import with_trailing_slash, without_trailing_slash
from bson import ObjectId
from ming.utils import LazyProperty
import allura.tasks
from allura import version
from allura.lib import helpers as h
from allura import model as M
from allura.lib import security
from allura.lib.decorators import require_post
from allura.lib.security import has_access
from allura.app import Application, SitemapEntry, DefaultAdminController, ConfigOption
log = logging.getLogger(__name__)
class RepositoryApp(Application):
END_OF_REF_ESCAPE = '~'
__version__ = version.__version__
permissions = [
'read', 'write', 'create',
'unmoderated_post', 'post', 'moderate', 'admin',
'configure']
permissions_desc = {
'read': 'Browse repo via web UI. Removing read does not prevent direct repo read access.',
'write': 'Repo push access.',
'create': 'Not used.',
'admin': 'Set permissions, default branch, and viewable files.',
}
config_options = Application.config_options + [
ConfigOption('cloned_from_project_id', ObjectId, None),
ConfigOption('cloned_from_repo_id', ObjectId, None),
ConfigOption('init_from_url', str, None)
]
tool_label = 'Repository'
default_mount_label = 'Code'
default_mount_point = 'code'
relaxed_mount_points = True
ordinal = 2
forkable = False
default_branch_name = None # master or default or some such
repo = None # override with a property in child class
icons = {
24: 'images/code_24.png',
32: 'images/code_32.png',
48: 'images/code_48.png'
}
def __init__(self, project, config):
Application.__init__(self, project, config)
self.admin = RepoAdminController(self)
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return [SitemapEntry(
self.config.options.mount_label,
'.')]
@property
@h.exceptionless([], log)
def sitemap(self):
menu_id = self.config.options.mount_label
with h.push_config(c, app=self):
return [
SitemapEntry(menu_id, '.')[self.sidebar_menu()]]
def admin_menu(self):
admin_url = c.project.url() + 'admin/' + \
self.config.options.mount_point + '/'
links = [
SitemapEntry('Viewable Files', admin_url + 'extensions', className='admin_modal')]
links.append(SitemapEntry('Refresh Repository',
c.project.url() +
self.config.options.mount_point +
'/refresh',
))
links += super(RepositoryApp, self).admin_menu()
        # drop the default 'Options' entry added by the parent class
        links = [l for l in links if l.label != 'Options']
return links
@h.exceptionless([], log)
def sidebar_menu(self):
if not self.repo or self.repo.status != 'ready':
return []
links = [SitemapEntry('Browse Commits', c.app.url +
'commit_browser', ui_icon=g.icons['folder'])]
if self.forkable and self.repo.status == 'ready':
links.append(
SitemapEntry('Fork', c.app.url + 'fork', ui_icon=g.icons['fork']))
merge_request_count = self.repo.merge_requests_by_statuses(
'open').count()
if merge_request_count:
links += [
SitemapEntry(
'Merge Requests', c.app.url + 'merge-requests/',
small=merge_request_count)]
if self.repo.forks:
links += [
SitemapEntry('Forks', c.app.url + 'forks/',
small=len(self.repo.forks))
]
if self.repo.upstream_repo.name:
repo_path_parts = self.repo.upstream_repo.name.strip(
'/').split('/')
links += [
SitemapEntry('Clone of'),
SitemapEntry('%s / %s' %
(repo_path_parts[1], repo_path_parts[-1]),
self.repo.upstream_repo.name)
]
if not c.app.repo.is_empty() and has_access(c.app.repo, 'admin'):
merge_url = c.app.url + 'request_merge'
if getattr(c, 'revision', None):
merge_url = merge_url + '?branch=' + h.urlquote(c.revision)
links.append(SitemapEntry('Request Merge', merge_url,
ui_icon=g.icons['merge'],
))
pending_upstream_merges = self.repo.pending_upstream_merges()
if pending_upstream_merges:
links.append(SitemapEntry(
'Pending Merges',
self.repo.upstream_repo.name + 'merge-requests/',
small=pending_upstream_merges))
ref_url = self.repo.url_for_commit(
self.default_branch_name, url_type='ref')
branches = self.repo.get_branches()
if branches:
links.append(SitemapEntry('Branches'))
for branch in branches:
if branch.name == self.default_branch_name:
branches.remove(branch)
branches.insert(0, branch)
break
max_branches = 10
for branch in branches[:max_branches]:
links.append(SitemapEntry(
branch.name,
quote(self.repo.url_for_commit(branch.name) + 'tree/')))
if len(branches) > max_branches:
links.append(
SitemapEntry(
'More Branches',
ref_url + 'branches/',
))
tags = self.repo.get_tags()
if tags:
links.append(SitemapEntry('Tags'))
max_tags = 10
for b in tags[:max_tags]:
links.append(SitemapEntry(
b.name,
quote(self.repo.url_for_commit(b.name) + 'tree/')))
if len(tags) > max_tags:
links.append(
SitemapEntry(
'More Tags',
ref_url + 'tags/',
))
return links
def install(self, project):
self.config.options['project_name'] = project.name
super(RepositoryApp, self).install(project)
role_admin = M.ProjectRole.by_name('Admin')._id
role_developer = M.ProjectRole.by_name('Developer')._id
role_auth = M.ProjectRole.authenticated()._id
role_anon = M.ProjectRole.anonymous()._id
self.config.acl = [
M.ACE.allow(role_anon, 'read'),
M.ACE.allow(role_auth, 'post'),
M.ACE.allow(role_auth, 'unmoderated_post'),
M.ACE.allow(role_developer, 'create'),
M.ACE.allow(role_developer, 'write'),
M.ACE.allow(role_developer, 'moderate'),
M.ACE.allow(role_admin, 'configure'),
M.ACE.allow(role_admin, 'admin'),
]
def uninstall(self, project):
allura.tasks.repo_tasks.uninstall.post()
class RepoAdminController(DefaultAdminController):
def __init__(self, app):
self.app = app
@LazyProperty
def repo(self):
return self.app.repo
def _check_security(self):
security.require_access(self.app, 'configure')
@with_trailing_slash
@expose()
def index(self, **kw):
redirect('extensions')
@without_trailing_slash
@expose('jinja:allura:templates/repo/admin_extensions.html')
def extensions(self, **kw):
return dict(app=self.app,
allow_config=True,
additional_viewable_extensions=getattr(self.repo, 'additional_viewable_extensions', ''))
@without_trailing_slash
@expose()
@require_post()
def set_extensions(self, **post_data):
self.repo.additional_viewable_extensions = post_data[
'additional_viewable_extensions']
@without_trailing_slash
@expose('jinja:allura:templates/repo/default_branch.html')
def set_default_branch_name(self, branch_name=None, **kw):
if (request.method == 'POST') and branch_name:
self.repo.set_default_branch(branch_name)
redirect(request.referer)
else:
return dict(app=self.app,
default_branch_name=self.app.default_branch_name)
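# Illustrative sketch (not part of the original file): a concrete SCM tool
# would subclass RepositoryApp, override the ``repo`` placeholder with a
# property pointing at its repository model, and pick a default branch,
# roughly:
#
#   class GitApp(RepositoryApp):
#       tool_label = 'Git'
#       default_branch_name = 'master'
#
#       @LazyProperty
#       def repo(self):
#           return GM.Repository.query.get(app_config_id=self.config._id)
#
# where ``GM.Repository`` stands in for the tool's own model class.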
|
{
"content_hash": "5d768e70a1a3b54fac57d2525eef9cc5",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 108,
"avg_line_length": 37.98701298701299,
"alnum_prop": 0.552022792022792,
"repo_name": "apache/incubator-allura",
"id": "6e073d9b9dc4f5ea71dd3d2c4836ac9f7b84f090",
"size": "9645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Allura/allura/lib/repository.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "155606"
},
{
"name": "JavaScript",
"bytes": "697175"
},
{
"name": "Puppet",
"bytes": "6882"
},
{
"name": "Python",
"bytes": "3667166"
},
{
"name": "Ruby",
"bytes": "5739"
},
{
"name": "Shell",
"bytes": "31675"
},
{
"name": "XSLT",
"bytes": "3357"
}
],
"symlink_target": ""
}
|
import os
from core import path_util
from core import perf_benchmark
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story
from telemetry.value import list_of_scalar_values
from benchmarks import pywebsocket_server
from page_sets import webgl_supported_shared_state
BLINK_PERF_BASE_DIR = os.path.join(path_util.GetChromiumSrcDir(),
'third_party', 'WebKit', 'PerformanceTests')
SKIPPED_FILE = os.path.join(BLINK_PERF_BASE_DIR, 'Skipped')
def CreateStorySetFromPath(path, skipped_file,
shared_page_state_class=(
shared_page_state.SharedPageState)):
assert os.path.exists(path)
page_urls = []
serving_dirs = set()
def _AddPage(path):
if not path.endswith('.html'):
return
if '../' in open(path, 'r').read():
      # If the page references its parent dir, serve files from the
      # grandparent dir so those relative references resolve.
serving_dirs.add(os.path.dirname(os.path.dirname(path)))
page_urls.append('file://' + path.replace('\\', '/'))
def _AddDir(dir_path, skipped):
for candidate_path in os.listdir(dir_path):
if candidate_path == 'resources':
continue
candidate_path = os.path.join(dir_path, candidate_path)
if candidate_path.startswith(skipped):
continue
if os.path.isdir(candidate_path):
_AddDir(candidate_path, skipped)
else:
_AddPage(candidate_path)
if os.path.isdir(path):
skipped = []
if os.path.exists(skipped_file):
for line in open(skipped_file, 'r').readlines():
line = line.strip()
if line and not line.startswith('#'):
skipped_path = os.path.join(os.path.dirname(skipped_file), line)
skipped.append(skipped_path.replace('/', os.sep))
_AddDir(path, tuple(skipped))
else:
_AddPage(path)
ps = story.StorySet(base_dir=os.getcwd() + os.sep,
serving_dirs=serving_dirs)
for url in page_urls:
ps.AddStory(page_module.Page(
url, ps, ps.base_dir,
shared_page_state_class=shared_page_state_class))
return ps
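# For reference, the Skipped file consumed above lists one path per line,
# relative to the file's own directory, with '#' starting a comment line.
# Illustrative contents:
#
#   # Flaky on Android bots
#   Layout/flaky-case.html
#   Canvas
#
# A bare directory entry skips that whole subtree, since skipped paths are
# matched by prefix.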
class _BlinkPerfMeasurement(legacy_page_test.LegacyPageTest):
"""Tuns a blink performance test and reports the results."""
def __init__(self):
super(_BlinkPerfMeasurement, self).__init__()
with open(os.path.join(os.path.dirname(__file__),
'blink_perf.js'), 'r') as f:
self._blink_perf_js = f.read()
def WillNavigateToPage(self, page, tab):
del tab # unused
page.script_to_evaluate_on_commit = self._blink_perf_js
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--js-flags=--expose_gc',
'--enable-experimental-web-platform-features',
'--disable-gesture-requirement-for-media-playback',
'--enable-experimental-canvas-features',
# TODO(qinmin): After fixing crbug.com/592017, remove this command line.
'--reduce-security-for-testing'
])
if 'content-shell' in options.browser_type:
options.AppendExtraBrowserArgs('--expose-internals-for-testing')
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('testRunner.isDone', 600)
log = tab.EvaluateJavaScript('document.getElementById("log").innerHTML')
for line in log.splitlines():
if line.startswith("FATAL: "):
print line
continue
if not line.startswith('values '):
continue
parts = line.split()
values = [float(v.replace(',', '')) for v in parts[1:-1]]
units = parts[-1]
metric = page.display_name.split('.')[0].replace('/', '_')
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, metric, units, values))
break
print log
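# For reference, the harness log parsed above contains a single results line
# of the form "values <v1>, <v2>, ... <unit>", e.g. (illustrative):
#
#   values 12.3, 11.9, 12.1 ms
#
# which yields a ListOfScalarValues of [12.3, 11.9, 12.1] with units 'ms'.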
class _SharedPywebsocketPageState(shared_page_state.SharedPageState):
"""Runs a pywebsocket server."""
def __init__(self, test, finder_options, user_story_set):
super(_SharedPywebsocketPageState, self).__init__(
test, finder_options, user_story_set)
self.platform.StartLocalServer(pywebsocket_server.PywebsocketServer())
class BlinkPerfBindings(perf_benchmark.PerfBenchmark):
tag = 'bindings'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.bindings'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Bindings')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/563979
@benchmark.Enabled('content-shell')
class BlinkPerfBlinkGC(perf_benchmark.PerfBenchmark):
tag = 'blink_gc'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.blink_gc'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'BlinkGC')
return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfCSS(perf_benchmark.PerfBenchmark):
tag = 'css'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.css'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'CSS')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('android-webview', # http://crbug.com/593200
'reference') # http://crbug.com/576779
class BlinkPerfCanvas(perf_benchmark.PerfBenchmark):
tag = 'canvas'
test = _BlinkPerfMeasurement
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/593973.
@classmethod
def Name(cls):
return 'blink_perf.canvas'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Canvas')
story_set = CreateStorySetFromPath(
path, SKIPPED_FILE,
shared_page_state_class=(
webgl_supported_shared_state.WebGLSupportedSharedState))
# WebGLSupportedSharedState requires the skipped_gpus property to
# be set on each page.
for page in story_set:
page.skipped_gpus = []
return story_set
class BlinkPerfDOM(perf_benchmark.PerfBenchmark):
tag = 'dom'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.dom'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'DOM')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('win') # http://crbug.com/588819
class BlinkPerfEvents(perf_benchmark.PerfBenchmark):
tag = 'events'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.events'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Events')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@benchmark.Disabled('win8') # http://crbug.com/462350
class BlinkPerfLayout(perf_benchmark.PerfBenchmark):
tag = 'layout'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.layout'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Layout')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/551950
class BlinkPerfPaint(perf_benchmark.PerfBenchmark):
tag = 'paint'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.paint'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Paint')
return CreateStorySetFromPath(path, SKIPPED_FILE)
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/574483
@benchmark.Disabled('win') # crbug.com/488493
class BlinkPerfParser(perf_benchmark.PerfBenchmark):
tag = 'parser'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.parser'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Parser')
return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfSVG(perf_benchmark.PerfBenchmark):
tag = 'svg'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.svg'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'SVG')
return CreateStorySetFromPath(path, SKIPPED_FILE)
class BlinkPerfShadowDOM(perf_benchmark.PerfBenchmark):
tag = 'shadow_dom'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.shadow_dom'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'ShadowDOM')
return CreateStorySetFromPath(path, SKIPPED_FILE)
# This benchmark is for local testing, doesn't need to run on bots.
@benchmark.Disabled('all')
class BlinkPerfXMLHttpRequest(perf_benchmark.PerfBenchmark):
tag = 'xml_http_request'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.xml_http_request'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'XMLHttpRequest')
return CreateStorySetFromPath(path, SKIPPED_FILE)
# Disabled on Windows and ChromeOS due to https://crbug.com/521887
@benchmark.Disabled('win', 'chromeos')
class BlinkPerfPywebsocket(perf_benchmark.PerfBenchmark):
"""The blink_perf.pywebsocket tests measure turn-around-time of 10MB
send/receive for XHR, Fetch API and WebSocket. We might ignore < 10%
regressions, because the tests are noisy and such regressions are
often unreproducible (https://crbug.com/549017).
"""
tag = 'pywebsocket'
test = _BlinkPerfMeasurement
@classmethod
def Name(cls):
return 'blink_perf.pywebsocket'
def CreateStorySet(self, options):
path = os.path.join(BLINK_PERF_BASE_DIR, 'Pywebsocket')
return CreateStorySetFromPath(
path, SKIPPED_FILE,
shared_page_state_class=_SharedPywebsocketPageState)
@classmethod
def ShouldDisable(cls, possible_browser):
return cls.IsSvelte(possible_browser) # http://crbug.com/551950
|
{
"content_hash": "b4a149b28ab107f159d31d7ac1ff9832",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 80,
"avg_line_length": 30.09467455621302,
"alnum_prop": 0.6936688950058986,
"repo_name": "danakj/chromium",
"id": "3cf3d6550da30fffdbd7ab48a1d29e6c4bea75bc",
"size": "10335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/perf/benchmarks/blink_perf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Support for Clementine Music Player as media player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.clementine/
"""
import asyncio
from datetime import timedelta
import logging
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, PLATFORM_SCHEMA,
SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE, SUPPORT_PLAY, MEDIA_TYPE_MUSIC,
SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_ACCESS_TOKEN,
STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_UNKNOWN)
REQUIREMENTS = ['python-clementine-remote==1.0.1']
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Clementine Remote'
DEFAULT_PORT = 5500
SUPPORT_CLEMENTINE = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_VOLUME_SET | \
SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN, default=None): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
})
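# An illustrative configuration.yaml entry matching the schema above
# (host is required; port, name and access_token are optional):
#
#   media_player:
#     - platform: clementine
#       host: 192.168.1.15
#       port: 5500
#       name: Living Room Clementine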
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Clementine platform."""
from clementineremote import ClementineRemote
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
token = config.get(CONF_ACCESS_TOKEN)
client = ClementineRemote(host, port, token, reconnect=True)
add_devices([ClementineDevice(client, config[CONF_NAME])])
class ClementineDevice(MediaPlayerDevice):
"""Representation of Clementine Player."""
def __init__(self, client, name):
"""Initialize the Clementine device."""
self._client = client
self._name = name
self._muted = False
self._volume = 0.0
self._track_id = 0
self._last_track_id = 0
self._track_name = ''
self._track_artist = ''
self._track_album_name = ''
self._state = STATE_UNKNOWN
def update(self):
"""Retrieve the latest data from the Clementine Player."""
try:
client = self._client
if client.state == 'Playing':
self._state = STATE_PLAYING
elif client.state == 'Paused':
self._state = STATE_PAUSED
elif client.state == 'Disconnected':
self._state = STATE_OFF
else:
self._state = STATE_PAUSED
if client.last_update and (time.time() - client.last_update > 40):
self._state = STATE_OFF
self._volume = float(client.volume) if client.volume else 0.0
if client.current_track:
self._track_id = client.current_track['track_id']
self._track_name = client.current_track['title']
self._track_artist = client.current_track['track_artist']
self._track_album_name = client.current_track['track_album']
        except Exception:
self._state = STATE_OFF
raise
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return current source name."""
source_name = "Unknown"
client = self._client
if client.active_playlist_id in client.playlists:
source_name = client.playlists[client.active_playlist_id]['name']
return source_name
@property
def source_list(self):
"""List of available input sources."""
source_names = [s["name"] for s in self._client.playlists.values()]
return source_names
def select_source(self, source):
"""Select input source."""
client = self._client
sources = [s for s in client.playlists.values() if s['name'] == source]
if len(sources) == 1:
client.change_song(sources[0]['id'], 0)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CLEMENTINE
@property
def media_image_hash(self):
"""Hash value for media image."""
if self._client.current_track:
return self._client.current_track['track_id']
return None
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._client.current_track:
image = bytes(self._client.current_track['art'])
return (image, 'image/png')
return None, None
def volume_up(self):
"""Volume up the media player."""
newvolume = min(self._client.volume + 4, 100)
self._client.set_volume(newvolume)
def volume_down(self):
"""Volume down media player."""
newvolume = max(self._client.volume - 4, 0)
self._client.set_volume(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
self._client.set_volume(0)
def set_volume_level(self, volume):
"""Set volume level."""
self._client.set_volume(int(100 * volume))
def media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.next()
def media_previous_track(self):
"""Send the previous track command."""
self._client.previous()
|
{
"content_hash": "2db7ccf732d425c7c18b6f36b8b978ed",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 31.040178571428573,
"alnum_prop": 0.6109592981446857,
"repo_name": "shaftoe/home-assistant",
"id": "d9688badcd19fd4f986527524dfe50384dfa6fa0",
"size": "6953",
"binary": false,
"copies": "13",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/clementine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1584258"
},
{
"name": "Python",
"bytes": "5479272"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15017"
}
],
"symlink_target": ""
}
|
from . import driver
from . import target
from . import config
from . import asm
import shutil, os
import tempfile, logging
import json
import subprocess
import collections
def assemble(input_file, output_file, args):
return driver.run(config.LLVM['as'],
[input_file, '-o=%s' % output_file])
def compileS(input_file, output_file, args, tool='gcc'):
logging.getLogger().info("%(name)s\n----\n%(file)s" %
{ 'name' : input_file,
'file' : driver.open_input(input_file).read() })
return asm.assemble(input_file, output_file, args)
def compile(input_files, output_file, args, tool='clang'):
ok = driver.run(config.LLVM[tool],
['-emit-llvm'] + args + ['-c'] + input_files +
['-o', output_file])
return ok
def archive(action, inputs, ar):
while action.startswith('-'):
action = action[1:]
return driver.run(config.getStdTool('ar'),
[action, ar] + inputs)
def archive_contents(archive):
p = subprocess.Popen([config.getStdTool('ar'), 't', archive],
stdout=subprocess.PIPE)
# don't forget to remove the trailing \n
return [x.strip('\n\r') for x in p.stdout.readlines()]
def symbols(obj):
p = subprocess.Popen([config.getLLVMTool('nm'), '--format=posix', obj],
stdout=subprocess.PIPE)
# don't forget to remove the trailing \n
result = collections.defaultdict(set)
for x in p.stdout.readlines():
x = x.strip()
        entry = x.split(' ', 2)
        sym, ty = entry[:2]
result[ty].add(sym)
p.wait()
return result
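# For reference, ``nm --format=posix`` emits one "<symbol> <type> [<value>
# [<size>]]" line per symbol, e.g. (illustrative):
#
#   main T 0000000000400526 0000000000000015
#
# so the split above keeps the symbol name and its one-letter type code,
# which is what the defined/undefined bookkeeping below relies on.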
def archive_to_module(input_file, output_file, minimal=None):
SCACHE = {}
def syms(s):
if not SCACHE.has_key(s):
SCACHE[s] = symbols(s)
return SCACHE[s]
DCACHE = {}
def defined(s):
if not DCACHE.has_key(s):
ss = syms(s)
DCACHE[s] = ss['T'].union(ss['t']).union(ss['d']).union(ss['D']).union(ss['W']).union(ss['C'])
return DCACHE[s]
UCACHE = {}
def undefined(s):
if not UCACHE.has_key(s):
ss = syms(s)
UCACHE[s] = ss['U'].union(ss['u']).difference(defined(s))
return UCACHE[s]
output_file = os.path.abspath(output_file)
d = tempfile.mkdtemp()
contents = archive_contents(input_file)
for c in contents:
try:
os.makedirs(os.path.join(d, os.path.dirname(c)))
except:
pass
driver.run(config.getStdTool('ar'), ['x', os.path.abspath(input_file)], wd=d)
    if minimal is not None:
all_contents = contents
undef_symbols = set()
for x in minimal:
undef_symbols = undef_symbols.union(undefined(x)).difference(defined(x))
contents = []
progress = True
while progress:
progress = False
for x in all_contents:
s = os.path.join(d, x)
if not (x in contents):
defed = defined(s)
if not undef_symbols.isdisjoint(defed):
contents = contents + [x]
undef_symbols = undef_symbols.union(undefined(s)).difference(defed)
progress = True
driver.run(config.LLVM['ld'],
['-link-as-library', '-o=%s' % output_file] + \
[os.path.join(d, x) for x in contents])
shutil.rmtree(d)
class LibNotFound (Exception):
def __init__(self, lib, path):
self._lib = lib
self._path = path
def __str__(self):
return "Could not find library %s on path %s" % (self._lib, ':'.join(self._path))
def getSearchPath():
args = [config.LLVM['clang'], '-print-search-dirs']
proc = subprocess.Popen(args,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
(stdout,stderr) = proc.communicate()
for line in stdout.splitlines():
if line.startswith('libraries: ='):
return line[12:]
return None
def findlib(l, paths):
if l.startswith('-l'):
return l
#HOME = os.getenv('OCCAM_HOME')
#paths += ['',
# '%s/root/lib' % HOME,
# '%s/root/usr/lib' % HOME]
#if 'OCCAM_LIB_PATH' in os.environ:
# for libpath in os.environ['OCCAM_LIB_PATH'].split(os.pathsep):
# paths.append(libpath)
if 'OCCAM_LIB_PATH' in os.environ:
paths.extend(os.environ['OCCAM_LIB_PATH'].split(os.pathsep))
for p in paths:
if p == '':
path = l
else:
path = os.path.join(p, 'lib%s.a' % l)
if os.path.exists(path):
return path
search = getSearchPath()
if search is not None:
paths = search.split(os.pathsep)
for p in paths:
if p == '':
path = l
else:
path = os.path.join(p, 'lib%s.a' % l)
if os.path.exists(path):
return path
logging.getLogger().warning("Couldn't find bitcode library %s", l)
return None
# raise LibNotFound(l,paths)
def bundle(output, inputs, libs, paths):
args = ['-link-as-library', '-o=%s' % output] + libs
args += inputs + [x for x in [findlib(x, paths) for x in libs]
if not (x is None)]
return driver.run(config.LLVM['ld'], args)
def clean(input_file, output_file):
# TODO: I shouldn't need to do this, but I'm getting \01 characters
# on some of my symbol names...
return driver.previrt(input_file, output_file, ['-Pfix-1function'])
# shutil.copyfile(input_file, output_file)
# return 0
NATIVE_LIBS=['pthread'] #'c','m','resolv','nsl','crypt','pthread','dl', 'rt']
def libarg(l, paths):
lib = findlib('%s.bc' % l, paths)
if lib is not None:
return lib
else:
return '-l%s' % l
def haveBitcode(l, paths):
return findlib('%s.bc' % l, paths) is not None
def manifest(filename, exe, lib, libs=[], nativelibs=[], search=[], shared=[]):
data = { 'binary' : exe
, 'modules' : lib
, 'libs' : libs
, 'native-libs' : nativelibs
, 'search' : search
, 'shared' : shared
}
mani = open(filename, 'w')
mani.write(json.dumps(data,indent=0))
mani.close()
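# For reference, manifest() above emits JSON of roughly the following shape
# (the concrete values in this sketch are made up for illustration only):
#   {"binary": "a.out", "modules": ["a.out.manifest"], "libs": ["-lfoo"],
#    "native-libs": ["-lc"], "search": ["/usr/lib"], "shared": []}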
def link(inputs, output_file, args, save=None, link=True):
#assert not link
paths = [x[2:] for x in args if x.startswith('-L')]
if save is None:
save = output_file + '.manifest'
#libraries = [libarg(x[2:], paths) for x in args if x.startswith('-l')]
#nativelibs = [x for x in libraries if x.startswith('-l')]
#libraries = [x for x in libraries if x not in set(nativelibs)]
libraries = [x for x in args if x.startswith('-l')]
    nativelibs = []  # collected below from the well-known native-only libraries
if '-lc' in args:
nativelibs.append('-lc')
if '-lstdc++' in args:
nativelibs.append('-lstdc++')
if '-lpthread' in args or '-pthread' in args:
nativelibs.extend(['-lpthread'])
libraries = [x for x in libraries if x not in set(nativelibs)]
#temporary = False
retcode = 0
if len(libraries) > 0 or len(inputs) > 1:
#tout = tempfile.NamedTemporaryFile(suffix='.bc', delete=False)
#tout.close()
#tout = tout.name
#temporary = True
#inputs = inputs
if not (save is None):
main_name = save
# Bundle the non-library inputs into a single module
retcode = bundle(main_name, inputs, [], [])
if retcode != 0:
return retcode
logging.getLogger().info("Bundled executable '%(exe)s' into '%(mod)s' from %(libs)s",
{'exe' : output_file,
'mod' : main_name,
'libs' : inputs.__str__() })
if 'OCCAM_LIB_PATH' in os.environ:
paths.extend(os.environ['OCCAM_LIB_PATH'].split(os.pathsep))
search = getSearchPath()
if search is not None:
paths.extend(search.split(os.pathsep))
def soTolFlag(so):
            libname = os.path.basename(so)[:-3]  # strip the '.so' suffix
if libname.startswith('lib'):
libname = "-l%s" % libname[3:]
return libname
manifest('%s.manifest' % main_name,
output_file,
[main_name],
libs = libraries,
nativelibs = nativelibs,
search = paths,
shared = [soTolFlag(x) for x in inputs if x.endswith('.so')])
# shared=['%s.bc.a' % x[:-3] for x in args if x.endswith('.so')])
#retcode = bundle(tout, inputs, libraries, paths)
#if retcode != 0:
# return retcode
#else:
#temporary = False
#tout = inputs[0]
if link and '.o' not in output_file:
args = ["%s.manifest" % main_name, "%s" % output_file]
print "Calling build with %s" % ' '.join(args)
logging.getLogger().info("Calling build with %s" % ' '.join(args))
retcode = target.run(args, tool='build')
# def linker_option(x):
# if x.startswith('-O') or x.startswith('-D') or x.startswith('-I'):
# return None
# if x.startswith('-Wl'):
# return x[4:]
# if x.startswith('-l') and x[2:] in NATIVE_LIBS:
# return x
# if x.startswith('-l'):
# # This was included with a direct path
# return None
# if x in ['-pthread','-pthreads']:
# return '-lpthread'
# if x in ['--export-dynamic', '-export-dynamic']:
# return x
# return None
# cmd = ['-native', '-o=%s.llvm' % output_file, tout] + \
# [linker_option(x) for x in args
# if not (linker_option(x) is None)]
#
# retcode = driver.run(config.LLVM['ld'], cmd)
#if temporary:
# os.unlink(tout)
return retcode
def watch(input_file, output_file, watches,
local=False, fancy=False, failName='exit'):
def to_bool(x, s):
if x:
return [s]
else:
return []
extra_args = to_bool(local, '-Pwatch2-local') + \
to_bool(fancy, '-Pwatch2-fancy') + \
['-Pwatch2-fail', failName]
return driver.previrt(input_file, output_file,
['-Pwatch2'] +
driver.all_args('-Pwatch2-input', watches) +
extra_args)
def callgraph(input_file, output_file):
return driver.previrt(input_file, '/dev/null',
['-dot-callgraph-output', output_file])
|
{
"content_hash": "b683fe2227e5736bc4f04030e6525740",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 106,
"avg_line_length": 36.4271523178808,
"alnum_prop": 0.5193164257794746,
"repo_name": "Wajihulhassan/SelfContainedPrevirt",
"id": "e28abd342b90a924b6544607040705ee3586c898",
"size": "12722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/occam/occam/toolchain.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7600"
},
{
"name": "C++",
"bytes": "250635"
},
{
"name": "Shell",
"bytes": "2442"
}
],
"symlink_target": ""
}
|
"""
The classes in this file are interfaces for metrics. They are not intended
to be subclassed or created directly by users. To work with and access metrics,
users should use the classes and methods exposed in metric.py.
Available classes:
- Metric - Base interface of a metrics object.
- Counter - Counter metric interface. Allows a count to be incremented or
decremented during pipeline execution.
- Distribution - Distribution Metric interface. Allows statistics about the
distribution of a variable to be collected during pipeline execution.
- Gauge - Gauge Metric interface. Allows to track the latest value of a
variable during pipeline execution.
- MetricName - Namespace and name used to refer to a Metric.
"""
# pytype: skip-file
from __future__ import absolute_import
from builtins import object
__all__ = ['Metric', 'Counter', 'Distribution', 'Gauge', 'MetricName']
class MetricName(object):
"""The name of a metric.
The name of a metric consists of a namespace and a name. The namespace
allows grouping related metrics together and also prevents collisions
between multiple metrics of the same name.
"""
def __init__(self, namespace, name):
"""Initializes ``MetricName``.
Args:
namespace: A string with the namespace of a metric.
name: A string with the name of a metric.
"""
if not namespace:
raise ValueError('Metric namespace must be non-empty')
if not name:
raise ValueError('Metric name must be non-empty')
self.namespace = namespace
self.name = name
def __eq__(self, other):
return self.namespace == other.namespace and self.name == other.name
def __ne__(self, other):
# TODO(BEAM-5949): Needed for Python 2 compatibility.
return not self == other
def __str__(self):
return 'MetricName(namespace={}, name={})'.format(self.namespace, self.name)
def __hash__(self):
return hash((self.namespace, self.name))
class Metric(object):
"""Base interface of a metric object."""
pass
class Counter(Metric):
"""Counter metric interface. Allows a count to be incremented/decremented
during pipeline execution."""
def inc(self, n=1):
raise NotImplementedError
def dec(self, n=1):
self.inc(-n)
class Distribution(Metric):
"""Distribution Metric interface.
Allows statistics about the distribution of a variable to be collected during
pipeline execution."""
def update(self, value):
raise NotImplementedError
class Gauge(Metric):
"""Gauge Metric interface.
Allows tracking of the latest value of a variable during pipeline
execution."""
def set(self, value):
raise NotImplementedError
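# --- Illustrative usage sketch (not part of the original module) ---
# A toy Counter built directly on the interfaces above, assuming only what
# this file defines; real pipelines use the concrete classes behind metric.py.
class _DemoCounter(Counter):
  """Minimal in-memory Counter used purely for demonstration."""
  def __init__(self, metric_name):
    self.metric_name = metric_name
    self.value = 0
  def inc(self, n=1):
    self.value += n
if __name__ == '__main__':
  counter = _DemoCounter(MetricName('demo', 'items_processed'))
  counter.inc(3)
  counter.dec()  # dec() is inherited from Counter and simply calls inc(-n)
  assert counter.value == 2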
|
{
"content_hash": "59c2e9b0e38faee894676a7895cc2eca",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 80,
"avg_line_length": 28.92391304347826,
"alnum_prop": 0.7147688838782412,
"repo_name": "iemejia/incubator-beam",
"id": "e08872f193e3cf664843f65c0164fb5b7d93df88",
"size": "3446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/metrics/metricbase.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
}
|
import os
from fabric.api import env, task, local, prefix, lcd
env.repo_url = 'https://github.com/tianyi33/fabric_presentation.git'
env.db_name = 'fabric_presentation'
env.requirement_file = 'requirements.txt'
env.project_path = os.path.abspath(os.path.dirname(__file__))
# Fall back to ./venv when no virtualenv_path was supplied (e.g. via --set).
env.virtualenv_path = (env.get('virtualenv_path')
                       or os.path.join(env.project_path, 'venv'))
@task
def create_virtualenv():
local('virtualenv %s' % env.virtualenv_path)
@task
def pip_install():
with prefix('source %s/bin/activate' % env.virtualenv_path):
local('pip install -r %s' % env.requirement_file)
@task
def create_db():
local('createdb %s' % env.db_name)
@task
def start_project():
with prefix('source %s/bin/activate' % env.virtualenv_path):
local('django-admin.py startproject %s' % env.django_project_name)
@task
def start_app():
with lcd('%s/%s' % (env.project_path, env.django_project_name)):
with prefix('source %s/bin/activate' % env.virtualenv_path):
local('python manage.py startapp %s' % env.django_app_name)
@task
def initialize_repo():
local('git init')
create_git_ignore_file()
local('git add -A')
local('git commit -m "initial project version"')
local('git remote add origin %s' % env.repo_url)
local('git push -u origin master')
@task
def create_git_ignore_file():
with open('./.gitignore', 'w') as output_file:
output_file.write('*.pyc\n*.pyo\nvenv\n')
@task
def bootstrap(new_project=False):
"""
    Bootstrap a project; works for both new and existing projects.
    If new_project is True, a new project will also be created.
"""
create_virtualenv()
pip_install()
create_db()
if new_project:
start_project()
start_app()
initialize_repo()
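# --- Illustrative invocations (Fabric 1.x CLI; the values are hypothetical) ---
#   fab --set virtualenv_path=./venv bootstrap
#   fab --set django_project_name=mysite,django_app_name=notes bootstrap:new_project=True
# Note: task arguments arrive as strings, so any non-empty value passed for
# new_project is treated as truthy by bootstrap().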
|
{
"content_hash": "85062b6334c1f6e1f80b17d16f5e6318",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 26.12676056338028,
"alnum_prop": 0.6420485175202156,
"repo_name": "tianyi33/tweetnotes",
"id": "b61d2866aa125f550c65583fad082f5554f43eaf",
"size": "1855",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fabfile_local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6993"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
# make a square figure and axes
# pie chart looks best in square figures
# otherwise it looks like ellipses
plt.figure(1, figsize=(8, 8))
ax = plt.axes([0.1, 0.1, 0.8, 0.8])
# The slices will be ordered and plotted counter-clockwise.
labels = 'Spring', 'Summer', 'Autumn', 'Winter'
values = [15, 16, 16, 28]
explode =[0.1, 0.1, 0.1, 0.1]
# make a pie
plt.pie(values, explode=explode, labels=labels,
autopct='%1.1f%%', startangle=67)
plt.title('Rainy days by season')
plt.show()
|
{
"content_hash": "3047a5a0c7e6e7dfc1c0871b204db113",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 59,
"avg_line_length": 26,
"alnum_prop": 0.6807692307692308,
"repo_name": "pletisan/python-data-viz-cookbook",
"id": "f20f97378ff71d4a3d9e2795f6a357298773f355",
"size": "520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3367OS_Code/3367OS_03_Code/3367OS_03_CODE_1stDraft/ch03-code/ch03-rec13-pie-chart.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Periscope API for the masses
"""
from periapi.broadcast import Broadcast
class Listener:
"""Class to check notifications stream for new broadcasts and return new broadcast ids"""
def __init__(self, api, check_backlog=False, cap_invited=False):
self.api = api
self.follows = set([i['username'] for i in self.api.following])
self.config = self.api.session.config
self.check_backlog = check_backlog
self.cap_invited = cap_invited
self.no_dls_yet = True
def check_for_new(self):
"""Check for new broadcasts"""
current_notifications = self.api.notifications
if len(current_notifications) == 0:
return None
new_broadcasts = self.process_notifications(current_notifications)
if len(new_broadcasts) == 0:
return None
return new_broadcasts
def process_notifications(self, notifications):
"""Process list of broadcasts obtained from notifications API endpoint."""
new_broadcasts = list()
new = self.new_follows()
for i in notifications:
broadcast = Broadcast(self.api, i)
if self.check_if_wanted(broadcast, new):
new_broadcasts.append(broadcast)
if self.check_backlog:
self.check_backlog = False
if len(new_broadcasts) > 0:
if self.no_dls_yet:
self.no_dls_yet = False
self.update_latest_broadcast_time(new_broadcasts)
return new_broadcasts
def check_if_wanted(self, broadcast, new_follow):
"""Check if broadcast in notifications string is desired for download"""
if not (broadcast.islive or broadcast.isreplay):
return None
if self.check_backlog or broadcast.isnewer or (broadcast.islive and self.no_dls_yet):
if self.cap_invited or (broadcast.username in self.follows):
return True
elif new_follow and broadcast.username in new_follow:
return True
return None
def new_follows(self):
"""Get set of new follows since last check"""
cur_follows = set([i['username'] for i in self.api.following])
new_follows = cur_follows - self.follows
self.follows = cur_follows
if len(new_follows) > 0:
return new_follows
return None
def update_latest_broadcast_time(self, broadcasts):
"""Get most recent broadcast time from iterable of broadcast objects"""
for broadcast in broadcasts:
if broadcast.isnewer or not self.last_new_bc:
self.last_new_bc = broadcast.start
@property
def last_new_bc(self):
"""Get the ATOM timestamp of when the last new broadcast was found."""
return self.config.get('last_check')
@last_new_bc.setter
def last_new_bc(self, when):
"""Set the ATOM timestamp of when the last new broadcast was found."""
self.config['last_check'] = when
self.config.write()
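# --- Illustrative usage sketch (not part of the original module) ---
# A minimal stand-in API object exposing the attributes this class relies on
# (`following`, `notifications`, `session.config`); it is purely hypothetical
# and exists only so the listener can be exercised outside Periscope.
if __name__ == '__main__':
    class _Config(dict):
        def write(self):
            pass
    class _Session:
        config = _Config()
    class _StubAPI:
        following = [{'username': 'alice'}]
        notifications = []
        session = _Session()
    listener = Listener(_StubAPI())
    assert listener.check_for_new() is None  # no notifications yet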
|
{
"content_hash": "e8447c14c63baff38bbd38ce5097d4b0",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 93,
"avg_line_length": 31.604166666666668,
"alnum_prop": 0.6209624258404747,
"repo_name": "baliscope/periapi",
"id": "242a9bc212193293072f140677905bea29530f7b",
"size": "3057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "periapi/listener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59858"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import imp
import os.path
import sys
import cffi.vengine_cpy
import cffi.vengine_gen
def _get_so_suffixes():
suffixes = []
for suffix, mode, type in imp.get_suffixes():
if type == imp.C_EXTENSION:
suffixes.append(suffix)
if not suffixes:
# bah, no C_EXTENSION available. Occurs on pypy without cpyext
if sys.platform == 'win32':
suffixes = [".pyd"]
else:
suffixes = [".so"]
return suffixes
def vengine_cpy_find_module(self, module_name, path, so_suffix):
# We will ignore so_suffix and get it ourselves
so_suffixes = _get_so_suffixes()
try:
f, filename, descr = imp.find_module(module_name, path)
except ImportError:
return None
if f is not None:
f.close()
# Note that after a setuptools installation, there are both .py
# and .so files with the same basename. The code here relies on
# imp.find_module() locating the .so in priority.
if descr[0] not in so_suffixes:
return None
return filename
def vengine_gen_find_module(self, module_name, path, so_suffixes):
# We will ignore so_suffix and get it ourselves
so_suffixes = _get_so_suffixes()
for so_suffix in so_suffixes:
basename = module_name + so_suffix
if path is None:
path = sys.path
for dirname in path:
filename = os.path.join(dirname, basename)
if os.path.isfile(filename):
return filename
cffi.vengine_cpy.VCPythonEngine.find_module = vengine_cpy_find_module
cffi.vengine_gen.VGenericEngine.find_module = vengine_gen_find_module
|
{
"content_hash": "30e9530c74f2b52b4bcbbd1c93d5b239",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 28.131147540983605,
"alnum_prop": 0.6404428904428905,
"repo_name": "alex/pynacl",
"id": "62772202f6195abd82c67040e0c2fc390c902ab9",
"size": "2318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nacl/_cffi_fix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "17958"
},
{
"name": "C",
"bytes": "558078"
},
{
"name": "C++",
"bytes": "15818"
},
{
"name": "Python",
"bytes": "76544"
},
{
"name": "Shell",
"bytes": "341585"
},
{
"name": "Visual Basic",
"bytes": "294"
}
],
"symlink_target": ""
}
|
import thoonk
from thoonk.feeds import Queue
import unittest
from ConfigParser import ConfigParser
class TestQueue(unittest.TestCase):
def setUp(self):
conf = ConfigParser()
conf.read('test.cfg')
if conf.sections() == ['Test']:
self.ps = thoonk.Thoonk(host=conf.get('Test', 'host'),
port=conf.getint('Test', 'port'),
db=conf.getint('Test', 'db'))
self.ps.redis.flushdb()
else:
print 'No test configuration found in test.cfg'
exit()
def test_basic_queue(self):
"""Test basic QUEUE publish and retrieve."""
q = self.ps.queue("testqueue")
self.assertEqual(q.__class__, Queue)
q.put("10")
q.put("20")
q.put("30")
q.put("40")
r = []
for x in range(0,4):
r.append(q.get(timeout=2))
self.assertEqual(r, ["10", "20", "30", "40"], "Queue results did not match publish.")
suite = unittest.TestLoader().loadTestsFromTestCase(TestQueue)
|
{
"content_hash": "191b2d17b4b46aa97f43d202aea07a6b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 93,
"avg_line_length": 30.942857142857143,
"alnum_prop": 0.5373961218836565,
"repo_name": "andyet/thoonk.py",
"id": "49ee5830b1bc2fa89d0613b7252ce8c161319ac0",
"size": "1083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89130"
}
],
"symlink_target": ""
}
|
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import moving_averages
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import variables
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class BatchNormalization(base._Layer): # pylint: disable=protected-access
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: A string, the name of the layer.
"""
def __init__(self,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
trainable=True,
name=None,
**kwargs):
super(BatchNormalization, self).__init__(
name=name, trainable=trainable, **kwargs)
self.axis = axis
self.momentum = momentum
self.epsilon = epsilon
self.center = center
self.scale = scale
self.beta_initializer = beta_initializer
self.gamma_initializer = gamma_initializer
self.moving_mean_initializer = moving_mean_initializer
self.moving_variance_initializer = moving_variance_initializer
self.beta_regularizer = beta_regularizer
self.gamma_regularizer = gamma_regularizer
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if not input_shape.ndims:
raise ValueError('Input has undefined rank:', input_shape)
ndim = len(input_shape)
if self.axis < 0:
axis = ndim + self.axis
else:
axis = self.axis
if axis < 0 or axis >= ndim:
raise ValueError('Value of `axis` argument ' + str(self.axis) +
' is out of range for input with rank ' + str(ndim))
param_dim = input_shape[axis]
if not param_dim.value:
raise ValueError('Input has undefined `axis` dimension. Input shape: ',
input_shape)
if self.center:
self.beta = vs.get_variable('beta',
shape=(param_dim,),
initializer=self.beta_initializer,
regularizer=self.beta_regularizer,
trainable=True)
else:
self.beta = None
if self.scale:
self.gamma = vs.get_variable('gamma',
shape=(param_dim,),
initializer=self.gamma_initializer,
regularizer=self.gamma_regularizer,
trainable=True)
else:
self.gamma = None
# Disable variable partitioning when creating the moving mean and variance
partitioner = vs.get_variable_scope().partitioner
try:
vs.get_variable_scope().set_partitioner(None)
self.moving_mean = vs.get_variable(
'moving_mean',
shape=(param_dim,),
initializer=self.moving_mean_initializer,
trainable=False)
self.moving_variance = vs.get_variable(
'moving_variance',
shape=(param_dim,),
initializer=self.moving_variance_initializer,
trainable=False)
finally:
vs.get_variable_scope().set_partitioner(partitioner)
def call(self, inputs, training=False):
# First, compute the axes along which to reduce the mean / variance,
# as well as the broadcast shape to be used for all parameters.
input_shape = inputs.get_shape()
ndim = len(input_shape)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis].value
# Determines whether broadcasting is needed.
needs_broadcasting = (sorted(reduction_axes) != range(ndim)[:-1])
# Determine a boolean value for `training`: could be True, False, or None.
training_value = utils.constant_value(training)
if needs_broadcasting:
      # In this case we must explicitly broadcast all parameters.
if self.center:
broadcast_beta = array_ops.reshape(self.beta, broadcast_shape)
else:
broadcast_beta = None
if self.scale:
broadcast_gamma = array_ops.reshape(self.gamma, broadcast_shape)
else:
broadcast_gamma = None
if training_value is not False:
if needs_broadcasting:
broadcast_mean, broadcast_variance = nn.moments(
inputs, reduction_axes, keep_dims=True)
mean = array_ops.reshape(broadcast_mean, [-1])
variance = array_ops.reshape(broadcast_variance, [-1])
else:
mean, variance = nn.moments(inputs, reduction_axes)
# Prepare updates if necessary.
if not self.updates:
mean_update = moving_averages.assign_moving_average(
self.moving_mean, mean, self.momentum, zero_debias=False)
variance_update = moving_averages.assign_moving_average(
self.moving_variance, variance, self.momentum, zero_debias=False)
# In the future this should be refactored into a self.add_update
# methods in order to allow for instance-based BN layer sharing
# across unrelated input streams (e.g. like in Keras).
self.updates.append(mean_update)
self.updates.append(variance_update)
# Normalize batch. We do this inside separate functions for training
# and inference so as to avoid evaluating both branches.
def normalize_in_test():
if needs_broadcasting:
broadcast_moving_mean = array_ops.reshape(self.moving_mean,
broadcast_shape)
broadcast_moving_variance = array_ops.reshape(self.moving_variance,
broadcast_shape)
return nn.batch_normalization(inputs,
broadcast_moving_mean,
broadcast_moving_variance,
broadcast_beta,
broadcast_gamma,
self.epsilon)
else:
return nn.batch_normalization(inputs,
self.moving_mean,
self.moving_variance,
self.beta if self.center else None,
self.gamma if self.scale else None,
self.epsilon)
def normalize_in_training():
if needs_broadcasting:
return nn.batch_normalization(inputs,
broadcast_mean,
broadcast_variance,
broadcast_beta,
broadcast_gamma,
self.epsilon)
else:
return nn.batch_normalization(inputs,
mean,
variance,
self.beta if self.center else None,
self.gamma if self.scale else None,
self.epsilon)
return utils.smart_cond(training,
normalize_in_training,
normalize_in_test)
def batch_normalization(inputs,
axis=-1,
momentum=0.99,
epsilon=1e-3,
center=True,
scale=True,
beta_initializer=init_ops.zeros_initializer(),
gamma_initializer=init_ops.ones_initializer(),
moving_mean_initializer=init_ops.zeros_initializer(),
moving_variance_initializer=init_ops.ones_initializer(),
beta_regularizer=None,
gamma_regularizer=None,
training=False,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the batch normalization layer.
Reference: http://arxiv.org/abs/1502.03167
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
inputs: Tensor input.
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Convolution2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(normalized with statistics of the current batch) or in inference mode
(normalized with moving statistics).
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
"""
layer = BatchNormalization(
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=beta_initializer,
gamma_initializer=gamma_initializer,
moving_mean_initializer=moving_mean_initializer,
moving_variance_initializer=moving_variance_initializer,
beta_regularizer=beta_regularizer,
gamma_regularizer=gamma_regularizer,
trainable=trainable,
name=name,
_reuse=reuse,
_scope=name)
return layer.apply(inputs, training=training)
# Aliases
BatchNorm = BatchNormalization
batch_norm = batch_normalization
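# --- Illustrative usage sketch (not part of the original file) ---
# Graph-construction demo of the functional alias defined above; the input
# shape is arbitrary and `training=True` selects batch statistics. It relies
# only on modules already imported at the top of this file (TF 1.x style).
if __name__ == '__main__':
  x = array_ops.placeholder(dtypes.float32, shape=(None, 4))
  y = batch_normalization(x, axis=-1, momentum=0.9, training=True)
  print(y.get_shape())  # the output shape matches the input shape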
|
{
"content_hash": "5c0b49972afba8fb08cccdf957268366",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 80,
"avg_line_length": 41.93485342019544,
"alnum_prop": 0.6172129874164983,
"repo_name": "MycChiu/tensorflow",
"id": "19f6a9854788065d918e8b17d8faa627b61f3ff5",
"size": "13614",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/python/layers/normalization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175009"
},
{
"name": "C++",
"bytes": "21518650"
},
{
"name": "CMake",
"bytes": "130133"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "277432"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64764"
},
{
"name": "Protocol Buffer",
"bytes": "197812"
},
{
"name": "Python",
"bytes": "17851708"
},
{
"name": "Shell",
"bytes": "319872"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
}
|
"""Contains the TaskNotRunningDep."""
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.session import provide_session
from airflow.utils.state import State
class TaskNotRunningDep(BaseTIDep):
"""Ensures that the task instance's state is not running."""
NAME = "Task Instance Not Running"
IGNOREABLE = False
def __eq__(self, other):
return type(self) == type(other) # pylint: disable=C0123
def __hash__(self):
return hash(type(self))
@provide_session
def _get_dep_statuses(self, ti, session, dep_context=None):
if ti.state != State.RUNNING:
yield self._passing_status(reason="Task is not in running state.")
return
yield self._failing_status(
reason='Task is in the running state')
|
{
"content_hash": "1efbb46f6e218537b7d401afea7c5044",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 31.153846153846153,
"alnum_prop": 0.6604938271604939,
"repo_name": "mtagle/airflow",
"id": "10c5d31c11a3914a7912c146ca524e2da8505751",
"size": "1597",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/ti_deps/deps/task_not_running_dep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import cPickle as pickle
import json
import pandas as pd
import numpy as np
from rq.job import Status
from kitchensink.testutils.testrpc import make_rpc, dummy_add
from kitchensink.serialization import (json_serialization,
pickle_serialization,
pack_rpc_call,
unpack_result)
def test_rpc():
a = pd.DataFrame({'a' : [1,2]})
b = pd.DataFrame({'a' : [0,1]})
rpc = make_rpc()
args = (a,b)
kwargs = {}
metadata = {'result_fmt' : 'cloudpickle',
'async' : False,
'func_string' : 'dummy_add'}
data = {'args' : args,
'kwargs' : kwargs}
msg = pack_rpc_call(metadata, data, fmt='cloudpickle')
result = rpc.call(msg)
msg_format, [metadata, result] = unpack_result(result)
status = metadata['status']
assert status == Status.FINISHED
result = result == dummy_add(a, b)
assert np.all(result)
def test_rpc_json():
a = 1
b = 2
rpc = make_rpc()
args = (a,b)
kwargs = {}
metadata = {'result_fmt' : 'json',
'async' : False,
'func_string' : 'dummy_add'}
data = {
'args' : args,
'kwargs' : kwargs}
msg = pack_rpc_call(metadata, data, fmt='cloudpickle')
result = rpc.call(msg)
msg_format, [metadata, result] = unpack_result(result)
status = metadata['status']
assert status == Status.FINISHED
assert metadata['result_fmt'] == 'json'
assert result == 3
def test_rpc_error():
a = 1
b = "sdf"
rpc = make_rpc()
args = (a,b)
kwargs = {}
metadata = {'result_fmt' : 'json',
'async' : False,
'func_string' : 'dummy_add',
}
data = {
'args' : args,
'kwargs' : kwargs}
msg = pack_rpc_call(metadata, data, fmt='json')
result = rpc.call(msg)
msg_format, [metadata, result] = unpack_result(result)
status = metadata['status']
assert status == Status.FAILED
assert metadata['result_fmt'] == 'json'
    print(metadata['error'])
|
{
"content_hash": "be0534c0761cf4910698c7bd3668aba1",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 61,
"avg_line_length": 29.794520547945204,
"alnum_prop": 0.5388505747126436,
"repo_name": "hhuuggoo/kitchensink",
"id": "676001b893fb650af9615e3b131801474854585d",
"size": "2175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitchensink/rpc/tests/rpc_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "806"
},
{
"name": "CSS",
"bytes": "125713"
},
{
"name": "HTML",
"bytes": "14465"
},
{
"name": "JavaScript",
"bytes": "146752"
},
{
"name": "Python",
"bytes": "161023"
},
{
"name": "Shell",
"bytes": "779"
}
],
"symlink_target": ""
}
|
"""Certificate chain where the target certificate sets the extended key usage
to clientAuth. Neither the root nor the intermediate have an EKU."""
import sys
sys.path += ['../..']
import gencerts
# Self-signed root certificate.
root = gencerts.create_self_signed_root_certificate('Root')
# Intermediate certificate.
intermediate = gencerts.create_intermediate_certificate('Intermediate', root)
# Target certificate.
target = gencerts.create_end_entity_certificate('Target', intermediate)
target.get_extensions().set_property('extendedKeyUsage', 'anyExtendedKeyUsage')
chain = [target, intermediate, root]
gencerts.write_chain(__doc__, chain, 'chain.pem')
|
{
"content_hash": "f45e4c10a7c47152c88e8c7e3ebd99de",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 33.05,
"alnum_prop": 0.7685325264750378,
"repo_name": "chromium/chromium",
"id": "319aaf8a864b7f385f744eff40c7196e4be8f0ea",
"size": "823",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "net/data/verify_certificate_chain_unittest/target-eku-any/generate-chains.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import os
import sys
import json
# import requests
from flask import Flask, request, jsonify
from config import get_DB_URL
from play_new_game import play_game
import random as rd
from models.gen_work_history import gen_rand_work_history
from models.worker_level import worker_level
from models.worker import worker
from models.work_history import work_history
from models.game import game
from models.ranking import rank
from notify_hole_results import get_hole_results, get_hole_details
import datetime as dt
from notify_game_results import notify_game_results
app = Flask(__name__)
DB_URL = get_DB_URL()
print("DB URL at server.py: %s" % DB_URL)
@app.route('/', methods=['GET'])
def verify():
# when the endpoint is registered as a webhook, it must echo back
# the 'hub.challenge' value it receives in the query arguments
if request.args.get("hub.mode") == "subscribe" and request.args.get("hub.challenge"):
if not request.args.get("hub.verify_token") == os.environ["VERIFY_TOKEN"]:
return "Verification token mismatch", 403
return request.args["hub.challenge"], 200
return "This web server contains APIs for gamification", 200
@app.route('/golfgame-api/getlevels', methods=['GET'])
def webhook1():
params = request.json
print(params)
if 'username' in params:
user_name = params['username']
if user_name != "All":
wk = worker(DB_URL)
wkl = worker_level(DB_URL)
user_id = wk.get_by_username(user_name)
levels = wkl.get_by_id(user_id)
print(levels)
return jsonify({"username": user_name, "levels": levels}), 200
else:
return "No username", 200
else:
return "No params", 200
@app.route('/golfgame-api/playgame', methods=['POST'])
def webhook():
params = request.json
hole = params['hole']
user_name = params['username']
if user_name != "All":
game = play_game(hole, user_name)
game_data = game.start_game()
if game_data:
return jsonify(game_data), 200
else:
return "No game data", 200
else:
return "No username", 200
return "OK", 200
@app.route('/golfgame-api/work-history', methods=['GET'])
def rand_work_history():
work_history = gen_rand_work_history((1, 8, 2017), (31, 8, 2017))
return jsonify(work_history), 200
@app.route('/golfgame-api/work-history', methods=['POST'])
def insert_work_history():
params = request.json
work = {
'USR_ID': params['USR_ID'],
'PROC_NM': params['PROC_NM'],
'ST_DT': dt.datetime.strptime(params['ST_DT'], "%b %d, %Y %H:%M:%S"),
'END_DT': dt.datetime.strptime(params['END_DT'], "%b %d, %Y %H:%M:%S"),
'LD_TM': params['LD_TM'],
'CRE_DT': dt.datetime.today()
}
for usr in work['USR_ID']:
        tmp = dict(work)  # copy so each user's record stays independent
wk = worker(DB_URL)
wrk_hist = work_history(DB_URL)
tmp['USR_ID'] = wk.get_by_username(usr)
wrk_hist.insert_to(tmp)
return "ok", 200
@app.route('/golfgame-api/notify-hole-results', methods=['POST'])
def notify_hole_results():
params = request.json
user_name = params['username']
leage_name = params['leagename']
start_date = dt.date(2017, 9, 11)
end_date = dt.date(2017, 9, 15)
wk = worker(DB_URL)
worker_id = wk.get_by_username(user_name)
    parse_params = {
        'league_name': leage_name,
        'start_date': start_date,
        'end_date': end_date,
        'worker_id': worker_id
    }
res = get_hole_results(parse_params)
if res:
return jsonify(res), 200
else:
return "No data found", 200
@app.route('/golfgame-api/notify-hole-details', methods=['POST'])
def notify_hole_details():
params = request.json
hole_id = params['holeId']
if hole_id:
res = get_hole_details(hole_id)
if res:
return jsonify(res), 200
else:
return "No data found", 200
else:
return "No hole id received", 200
@app.route('/golfgame-api/notify-game-results', methods=['POST'])
def notify_game_results_api():
params = request.json
user_name = params['username']
leage_name = params['leagename']
start_date = dt.date(2017, 9, 11)
end_date = dt.date(2017, 9, 15)
wk = worker(DB_URL)
worker_id = wk.get_by_username(user_name)
if user_name:
gm_result = notify_game_results(leage_name, start_date, end_date)
game_results = gm_result.call_cal_game_results(gm_result, worker_id)
else:
return "No user name", 200
return jsonify(game_results), 200
@app.route('/golfgame-api/notify-game-ranking', methods=['POST'])
def notify_ranking():
params = request.json
user_name = params['username']
leage_name = params['leagename']
start_date = dt.date(2017, 9, 11)
end_date = dt.date(2017, 9, 15)
if user_name:
rnk = rank(DB_URL)
ranking_obj = rnk.get_all()
rank_results = []
for r in ranking_obj:
rank_results.append({
'rank': r['RNK_NO'],
'player': r['WRKR_NM'],
'point': r['PNT_NO']
})
return jsonify(rank_results), 200
else:
return "No user name", 200
def log(message): # simple wrapper for logging to stdout on heroku
print(message)
sys.stdout.flush()
if __name__ == '__main__':
# app.run(debug=True, host="0.0.0.0", port=5055)
app.run(debug=True, host="0.0.0.0")
|
{
"content_hash": "dace30e4d79c154e29ad2b2e2fd2b239",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 87,
"avg_line_length": 27.236842105263158,
"alnum_prop": 0.6481159420289855,
"repo_name": "xuanthuong/golfgame",
"id": "d7cdf0f204e9932cb97a24f57dca3bfd85cbbfe4",
"size": "5175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77371"
}
],
"symlink_target": ""
}
|
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import TwentyThreeAndMeProvider
urlpatterns = default_urlpatterns(TwentyThreeAndMeProvider)
|
{
"content_hash": "3cc4bd1cc49971e1d605aa450821d40e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 75,
"avg_line_length": 46,
"alnum_prop": 0.8804347826086957,
"repo_name": "nimbis/django-allauth",
"id": "2b12907cc96c7c69f66b49d92813ec01f3360a23",
"size": "184",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "allauth/socialaccount/providers/twentythreeandme/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42101"
},
{
"name": "JavaScript",
"bytes": "3967"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Python",
"bytes": "581551"
}
],
"symlink_target": ""
}
|
from setuptools import setup
setup(
name='aiovoyager',
version='0.0.1',
    description='Python 3/Asyncio based web framework with websockets',
long_description=open('README.rst').read(),
url='https://github.com/GomZik/aiovoyager',
author='Aliaksiej Homza',
author_email='aliaksei.homza@gmail.com',
license=open('LICENSE').read(),
packages=['aiovoyager'],
install_requires=[
],
classifiers=[
'Development Status :: 1 - Planning',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers'
],
test_suite='nose.collector',
tests_require=['nose'],
zip_safe=False
)
|
{
"content_hash": "fbef74cb7b06b602610afaf5b15d07cc",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 71,
"avg_line_length": 30.703703703703702,
"alnum_prop": 0.6139927623642943,
"repo_name": "GomZik/aiovoyager",
"id": "dd087dd8a64a79ab85c93f01606add4e44564b42",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "987"
}
],
"symlink_target": ""
}
|
class ip6tunnel_args :
ur""" Provides additional arguments required for fetching the ip6tunnel resource.
"""
def __init__(self) :
self._remote = ""
@property
def remote(self) :
ur"""The IPv6 address at which the remote NetScaler appliance connects to the tunnel.<br/>Minimum length = 1.
"""
try :
return self._remote
except Exception as e:
raise e
@remote.setter
def remote(self, remote) :
ur"""The IPv6 address at which the remote NetScaler appliance connects to the tunnel.<br/>Minimum length = 1
"""
try :
self._remote = remote
except Exception as e:
raise e
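# Illustrative use (the address below is a documentation-range placeholder):
#   args = ip6tunnel_args()
#   args.remote = "2001:db8::1"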
|
{
"content_hash": "de77b18bbf529bf622e03aa777385f4d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 112,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6870860927152318,
"repo_name": "atopuzov/nitro-python",
"id": "52aca21e2fbd3d1ebd6db0e7a56286aa58678c37",
"size": "1219",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/network/ip6tunnel_args.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10881939"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
}
|
from runner.koan import *
class AboutStrings(Koan):
def test_double_quoted_strings_are_strings(self):
string = "Hello, world."
self.assertEqual(True, isinstance(string, basestring))
def test_single_quoted_strings_are_also_strings(self):
string = 'Goodbye, world.'
self.assertEqual(True, isinstance(string, basestring))
def test_triple_quote_strings_are_also_strings(self):
string = """Howdy, world!"""
self.assertEqual(True, isinstance(string, basestring))
def test_triple_single_quotes_work_too(self):
string = '''Bonjour tout le monde!'''
self.assertEqual(True, isinstance(string, basestring))
def test_raw_strings_are_also_strings(self):
string = r"Konnichi wa, world!"
self.assertEqual(True, isinstance(string, basestring))
def test_use_single_quotes_to_create_string_with_double_quotes(self):
string = 'He said, "Go Away."'
self.assertEqual("He said, \"Go Away.\"", string)
def test_use_double_quotes_to_create_strings_with_single_quotes(self):
string = "Don't"
self.assertEqual('Don\'t', string)
def test_use_backslash_for_escaping_quotes_in_strings(self):
a = "He said, \"Don't\""
b = 'He said, "Don\'t"'
self.assertEqual(True, (a == b))
def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
string = "It was the best of times,\n\
It was the worst of times."
self.assertEqual(52, len(string))
def test_triple_quoted_strings_can_span_lines(self):
string = """
Howdy,
world!
"""
self.assertEqual(15, len(string))
def test_triple_quoted_strings_need_less_escaping(self):
a = "Hello \"world\"."
b = """Hello "world"."""
self.assertEqual(True, (a == b))
def test_escaping_quotes_at_the_end_of_triple_quoted_string(self):
string = """Hello "world\""""
self.assertEqual('Hello \"world\"', string)
def test_plus_concatenates_strings(self):
string = "Hello, " + "world"
self.assertEqual("Hello, world", string)
def test_adjacent_strings_are_concatenated_automatically(self):
string = "Hello" ", " "world"
self.assertEqual("Hello, world", string)
def test_plus_will_not_modify_original_strings(self):
hi = "Hello, "
there = "world"
string = hi + there
self.assertEqual("Hello, ", hi)
self.assertEqual("world", there)
def test_plus_equals_will_append_to_end_of_string(self):
hi = "Hello, "
there = "world"
hi += there
self.assertEqual("Hello, world", hi)
def test_plus_equals_also_leaves_original_string_unmodified(self):
original = "Hello, "
hi = original
there = "world"
hi += there
        self.assertEqual("Hello, ", original)
def test_most_strings_interpret_escape_characters(self):
string = "\n"
self.assertEqual('\n', string)
self.assertEqual("""\n""", string)
        self.assertEqual(1, len(string))
|
{
"content_hash": "fa6e1a9c4723c901cb6ba64a32705e7b",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 85,
"avg_line_length": 33.55434782608695,
"alnum_prop": 0.6141885325558795,
"repo_name": "thetuxracer/completed-python-koans",
"id": "0647bf6ea19e8fc4abb490fb7422f53a2807514b",
"size": "3134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "327247"
},
{
"name": "Shell",
"bytes": "1599"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/particle/shared_particle_test_72.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "c869767f6c9db4a357f9baadd7a5e1b3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.076923076923077,
"alnum_prop": 0.69,
"repo_name": "anhstudios/swganh",
"id": "3b73c0c5013d300210540385ab256b9a2e835247",
"size": "445",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/static/particle/shared_particle_test_72.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""Tests triangle count for ATK against the networkx implementation"""
import unittest
import networkx as nx
from sparktkregtests.lib import sparktk_test
class TriangleCount(sparktk_test.SparkTKTestCase):
@unittest.skip("DPNG-11961")
def test_triangle_counts(self):
"""Build frames and graphs to exercise"""
super(TriangleCount, self).setUp()
graph_data = self.get_file("clique_10.csv")
schema = [('src', str),
('dst', str)]
# set up the vertex frame, which is the union of the src and
# the dst columns of the edges
self.frame = self.context.frame.import_csv(graph_data, schema=schema)
self.vertices = self.frame.copy()
self.vertices2 = self.frame.copy()
self.vertices.rename_columns({"src": "id"})
self.vertices.drop_columns(["dst"])
self.vertices2.rename_columns({"dst": "id"})
self.vertices2.drop_columns(["src"])
self.vertices.append(self.vertices2)
self.vertices.drop_duplicates()
self.graph = self.context.graph.create(self.vertices, self.frame)
result = self.graph.triangle_count()
triangles = result.to_pandas(result.count())
# Create a dictionary of triangle count per triangle:
dictionary_of_triangle_count = {vertex['id']: (vertex['count'])
for (index, vertex) in triangles.iterrows()}
edge_list = self.frame.take(
n=self.frame.count(), columns=['src', 'dst'])
# build the network x result
g = nx.Graph()
g.add_edges_from(edge_list)
triangle_counts_from_networkx = nx.triangles(g)
self.assertEqual(
dictionary_of_triangle_count, triangle_counts_from_networkx)
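# For intuition, nx.triangles() returns a per-node triangle count; e.g. a
# single 3-clique built with g = nx.Graph(); g.add_edges_from(
# [('a', 'b'), ('b', 'c'), ('a', 'c')]) yields {'a': 1, 'b': 1, 'c': 1}.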
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "a59361ef40c4c899e0b9cdfd519c67dc",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 84,
"avg_line_length": 34.54716981132076,
"alnum_prop": 0.6160567995630802,
"repo_name": "rodorad/spark-tk",
"id": "5f127c6a6653d1900315d4a8ac5e6c4eaccb4285",
"size": "2536",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "regression-tests/sparktkregtests/testcases/graph/graph_triangle_count_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "11509"
},
{
"name": "Jupyter Notebook",
"bytes": "152677"
},
{
"name": "Python",
"bytes": "1542925"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "1509526"
},
{
"name": "Shell",
"bytes": "24942"
}
],
"symlink_target": ""
}
|
"""Support for LightwaveRF switches."""
from homeassistant.components.lightwave import LIGHTWAVE_LINK
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import CONF_NAME
DEPENDENCIES = ['lightwave']
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Find and return LightWave switches."""
if not discovery_info:
return
switches = []
lwlink = hass.data[LIGHTWAVE_LINK]
for device_id, device_config in discovery_info.items():
name = device_config[CONF_NAME]
switches.append(LWRFSwitch(name, device_id, lwlink))
async_add_entities(switches)
class LWRFSwitch(SwitchDevice):
"""Representation of a LightWaveRF switch."""
def __init__(self, name, device_id, lwlink):
"""Initialize LWRFSwitch entity."""
self._name = name
self._device_id = device_id
self._state = None
self._lwlink = lwlink
@property
def should_poll(self):
"""No polling needed for a LightWave light."""
return False
@property
def name(self):
"""Lightwave switch name."""
return self._name
@property
def is_on(self):
"""Lightwave switch is on state."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the LightWave switch on."""
self._state = True
self._lwlink.turn_on_switch(self._device_id, self._name)
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the LightWave switch off."""
self._state = False
self._lwlink.turn_off(self._device_id, self._name)
self.async_schedule_update_ha_state()
|
{
"content_hash": "e2b92b0243dac4b0edea59ec6f308e9d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 64,
"avg_line_length": 29.066666666666666,
"alnum_prop": 0.6381880733944955,
"repo_name": "nugget/home-assistant",
"id": "d6c00b7fddb66d9cb795e8dd4558808e5caf47ca",
"size": "1744",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lightwave/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
}
|
import unittest
from SpiffyWorld.models import Dungeon
from SpiffyWorld import Database
class TestDungeon(unittest.TestCase):
"""
Functional tests for unit model
"""
@classmethod
def setUpClass(cls):
cls._db = Database()
cls.db = cls._db.get_connection()
def test_get_dungeons(self):
dungeon_model = Dungeon(db=self.db)
dungeons = dungeon_model.get_dungeons()
self.assertIsInstance(dungeons, list)
self.assertTrue(len(dungeons) > 0)
for dungeon in dungeons:
self.assertIsInstance(dungeon, dict)
self.assertTrue(dungeon["id"])
self.assertTrue(dungeon["name"])
self.assertTrue(dungeon["channel"])
self.assertTrue("description" in dungeon)
self.assertTrue("min_level" in dungeon)
self.assertTrue("max_level" in dungeon)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "3eb003aaf9ddcc1eff063e8f47b2c18e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 53,
"avg_line_length": 27.529411764705884,
"alnum_prop": 0.6217948717948718,
"repo_name": "butterscotchstallion/SpiffyRPG",
"id": "f562d38c3c29bd3584a2e9f6e54495196a18d2b4",
"size": "982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SpiffyRPG/test/functional/models/test_dungeon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "294210"
}
],
"symlink_target": ""
}
|
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.orm import scoped_session, sessionmaker
db = SQLAlchemy()
session = scoped_session(sessionmaker())
|
{
"content_hash": "37c9ffbcd27ca135efd85a9b96e68891",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 55,
"avg_line_length": 32,
"alnum_prop": 0.8125,
"repo_name": "SimpletonPatterns/EasySchedule",
"id": "84870dda888b62cf2acd79e9b27066c325fb2cc9",
"size": "190",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "easyschedule/database.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import numpy as np
import scipy.sparse as sp
from anemoi import BaseModelDependent, BaseSCCache, MultiFreq, MiniZephyr, Eurus
import SimPEG
from .survey import HelmBaseSurvey, Helm2DSurvey, Helm25DSurvey
from .fields import HelmFields
EPS = 1e-15
class HelmBaseProblem(SimPEG.Problem.BaseProblem, BaseModelDependent, BaseSCCache):
# initMap = {
# # Argument Required Rename as ... Store as type
# }
# maskKeys = []
surveyPair = HelmBaseSurvey
SystemWrapper = MultiFreq
cacheItems = ['_system']
def __init__(self, systemConfig, *args, **kwargs):
# Initialize anemoi side
BaseSCCache.__init__(self, systemConfig, *args, **kwargs)
# Initialize SimPEG side
hx = [(self.dx, self.nx-1)]
hz = [(self.dz, self.nz-1)]
mesh = SimPEG.Mesh.TensorMesh([hx, hz], '00')
SimPEG.Problem.BaseProblem.__init__(self, mesh, *args, **kwargs)
# @property
# def _survey(self):
# return self.__survey
# @_survey.setter
# def _survey(self, value):
# self.__survey = value
# if value is None:
# self._cleanSystem()
# else:
# self._buildSystem()
def updateModel(self, m, loneKey='c'):
if m is None:
return
elif isinstance(m, dict):
self.systemConfig.update(m)
self.clearCache()
elif isinstance(m, np.ndarray) or isinstance(m, np.inexact) or isinstance(m, complex) or isinstance(m, float):
if not np.linalg.norm(m - self.systemConfig.get(loneKey, 0.)) < EPS:
self.systemConfig[loneKey] = m
self.clearCache()
else:
raise Exception('Class %s doesn\'t know how to update with model of type %s'%(self.__class__.__name__, type(m)))
@property
def system(self):
if getattr(self, '_system', None) is None:
self._system = self.SystemWrapper(self.systemConfig)
return self._system
@SimPEG.Utils.timeIt
def Jtvec(self, m=None, v=None, uF=None):
if not self.ispaired:
raise Exception('%s instance is not paired to a survey'%(self.__class__.__name__,))
if v is None:
raise Exception('Actually, Jtvec requires a residual vector')
self.updateModel(m)
# v.shape <nrec, nsrc, nfreq>
# o.shape [<nelem, nsrc> . nfreq]
# r.shape [<nrec, nelem> . nsrc]
resid = v.reshape((self.survey.nrec, self.survey.nsrc, self.survey.nfreq))
if uF is None:
uF = self._lazyFields(m)
# Make a list of receiver vectors for each frequency, each of size <nelem, nsrc>
        qb = [
            sp.hstack(
                [self.survey.rVec(isrc).T * # <-- <nelem, nrec>
                 sp.csc_matrix(resid[:, isrc, ifreq].reshape((self.survey.nrec, 1))) # <-- <nrec, 1>
                 for isrc in range(self.survey.nsrc)
                ] # <-- List of <nelem, 1> sparse vectors, one per source, over all receivers
            ) # <-- hstack assembles them into a sparse matrix of size <nelem, nsrc>
            for ifreq in range(self.survey.nfreq) # <-- Outer list of length nfreq
        ]
uB = self.system * qb
g = reduce(np.add, ((uFf * uBf).sum(axis=1) for uFf, uBf in zip(uF, uB)))
return g
def _lazyFields(self, m=None):
if not self.ispaired:
raise Exception('%s instance is not paired to a survey'%(self.__class__.__name__,))
self.updateModel(m)
qs = self.survey.sVecs
uF = self.system * qs
if not np.iterable(uF):
uF = [uF]
return uF
def fields(self, m=None):
uF = self._lazyFields(m)
fields = HelmFields(self.mesh, self.survey)
for ifreq, uFsub in enumerate(uF):
fields[:,'u',ifreq] = uFsub
return fields
class Helm2DProblem(HelmBaseProblem):
surveyPair = Helm2DSurvey
class Helm25DProblem(HelmBaseProblem):
surveyPair = Helm25DSurvey
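# A minimal, hedged shape sketch (toy sizes; a single random matrix stands in
# for survey.rVec(isrc).T, which in reality differs per source) illustrating how
# Jtvec assembles one frequency's <nelem, nsrc> right-hand side.
if __name__ == '__main__':
    nelem, nrec, nsrc = 6, 3, 2
    rVecT = sp.random(nelem, nrec, density=0.5, format='csr')  # <nelem, nrec>
    residF = np.random.rand(nrec, nsrc)                        # one frequency slice of the residual
    qb_f = sp.hstack([rVecT * sp.csc_matrix(residF[:, isrc].reshape((nrec, 1)))
                      for isrc in range(nsrc)])
    assert qb_f.shape == (nelem, nsrc)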
|
{
"content_hash": "3552c1d6ddfe0414695a85dd011db4b8",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 128,
"avg_line_length": 33.27142857142857,
"alnum_prop": 0.5487333619579219,
"repo_name": "uwoseis/windtunnel",
"id": "4b10558ef5e4bc24cad8fe26dde2f8c18d665a67",
"size": "4659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windtunnel/problem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20997"
}
],
"symlink_target": ""
}
|
"""Do a minimal test of all the modules that aren't otherwise tested."""
import importlib
import platform
import sys
from test import support
import unittest
class TestUntestedModules(unittest.TestCase):
def test_untested_modules_can_be_imported(self):
untested = ('encodings', 'formatter')
with support.check_warnings(quiet=True):
for name in untested:
try:
support.import_module('test.test_{}'.format(name))
except unittest.SkipTest:
importlib.import_module(name)
else:
self.fail('{} has tests even though test_sundry claims '
'otherwise'.format(name))
import distutils.bcppcompiler
import distutils.ccompiler
import distutils.cygwinccompiler
import distutils.filelist
import distutils.text_file
import distutils.unixccompiler
import distutils.command.bdist_dumb
if sys.platform.startswith('win') and not platform.win32_is_iot():
import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
import distutils.command.bdist_wininst
import distutils.command.build_clib
import distutils.command.build_ext
import distutils.command.build
import distutils.command.clean
import distutils.command.config
import distutils.command.install_data
import distutils.command.install_egg_info
import distutils.command.install_headers
import distutils.command.install_lib
import distutils.command.register
import distutils.command.sdist
import distutils.command.upload
import html.entities
try:
import tty # Not available on Windows
except ImportError:
if support.verbose:
print("skipping tty")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "248f3e989766dedeb2e44c86566dcd39",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 78,
"avg_line_length": 37.24561403508772,
"alnum_prop": 0.6015073009891663,
"repo_name": "kikocorreoso/brython",
"id": "2accad1aeebd4f9734835c02c7ee1d3faaf0bf1e",
"size": "2123",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "www/src/Lib/test/test_sundry.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21158"
},
{
"name": "HTML",
"bytes": "5011615"
},
{
"name": "JavaScript",
"bytes": "7230101"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19224768"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|