Schema (113 columns; one row per source file):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
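With the schema in hand, a quick way to poke at a shard of this table is pandas. This is a minimal sketch, assuming the rows ship as parquet; the file name `shard.parquet` is a placeholder, not part of the dump:

```python
# Minimal sketch: inspect one (hypothetical) parquet shard of this dataset.
import pandas as pd

df = pd.read_parquet("shard.parquet")  # placeholder path

# Peek at per-file metadata and a few of the quality signals.
cols = ["hexsha", "max_stars_repo_name", "size", "lang",
        "avg_line_length", "alphanum_fraction"]
print(df[cols].head())

# Example filter in the spirit of the quality signals: keep files with a low
# duplicated-10-gram character fraction that also parse as a Python AST.
mask = (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2) \
     & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
print(len(df[mask]), "of", len(df), "rows pass")
```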
Row 1: `src/ezdxf/math/bulge.py` from `dmtvanzanten/ezdxf`

| field | value |
|---|---|
| hexsha | 13a4eaea9e2402891521cc56201ae27b7976fb0d |
| size | 4,794 |
| ext | py |
| lang | Python |
| max_stars_repo_path | src/ezdxf/math/bulge.py |
| max_stars_repo_name | dmtvanzanten/ezdxf |
| max_stars_repo_head_hexsha | 6fe9d0aa961e011c87768aa6511256de21a662dd |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | null |
| max_stars_repo_stars_event_min_datetime | null |
| max_stars_repo_stars_event_max_datetime | null |
| max_issues_repo_path | src/ezdxf/math/bulge.py |
| max_issues_repo_name | dmtvanzanten/ezdxf |
| max_issues_repo_head_hexsha | 6fe9d0aa961e011c87768aa6511256de21a662dd |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | src/ezdxf/math/bulge.py |
| max_forks_repo_name | dmtvanzanten/ezdxf |
| max_forks_repo_head_hexsha | 6fe9d0aa961e011c87768aa6511256de21a662dd |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
# Copyright (c) 2018-2021 Manfred Moitzi
# License: MIT License
# source: http://www.lee-mac.com/bulgeconversion.html
# source: http://www.afralisp.net/archive/lisp/Bulges1.htm
from typing import Any, TYPE_CHECKING, Tuple
import math
from ezdxf.math import Vec2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = [
"bulge_to_arc", "bulge_3_points", "bulge_center", "bulge_radius",
"arc_to_bulge"
]
def polar(p: Any, angle: float, distance: float) -> Vec2:
""" Returns the point at a specified `angle` and `distance` from point `p`.
Args:
p: point as :class:`Vec2` compatible object
angle: angle in radians
distance: distance
"""
return Vec2(p) + Vec2.from_angle(angle, distance)
def angle(p1: Any, p2: Any) -> float:
""" Returns angle a line defined by two endpoints and x-axis in radians.
Args:
p1: start point as :class:`Vec2` compatible object
p2: end point as :class:`Vec2` compatible object
"""
return (Vec2(p2) - Vec2(p1)).angle
def arc_to_bulge(center: 'Vertex', start_angle: float, end_angle: float,
radius: float) -> Tuple['Vec2', 'Vec2', float]:
"""
Returns bulge parameters from arc parameters.
Args:
center: circle center point as :class:`Vec2` compatible object
start_angle: start angle in radians
end_angle: end angle in radians
radius: circle radius
Returns:
tuple: (start_point, end_point, bulge)
"""
start_point = polar(center, start_angle, radius)
end_point = polar(center, end_angle, radius)
pi2 = math.pi * 2
a = math.fmod((pi2 + (end_angle - start_angle)), pi2) / 4.
bulge = math.sin(a) / math.cos(a)
return start_point, end_point, bulge
def bulge_3_points(start_point: 'Vertex', end_point: 'Vertex',
point: 'Vertex') -> float:
""" Returns bulge value defined by three points.
Based on 3-Points to Bulge by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
point: arbitrary point as :class:`Vec2` compatible object
"""
a = (math.pi - angle(point, start_point) + angle(point, end_point)) / 2
return math.sin(a) / math.cos(a)
def bulge_to_arc(start_point: 'Vertex',
end_point: 'Vertex',
bulge: float) -> Tuple['Vec2', float, float, float]:
""" Returns arc parameters from bulge parameters.
The arcs defined by bulge values of :class:`~ezdxf.entities.LWPolyline`
and 2D :class:`~ezdxf.entities.Polyline` entities start at the vertex which
includes the bulge value and ends at the following vertex.
Based on Bulge to Arc by `Lee Mac`_.
Args:
start_point: start vertex as :class:`Vec2` compatible object
end_point: end vertex as :class:`Vec2` compatible object
bulge: bulge value
Returns:
Tuple: (center, start_angle, end_angle, radius)
"""
r = signed_bulge_radius(start_point, end_point, bulge)
a = angle(start_point, end_point) + (math.pi / 2 - math.atan(bulge) * 2)
c = polar(start_point, a, r)
if bulge < 0:
return c, angle(c, end_point), angle(c, start_point), abs(r)
else:
return c, angle(c, start_point), angle(c, end_point), abs(r)
def bulge_center(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> 'Vec2':
""" Returns center of arc described by the given bulge parameters.
Based on Bulge Center by `Lee Mac`_.
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value as float
"""
start_point = Vec2(start_point)
a = angle(start_point, end_point) + (math.pi / 2. - math.atan(bulge) * 2.)
return start_point + Vec2.from_angle(a, signed_bulge_radius(start_point,
end_point,
bulge))
def signed_bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
return Vec2(start_point).distance(Vec2(end_point)) * (
1. + (bulge * bulge)) / 4. / bulge
def bulge_radius(start_point: 'Vertex', end_point: 'Vertex',
bulge: float) -> float:
""" Returns radius of arc defined by the given bulge parameters.
Based on Bulge Radius by `Lee Mac`_
Args:
start_point: start point as :class:`Vec2` compatible object
end_point: end point as :class:`Vec2` compatible object
bulge: bulge value
"""
return abs(signed_bulge_radius(start_point, end_point, bulge))
```

Quality signals for row 1:

| column | value |
|---|---|
| avg_line_length | 32.391892 |
| max_line_length | 79 |
| alphanum_fraction | 0.629954 |
| qsc_code_num_words_quality_signal | 643 |
| qsc_code_num_chars_quality_signal | 4,794 |
| qsc_code_mean_word_length_quality_signal | 4.553655 |
| qsc_code_frac_words_unique_quality_signal | 0.169518 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.099044 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.048839 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.093238 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.416325 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.406421 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.338456 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.304645 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.183743 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.183743 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.016676 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.261994 |
| qsc_code_size_file_byte_quality_signal | 4,794 |
| qsc_code_num_lines_quality_signal | 147 |
| qsc_code_num_chars_line_max_quality_signal | 80 |
| qsc_code_num_chars_line_mean_quality_signal | 32.612245 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.81091 |
| qsc_code_frac_chars_comments_quality_signal | 0.440968 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.040816 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.061703 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.163265 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.081633 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.020408 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.428571 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |

The raw (non-`_quality_signal`) `qsc_code_*` and `qsc_codepython_*` counterparts are all 0 for this row, apart from `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
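The docstrings in row 1's `bulge.py` describe conversions between arc and bulge representations that round-trip. A minimal sketch of that round trip, assuming the `ezdxf` package is installed and re-exports these helpers from `ezdxf.math` as the module's `__all__` suggests:

```python
# Round-trip check of the bulge helpers from row 1 (assumes `pip install ezdxf`).
import math
from ezdxf.math import arc_to_bulge, bulge_to_arc, bulge_radius

# Quarter arc of radius 2 centered at the origin.
start, end, bulge = arc_to_bulge((0, 0), start_angle=0,
                                 end_angle=math.pi / 2, radius=2)
print(start, end, bulge)  # bulge == tan(pi/8) ~ 0.4142 for a 90 degree arc

# Convert back: center, angles, and radius should match the inputs.
center, start_angle, end_angle, radius = bulge_to_arc(start, end, bulge)
print(center, start_angle, end_angle, radius)
print(bulge_radius(start, end, bulge))  # ~2.0
```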
Row 2: `mypython/keys.py` from `asmeurer/mypython`

| field | value |
|---|---|
| hexsha | 13a54ea2fa4b8b8724c32c2f486041ebcedd4707 |
| size | 33,005 |
| ext | py |
| lang | Python |
| max_stars_repo_path | mypython/keys.py |
| max_stars_repo_name | asmeurer/mypython |
| max_stars_repo_head_hexsha | ae984926739cc2bb3abe70566762d7b4052ed0ae |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 27 |
| max_stars_repo_stars_event_min_datetime | 2017-02-09T06:18:30.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-02-16T08:32:42.000Z |
| max_issues_repo_path | mypython/keys.py |
| max_issues_repo_name | asmeurer/mypython |
| max_issues_repo_head_hexsha | ae984926739cc2bb3abe70566762d7b4052ed0ae |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 1 |
| max_issues_repo_issues_event_min_datetime | 2022-01-20T20:23:41.000Z |
| max_issues_repo_issues_event_max_datetime | 2022-01-20T20:23:41.000Z |
| max_forks_repo_path | mypython/keys.py |
| max_forks_repo_name | asmeurer/mypython |
| max_forks_repo_head_hexsha | ae984926739cc2bb3abe70566762d7b4052ed0ae |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 2 |
| max_forks_repo_forks_event_min_datetime | 2019-12-14T06:45:04.000Z |
| max_forks_repo_forks_event_max_datetime | 2021-10-04T00:28:48.000Z |

content:

```python
from prompt_toolkit.key_binding.bindings.named_commands import (accept_line,
self_insert, backward_delete_char, beginning_of_line)
from prompt_toolkit.key_binding.bindings.basic import if_no_repeat
from prompt_toolkit.key_binding.bindings.basic import load_basic_bindings
from prompt_toolkit.key_binding.bindings.emacs import load_emacs_bindings, load_emacs_search_bindings
from prompt_toolkit.key_binding.bindings.mouse import load_mouse_bindings
from prompt_toolkit.key_binding.bindings.cpr import load_cpr_bindings
from prompt_toolkit.key_binding.bindings.page_navigation import load_emacs_page_navigation_bindings
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.keys import Keys, ALL_KEYS
from prompt_toolkit.filters import Condition, HasSelection, is_searching
from prompt_toolkit.selection import SelectionState
from prompt_toolkit.clipboard import ClipboardData
from prompt_toolkit.input.vt100_parser import ANSI_SEQUENCES
from prompt_toolkit.application.current import get_app
from prompt_toolkit.application import run_in_terminal
from prompt_toolkit import __version__ as prompt_toolkit_version
from .multiline import (auto_newline, tab_should_insert_whitespace,
document_is_multiline_python)
from .tokenize import inside_string, matching_parens
from .theme import emoji, emoji_pudb
from .processors import get_pyflakes_warnings
import re
import subprocess
import sys
import textwrap
import platform
def get_key_bindings():
# Based on prompt_toolkit.key_binding.defaults.load_key_bindings()
return merge_key_bindings([
load_basic_bindings(),
load_emacs_bindings(),
load_emacs_search_bindings(),
load_emacs_page_navigation_bindings(),
load_mouse_bindings(),
load_cpr_bindings(),
custom_key_bindings,
])
r = custom_key_bindings = KeyBindings()
def warning_positions(event):
document = event.current_buffer.document
warnings = get_pyflakes_warnings(document.text, frozenset(event.current_buffer.session._locals))
positions = []
for (row, col, msg, m) in warnings:
# Handle SyntaxErrorMessage which is the same warning for the whole
# line.
if m.col != col:
continue
pos = document.translate_row_col_to_index(row, col)
positions.append(pos)
return positions
@r.add_binding(Keys.Escape, 'p')
def previous_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[0] >= buffer.cursor_position:
return
p = positions[0]
for pos in positions:
if pos >= buffer.cursor_position:
break
p = pos
event.current_buffer._show_syntax_warning = True
event.current_buffer.cursor_position = p
@r.add_binding(Keys.Escape, 'n')
def next_warning(event):
positions = warning_positions(event)
buffer = event.current_buffer
buffer._show_syntax_warning = True
if not positions or positions[-1] <= buffer.cursor_position:
return
p = positions[-1]
for pos in reversed(positions):
if pos <= buffer.cursor_position:
break
p = pos
event.current_buffer.cursor_position = p
# This can be removed once
# https://github.com/prompt-toolkit/python-prompt-toolkit/pull/857 is in a
# released version of prompt-toolkit.
ANSI_SEQUENCES['\x1b[1;9A'] = (Keys.Escape, Keys.Up)
ANSI_SEQUENCES['\x1b[1;9B'] = (Keys.Escape, Keys.Down)
@r.add_binding(Keys.Escape, Keys.Up)
def previous_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_backward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, 'P')
@r.add_binding(Keys.Escape, Keys.Down)
def forward_history_search(event):
event.key_sequence[-1].accept_next = True
buffer = event.current_buffer
buffer.history_forward(count=event.arg, history_search=True)
@r.add_binding(Keys.Escape, '<')
def beginning(event):
"""
Move to the beginning
"""
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, '>')
def end(event):
"""
Move to the end
"""
event.current_buffer.cursor_position = len(event.current_buffer.text)
# Document.start_of_paragraph/end_of_paragraph don't treat multiple blank
# lines correctly.
# Gives the positions right before one or more blank lines
BLANK_LINES = re.compile(r'\S *(\n *\n)')
@r.add_binding(Keys.Escape, '}')
def forward_paragraph(event):
"""
Move forward one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text):
if m.start(0) > cursor_position:
event.current_buffer.cursor_position = m.start(1)+1
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, '{')
def backward_paragraph(event):
"""
Move back one paragraph of text
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in BLANK_LINES.finditer(text[::-1]):
if m.start(0) > len(text) - cursor_position:
event.current_buffer.cursor_position = len(text) - m.end(1) + 1
return
event.current_buffer.cursor_position = 0
WORD = re.compile(r'([a-z0-9]+|[A-Z]{2,}|[a-zA-Z0-9][a-z0-9]*)')
@r.add_binding(Keys.Escape, 'f')
@r.add_binding(Keys.Escape, Keys.Right)
def forward_word(event):
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
event.current_buffer.cursor_position = m.end(0)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'b')
@r.add_binding(Keys.Escape, Keys.Left)
def backward_word(event):
"""
    Move back one word
"""
text = event.current_buffer.text
cursor_position = event.current_buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
event.current_buffer.cursor_position = m.start(0)
return
event.current_buffer.cursor_position = 0
@r.add_binding(Keys.Escape, 'd')
def kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
pos = None
for m in WORD.finditer(text):
if m.end(0) > cursor_position:
pos = m.end(0) - cursor_position
break
if pos:
deleted = buffer.delete(count=pos)
event.app.clipboard.set_text(deleted)
@r.add_binding(Keys.Escape, Keys.Backspace)
def backward_kill_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = buffer.cursor_position
for m in reversed(list(WORD.finditer(text))):
if m.start(0) < cursor_position:
pos = cursor_position - m.start(0)
break
else:
pos = buffer.cursor_position
if pos:
deleted = buffer.delete_before_cursor(count=pos)
event.app.clipboard.set_text(deleted)
def insert_text_ovewrite(buffer, data, move_cursor=True):
"""
    Insert characters at the cursor position, overwriting the characters
    after it (used by the case-changing commands below).
"""
# Original text & cursor position.
otext = buffer.text
ocpos = buffer.cursor_position
# Don't overwrite the newline itself. Just before the line ending,
# it should act like insert mode.
overwritten_text = otext[ocpos:ocpos + len(data)]
buffer.text = otext[:ocpos] + data + otext[ocpos + len(overwritten_text):]
if move_cursor:
buffer.cursor_position += len(data)
@r.add_binding(Keys.Escape, 'l')
def downcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.lower())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'u')
def upcase_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
insert_text_ovewrite(buffer, word.upper())
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, 'c')
def capitalize_word(event):
buffer = event.current_buffer
text = buffer.text
cursor_position = event.current_buffer.cursor_position
for m in WORD.finditer(text):
pos = m.end(0)
if pos > cursor_position:
word = buffer.document.text[cursor_position:pos]
# Don't use word.capitalize() because the first character could be
# - or _
for i, c in enumerate(word):
if c.isalnum():
word = word[:i] + c.capitalize() + word[i+1:].lower()
break
insert_text_ovewrite(buffer, word)
return
event.current_buffer.cursor_position = len(text)
@r.add_binding(Keys.Escape, Keys.ControlF)
def forward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if opening.start == (row, col):
new_pos = document.translate_row_col_to_index(closing.end[0]-1, closing.end[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Escape, Keys.ControlB)
def backward_sexp(event):
buffer = event.current_buffer
document = buffer.document
text = buffer.text
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
matching, mismatching = matching_parens(text)
for opening, closing in matching:
if closing.end == (row, col):
new_pos = document.translate_row_col_to_index(opening.start[0]-1, opening.start[1])
buffer.cursor_position = new_pos
return
event.app.output.bell()
@r.add_binding(Keys.Left)
def left_multiline(event):
"""
Left that wraps around in multiline.
"""
if event.current_buffer.cursor_position - event.arg >= 0:
event.current_buffer.cursor_position -= event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.Right)
def right_multiline(event):
"""
Right that wraps around in multiline.
"""
if event.current_buffer.cursor_position + event.arg <= len(event.current_buffer.text):
event.current_buffer.cursor_position += event.arg
if getattr(event.current_buffer.selection_state, "shift_arrow", False):
event.current_buffer.selection_state = None
@r.add_binding(Keys.ControlD)
def exit(event):
event.app.exit(exception=EOFError, style='class:exiting')
@r.add_binding(Keys.ControlC, filter=~is_searching)
def keyboard_interrupt(event):
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
is_returnable = Condition(
lambda: get_app().current_buffer.is_returnable)
@r.add_binding(Keys.Enter, filter=is_returnable)
def multiline_enter(event):
"""
When not in multiline, execute. When in multiline, try to
intelligently add a newline or execute.
"""
buffer = event.current_buffer
document = buffer.document
multiline = document_is_multiline_python(document)
text_after_cursor = document.text_after_cursor
text_before_cursor = document.text_before_cursor
text = buffer.text
# isspace doesn't respect vacuous truth
if (not text_after_cursor or text_after_cursor.isspace()) and text_before_cursor.replace(' ', '').endswith('\n'):
# If we are at the end of the buffer, accept unless we are in a
# docstring
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if multiline and inside_string(text, row, col):
# We are inside a docstring
auto_newline(event.current_buffer)
else:
accept_line(event)
elif not multiline:
# Always accept a single valid line. Also occurs for unclosed single
# quoted strings (which will give a syntax error)
accept_line(event)
else:
auto_newline(event.current_buffer)
# Always accept the line if the previous key was Up
# Requires https://github.com/jonathanslenders/python-prompt-toolkit/pull/492.
# We don't need a parallel for down because down is already at the end of the
# prompt.
@r.add_binding(Keys.Enter, filter=is_returnable)
def accept_after_history_backward(event):
pks = event.previous_key_sequence
if pks and getattr(pks[-1], 'accept_next', False) and ((len(pks) == 1 and
pks[0].key == "up") or (len(pks) == 2 and pks[0].key == "escape"
and isinstance(pks[1].key, str) and pks[1].key in ['p', 'P', 'up',
'down'])):
accept_line(event)
else:
multiline_enter(event)
@r.add_binding(Keys.Escape, Keys.Enter)
@r.add_binding(Keys.Escape, Keys.ControlJ)
def insert_newline(event):
auto_newline(event.current_buffer)
@r.add_binding(Keys.ControlO)
def open_line(event):
event.current_buffer.newline(copy_margin=False)
event.current_buffer.cursor_left()
# M-[ a g is set to S-Enter in iTerm2 settings
Keys.ShiftEnter = "<Shift-Enter>"
ALL_KEYS.append('<Shift-Enter>')
ANSI_SEQUENCES['\x1b[ag'] = Keys.ShiftEnter
ANSI_SEQUENCES['\x1bOM'] = Keys.ShiftEnter
if prompt_toolkit_version[0] != '3':
r.add_binding(Keys.ShiftEnter)(accept_line)
@r.add_binding(Keys.Tab, filter=tab_should_insert_whitespace)
def indent(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
# Text before cursor on the line must be whitespace because of the
# TabShouldInsertWhitespaceFilter.
before_cursor = event.app.current_buffer.document.current_line_before_cursor
event.app.current_buffer.insert_text(' '*(4 - len(before_cursor)%4))
LEADING_WHITESPACE = re.compile(r'( *)[^ ]?')
@r.add_binding(Keys.Escape, 'm')
def back_to_indentation(event):
"""
Move back to the beginning of the line, ignoring whitespace.
"""
current_line = event.app.current_buffer.document.current_line
before_cursor = event.app.current_buffer.document.current_line_before_cursor
indent = LEADING_WHITESPACE.search(current_line)
if indent:
event.app.current_buffer.cursor_position -= len(before_cursor) - indent.end(1)
@r.add_binding(Keys.Backspace, save_before=if_no_repeat)
def delete_char_or_unindent(event):
buffer = event.app.current_buffer
if buffer.document.current_line_before_cursor.isspace():
spaces = len(buffer.document.current_line_before_cursor)
# Delete up to the tab stop
buffer.delete_before_cursor(count=4 + spaces%-4)
else:
backward_delete_char(event)
# Reset the history search text
buffer.history_search_text = None
@r.add_binding(Keys.Escape, ' ')
def cycle_spacing(event):
"""
Based on emacs's cycle-spacing
On first call, remove all whitespace (if any) from around the cursor and
replace it with a single space.
On second call, remove all whitespace.
On third call, restore the original whitespace and cursor position.
"""
buffer = event.app.current_buffer
# Avoid issues when text grows or shrinks below, keeping the cursor
# position out of sync
cursor_position = buffer.cursor_position
buffer.cursor_position = 0
buffer.text, buffer.cursor_position = do_cycle_spacing(buffer.text, cursor_position)
def do_cycle_spacing(text, cursor_position, state=[]):
rstripped = text[:cursor_position].rstrip()
lstripped = text[cursor_position:].lstrip()
text_before_cursor = text[:cursor_position]
# The first element of state is the original text. The last element is the
# buffer text and cursor position as we last left them. If either of those
# have changed, reset. The state here is global, but that's fine, because
# we consider any change to be enough clear the state. The worst that
# happens here is that we resume when we shouldn't if things look exactly
# as they did where we left off.
# TODO: Use event.previous_key_sequence instead.
if state and state[-1] != (text, cursor_position):
state.clear()
if len(state) == 0:
# Replace all whitespace at the cursor (if any) with a single space.
state.append((text, cursor_position))
cursor_position -= len(text_before_cursor) - len(rstripped) -1
text = rstripped + ' ' + lstripped
state.append((text, cursor_position))
elif len(state) == 2:
# Exactly one space at the cursor. Remove it.
cursor_position -= 1
text = rstripped + lstripped
state.append((text, cursor_position))
elif len(state) == 3:
# Restore original text and cursor position
text, cursor_position = state[0]
state.clear()
if cursor_position < 0:
cursor_position = 0
if cursor_position > len(text):
cursor_position = len(text)
return text, cursor_position
@r.add_binding(Keys.ControlX, Keys.ControlO)
def delete_blank_lines(event):
"""
On blank line, delete all surrounding blank lines, leaving just one.
On isolated blank line, delete that one.
On nonblank line, delete any immediately following blank lines.
"""
buffer = event.app.current_buffer
document = buffer.document
lines_up_to_current = document.lines[:document.cursor_position_row+1]
lines_after_current = document.lines[document.cursor_position_row+1:]
blank_lines_before = 0
for line in lines_up_to_current[::-1]:
if not line.strip():
blank_lines_before += 1
else:
break
blank_lines_after = 0
for line in lines_after_current:
if not line.strip():
blank_lines_after += 1
else:
break
if not blank_lines_before:
stripped_before = lines_up_to_current
else:
stripped_before = lines_up_to_current[:-blank_lines_before]
stripped_after = lines_after_current[blank_lines_after:]
# XXX: Emacs always keeps a newline at the end of the file, but I don't
# think it matters here.
if (not blank_lines_before and blank_lines_after) or blank_lines_before + blank_lines_after == 1:
new_text = '\n'.join(stripped_before + stripped_after)
elif blank_lines_before + blank_lines_after == 0:
return
else:
buffer.cursor_up(max(blank_lines_before-1, 0))
new_text = '\n'.join(stripped_before + [''] + stripped_after)
# Even though we do auto_up, it can be out of bounds from trailing
# whitespace
buffer.cursor_position = min(buffer.cursor_position, len(new_text))
buffer.text = new_text
@r.add_binding(Keys.ControlX, Keys.ControlT)
def transpose_lines(event):
buffer = event.current_buffer
document = buffer.document
row = document.cursor_position_row
new_lines = document.lines[:]
if len(new_lines) == 1:
new_lines.append('')
if row == 0:
buffer.cursor_down()
row += 1
if row == len(new_lines) - 1:
new_lines.append('')
new_lines[row], new_lines[row-1] = new_lines[row-1], new_lines[row]
buffer.text = '\n'.join(new_lines)
buffer.cursor_down()
beginning_of_line(event)
# Selection stuff
@r.add_binding(Keys.ShiftLeft)
def select_left(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position -= event.arg
@r.add_binding(Keys.ShiftRight)
def select_right(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
buffer.cursor_position += event.arg
@r.add_binding(Keys.Up)
def auto_up(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row > 0:
buffer.cursor_up(count=count)
elif not buffer.selection_state:
event.key_sequence[-1].accept_next = True
buffer.history_backward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.Down)
def auto_down(event):
buffer = event.current_buffer
count = event.arg
if buffer.document.cursor_position_row < buffer.document.line_count - 1:
buffer.cursor_down(count=count)
elif not buffer.selection_state:
buffer.history_forward(count=count)
if getattr(buffer.selection_state, "shift_arrow", False):
buffer.selection_state = None
@r.add_binding(Keys.ShiftUp)
def select_line_up(event):
buffer = event.current_buffer
if buffer.document.text_before_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
up_position = buffer.document.get_cursor_up_position()
buffer.cursor_position += up_position
if not up_position:
buffer.cursor_position = 0
@r.add_binding(Keys.ShiftDown)
def select_line_down(event):
buffer = event.current_buffer
if buffer.document.text_after_cursor:
if not buffer.selection_state:
buffer.start_selection()
buffer.selection_state.shift_arrow = True
down_position = buffer.document.get_cursor_down_position()
buffer.cursor_position += down_position
if not down_position:
buffer.cursor_position = len(buffer.document.text)
# The default doesn't toggle correctly
@r.add_binding(Keys.ControlSpace)
def toggle_selection(event):
buffer = event.current_buffer
if buffer.selection_state:
buffer.selection_state = None
else:
buffer.start_selection()
@r.add_binding(Keys.ControlX, 'h')
def select_all(event):
buffer = event.current_buffer
buffer.selection_state = SelectionState(len(buffer.document.text))
buffer.cursor_position = 0
@r.add_binding(Keys.Delete, filter=HasSelection())
@r.add_binding(Keys.Backspace, filter=HasSelection())
def delete_selection(event):
event.current_buffer.cut_selection()
@r.add_binding(Keys.Any, filter=HasSelection())
def self_insert_and_clear_selection(event):
event.current_buffer.cut_selection()
self_insert(event)
@r.add_binding(Keys.ControlK, filter=HasSelection())
@r.add_binding(Keys.ControlU, filter=HasSelection())
def kill_selection(event):
data = event.current_buffer.cut_selection()
event.app.clipboard.set_data(data)
def system_copy(text):
if "Linux" in platform.platform():
copy_command = ['xclip', '-selection', 'c']
else:
copy_command = ['pbcopy']
try:
# In Python 3.6 we can do this:
# run(copy_command, input=text, encoding='utf-8', check=True)
subprocess.run(copy_command, input=text.encode('utf-8'), check=True)
except FileNotFoundError:
print("Error: could not find", copy_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(copy_command[0], "error:", e, file=sys.stderr)
def system_paste():
if "Linux" in platform.platform():
paste_command = ['xsel', '-b']
else:
paste_command = ['pbpaste']
try:
# In Python 3.6 we can do this:
# run(paste_command, input=text, encoding='utf-8')
p = subprocess.run(paste_command, stdout=subprocess.PIPE, check=True)
except FileNotFoundError:
print("Error: could not find", paste_command[0], file=sys.stderr)
except subprocess.CalledProcessError as e:
print(paste_command[0], "error:", e, file=sys.stderr)
return p.stdout.decode('utf-8')
@r.add_binding(Keys.ControlX, Keys.ControlW)
def copy_to_clipboard(event):
if event.current_buffer.document.selection:
from_, to = event.current_buffer.document.selection_range()
run_in_terminal(lambda:system_copy(event.current_buffer.document.text[from_:to + 1]))
@r.add_binding(Keys.ControlX, Keys.ControlY)
def paste_from_clipboard(event):
paste_text_future = run_in_terminal(system_paste)
event.current_buffer.cut_selection()
paste_text_future.add_done_callback(lambda future:\
event.current_buffer.paste_clipboard_data(ClipboardData(future.result())))
# M-[ a b is set to C-S-/ (C-?) in iTerm2 settings
Keys.ControlQuestionmark = "<C-?>"
ALL_KEYS.append("<C-?>")
ANSI_SEQUENCES['\x1b[ab'] = Keys.ControlQuestionmark
Keys.ControlSlash = "<C-/>"
ALL_KEYS.append("<C-/>")
ANSI_SEQUENCES['\x1b"5/'] = Keys.ControlSlash
# This won't work until
# https://github.com/jonathanslenders/python-prompt-toolkit/pull/484 is
# merged.
if prompt_toolkit_version[0] != '3':
@r.add_binding(Keys.ControlQuestionmark, save_before=lambda e: False)
def redo(event):
event.current_buffer.redo()
@r.add_binding(Keys.ControlSlash, save_before=lambda e: False)
def undo(event):
event.current_buffer.undo()
# Need to escape all spaces here because of verbose (x) option below
ps1_prompts = [r'>>>\ '] + [re.escape(i) + r'\[\d+\]:\ ' for i, j in emoji + [emoji_pudb]] + [r'In\ \[\d+\]:\ ']
ps2_prompts = [r'\ *\.\.\.:\ ?', r'\.\.\.\ ?', '\N{CLAPPING HANDS SIGN}+\\ ?⎢\\ ?']
PS1_PROMPTS_RE = re.compile('|'.join(ps1_prompts))
PS2_PROMPTS_RE = re.compile('|'.join(ps2_prompts))
PROMPTED_TEXT_RE = re.compile(r'''(?x) # Multiline and verbose
(?P<prompt>
(?P<ps1prompt>{PS1_PROMPTS_RE.pattern}) # Match prompts at the front
| (?P<ps2prompt>{PS2_PROMPTS_RE.pattern}))? # of the line.
(?P<noprompt>(?(prompt)\r|))? # If the prompt is not
# matched, this is a special
# marker group that will match
# the empty string.
# Otherwise it will not
# match (because all \r's
# have been stripped from
# the string).
(?P<line>.*)\n # The actual line.
'''.format(PS1_PROMPTS_RE=PS1_PROMPTS_RE, PS2_PROMPTS_RE=PS2_PROMPTS_RE))
def prompt_repl(match):
r"""
repl function for re.sub for clearing prompts
Replaces PS1 prompts with \r and removes PS2 prompts.
"""
# TODO: Remove the lines with no prompt
if match.group('ps1prompt') is not None:
return '\r' + match.group('line') + '\n'
elif match.group('ps2prompt') is not None:
return match.group('line') + '\n'
return ''
def split_prompts(text, indent=''):
r"""
Takes text copied from mypython, Python, or IPython session and returns a
list of inputs
Outputs are stripped. If no prompts are found the text is left alone.
The resulting text is indented by indent, except for the first line.
It is assumed that the text contains no carriage returns (\r).
Trailing whitespace and newlines is stripped from the outputs.
Example:
>>> split_prompts('''
... In [1]: a = 1
...
... In [2]: a
... Out[2]: 1
...
... In [3]: def test():
... ...: pass
... ...:
... ''')
['a = 1', 'a', 'def test():\n pass']
"""
from .mypython import validate_text
text = textwrap.dedent(text).strip() + '\n'
text = textwrap.dedent(PROMPTED_TEXT_RE.sub(prompt_repl, text)).lstrip()
lines = text.split('\r')
# Make sure multilines end in two newlines
for i, line in enumerate(lines):
try:
validate_text(line)
except SyntaxError:
# If there is a syntax error, we can't use the CMD_QUEUE (it
# breaks things).
lines = ['\n'.join(lines)]
break
if '\n' in line.rstrip():
lines[i] += '\n'
lines[0] = textwrap.indent(lines[0], indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))
for i in range(1, len(lines)):
lines[i] = textwrap.indent(lines[i], indent)
# Extraneous newlines at the end will be stripped by the prompt anyway.
# This just makes this function easier to test.
lines = [i.rstrip() for i in lines]
return lines
@r.add_binding(Keys.BracketedPaste)
def bracketed_paste(event):
from .mypython import CMD_QUEUE
data = event.data
buffer = event.current_buffer
# Be sure to use \n as line ending.
# This part is the same as the default binding
# Some terminals (Like iTerm2) seem to paste \r\n line endings in a
# bracketed paste. See: https://github.com/ipython/ipython/issues/9737
data = data.replace('\r\n', '\n')
data = data.replace('\r', '\n')
# Replace tabs with four spaces (C-x C-y will still paste the text exactly)
data = data.replace('\t', ' ')
# Strip prompts off pasted text
document = buffer.document
row, col = document.translate_index_to_position(buffer.cursor_position)
row += 1
if not inside_string(event.current_buffer.text, row, col):
indent = LEADING_WHITESPACE.match(document.current_line_before_cursor)
current_line_indent = indent.group(1) if indent else ''
if PS1_PROMPTS_RE.match(data.strip()) or PS2_PROMPTS_RE.match(data.strip()):
lines = split_prompts(data, current_line_indent)
else:
lines = [textwrap.indent(data, current_line_indent,
# Don't indent the first line, it's already indented
lambda line, _x=[]: bool(_x or _x.append(1)))]
else:
lines = [data]
event.current_buffer.insert_text(lines[0])
for text in lines[1:]:
# TODO: Send last chunk as bracketed paste, so it can be edited
CMD_QUEUE.append(text)
if CMD_QUEUE:
accept_line(event)
@r.add_binding(Keys.Escape, ';')
def comment(event):
buffer = event.current_buffer
document = buffer.document
cursor_line, cursor_col = document.translate_index_to_position(document.cursor_position)
if document.selection:
from_, to = document.selection_range()
start_line, start_col = document.translate_index_to_position(from_)
end_line, end_col = document.translate_index_to_position(to - 1)
end_line += 1
else:
start_line = cursor_line
end_line = start_line + 1
# Get the indentation for the comment delimiters
min_indent = float('inf')
for line in document.lines[start_line:end_line]:
if not line.strip():
continue
indent = LEADING_WHITESPACE.search(line)
if indent:
min_indent = min(min_indent, len(indent.group(1)))
else:
min_indent = 0
if min_indent == 0:
break
if min_indent == float('inf'):
min_indent = 0
uncomment = (all(not line.strip() or line[min_indent] == '#' for line in
document.lines[start_line:end_line])
and ''.join(document.lines[start_line:end_line]).strip())
lines = []
for i, line in enumerate(document.lines):
if start_line <= i < end_line:
if uncomment:
lines.append(line[:min_indent] + line[min_indent+2:])
else:
lines.append(line[:min_indent] + '# ' + line[min_indent:])
else:
lines.append(line)
new_text = '\n'.join(lines)
# TODO: Set the cursor position correctly
n_changed = 2*(cursor_line - start_line + 1)
if cursor_line >= end_line - 1:
n_changed -= 2
if uncomment:
buffer.cursor_position -= n_changed
buffer.text = new_text
else:
buffer.text = new_text
buffer.cursor_position += n_changed
@r.add_binding(Keys.ControlX, Keys.ControlE)
def open_in_editor(event):
event.current_buffer.open_in_editor(event.app)
@r.add_binding(Keys.ControlX, Keys.ControlS)
@r.add_binding(Keys.ControlX, Keys.ControlC)
def noop(event):
pass
```

Quality signals for row 2:

| column | value |
|---|---|
| avg_line_length | 34.344433 |
| max_line_length | 117 |
| alphanum_fraction | 0.676988 |
| qsc_code_num_words_quality_signal | 4,436 |
| qsc_code_num_chars_quality_signal | 33,005 |
| qsc_code_mean_word_length_quality_signal | 4.838819 |
| qsc_code_frac_words_unique_quality_signal | 0.124211 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.071745 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.067086 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.040531 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.475611 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.40191 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.339716 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.308083 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.266853 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0.252923 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.007133 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.218391 |
| qsc_code_size_file_byte_quality_signal | 33,005 |
| qsc_code_num_lines_quality_signal | 960 |
| qsc_code_num_chars_line_max_quality_signal | 118 |
| qsc_code_num_chars_line_mean_quality_signal | 34.380208 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.824902 |
| qsc_code_frac_chars_comments_quality_signal | 0.153371 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.360305 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0.001527 |
| qsc_code_frac_chars_string_length_quality_signal | 0.050879 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.005484 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0.003125 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.08855 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0.001527 |
| qsc_codepython_frac_lines_import_quality_signal | 0.041221 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0.001527 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.160305 |
| qsc_codepython_frac_lines_print_quality_signal | 0.006107 |

The raw (non-`_quality_signal`) `qsc_code_*` and `qsc_codepython_*` counterparts are all 0 for this row, apart from `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
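Since row 2's `keys.py` exposes `get_key_bindings()`, wiring the merged bindings into an interactive prompt takes one call. A sketch, assuming the `mypython` package is importable; `PromptSession(key_bindings=...)` is standard prompt_toolkit API:

```python
# Sketch: use the custom bindings from mypython/keys.py in a prompt session.
from prompt_toolkit import PromptSession
from mypython.keys import get_key_bindings

session = PromptSession(key_bindings=get_key_bindings())
text = session.prompt(">>> ")  # M-f/M-b word motion, M-; comment toggle, etc.
print("you typed:", text)
```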
Row 3: `demand/preday_model_estimation/isg.py` from `gusugusu1018/simmobility-prod`

| field | value |
|---|---|
| hexsha | 13a5765c2edbddbec6f546bc1dadb0d5693914fe |
| size | 10,909 |
| ext | py |
| lang | Python |
| max_stars_repo_path | demand/preday_model_estimation/isg.py |
| max_stars_repo_name | gusugusu1018/simmobility-prod |
| max_stars_repo_head_hexsha | d30a5ba353673f8fd35f4868c26994a0206a40b6 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 50 |
| max_stars_repo_stars_event_min_datetime | 2018-12-21T08:21:38.000Z |
| max_stars_repo_stars_event_max_datetime | 2022-01-24T09:47:59.000Z |
| max_issues_repo_path | demand/preday_model_estimation/isg.py |
| max_issues_repo_name | gusugusu1018/simmobility-prod |
| max_issues_repo_head_hexsha | d30a5ba353673f8fd35f4868c26994a0206a40b6 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | 2 |
| max_issues_repo_issues_event_min_datetime | 2018-12-19T13:42:47.000Z |
| max_issues_repo_issues_event_max_datetime | 2019-05-13T04:11:45.000Z |
| max_forks_repo_path | demand/preday_model_estimation/isg.py |
| max_forks_repo_name | gusugusu1018/simmobility-prod |
| max_forks_repo_head_hexsha | d30a5ba353673f8fd35f4868c26994a0206a40b6 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | 27 |
| max_forks_repo_forks_event_min_datetime | 2018-11-28T07:30:34.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-02-05T02:22:26.000Z |

content:

```python
from biogeme import *
from headers import *
from loglikelihood import *
from statistics import *
from nested import *
#import random
cons_work= Beta('cons for work', 0,-10,10,0)
cons_edu = Beta('cons for education',0,-50,10,0)
cons_shopping = Beta('cons for shopping',0,-10,10,0)
cons_other = Beta('cons for other',0,-10,10,0)
cons_Q = Beta('cons for quit',0,-10,10,1)
first_stop_inbound= Beta('dummy for first stop of inbound half tour', 0,-10,10,1)
second_stop_inbound= Beta('dummy for second stop of inbound half tour',0,-10,10,0)
threeplus_stop_inbound=Beta('dummy for 3+ stop of inbound half tour',0,-10,10,0)
first_stop_outbound= Beta('dummy for first stop of outbound half tour', 0,-10,10,0)
second_stop_outbound= Beta('dummy for second stop of outbound half tour',0,-10,10,0)
threeplus_stop_outbound=Beta('dummy for 3+ stop of outbound half tour',0,-10,10,0)
work_tour_dummy_Q=Beta('work tour dummy in quit',0,-10,10,1)
edu_tour_dummy_Q=Beta('edu tour dummy in quit',0,-10,10,1)
shopping_tour_dummy_Q=Beta('shopping tour dummy in quit',0,-10,10,1)
other_tour_dummy_Q=Beta('other tour dummy in quit',0,-10,10,1)
first_tour_dummy_Q=Beta('first tour dummy in quit',0,-10,10,0)
sub_tour_dummy_Q=Beta('has subtour dummy in quit',0,-10,10,0)
zero_tour_remain_Q=Beta('zero tour remain dummy',0,-10,10,1)
one_tour_remain_Q=Beta('one tour remain dummy',0,-10,10,0)
twoplus_tour_remain_Q=Beta('2+ tour remain dummy',0,-10,10,1)
work_tour_dummy_W=Beta('work tour dummy in work',0,-10,10,1)
edu_tour_dummy_W=Beta('edu tour dummy in work',0,-10,10,1)
shopping_tour_dummy_W=Beta('shopping tour dummy in work',0,-10,10,1)
other_tour_dummy_W=Beta('other tour dummy in work',0,-10,10,1)
female_dummy_W=Beta('female dummy in work',0,-10,10,0)
student_dummy_W=Beta('student dummy in work',0,-10,10,1)
worker_dummy_W=Beta('worker dummy in work',0,-10,10,1)
driver_dummy_W=Beta('driver dummy in work',0,-10,10,0)
passenger_dummy_W=Beta('passenger dummy in work',0,-10,10,0)
public_dummy_W=Beta('PT dummy in work',0,-10,10,0)
work_tour_dummy_E=Beta('work tour dummy in edu',0,-10,10,1)
edu_tour_dummy_E=Beta('edu tour dummy in edu',0,-10,10,1)
shopping_tour_dummy_E=Beta('shopping tour dummy in edu',0,-10,10,1)
other_tour_dummy_E=Beta('other tour dummy in edu',0,-10,10,1)
female_dummy_E=Beta('female dummy in edu',0,-10,10,0)
student_dummy_E=Beta('student dummy in edu',0,-10,10,1)
worker_dummy_E=Beta('worker dummy in edu',0,-10,10,1)
driver_dummy_E=Beta('driver dummy in edu',0,-10,10,0)
passenger_dummy_E=Beta('passenger dummy in edu',0,-10,10,0)
public_dummy_E=Beta('PT dummy in edu',0,-10,10,0)
work_tour_dummy_S=Beta('work tour dummy in shopping',0,-10,10,1)
edu_tour_dummy_S=Beta('edu tour dummy in shopping',0,-10,10,1)
shopping_tour_dummy_S=Beta('shopping tour dummy in shopping',0,-10,10,1)
other_tour_dummy_S=Beta('other tour dummy in shopping',0,-10,10,0)
female_dummy_S=Beta('female dummy in shopping',0,-10,10,0)
student_dummy_S=Beta('student dummy in shopping',0,-10,10,1)
worker_dummy_S=Beta('worker dummy in shopping',0,-10,10,0)
driver_dummy_S=Beta('driver dummy in shopping',0,-10,10,0)
passenger_dummy_S=Beta('passenger dummy in shopping',0,-10,10,0)
public_dummy_S=Beta('PT dummy in shopping',0,-10,10,0)
work_tour_dummy_O=Beta('work tour dummy in other',0,-10,10,0)
edu_tour_dummy_O=Beta('edu tour dummy in other',0,-10,10,0)
shopping_tour_dummy_O=Beta('shopping tour dummy in other',0,-10,10,0)
other_tour_dummy_O=Beta('other tour dummy in other',0,-10,10,1)
female_dummy_O=Beta('female dummy in other',0,-10,10,0)
student_dummy_O=Beta('student dummy in other',0,-10,10,0)
worker_dummy_O=Beta('worker dummy in other',0,-10,10,0)
driver_dummy_O=Beta('driver dummy in other',0,-10,10,0)
passenger_dummy_O=Beta('passenger dummy in other',0,-10,10,0)
public_dummy_O=Beta('PT dummy in other',0,-10,10,0)
work_logsum=Beta('work logsum in work',0,-10,10,1)
edu_logsum=Beta('edu logsum in edu',0,-10,10,1)
shop_logsum=Beta('shop logsum in shop',0,-10,10,1)
other_logsum=Beta('other logsum in other',0,-10,10,1)
time_window_work=Beta('time available in work',0,-10,10,1)
time_window_edu= Beta('time available in edu',0,-10,10,1)
time_window_shopping= Beta('time available in shopping',0,-10,10,1)
time_window_other= Beta('time available in other',0,-10,10,1)
tour_distance_work= Beta('log tour distance in work',0,-10,10,0)
tour_distance_edu= Beta('log tour distance in edu',0,-10,10,0)
tour_distance_shopping= Beta('log tour distance in shopping',0,-10,10,0)
tour_distance_other=Beta('log tour distance in other',0,-10,10,0)
a700_a930_work= Beta('period 7am to 9:30am in work',0,-10,10,0)
a930_a1200_work=Beta('period 9:30am to 12pm in work',0,-10,10,0)
p300_p530_work=Beta('period 3pm to 5:30pm in work',0,-10,10,0)
p530_p730_work=Beta('period 5:30pm to 7:30 pm in work',0,-10,10,0)
p730_p1000_work=Beta('period 7:30pm to 10pm in work',0,-10,10,0)
p1000_a700_work=Beta('period 10pm to 7am in work',0,-10,10,0)
a700_a930_edu= Beta('period 7am to 9:30am in edu',0,-10,10,0)
a930_a1200_edu=Beta('period 9:30am to 12pm in edu',0,-10,10,0)
p300_p530_edu=Beta('period 3pm to 5:30pm in edu',0,-10,10,0)
p530_p730_edu=Beta('period 5:30pm to 7:30 pm in edu',0,-10,10,0)
p730_p1000_edu=Beta('period 7:30pm to 10pm in edu',0,-10,10,0)
p1000_a700_edu=Beta('period 10pm to 7am in edu',0,-10,10,0)
a700_a930_shopping= Beta('period 7am to 9:30am in shopping',0,-10,10,0)
a930_a1200_shopping=Beta('period 9:30am to 12pm in shopping',0,-10,10,0)
p300_p530_shopping=Beta('period 3pm to 5:30pm in shopping',0,-10,10,0)
p530_p730_shopping=Beta('period 5:30pm to 7:30 pm in shopping',0,-10,10,0)
p730_p1000_shopping=Beta('period 7:30pm to 10pm in shopping',0,-10,10,0)
p1000_a700_shopping=Beta('period 10pm to 7am in shopping',0,-10,10,0)
a700_a930_other= Beta('period 7am to 9:30am in other',0,-10,10,0)
a930_a1200_other=Beta('period 9:30am to 12pm in other',0,-10,10,0)
p300_p530_other=Beta('period 3pm to 5:30pm in other',0,-10,10,0)
p530_p730_other=Beta('period 5:30pm to 7:30 pm in other',0,-10,10,0)
p730_p1000_other=Beta('period 7:30pm to 10pm in other',0,-10,10,0)
p1000_a700_other=Beta('period 10pm to 7am in other',0,-10,10,0)
MU1 = Beta('MU for quit',1,0,100,1)
MU2 = Beta('MU for non-quit', 1.0,0,100,1)
#V for work
V_work= cons_work+\
work_tour_dummy_W*1*(tour_type==1)+\
edu_tour_dummy_W*1*(tour_type==2)+\
shopping_tour_dummy_W*1*(tour_type==3)+\
other_tour_dummy_W*1*(tour_type==4)+\
female_dummy_W*female_dummy+\
student_dummy_W*student_dummy+\
worker_dummy_W*worker_dummy+\
driver_dummy_W*driver_dummy+\
passenger_dummy_W*passenger_dummy+\
public_dummy_W*public_dummy+\
work_logsum * worklogsum+\
time_window_work*time_window_h+\
tour_distance_work*log(1+distance)+\
a700_a930_work*p_700a_930a+\
a930_a1200_work*p_930a_1200a+\
p300_p530_work*p_300p_530p+\
p530_p730_work*p_530p_730p+\
p730_p1000_work*p_730p_1000p+\
p1000_a700_work*p_1000p_700a
#V for education
V_edu = cons_edu+\
work_tour_dummy_E*1*(tour_type==1)+\
edu_tour_dummy_E*1*(tour_type==2)+\
shopping_tour_dummy_E*1*(tour_type==3)+\
other_tour_dummy_E*1*(tour_type==4)+\
female_dummy_E*female_dummy+\
student_dummy_E*student_dummy+\
worker_dummy_E*worker_dummy+\
driver_dummy_E*driver_dummy+\
passenger_dummy_E*passenger_dummy+\
public_dummy_E*public_dummy+\
edu_logsum * edulogsum+\
time_window_edu*time_window_h+\
tour_distance_edu*log(1+distance)+\
a700_a930_edu*p_700a_930a+\
a930_a1200_edu*p_930a_1200a+\
p300_p530_edu*p_300p_530p+\
p530_p730_edu*p_530p_730p+\
p730_p1000_edu*p_730p_1000p+\
p1000_a700_edu*p_1000p_700a
#V for shopping
V_shopping = cons_shopping+\
work_tour_dummy_S*1*(tour_type==1)+\
edu_tour_dummy_S*1*(tour_type==2)+\
shopping_tour_dummy_S*1*(tour_type==3)+\
other_tour_dummy_S*1*(tour_type==4)+\
female_dummy_S*female_dummy+\
student_dummy_S*student_dummy+\
worker_dummy_S*worker_dummy+\
driver_dummy_S*driver_dummy+\
passenger_dummy_S*passenger_dummy+\
public_dummy_S*public_dummy+\
shop_logsum * shoplogsum+\
time_window_shopping*time_window_h+\
tour_distance_shopping*log(1+distance)+\
a700_a930_shopping*p_700a_930a+\
a930_a1200_shopping*p_930a_1200a+\
p300_p530_shopping*p_300p_530p+\
p530_p730_shopping*p_530p_730p+\
p730_p1000_shopping*p_730p_1000p+\
p1000_a700_shopping*p_1000p_700a
#V for other
V_other=cons_other+\
work_tour_dummy_O*1*(tour_type==1)+\
edu_tour_dummy_O*1*(tour_type==2)+\
shopping_tour_dummy_O*1*(tour_type==3)+\
other_tour_dummy_O*1*(tour_type==4)+\
female_dummy_O*female_dummy+\
student_dummy_O*student_dummy+\
worker_dummy_O*worker_dummy+\
driver_dummy_O*driver_dummy+\
passenger_dummy_O*passenger_dummy+\
public_dummy_O*public_dummy+\
other_logsum * otherlogsum+\
time_window_other*time_window_h+\
tour_distance_other*log(1+distance)+\
a700_a930_other*p_700a_930a+\
a930_a1200_other*p_930a_1200a+\
p300_p530_other*p_300p_530p+\
p530_p730_other*p_530p_730p+\
p730_p1000_other*p_730p_1000p+\
p1000_a700_other*p_1000p_700a
#V for quit
V_quit= cons_Q+first_stop_inbound*first_stop*first_bound+\
second_stop_inbound*second_stop*first_bound+\
threeplus_stop_inbound*three_plus_stop*first_bound+\
first_stop_outbound*first_stop*second_bound+\
second_stop_outbound*second_stop*second_bound+\
threeplus_stop_outbound*three_plus_stop*second_bound+\
work_tour_dummy_Q*1*(tour_type==1)+\
edu_tour_dummy_Q*1*(tour_type==2)+\
shopping_tour_dummy_Q*1*(tour_type==3)+\
other_tour_dummy_Q*1*(tour_type==4)+\
first_tour_dummy_Q*first_tour_dummy+\
sub_tour_dummy_Q*has_subtour+zero_tour_remain_Q*1*(tour_remain==0)+\
one_tour_remain_Q*1*(tour_remain==1)+twoplus_tour_remain_Q*1*(tour_remain>=2)
V = {0:V_quit,1: V_work,2:V_edu,3:V_shopping,4:V_other}
av= {0:avail_quit,1:avail_workstop,2:avail_edustop,3:avail_shopstop,4:avail_otherstop}
nest_quit = MU1 , [0]
nest_nonquit = MU2 , [1,2,3,4]
nests=nest_quit,nest_nonquit
prob = nested(V,av,nests,stop_type)
#prob = bioLogit(V,av,stop_type)
rowIterator('obsIter')
BIOGEME_OBJECT.ESTIMATE = Sum(log(prob),'obsIter')
exclude = ((avail_violation==1)+(origin_mtz==0)+(destination_mtz==0)+(time_window_h>=10)) > 0
BIOGEME_OBJECT.EXCLUDE = exclude
nullLoglikelihood(av,'obsIter')
choiceSet = [0,1,2,3,4]
cteLoglikelihood(choiceSet,stop_type,'obsIter')
availabilityStatistics(av,'obsIter')
BIOGEME_OBJECT.PARAMETERS['optimizationAlgorithm'] = "CFSQP"
BIOGEME_OBJECT.PARAMETERS['checkDerivatives'] = "1"
BIOGEME_OBJECT.PARAMETERS['numberOfThreads'] = "6"
```

Quality signals for row 3:

| column | value |
|---|---|
| avg_line_length | 26.098086 |
| max_line_length | 94 |
| alphanum_fraction | 0.735356 |
| qsc_code_num_words_quality_signal | 2,047 |
| qsc_code_num_chars_quality_signal | 10,909 |
| qsc_code_mean_word_length_quality_signal | 3.641426 |
| qsc_code_frac_words_unique_quality_signal | 0.06595 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.038235 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.063724 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.049906 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.609471 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.479206 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.294741 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0.090555 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0.031393 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.130453 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.121643 |
| qsc_code_size_file_byte_quality_signal | 10,909 |
| qsc_code_num_lines_quality_signal | 418 |
| qsc_code_num_chars_line_max_quality_signal | 95 |
| qsc_code_num_chars_line_mean_quality_signal | 26.098086 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.647464 |
| qsc_code_frac_chars_comments_quality_signal | 0.009533 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.244269 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0.002023 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0.037559 |
| qsc_codepython_frac_lines_import_quality_signal | 0.023474 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.023474 |
| qsc_codepython_frac_lines_print_quality_signal | 0 |

The raw (non-`_quality_signal`) `qsc_code_*` and `qsc_codepython_*` counterparts are all 0 for this row, apart from `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
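For reference, row 3's `isg.py` specifies a two-nest logit: alternative 0 (quit) sits alone in a degenerate nest whose scale `MU1` is fixed at 1, and alternatives 1 to 4 share the non-quit nest with scale `MU2` (also fixed at 1 here; the last argument of `Beta(...)` marks a parameter as fixed). The textbook nested-logit choice probability that a call like `nested(V, av, nests, choice)` evaluates has the form below, with the root scale normalized to 1; biogeme's exact internal parameterization may differ:

$$
P(i) \;=\;
\frac{\Bigl(\sum_{j \in B_m} e^{\mu_m V_j}\Bigr)^{1/\mu_m}}
     {\sum_{n} \Bigl(\sum_{j \in B_n} e^{\mu_n V_j}\Bigr)^{1/\mu_n}}
\;\times\;
\frac{e^{\mu_m V_i}}{\sum_{j \in B_m} e^{\mu_m V_j}}
$$

where $B_m$ is the nest containing alternative $i$, the first factor is the probability of choosing that nest, and the second is the probability of choosing $i$ within it.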
Row 4: `code/prisonersDilemma.py` from `ben9583/PrisonersDilemmaTournament`

| field | value |
|---|---|
| hexsha | 13a8c046daa5c36fb60a09676a0be10c4e57fb9f |
| size | 4,227 |
| ext | py |
| lang | Python |
| max_stars_repo_path | code/prisonersDilemma.py |
| max_stars_repo_name | ben9583/PrisonersDilemmaTournament |
| max_stars_repo_head_hexsha | 8227c05f835c93a0b30feb4207a7d7c631e670a0 |
| max_stars_repo_licenses | ["MIT"] |
| max_stars_count | 1 |
| max_stars_repo_stars_event_min_datetime | 2021-09-16T03:38:21.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-09-16T03:38:21.000Z |
| max_issues_repo_path | code/prisonersDilemma.py |
| max_issues_repo_name | ben9583/PrisonersDilemmaTournament |
| max_issues_repo_head_hexsha | 8227c05f835c93a0b30feb4207a7d7c631e670a0 |
| max_issues_repo_licenses | ["MIT"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | code/prisonersDilemma.py |
| max_forks_repo_name | ben9583/PrisonersDilemmaTournament |
| max_forks_repo_head_hexsha | 8227c05f835c93a0b30feb4207a7d7c631e670a0 |
| max_forks_repo_licenses | ["MIT"] |
| max_forks_count | null |
| max_forks_repo_forks_event_min_datetime | null |
| max_forks_repo_forks_event_max_datetime | null |

content:

```python
import os
import itertools
import importlib
import numpy as np
import random
STRATEGY_FOLDER = "exampleStrats"
RESULTS_FILE = "results.txt"
pointsArray = [[1,5],[0,3]] # The i-j-th element of this array is how many points you receive if you play move i and your opponent plays move j.
moveLabels = ["D","C"]
# D = defect, betray, sabotage, free-ride, etc.
# C = cooperate, stay silent, comply, upload files, etc.
# Returns a 2-by-n numpy array. The first axis is which player (0 = us, 1 = opponent)
# The second axis is which turn. (0 = first turn, 1 = next turn, etc.)
# For example, it might return
#
# [[0 0 1] a.k.a. D D C
# [1 1 1]] a.k.a. C C C
#
# if there have been 3 turns, and we have defected twice then cooperated once,
# and our opponent has cooperated all three times.
def getVisibleHistory(history, player, turn):
historySoFar = history[:,:turn].copy()
if player == 1:
historySoFar = np.flip(historySoFar,0)
return historySoFar
def runRound(pair):
moduleA = importlib.import_module(STRATEGY_FOLDER+"."+pair[0])
moduleB = importlib.import_module(STRATEGY_FOLDER+"."+pair[1])
memoryA = None
memoryB = None
    LENGTH_OF_GAME = int(200-40*np.log(random.random())) # The games are a minimum of 200 turns long. The np.log here guarantees that every turn after the 200th has an equal (low) chance of being the final turn.
history = np.zeros((2,LENGTH_OF_GAME),dtype=int)
for turn in range(LENGTH_OF_GAME):
playerAmove, memoryA = moduleA.strategy(getVisibleHistory(history,0,turn),memoryA)
playerBmove, memoryB = moduleB.strategy(getVisibleHistory(history,1,turn),memoryB)
history[0,turn] = playerAmove
history[1,turn] = playerBmove
return history
def tallyRoundScores(history):
scoreA = 0
scoreB = 0
ROUND_LENGTH = history.shape[1]
for turn in range(ROUND_LENGTH):
playerAmove = history[0,turn]
playerBmove = history[1,turn]
scoreA += pointsArray[playerAmove][playerBmove]
scoreB += pointsArray[playerBmove][playerAmove]
return scoreA/ROUND_LENGTH, scoreB/ROUND_LENGTH
def outputRoundResults(f, pair, roundHistory, scoresA, scoresB):
f.write(pair[0]+" (P1) VS. "+pair[1]+" (P2)\n")
for p in range(2):
for t in range(roundHistory.shape[1]):
move = roundHistory[p,t]
f.write(moveLabels[move]+" ")
f.write("\n")
f.write("Final score for "+pair[0]+": "+str(scoresA)+"\n")
f.write("Final score for "+pair[1]+": "+str(scoresB)+"\n")
f.write("\n")
def pad(stri, leng):
result = stri
for i in range(len(stri),leng):
result = result+" "
return result
def runFullPairingTournament(inFolder, outFile):
print("Starting tournament, reading files from "+inFolder)
scoreKeeper = {}
STRATEGY_LIST = []
for file in os.listdir(inFolder):
if file.endswith(".py"):
STRATEGY_LIST.append(file[:-3])
for strategy in STRATEGY_LIST:
scoreKeeper[strategy] = 0
f = open(outFile,"w+")
for pair in itertools.combinations(STRATEGY_LIST, r=2):
roundHistory = runRound(pair)
scoresA, scoresB = tallyRoundScores(roundHistory)
outputRoundResults(f, pair, roundHistory, scoresA, scoresB)
scoreKeeper[pair[0]] += scoresA
scoreKeeper[pair[1]] += scoresB
scoresNumpy = np.zeros(len(scoreKeeper))
for i in range(len(STRATEGY_LIST)):
scoresNumpy[i] = scoreKeeper[STRATEGY_LIST[i]]
rankings = np.argsort(scoresNumpy)
f.write("\n\nTOTAL SCORES\n")
for rank in range(len(STRATEGY_LIST)):
i = rankings[-1-rank]
score = scoresNumpy[i]
scorePer = score/(len(STRATEGY_LIST)-1)
f.write("#"+str(rank+1)+": "+pad(STRATEGY_LIST[i]+":",16)+' %.3f'%score+' (%.3f'%scorePer+" average)\n")
f.flush()
f.close()
print("Done with everything! Results file written to "+RESULTS_FILE)
runFullPairingTournament(STRATEGY_FOLDER, RESULTS_FILE)
```

Quality signals for row 4:

| column | value |
|---|---|
| avg_line_length | 37.078947 |
| max_line_length | 210 |
| alphanum_fraction | 0.62503 |
| qsc_code_num_words_quality_signal | 543 |
| qsc_code_num_chars_quality_signal | 4,227 |
| qsc_code_mean_word_length_quality_signal | 4.813996 |
| qsc_code_frac_words_unique_quality_signal | 0.35175 |
| qsc_code_frac_chars_top_2grams_quality_signal | 0.041316 |
| qsc_code_frac_chars_top_3grams_quality_signal | 0.013772 |
| qsc_code_frac_chars_top_4grams_quality_signal | 0.00306 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | 0.109411 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | 0.085692 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | 0.018363 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | 0 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | 0 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | 0 |
| qsc_code_frac_chars_digital_quality_signal | 0.018647 |
| qsc_code_frac_chars_whitespace_quality_signal | 0.251479 |
| qsc_code_size_file_byte_quality_signal | 4,227 |
| qsc_code_num_lines_quality_signal | 113 |
| qsc_code_num_chars_line_max_quality_signal | 211 |
| qsc_code_num_chars_line_mean_quality_signal | 37.40708 |
| qsc_code_frac_chars_alphabet_quality_signal | 0.807522 |
| qsc_code_frac_chars_comments_quality_signal | 0.178377 |
| qsc_code_cate_xml_start_quality_signal | 0 |
| qsc_code_frac_lines_dupe_lines_quality_signal | 0.02439 |
| qsc_code_cate_autogen_quality_signal | 0 |
| qsc_code_frac_lines_long_string_quality_signal | 0 |
| qsc_code_frac_chars_string_length_quality_signal | 0.06844 |
| qsc_code_frac_chars_long_word_length_quality_signal | 0 |
| qsc_code_frac_lines_string_concat_quality_signal | 0 |
| qsc_code_cate_encoded_data_quality_signal | 0 |
| qsc_code_frac_chars_hex_words_quality_signal | 0 |
| qsc_code_frac_lines_prompt_comments_quality_signal | 0 |
| qsc_code_frac_lines_assert_quality_signal | 0 |
| qsc_codepython_cate_ast_quality_signal | 1 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | 0.073171 |
| qsc_codepython_cate_var_zero_quality_signal | false |
| qsc_codepython_frac_lines_pass_quality_signal | 0 |
| qsc_codepython_frac_lines_import_quality_signal | 0.085366 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | 0 |
| qsc_codepython_score_lines_no_logic_quality_signal | 0.207317 |
| qsc_codepython_frac_lines_print_quality_signal | 0.02439 |

The raw (non-`_quality_signal`) `qsc_code_*` and `qsc_codepython_*` counterparts are all 0 for this row, apart from `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`, which are null; `effective` is 1 and `hits` is 0.
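The comments in row 4's tournament runner pin down the strategy contract: each module in `exampleStrats/` exposes `strategy(history, memory)` and returns a `(move, memory)` pair, where `history` is the 2-by-n array described at the top of the file (row 0 = us, row 1 = opponent; 0 = defect, 1 = cooperate). A hypothetical conforming module; the file name `titForTat.py` is an assumption, not from the dump:

```python
# exampleStrats/titForTat.py (hypothetical): cooperate first, then mirror
# the opponent's previous move, per the runner's strategy(history, memory)
# contract shown above.
def strategy(history, memory):
    if history.shape[1] == 0:
        return 1, None           # first turn: cooperate
    return history[1, -1], None  # afterwards: copy opponent's last move
```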
Row 5: `docs/buildscripts/docs.py` from `cwlalyy/mongo-c-driver`

| field | value |
|---|---|
| hexsha | 13aef1dee9a4b31316309780ab660b17ad23d9b2 |
| size | 1,285 |
| ext | py |
| lang | Python |
| max_stars_repo_path | docs/buildscripts/docs.py |
| max_stars_repo_name | cwlalyy/mongo-c-driver |
| max_stars_repo_head_hexsha | d771be13bc8f7d8b84d233de6fdc725d9bb337cc |
| max_stars_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 13 |
| max_stars_repo_stars_event_min_datetime | 2016-07-14T16:36:59.000Z |
| max_stars_repo_stars_event_max_datetime | 2018-06-01T18:06:14.000Z |
| max_issues_repo_path | docs/buildscripts/docs.py |
| max_issues_repo_name | cwlalyy/mongo-c-driver |
| max_issues_repo_head_hexsha | d771be13bc8f7d8b84d233de6fdc725d9bb337cc |
| max_issues_repo_licenses | ["Apache-2.0"] |
| max_issues_count | null |
| max_issues_repo_issues_event_min_datetime | null |
| max_issues_repo_issues_event_max_datetime | null |
| max_forks_repo_path | docs/buildscripts/docs.py |
| max_forks_repo_name | cwlalyy/mongo-c-driver |
| max_forks_repo_head_hexsha | d771be13bc8f7d8b84d233de6fdc725d9bb337cc |
| max_forks_repo_licenses | ["Apache-2.0"] |
| max_forks_count | 9 |
| max_forks_repo_forks_event_min_datetime | 2015-01-26T09:30:41.000Z |
| max_forks_repo_forks_event_max_datetime | 2016-03-15T14:48:18.000Z |

content:

```python
"""Build the C client docs.
"""
from __future__ import with_statement
import os
import shutil
import socket
import subprocess
import time
import urllib2
def clean_dir(dir):
try:
shutil.rmtree(dir)
except:
pass
os.makedirs(dir)
def gen_api(dir):
clean_dir(dir)
clean_dir("docs/source/doxygen")
with open(os.devnull, 'w') as null:
subprocess.call(["doxygen", "doxygenConfig"], stdout=null, stderr=null)
os.rename("docs/source/doxygen/html", dir)
def gen_sphinx(dir):
clean_dir(dir)
os.chdir("docs/source/sphinx")
with open(os.devnull, 'w') as null:
subprocess.call(["make", "html"], stdout=null, stderr=null)
os.chdir("../../../")
if os.path.isdir("docs/source/sphinx/build/html"):
os.rename("docs/source/sphinx/build/html", dir)
def version():
"""Get the driver version from doxygenConfig.
"""
with open("doxygenConfig") as f:
for line in f.readlines():
if line.startswith("PROJECT_NUMBER"):
return line.split("=")[1].strip()
def main():
print("Generating Sphinx docs in docs/html")
gen_sphinx("docs/html")
print("Generating Doxygen docs in docs/html/api")
gen_api("docs/html/api")
if __name__ == "__main__":
main()
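# Hedged sketch (not part of the original file): what the version() helper
# above does with a PROJECT_NUMBER line from doxygenConfig; the sample value
# "1.3.5" is invented.
line = "PROJECT_NUMBER         = 1.3.5\n"
if line.startswith("PROJECT_NUMBER"):
    print(line.split("=")[1].strip())  # -> 1.3.5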
| 22.155172
| 79
| 0.636576
| 173
| 1,285
| 4.601156
| 0.369942
| 0.062814
| 0.041457
| 0.035176
| 0.213568
| 0.095477
| 0.095477
| 0.095477
| 0.095477
| 0
| 0
| 0.001974
| 0.211673
| 1,285
| 57
| 80
| 22.54386
| 0.78381
| 0.056031
| 0
| 0.102564
| 0
| 0
| 0.2425
| 0.068333
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0.025641
| 0.179487
| 0
| 0.333333
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13afb94123467877efe58458ed4c502384fb753b
| 1,921
|
py
|
Python
|
tilegame/render/rs.py
|
defgsus/thegame
|
38a627d9108f1418b94b08831fd640dd87fbba83
|
[
"MIT"
] | 1
|
2021-11-05T11:49:26.000Z
|
2021-11-05T11:49:26.000Z
|
tilegame/render/rs.py
|
defgsus/thegame
|
38a627d9108f1418b94b08831fd640dd87fbba83
|
[
"MIT"
] | null | null | null |
tilegame/render/rs.py
|
defgsus/thegame
|
38a627d9108f1418b94b08831fd640dd87fbba83
|
[
"MIT"
] | null | null | null |
import glm
import math
from lib.opengl import RenderSettings
class GameProjection:
def __init__(self, rs: "GameRenderSettings"):
self.rs = rs
self.scale = 10.
self.rotation_deg = 0.
self.location = glm.vec3(0)
self._stack = []
def projection_matrix_4(self) -> glm.mat4:
scale = 1.
ratio = self.rs.render_width / self.rs.render_height
m = glm.ortho(-scale * ratio, scale * ratio, -scale, scale, -10, 10)
return m
def transformation_matrix_4(self) -> glm.mat4:
m = glm.rotate(
glm.mat4(1), -self.rotation_deg / 180 * glm.pi(), glm.vec3(0, 0, 1)
)
m = m * glm.scale(glm.mat4(), glm.vec3(2. / self.scale))
m = m * glm.translate(glm.mat4(), glm.vec3(-self.location.x, -self.location.y, 0))
return m
def transformation_matrix(self) -> glm.mat3:
m = rotation_matrix_2d(self.rotation_deg)
m *= self.scale * .5
m[2][0] = self.location.x
m[2][1] = self.location.y
return m
def push(self):
self._stack.append({
"scale": self.scale,
"rotation": self.rotation_deg,
"location": self.location.__copy__(),
})
def pop(self):
s = self._stack.pop(-1)
self.scale = s["scale"]
self.rotation_deg = s["rotation"]
self.location = s["location"]
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.pop()
def rotation_matrix_2d(degree: float) -> glm.mat3:
a = degree / 180. * math.pi
sa = math.sin(a)
ca = math.cos(a)
return glm.mat3(
ca, sa, 0,
-sa, ca, 0,
0, 0, 1
)
class GameRenderSettings(RenderSettings):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.projection = GameProjection(self)
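# Hedged usage sketch (not part of the original file) for the push/pop context
# manager in GameProjection. Passing rs=None is an illustrative shortcut: rs is
# only dereferenced in projection_matrix_4, which this sketch never calls.
proj = GameProjection(rs=None)
with proj:  # __enter__ pushes the current scale/rotation/location
    proj.scale = 20.
    proj.rotation_deg = 45.
    m = proj.transformation_matrix()  # mat3 for the temporary view
# __exit__ pops the saved state, restoring the previous view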
| 26.315068
| 90
| 0.563248
| 252
| 1,921
| 4.111111
| 0.261905
| 0.081081
| 0.072394
| 0.027027
| 0.086873
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035503
| 0.2962
| 1,921
| 72
| 91
| 26.680556
| 0.730769
| 0
| 0
| 0.052632
| 0
| 0
| 0.031234
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175439
| false
| 0
| 0.052632
| 0
| 0.350877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13afbf984a23501bfd2bc3f3a3b28ed6375a3779
| 4,865
|
py
|
Python
|
tools/stats/export_slow_tests.py
|
stungkit/pytorch
|
0f05e398705bf15406bce79f7ee57d3935ad2abd
|
[
"Intel"
] | 2
|
2020-03-13T06:57:49.000Z
|
2020-05-17T04:18:14.000Z
|
tools/stats/export_slow_tests.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | 1
|
2022-01-10T18:39:28.000Z
|
2022-01-10T19:15:57.000Z
|
tools/stats/export_slow_tests.py
|
ellhe-blaster/pytorch
|
e5282c3cb8bf6ad8c5161f9d0cc271edb9abed25
|
[
"Intel"
] | 1
|
2022-03-26T14:42:50.000Z
|
2022-03-26T14:42:50.000Z
|
#!/usr/bin/env python3
import argparse
import json
import os
import statistics
from collections import defaultdict
from tools.stats.s3_stat_parser import (
get_previous_reports_for_branch,
Report,
Version2Report,
)
from typing import cast, DefaultDict, Dict, List, Any
from urllib.request import urlopen
SLOW_TESTS_FILE = ".pytorch-slow-tests.json"
SLOW_TEST_CASE_THRESHOLD_SEC = 60.0
RELATIVE_DIFFERENCE_THRESHOLD = 0.1
IGNORED_JOBS = ["asan", "periodic"]
def get_test_case_times() -> Dict[str, float]:
reports: List[Report] = get_previous_reports_for_branch("origin/viable/strict", "")
# an entry will be like ("test_doc_examples (__main__.TestTypeHints)" -> [values]))
test_names_to_times: DefaultDict[str, List[float]] = defaultdict(list)
for report in reports:
if report.get("format_version", 1) != 2: # type: ignore[misc]
raise RuntimeError("S3 format currently handled is version 2 only")
v2report = cast(Version2Report, report)
if any(job_name in str(report["build_job"]) for job_name in IGNORED_JOBS):
continue
for test_file in v2report["files"].values():
for suitename, test_suite in test_file["suites"].items():
for casename, test_case in test_suite["cases"].items():
# The below attaches a __main__ as that matches the format of test.__class__ in
# common_utils.py (where this data will be used), and also matches what the output
# of a running test would look like.
name = f"{casename} (__main__.{suitename})"
succeeded: bool = test_case["status"] is None
if succeeded:
test_names_to_times[name].append(test_case["seconds"])
return {
test_case: statistics.mean(times)
for test_case, times in test_names_to_times.items()
}
def filter_slow_tests(test_cases_dict: Dict[str, float]) -> Dict[str, float]:
return {
test_case: time
for test_case, time in test_cases_dict.items()
if time >= SLOW_TEST_CASE_THRESHOLD_SEC
}
def get_test_infra_slow_tests() -> Dict[str, float]:
url = "https://raw.githubusercontent.com/pytorch/test-infra/generated-stats/stats/slow-tests.json"
contents = urlopen(url, timeout=1).read().decode("utf-8")
return cast(Dict[str, float], json.loads(contents))
def too_similar(
calculated_times: Dict[str, float], other_times: Dict[str, float], threshold: float
) -> bool:
# check that their keys are the same
if calculated_times.keys() != other_times.keys():
return False
for test_case, test_time in calculated_times.items():
other_test_time = other_times[test_case]
relative_difference = abs(
(other_test_time - test_time) / max(other_test_time, test_time)
)
if relative_difference > threshold:
return False
return True
def export_slow_tests(options: Any) -> None:
filename = options.filename
if os.path.exists(filename):
print(f"Overwriting existent file: {filename}")
with open(filename, "w+") as file:
slow_test_times: Dict[str, float] = filter_slow_tests(get_test_case_times())
if options.ignore_small_diffs:
test_infra_slow_tests_dict = get_test_infra_slow_tests()
if too_similar(
slow_test_times, test_infra_slow_tests_dict, options.ignore_small_diffs
):
slow_test_times = test_infra_slow_tests_dict
json.dump(
slow_test_times, file, indent=" ", separators=(",", ": "), sort_keys=True
)
file.write("\n")
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description="Export a JSON of slow test cases in PyTorch unit test suite"
)
parser.add_argument(
"-f",
"--filename",
nargs="?",
type=str,
default=SLOW_TESTS_FILE,
const=SLOW_TESTS_FILE,
help="Specify a file path to dump slow test times from previous S3 stats. Default file path: .pytorch-slow-tests.json",
)
parser.add_argument(
"--ignore-small-diffs",
nargs="?",
type=float,
const=RELATIVE_DIFFERENCE_THRESHOLD,
help="Compares generated results with stats/slow-tests.json in pytorch/test-infra. If the relative differences "
"between test times for each test are smaller than the threshold and the set of test cases have not "
"changed, we will export the stats already in stats/slow-tests.json. Else, we will export the calculated "
"results. The default threshold is 10%.",
)
return parser.parse_args()
def main() -> None:
options = parse_args()
export_slow_tests(options)
if __name__ == "__main__":
main()
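# Hedged numeric sketch (not part of the original file) of the too_similar
# check above: with identical key sets, a 5% relative gap passes the default
# 10% threshold and a 20% gap does not. The test name and times are invented.
calc = {"test_a (__main__.TestX)": 100.0}
print(too_similar(calc, {"test_a (__main__.TestX)": 105.0}, 0.1))  # True
print(too_similar(calc, {"test_a (__main__.TestX)": 125.0}, 0.1))  # False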
| 36.856061
| 127
| 0.658787
| 634
| 4,865
| 4.805994
| 0.318612
| 0.050213
| 0.031506
| 0.029537
| 0.09255
| 0.022973
| 0.022973
| 0.022973
| 0
| 0
| 0
| 0.005426
| 0.242343
| 4,865
| 131
| 128
| 37.137405
| 0.821215
| 0.072148
| 0
| 0.076923
| 0
| 0.038462
| 0.196805
| 0.025072
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067308
| false
| 0
| 0.076923
| 0.009615
| 0.211538
| 0.009615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b0a600d04c4f624b452b2451a329662c1e6704
| 14,128
|
py
|
Python
|
ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py
|
michaeltashman/Horizon
|
ee310b34adeb807bbae379a6e1703d0f725f26a9
|
[
"BSD-3-Clause"
] | 1
|
2020-07-30T06:15:20.000Z
|
2020-07-30T06:15:20.000Z
|
ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py
|
michaeltashman/Horizon
|
ee310b34adeb807bbae379a6e1703d0f725f26a9
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/evaluation/weighted_sequential_doubly_robust_estimator.py
|
michaeltashman/Horizon
|
ee310b34adeb807bbae379a6e1703d0f725f26a9
|
[
"BSD-3-Clause"
] | 1
|
2019-06-05T15:52:18.000Z
|
2019-06-05T15:52:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import itertools
import logging
import numpy as np
import scipy as sp
import torch
from ml.rl.evaluation.cpe import CpeEstimate
from ml.rl.evaluation.evaluation_data_page import EvaluationDataPage
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class WeightedSequentialDoublyRobustEstimator:
NUM_SUBSETS_FOR_CB_ESTIMATES = 25
CONFIDENCE_INTERVAL = 0.9
NUM_BOOTSTRAP_SAMPLES = 50
BOOTSTRAP_SAMPLE_PCT = 0.5
def __init__(self, gamma):
self.gamma = gamma
def estimate(
self,
edp: EvaluationDataPage,
num_j_steps,
whether_self_normalize_importance_weights,
) -> CpeEstimate:
# For details, visit https://arxiv.org/pdf/1604.00923.pdf Section 5, 7, 8
(
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
) = WeightedSequentialDoublyRobustEstimator.transform_to_equal_length_trajectories(
edp.mdp_id,
edp.action_mask.cpu().numpy(),
edp.logged_rewards.cpu().numpy().flatten(),
edp.logged_propensities.cpu().numpy().flatten(),
edp.model_propensities.cpu().numpy(),
edp.model_values.cpu().numpy(),
)
num_trajectories = actions.shape[0]
trajectory_length = actions.shape[1]
j_steps = [float("inf")]
if num_j_steps > 1:
j_steps.append(-1)
if num_j_steps > 2:
interval = trajectory_length // (num_j_steps - 1)
j_steps.extend([i * interval for i in range(1, num_j_steps - 1)])
target_propensity_for_logged_action = np.sum(
np.multiply(target_propensities, actions), axis=2
)
estimated_q_values_for_logged_action = np.sum(
np.multiply(estimated_q_values, actions), axis=2
)
estimated_state_values = np.sum(
np.multiply(target_propensities, estimated_q_values), axis=2
)
importance_weights = target_propensity_for_logged_action / logged_propensities
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([num_trajectories, 1]) * 1.0 / num_trajectories
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
discounts = np.logspace(
start=0, stop=trajectory_length - 1, num=trajectory_length, base=self.gamma
)
j_step_return_trajectories = []
for j_step in j_steps:
j_step_return_trajectories.append(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values_for_logged_action,
j_step,
)
)
j_step_return_trajectories = np.array(j_step_return_trajectories)
j_step_returns = np.sum(j_step_return_trajectories, axis=1)
if len(j_step_returns) == 1:
weighted_doubly_robust = j_step_returns[0]
weighted_doubly_robust_std_error = 0.0
else:
# break trajectories into several subsets to estimate confidence bounds
infinite_step_returns = []
num_subsets = int(
min(
num_trajectories / 2,
WeightedSequentialDoublyRobustEstimator.NUM_SUBSETS_FOR_CB_ESTIMATES,
)
)
interval = num_trajectories / num_subsets
for i in range(num_subsets):
trajectory_subset = np.arange(
int(i * interval), int((i + 1) * interval)
)
importance_weights = (
target_propensity_for_logged_action[trajectory_subset]
/ logged_propensities[trajectory_subset]
)
importance_weights = np.cumprod(importance_weights, axis=1)
importance_weights = WeightedSequentialDoublyRobustEstimator.normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
)
importance_weights_one_earlier = (
np.ones([len(trajectory_subset), 1]) * 1.0 / len(trajectory_subset)
)
importance_weights_one_earlier = np.hstack(
[importance_weights_one_earlier, importance_weights[:, :-1]]
)
infinite_step_return = np.sum(
WeightedSequentialDoublyRobustEstimator.calculate_step_return(
rewards[trajectory_subset],
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values[trajectory_subset],
estimated_q_values_for_logged_action[trajectory_subset],
float("inf"),
)
)
infinite_step_returns.append(infinite_step_return)
# Compute weighted_doubly_robust mean point estimate using all data
weighted_doubly_robust = self.compute_weighted_doubly_robust_point_estimate(
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
)
# Use bootstrapping to compute weighted_doubly_robust standard error
bootstrapped_means = []
sample_size = int(
WeightedSequentialDoublyRobustEstimator.BOOTSTRAP_SAMPLE_PCT
* num_subsets
)
for _ in range(
WeightedSequentialDoublyRobustEstimator.NUM_BOOTSTRAP_SAMPLES
):
random_idxs = np.random.choice(num_j_steps, sample_size, replace=False)
random_idxs.sort()
wdr_estimate = self.compute_weighted_doubly_robust_point_estimate(
j_steps=[j_steps[i] for i in random_idxs],
num_j_steps=sample_size,
j_step_returns=j_step_returns[random_idxs],
infinite_step_returns=infinite_step_returns,
j_step_return_trajectories=j_step_return_trajectories[random_idxs],
)
bootstrapped_means.append(wdr_estimate)
weighted_doubly_robust_std_error = np.std(bootstrapped_means)
episode_values = np.sum(np.multiply(rewards, discounts), axis=1)
denominator = np.nanmean(episode_values)
if abs(denominator) < 1e-6:
return CpeEstimate(
raw=0.0, normalized=0.0, raw_std_error=0.0, normalized_std_error=0.0
)
return CpeEstimate(
raw=weighted_doubly_robust,
normalized=weighted_doubly_robust / denominator,
raw_std_error=weighted_doubly_robust_std_error,
normalized_std_error=weighted_doubly_robust_std_error / denominator,
)
def compute_weighted_doubly_robust_point_estimate(
self,
j_steps,
num_j_steps,
j_step_returns,
infinite_step_returns,
j_step_return_trajectories,
):
low_bound, high_bound = WeightedSequentialDoublyRobustEstimator.confidence_bounds(
infinite_step_returns,
WeightedSequentialDoublyRobustEstimator.CONFIDENCE_INTERVAL,
)
# decompose error into bias + variance
j_step_bias = np.zeros([num_j_steps])
where_lower = np.where(j_step_returns < low_bound)[0]
j_step_bias[where_lower] = low_bound - j_step_returns[where_lower]
where_higher = np.where(j_step_returns > high_bound)[0]
j_step_bias[where_higher] = j_step_returns[where_higher] - high_bound
covariance = np.cov(j_step_return_trajectories)
error = covariance + j_step_bias.T * j_step_bias
# minimize mse error
constraint = {"type": "eq", "fun": lambda x: np.sum(x) - 1.0}
x = np.zeros([len(j_steps)])
res = sp.optimize.minimize(
mse_loss,
x,
args=error,
constraints=constraint,
bounds=[(0, 1) for _ in range(x.shape[0])],
)
x = np.array(res.x)
return float(np.dot(x, j_step_returns))
@staticmethod
def transform_to_equal_length_trajectories(
mdp_ids,
actions,
rewards,
logged_propensities,
target_propensities,
estimated_q_values,
):
"""
Take in samples (action, rewards, propensities, etc.) and output lists
of equal-length trajectories (episodes) according to terminals.
As the raw trajectories are of various lengths, the shorter ones are
filled with zeros(ones) at the end.
"""
num_actions = len(target_propensities[0])
terminals = np.zeros(mdp_ids.shape[0])
for x in range(0, mdp_ids.shape[0]):
if x + 1 == mdp_ids.shape[0] or mdp_ids[x, 0] != mdp_ids[x + 1, 0]:
terminals[x] = 1
trajectories = []
episode_start = 0
episode_ends = np.nonzero(terminals)[0]
if len(terminals) - 1 not in episode_ends:
episode_ends = np.append(episode_ends, len(terminals) - 1)
for episode_end in episode_ends:
trajectories.append(np.arange(episode_start, episode_end + 1))
episode_start = episode_end + 1
action_trajectories = []
reward_trajectories = []
logged_propensity_trajectories = []
target_propensity_trajectories = []
Q_value_trajectories = []
for trajectory in trajectories:
action_trajectories.append(actions[trajectory])
reward_trajectories.append(rewards[trajectory])
logged_propensity_trajectories.append(logged_propensities[trajectory])
target_propensity_trajectories.append(target_propensities[trajectory])
Q_value_trajectories.append(estimated_q_values[trajectory])
def to_equal_length(x, fill_value):
x_equal_length = np.array(
list(itertools.zip_longest(*x, fillvalue=fill_value))
).swapaxes(0, 1)
return x_equal_length
action_trajectories = to_equal_length(
action_trajectories, np.zeros([num_actions])
)
reward_trajectories = to_equal_length(reward_trajectories, 0)
logged_propensity_trajectories = to_equal_length(
logged_propensity_trajectories, 1
)
target_propensity_trajectories = to_equal_length(
target_propensity_trajectories, np.zeros([num_actions])
)
Q_value_trajectories = to_equal_length(
Q_value_trajectories, np.zeros([num_actions])
)
return (
action_trajectories,
reward_trajectories,
logged_propensity_trajectories,
target_propensity_trajectories,
Q_value_trajectories,
)
@staticmethod
def normalize_importance_weights(
importance_weights, whether_self_normalize_importance_weights
):
if whether_self_normalize_importance_weights:
sum_importance_weights = np.sum(importance_weights, axis=0)
where_zeros = np.where(sum_importance_weights == 0.0)[0]
sum_importance_weights[where_zeros] = len(importance_weights)
importance_weights[:, where_zeros] = 1.0
importance_weights /= sum_importance_weights
return importance_weights
else:
importance_weights /= importance_weights.shape[0]
return importance_weights
@staticmethod
def calculate_step_return(
rewards,
discounts,
importance_weights,
importance_weights_one_earlier,
estimated_state_values,
estimated_q_values,
j_step,
):
trajectory_length = len(rewards[0])
num_trajectories = len(rewards)
j_step = int(min(j_step, trajectory_length - 1))
weighted_discounts = np.multiply(discounts, importance_weights)
weighted_discounts_one_earlier = np.multiply(
discounts, importance_weights_one_earlier
)
importance_sampled_cumulative_reward = np.sum(
np.multiply(weighted_discounts[:, : j_step + 1], rewards[:, : j_step + 1]),
axis=1,
)
if j_step < trajectory_length - 1:
direct_method_value = (
weighted_discounts_one_earlier[:, j_step + 1]
* estimated_state_values[:, j_step + 1]
)
else:
direct_method_value = np.zeros([num_trajectories])
control_variate = np.sum(
np.multiply(
weighted_discounts[:, : j_step + 1], estimated_q_values[:, : j_step + 1]
)
- np.multiply(
weighted_discounts_one_earlier[:, : j_step + 1],
estimated_state_values[:, : j_step + 1],
),
axis=1,
)
j_step_return = (
importance_sampled_cumulative_reward + direct_method_value - control_variate
)
return j_step_return
@staticmethod
def confidence_bounds(x, confidence):
n = len(x)
m, se = np.mean(x), sp.stats.sem(x)
h = se * sp.stats.t.ppf((1 + confidence) / 2.0, n - 1)  # public ppf; same value as the private _ppf
return m - h, m + h
def mse_loss(x, error):
return np.dot(np.dot(x, error), x.T)
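# Hedged numeric sketch (not part of the original file): a 90% t-interval from
# confidence_bounds around the mean of a few invented per-subset returns.
import scipy.stats  # makes sp.stats resolvable under the module's "import scipy as sp"
subset_returns = [1.0, 1.2, 0.9, 1.1, 1.05]
low, high = WeightedSequentialDoublyRobustEstimator.confidence_bounds(subset_returns, 0.9)
print(low, high)  # bounds symmetric around the sample mean 1.05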
| 37.674667
| 106
| 0.613675
| 1,477
| 14,128
| 5.49763
| 0.163846
| 0.098399
| 0.03202
| 0.041872
| 0.41835
| 0.29803
| 0.250739
| 0.22266
| 0.22266
| 0.178941
| 0
| 0.011646
| 0.313208
| 14,128
| 374
| 107
| 37.775401
| 0.825209
| 0.046928
| 0
| 0.232484
| 0
| 0
| 0.001119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028662
| false
| 0
| 0.136943
| 0.003185
| 0.213376
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b20f00d86e94eb5e2fb0079121846394df7d95
| 5,255
|
py
|
Python
|
src/wspc/feature_selection.py
|
shakedna1/wspc_rep
|
f4492af8cec25a3f7b00687c08d30754a1c0c91f
|
[
"MIT"
] | null | null | null |
src/wspc/feature_selection.py
|
shakedna1/wspc_rep
|
f4492af8cec25a3f7b00687c08d30754a1c0c91f
|
[
"MIT"
] | null | null | null |
src/wspc/feature_selection.py
|
shakedna1/wspc_rep
|
f4492af8cec25a3f7b00687c08d30754a1c0c91f
|
[
"MIT"
] | null | null | null |
import numpy as np
import sklearn
import pandas as pd
import scipy.spatial.distance as ssd
from scipy.cluster import hierarchy
from scipy.stats import chi2_contingency
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_selection import SelectKBest, SelectorMixin
from sklearn.pipeline import Pipeline
class SelectHierarchicalClustering(SelectorMixin, BaseEstimator):
"""
A transformer that clusters the features in X according to dist_matrix, and selects a feature from each cluster with
the highest chi2 score of X[feature] versus y
"""
def __init__(self, dist_matrix=None, threshold=1):
self.dist_matrix = dist_matrix
self.threshold = threshold
def _phi_coef(self, x, y):
"""
Calculates phi coefficient between features
Parameters
----------
x - feature x column
y - feature y column
Returns
----------
phi coefficient value
"""
confusion_matrix = pd.crosstab(x, y)
chi2 = chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
corr = np.sqrt(chi2 / n)
return corr
def _calc_dist_matrix(self, X):
"""
Calculate distance matrix between each two features in X, each value is 1-phi_correlation
"""
X_df = pd.DataFrame.sparse.from_spmatrix(X)
X_corr_mat = X_df.corr(method=self._phi_coef)
feature_corr_dist_matrix = 1 - X_corr_mat
feature_corr_dist_matrix_condensed = ssd.squareform(feature_corr_dist_matrix)
self.dist_matrix = feature_corr_dist_matrix_condensed
def _corr_linkage(self, method='average'):
linkage = hierarchy.linkage(self.dist_matrix, method=method)
return linkage
def _hierarchical_clustering(self, linkage):
"""
Perform hierarchical clustering
Parameters
----------
linkage - linkage dendrogram created by hierarchy.linkage(self.distance_matrix, method=method)
Returns
----------
a list of lists, each list represents a cluster and contains the indexes of features belonging
to the cluster
"""
# array of len(X) - array[i] is the cluster number to which sample i belongs
cluster_ids = hierarchy.fcluster(linkage, self.threshold, criterion='distance')
cluster_id_to_feature_idx = {}
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_idx.setdefault(cluster_id, []).append(idx)
return list(cluster_id_to_feature_idx.values())
def fit(self, X, y):
"""
Clusters the features (X columns) using self.dist_matrix and self.threshold, and selects a feature from each
cluster with the highest chi2 score versus y.
The attribute self.n_features_ represents the number of features selected (=number of clusters)
The attribute self.selected_features_ is a list of indexes that correspond to the selected features
"""
if self.dist_matrix is None:  # truth-testing a numpy array is ambiguous; check for None explicitly
self._calc_dist_matrix(X)
linkage = self._corr_linkage()
clusters = self._hierarchical_clustering(linkage)
chi2_vals, __ = sklearn.feature_selection.chi2(X, y)
chi2_vals = pd.Series(chi2_vals)
# fitted attributes
self.n_features_ = X.shape[1]
self.selected_features_ = [chi2_vals[cluster].idxmax() for cluster in clusters]
self.clusters_ = clusters
print(f'threshold={self.threshold:.2f}, selected_features={len(self.selected_features_)}')
return self
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
----------
mask - boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
# Checks if the estimator is fitted by verifying the presence of fitted attributes (ending with a trailing
# underscore) and otherwise raises a NotFittedError with the given message.
sklearn.utils.validation.check_is_fitted(self)
mask = np.zeros((self.n_features_, ), dtype=bool)
mask[self.selected_features_] = 1
return mask
def get_fs_pipeline(k, threshold, random_state=0):
"""
Creates feature selection pipeline
Parameters
----------
k - the k parameter for the SelectKBest features function
threshold - clustering threshold for the hierarchical clustering
random_state - random state for the RandomForestClassifier. Default value: 0
Returns
----------
pipeline - feature selection pipeline
"""
pipeline = Pipeline(steps=[('vectorize', CountVectorizer(lowercase=False, binary=True)),
('k_best', SelectKBest(score_func=sklearn.feature_selection.chi2, k=k)),
('cluster', SelectHierarchicalClustering(threshold=threshold)),
('rf', RandomForestClassifier(random_state=random_state))])
return pipeline
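# Hedged construction sketch (not part of the original file): the step names
# come from the Pipeline defined above, while the parameter values are invented.
pipeline = get_fs_pipeline(k=100, threshold=0.8)
print([name for name, _ in pipeline.steps])  # ['vectorize', 'k_best', 'cluster', 'rf']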
| 33.259494
| 120
| 0.664891
| 621
| 5,255
| 5.444444
| 0.297907
| 0.041408
| 0.024845
| 0.024845
| 0.069506
| 0.033126
| 0.033126
| 0.033126
| 0.033126
| 0.033126
| 0
| 0.00536
| 0.254424
| 5,255
| 157
| 121
| 33.471338
| 0.85758
| 0.350714
| 0
| 0
| 0
| 0
| 0.039016
| 0.025902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135593
| false
| 0
| 0.186441
| 0
| 0.440678
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b2ef5da8cb4bdd6ae2ffffe9632e5405ed5cb0
| 5,985
|
py
|
Python
|
Python3/PS_scraping_selenium.py
|
fsj-digital/pages
|
8360f27e67974ed2b4f39eb64377f39c0189a224
|
[
"MIT"
] | 5
|
2019-10-28T19:09:16.000Z
|
2021-08-19T07:44:54.000Z
|
Python3/PS_scraping_selenium.py
|
fsj-digital/pages
|
8360f27e67974ed2b4f39eb64377f39c0189a224
|
[
"MIT"
] | null | null | null |
Python3/PS_scraping_selenium.py
|
fsj-digital/pages
|
8360f27e67974ed2b4f39eb64377f39c0189a224
|
[
"MIT"
] | 6
|
2020-04-28T22:33:06.000Z
|
2021-06-22T15:53:52.000Z
|
from bs4 import BeautifulSoup
import requests
import re
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.touch_actions import TouchActions
from selenium.common.exceptions import TimeoutException
URL = 'https://shopping.thinkwithgoogle.com'
EXAMPLES = ["Demonstrate unexpected use-case",
"Demonstrate google search",
"Demonstrate search on thinkwithgoogle",
"Demonstrate search on WebDriverWait",
"Demonstrate search on thinkwithgoogle search result",
"Download and extract additional data",
"Demonstrate maximizing screen",
"Demonstrate mouse actions for Chrome",
"Demonstrate navigation"]
def run(input, URL):
if(input == 0):
content = requests.get(URL)
soup = BeautifulSoup(content.text,'html.parser')
print(soup.prettify()) # Print the parsed page with HTML formatting
elif(input == 1):
driver = webdriver.Safari()
driver.get("https://www.google.com")
search = driver.find_element_by_name("q")
search.send_keys("Selenium") # Google Search "Selenium"
search.submit()
elif(input == 2):
browser = webdriver.Safari()
browser.get(URL)
time.sleep(5)
search = browser.find_elements_by_id('subjectInput')[1]
search.send_keys('Google Pixel 3') # Google Search "Google Pixel 3"
time.sleep(5)
search.send_keys(Keys.RETURN)
elif(input == 3):
browser = webdriver.Safari()
browser.maximize_window() # Required for the input tag visibility
browser.get('https://trends.google.com/trends/')
try: # proceed if element is found within 3 seconds otherwise raise TimeoutException
element = WebDriverWait(browser, 3).until(EC.presence_of_element_located((By.ID, 'input-254')))
except TimeoutException:
print("Loading took too much time!")
search = browser.find_elements(By.ID,'input-254')[0]
search.send_keys('Google Pixel 3')
elif(input == 4):
browser = webdriver.Safari()
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
elif(input == 5):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
browser.find_element_by_css_selector('.si-button-data.download-all').click() # compound class names need a CSS selector
data = browser.find_element_by_css_selector('.content.content-breakpoint-gt-md')
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
elif(input == 6):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
elif(input == 7):
browser = webdriver.Chrome()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
element_to_hover_over = returnVisibleElement(browser.find_elements_by_xpath("//i[@class='material-icons'][contains(./text(),'help')]"))
## ActionChains are not supported in Safari but will work on other browsers
## https://github.com/seleniumhq/selenium-google-code-issue-archive/issues/4136
ActionChains(browser).click(element_to_hover_over).perform()
TouchActions(browser).long_press(element_to_hover_over).perform()
elif(input == 8):
browser = webdriver.Safari()
browser.maximize_window() # Required for the button visibility
browser.get(URL) # with visibility search
time.sleep(2)
search = returnVisibleElement(browser.find_elements_by_id('subjectInput'))
search.send_keys('Google Pixel 3')
time.sleep(2)
search.send_keys(Keys.ENTER)
time.sleep(2)
data = browser.find_element_by_css_selector('.content.content-breakpoint-gt-md') # compound class names need a CSS selector
dataList = data.find_elements_by_tag_name('li')
for item in dataList:
text = item.text
print(text)
browser.back()
print('\n' * 5) # For convenient visual
def returnVisibleElement(listOfInputElements):
for element in listOfInputElements:
if element.is_displayed():
return element
def printSelection():
print('Press:')
for i in range(0, len(EXAMPLES)):
print('',i,'to',EXAMPLES[i], sep = ' ')
if __name__ == '__main__':
while(True):
printSelection()
choice = input('Enter choice: ')
try:
choice = int(choice)
except ValueError:
print('Invalid input, stop program')
break
if(choice not in range(0,9)):
print('Invalid input, stop program')
break
run(int(choice), URL)
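# Hedged sketch (not part of the original file): returnVisibleElement only
# relies on is_displayed(), so stub objects can exercise it without a browser.
class StubElement:  # minimal stand-in for a selenium WebElement
    def __init__(self, visible):
        self.visible = visible
    def is_displayed(self):
        return self.visible
hidden, shown = StubElement(False), StubElement(True)
print(returnVisibleElement([hidden, shown]) is shown)  # True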
| 43.057554
| 143
| 0.637594
| 677
| 5,985
| 5.511078
| 0.264402
| 0.028947
| 0.037523
| 0.0394
| 0.491289
| 0.472259
| 0.372018
| 0.372018
| 0.372018
| 0.356741
| 0
| 0.010379
| 0.259482
| 5,985
| 138
| 144
| 43.369565
| 0.831453
| 0.1066
| 0
| 0.46875
| 0
| 0
| 0.162944
| 0.02966
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023438
| false
| 0
| 0.101563
| 0
| 0.132813
| 0.085938
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b41f50da86c6a2be3204ada5e6385e678b7b05
| 5,531
|
py
|
Python
|
AppTest/testTCPserver.py
|
STRATOLOGIC/SpacePyLibrary
|
89fc3873c6d787ad4e391f6080d9dd3218ffc4a2
|
[
"MIT"
] | 22
|
2015-01-22T13:40:22.000Z
|
2022-02-19T02:03:12.000Z
|
AppTest/testTCPserver.py
|
STRATOLOGIC/SpacePyLibrary
|
89fc3873c6d787ad4e391f6080d9dd3218ffc4a2
|
[
"MIT"
] | 3
|
2018-09-28T13:14:40.000Z
|
2022-02-08T14:19:13.000Z
|
AppTest/testTCPserver.py
|
STRATOLOGIC/SpacePyLibrary
|
89fc3873c6d787ad4e391f6080d9dd3218ffc4a2
|
[
"MIT"
] | 11
|
2016-06-01T11:53:56.000Z
|
2022-02-08T14:19:34.000Z
|
#!/usr/bin/env python3
#******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Unit Tests *
#******************************************************************************
import sys
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import UTIL.SYS, UTIL.TASK, UTIL.TCP
#############
# constants #
#############
LINEBUFFERLEN = 256
###########
# classes #
###########
# =============================================================================
class TCPserver(UTIL.TCP.SingleClientServer):
"""Subclass of UTIL.TCP.SingleClientServer"""
# ---------------------------------------------------------------------------
def __init__(self, portNr):
"""Initialise attributes only"""
modelTask = UTIL.TASK.s_processingTask
UTIL.TCP.SingleClientServer.__init__(self, modelTask, portNr)
self.tcpLineBuffer = ""
# ---------------------------------------------------------------------------
def receiveCallback(self, socket, stateMask):
"""Callback when a client has send data"""
LOG("*** receiveCallback ***")
# read the next set of byte from the data socket
data = self.recv(LINEBUFFERLEN)
if data is None:
# client is automatically disconnected
return
tcpLineBuffer = self.tcpLineBuffer
tcpLineBuffer += data.decode("ascii")
LOG("tcpLineBuffer: " + tcpLineBuffer)
# handle the input: extract the lines from the line buffer
lines = tcpLineBuffer.split("\n")
# the last line has to be handled in a special way and can not be
# processed directly
lastLine = lines[-1]
lines = lines[:-1]
if lastLine == "":
# read of the data was complete (incl. "\n")
pass
else:
# last line was cut off and the rest should come with the next read
self.tcpLineBuffer = lastLine
for line in lines:
# remove a terminating "\r" for clients like telnet
if line[-1] == "\r":
line = line[:-1]
# terminate the client connection if exit has been entered (case insensitive)
upperLine = line.upper()
if (upperLine == "X") or (upperLine == "EXIT"):
LOG("Exit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
self.disconnectClient()
return
if (upperLine == "Q") or (upperLine == "QUIT"):
LOG("Quit requested")
# send the OK response back to the client
retString = "OK\n"
self.send(retString.encode())
# terminate the client connection
self.disconnectClient()
sys.exit(0)
# delegate the input
pstatus = self.processLine(line)
if pstatus == 0:
LOG("OK")
# send the OK response back to the TECO
retString = "OK\n"
self.send(retString.encode())
else:
LOG_ERROR(str(pstatus))
# set the Error response back to the client:
retString = "Error: execution failed (see log)!\n"
self.send(retString.encode())
# ---------------------------------------------------------------------------
def processLine(self, line):
"""Callback when a client has send a data line"""
LOG("line = " + line)
return 0
#############
# functions #
#############
# -----------------------------------------------------------------------------
def initConfiguration():
"""initialise the system configuration"""
UTIL.SYS.s_configuration.setDefaults([
["HOST", "127.0.0.1"],
["SERVER_PORT", "1234"]])
# -----------------------------------------------------------------------------
def createServer():
"""create the TCP server"""
server = TCPserver(portNr=int(UTIL.SYS.s_configuration.SERVER_PORT))
if not server.openConnectPort(UTIL.SYS.s_configuration.HOST):
sys.exit(-1)
# activate cyclic idle function
idleFunction()
# -----------------------------------------------------------------------------
def idleFunction():
UTIL.TASK.s_processingTask.createTimeHandler(1000, idleFunction)
LOG("--- idle ---")
########
# main #
########
if __name__ == "__main__":
# initialise the system configuration
initConfiguration()
# initialise the console handler
consoleHandler = UTIL.TASK.ConsoleHandler()
# initialise the model
modelTask = UTIL.TASK.ProcessingTask(isParent=True)
# register the console handler
modelTask.registerConsoleHandler(consoleHandler)
# create the TCP server
LOG("Open the TCP server")
createServer()
# start the tasks
LOG("start modelTask...")
modelTask.start()
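# Hedged client-side sketch (not part of the original file), assuming the
# server above is already running on the default HOST/SERVER_PORT
# (127.0.0.1:1234): every newline-terminated line is answered with "OK",
# and "quit" stops the server.
import socket
with socket.create_connection(("127.0.0.1", 1234)) as s:
    s.sendall(b"hello\n")
    print(s.recv(256).decode("ascii"))  # -> OK
    s.sendall(b"quit\n")  # server exits via sys.exit(0)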
| 39.22695
| 83
| 0.513108
| 529
| 5,531
| 5.3138
| 0.385633
| 0.01921
| 0.019922
| 0.024191
| 0.16471
| 0.139808
| 0.109925
| 0.088225
| 0.088225
| 0.088225
| 0
| 0.007203
| 0.246972
| 5,531
| 140
| 84
| 39.507143
| 0.667707
| 0.503526
| 0
| 0.164384
| 0
| 0
| 0.088224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0.013699
| 0.041096
| 0
| 0.178082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b42a597f46ffd75065d8212ea8951934240d0a
| 9,253
|
py
|
Python
|
tests/clientlib_test.py
|
yoavcaspi/pre-commit
|
77947f212e7b88a479dbe6feebc60a9f773e8c13
|
[
"MIT"
] | null | null | null |
tests/clientlib_test.py
|
yoavcaspi/pre-commit
|
77947f212e7b88a479dbe6feebc60a9f773e8c13
|
[
"MIT"
] | null | null | null |
tests/clientlib_test.py
|
yoavcaspi/pre-commit
|
77947f212e7b88a479dbe6feebc60a9f773e8c13
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import logging
import cfgv
import pytest
import pre_commit.constants as C
from pre_commit.clientlib import check_type_tag
from pre_commit.clientlib import CONFIG_HOOK_DICT
from pre_commit.clientlib import CONFIG_REPO_DICT
from pre_commit.clientlib import CONFIG_SCHEMA
from pre_commit.clientlib import DEFAULT_LANGUAGE_VERSION
from pre_commit.clientlib import MANIFEST_SCHEMA
from pre_commit.clientlib import MigrateShaToRev
from pre_commit.clientlib import validate_config_main
from pre_commit.clientlib import validate_manifest_main
from testing.fixtures import sample_local_config
def is_valid_according_to_schema(obj, obj_schema):
try:
cfgv.validate(obj, obj_schema)
return True
except cfgv.ValidationError:
return False
@pytest.mark.parametrize('value', ('definitely-not-a-tag', 'fiel'))
def test_check_type_tag_failures(value):
with pytest.raises(cfgv.ValidationError):
check_type_tag(value)
@pytest.mark.parametrize(
('config_obj', 'expected'), (
(
{
'repos': [{
'repo': 'git@github.com:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [{'id': 'pyflakes', 'files': '\\.py$'}],
}],
},
True,
),
(
{
'repos': [{
'repo': 'git@github.com:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [
{
'id': 'pyflakes',
'files': '\\.py$',
'args': ['foo', 'bar', 'baz'],
},
],
}],
},
True,
),
(
{
'repos': [{
'repo': 'git@github.com:pre-commit/pre-commit-hooks',
'rev': 'cd74dc150c142c3be70b24eaf0b02cae9d235f37',
'hooks': [
{
'id': 'pyflakes',
'files': '\\.py$',
# Exclude pattern must be a string
'exclude': 0,
'args': ['foo', 'bar', 'baz'],
},
],
}],
},
False,
),
),
)
def test_config_valid(config_obj, expected):
ret = is_valid_according_to_schema(config_obj, CONFIG_SCHEMA)
assert ret is expected
def test_local_hooks_with_rev_fails():
config_obj = {'repos': [dict(sample_local_config(), rev='foo')]}
with pytest.raises(cfgv.ValidationError):
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_with_local_hooks_definition_passes():
config_obj = {'repos': [sample_local_config()]}
cfgv.validate(config_obj, CONFIG_SCHEMA)
def test_config_schema_does_not_contain_defaults():
"""Due to the way our merging works, if this schema has any defaults they
will clobber potentially useful values in the backing manifest. #227
"""
for item in CONFIG_HOOK_DICT.items:
assert not isinstance(item, cfgv.Optional)
def test_validate_manifest_main_ok():
assert not validate_manifest_main(('.pre-commit-hooks.yaml',))
def test_validate_config_main_ok():
assert not validate_config_main(('.pre-commit-config.yaml',))
def test_validate_config_old_list_format_ok(tmpdir):
f = tmpdir.join('cfg.yaml')
f.write('- {repo: meta, hooks: [{id: identity}]}')
assert not validate_config_main((f.strpath,))
def test_validate_warn_on_unknown_keys_at_repo_level(tmpdir, caplog):
f = tmpdir.join('cfg.yaml')
f.write(
'- repo: https://gitlab.com/pycqa/flake8\n'
' rev: 3.7.7\n'
' hooks:\n'
' - id: flake8\n'
' args: [--some-args]\n',
)
ret_val = validate_config_main((f.strpath,))
assert not ret_val
assert caplog.record_tuples == [
(
'pre_commit',
logging.WARNING,
'Unexpected config key(s): args',
),
]
def test_validate_warn_on_unknown_keys_at_top_level(tmpdir, caplog):
f = tmpdir.join('cfg.yaml')
f.write(
'repos:\n'
'- repo: https://gitlab.com/pycqa/flake8\n'
' rev: 3.7.7\n'
' hooks:\n'
' - id: flake8\n'
'foo:\n'
' id: 1.0.0\n',
)
ret_val = validate_config_main((f.strpath,))
assert not ret_val
assert caplog.record_tuples == [
(
'pre_commit',
logging.WARNING,
'Unexpected config key(s): foo',
),
]
@pytest.mark.parametrize('fn', (validate_config_main, validate_manifest_main))
def test_mains_not_ok(tmpdir, fn):
not_yaml = tmpdir.join('f.notyaml')
not_yaml.write('{')
not_schema = tmpdir.join('notconfig.yaml')
not_schema.write('{}')
assert fn(('does-not-exist',))
assert fn((not_yaml.strpath,))
assert fn((not_schema.strpath,))
@pytest.mark.parametrize(
('manifest_obj', 'expected'),
(
(
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'files': r'\.py$',
}],
True,
),
(
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'language_version': 'python3.4',
'files': r'\.py$',
}],
True,
),
(
# A regression in 0.13.5: always_run and files are permissible
[{
'id': 'a',
'name': 'b',
'entry': 'c',
'language': 'python',
'files': '',
'always_run': True,
}],
True,
),
),
)
def test_valid_manifests(manifest_obj, expected):
ret = is_valid_according_to_schema(manifest_obj, MANIFEST_SCHEMA)
assert ret is expected
@pytest.mark.parametrize(
'dct',
(
{'repo': 'local'}, {'repo': 'meta'},
{'repo': 'wat', 'sha': 'wat'}, {'repo': 'wat', 'rev': 'wat'},
),
)
def test_migrate_sha_to_rev_ok(dct):
MigrateShaToRev().check(dct)
def test_migrate_sha_to_rev_dont_specify_both():
with pytest.raises(cfgv.ValidationError) as excinfo:
MigrateShaToRev().check({'repo': 'a', 'sha': 'b', 'rev': 'c'})
msg, = excinfo.value.args
assert msg == 'Cannot specify both sha and rev'
@pytest.mark.parametrize(
'dct',
(
{'repo': 'a'},
{'repo': 'meta', 'sha': 'a'}, {'repo': 'meta', 'rev': 'a'},
),
)
def test_migrate_sha_to_rev_conditional_check_failures(dct):
with pytest.raises(cfgv.ValidationError):
MigrateShaToRev().check(dct)
def test_migrate_to_sha_apply_default():
dct = {'repo': 'a', 'sha': 'b'}
MigrateShaToRev().apply_default(dct)
assert dct == {'repo': 'a', 'rev': 'b'}
def test_migrate_to_sha_ok():
dct = {'repo': 'a', 'rev': 'b'}
MigrateShaToRev().apply_default(dct)
assert dct == {'repo': 'a', 'rev': 'b'}
@pytest.mark.parametrize(
'config_repo',
(
# i-dont-exist isn't a valid hook
{'repo': 'meta', 'hooks': [{'id': 'i-dont-exist'}]},
# invalid to set a language for a meta hook
{'repo': 'meta', 'hooks': [{'id': 'identity', 'language': 'python'}]},
# name override must be string
{'repo': 'meta', 'hooks': [{'id': 'identity', 'name': False}]},
),
)
def test_meta_hook_invalid(config_repo):
with pytest.raises(cfgv.ValidationError):
cfgv.validate(config_repo, CONFIG_REPO_DICT)
@pytest.mark.parametrize(
'mapping',
(
# invalid language key
{'pony': '1.0'},
# not a string for version
{'python': 3},
),
)
def test_default_language_version_invalid(mapping):
with pytest.raises(cfgv.ValidationError):
cfgv.validate(mapping, DEFAULT_LANGUAGE_VERSION)
def test_minimum_pre_commit_version_failing():
with pytest.raises(cfgv.ValidationError) as excinfo:
cfg = {'repos': [], 'minimum_pre_commit_version': '999'}
cfgv.validate(cfg, CONFIG_SCHEMA)
assert str(excinfo.value) == (
'\n'
'==> At Config()\n'
'==> At key: minimum_pre_commit_version\n'
'=====> pre-commit version 999 is required but version {} is '
'installed. Perhaps run `pip install --upgrade pre-commit`.'.format(
C.VERSION,
)
)
def test_minimum_pre_commit_version_passing():
cfg = {'repos': [], 'minimum_pre_commit_version': '0'}
cfgv.validate(cfg, CONFIG_SCHEMA)
@pytest.mark.parametrize('schema', (CONFIG_SCHEMA, CONFIG_REPO_DICT))
def test_warn_additional(schema):
allowed_keys = {item.key for item in schema.items if hasattr(item, 'key')}
warn_additional, = [
x for x in schema.items if isinstance(x, cfgv.WarnAdditionalKeys)
]
assert allowed_keys == set(warn_additional.keys)
| 29.189274
| 78
| 0.556576
| 1,016
| 9,253
| 4.84252
| 0.201772
| 0.04939
| 0.02378
| 0.040244
| 0.511382
| 0.394715
| 0.321138
| 0.263211
| 0.204675
| 0.172561
| 0
| 0.015342
| 0.302605
| 9,253
| 316
| 79
| 29.281646
| 0.747094
| 0.041392
| 0
| 0.433594
| 0
| 0
| 0.18262
| 0.041926
| 0
| 0
| 0
| 0
| 0.070313
| 1
| 0.089844
| false
| 0.007813
| 0.058594
| 0
| 0.15625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b520f65af9148fce1413d5d355fa797126e985
| 5,494
|
py
|
Python
|
ikalog/ui/options.py
|
fetus-hina/IkaLog
|
bd476da541fcc296f792d4db76a6b9174c4777ad
|
[
"Apache-2.0"
] | 285
|
2015-08-15T14:38:38.000Z
|
2022-02-18T15:00:06.000Z
|
ikalog/ui/options.py
|
fetus-hina/IkaLog
|
bd476da541fcc296f792d4db76a6b9174c4777ad
|
[
"Apache-2.0"
] | 323
|
2015-09-24T12:21:34.000Z
|
2018-05-06T16:34:54.000Z
|
ikalog/ui/options.py
|
fetus-hina/IkaLog
|
bd476da541fcc296f792d4db76a6b9174c4777ad
|
[
"Apache-2.0"
] | 72
|
2015-08-22T00:18:54.000Z
|
2022-02-18T14:44:20.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gettext
import wx
import wx.lib.scrolledpanel
import ikalog.outputs
from ikalog.ui.events import *
from ikalog.ui.panel import *
from ikalog.ui import VideoCapture
from ikalog.utils import *
_ = Localization.gettext_translation('IkaUI', fallback=True).gettext
class OptionsGUI(object):
def __init__(self, ikalog_gui):
self.ikalog_gui = ikalog_gui
self.frame = None
self._init_frame()
def _init_frame(self):
if self.frame:
return
self.frame = wx.Frame(
self.ikalog_gui.frame, wx.ID_ANY, _("Options"), size=(640, 500))
self.notebook = wx.Notebook(self.frame, wx.ID_ANY)
# Apply button
button_apply = wx.Button(self.frame, wx.ID_ANY, _(u'Apply'))
# Use a bold font.
apply_font = button_apply.GetFont()
apply_font.SetWeight(wx.FONTWEIGHT_BOLD)
button_apply.SetFont(apply_font)
button_cancel = wx.Button(self.frame, wx.ID_ANY, _(u'Cancel'))
button_load_default = wx.Button(
self.frame, wx.ID_ANY, _(u'Load default'))
buttons_sizer = wx.BoxSizer(wx.HORIZONTAL)
buttons_sizer.Add(button_apply)
buttons_sizer.Add(button_cancel)
buttons_sizer.Add(button_load_default)
top_sizer = wx.BoxSizer(wx.VERTICAL)
top_sizer.Add(self.notebook)
top_sizer.Add(buttons_sizer)
self.frame.SetSizer(top_sizer)
# Set event handlers for buttons.
button_apply.Bind(wx.EVT_BUTTON, self.on_button_apply)
button_cancel.Bind(wx.EVT_BUTTON, self.on_button_cancel)
button_load_default.Bind(wx.EVT_BUTTON, self.on_button_load_default)
outputs = [self.ikalog_gui.capture] + self.ikalog_gui.outputs
self._init_outputs(outputs)
# self.capture.panel is a part of self.frame. This Bind propagates
# capture's source change to the preview.
self.ikalog_gui.capture.panel.Bind(
EVT_INPUT_INITIALIZED, self.ikalog_gui.on_input_initialized)
# Refresh UI of each plugin.
self.ikalog_gui.engine.call_plugins(
'on_config_load_from_context', debug=True)
def show(self):
if not self.frame:
self._init_frame()
self.frame.Show()
self.frame.Raise()
def on_button_apply(self, event):
self.ikalog_gui.on_options_apply(event)
def on_button_cancel(self, event):
self.ikalog_gui.on_options_cancel(event)
def on_button_load_default(self, event):
self.ikalog_gui.on_options_load_default(event)
def _init_outputs(self, outputs):
output_dict = {}
for output in outputs:
output_dict[output.__class__] = output
# Keys for outputs in the main page.
keys = [
ikalog.ui.VideoCapture,
ikalog.outputs.OBS,
ikalog.outputs.StatInk,
ikalog.outputs.Twitter
]
# Keys for outputs combined into the misc tab.
misc_keys = [
ikalog.outputs.CSV,
ikalog.outputs.JSON,
ikalog.outputs.Screenshot,
ikalog.outputs.Boyomi,
ikalog.outputs.Slack,
ikalog.outputs.WebSocketServer,
]
for key in output_dict.keys():
if key in misc_keys:
continue
if key not in keys:
keys.append(key)
# Main tabs
index = 0
for key in keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.notebook)
self.notebook.InsertPage(index, output.panel, output.panel_name)
index += 1
# Misc tab
self.misc_panel = wx.lib.scrolledpanel.ScrolledPanel(
self.notebook, wx.ID_ANY, size=(640, 360))
self.misc_panel_sizer = wx.BoxSizer(wx.VERTICAL)
default_font = self.misc_panel.GetFont()
title_font = wx.Font(default_font.GetPointSize(),
wx.FONTFAMILY_DEFAULT,
wx.FONTSTYLE_NORMAL,
wx.FONTWEIGHT_BOLD)
for key in misc_keys:
output = output_dict.get(key)
if not output:
continue
output.on_option_tab_create(self.misc_panel)
title = wx.StaticText(self.misc_panel, wx.ID_ANY, output.panel_name)
title.SetFont(title_font)
self.misc_panel_sizer.Add(title)
self.misc_panel_sizer.Add(
output.panel, flag=wx.EXPAND | wx.ALL, border=10)
self.misc_panel_sizer.Add((-1, 25))
self.misc_panel.SetSizer(self.misc_panel_sizer)
self.misc_panel.SetupScrolling()
self.notebook.InsertPage(index, self.misc_panel, _('Misc.'))
| 32.508876
| 80
| 0.630142
| 697
| 5,494
| 4.760402
| 0.278336
| 0.03255
| 0.047016
| 0.018083
| 0.157324
| 0.118445
| 0.118445
| 0.066004
| 0.0434
| 0.0434
| 0
| 0.007323
| 0.279214
| 5,494
| 168
| 81
| 32.702381
| 0.830556
| 0.167455
| 0
| 0.083333
| 0
| 0
| 0.014741
| 0.005941
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064815
| false
| 0
| 0.074074
| 0
| 0.157407
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b61f77d9db0538ba6a1a1c9673544d53143882
| 584
|
py
|
Python
|
setup.py
|
CyberTKR/Simple-LINELIB
|
8596afb6b201b13675a0ed6314b3151f6bbf208b
|
[
"BSD-3-Clause"
] | 4
|
2022-02-20T11:27:29.000Z
|
2022-03-05T00:50:05.000Z
|
setup.py
|
CyberTKR/Simple-LINELIB
|
8596afb6b201b13675a0ed6314b3151f6bbf208b
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
CyberTKR/Simple-LINELIB
|
8596afb6b201b13675a0ed6314b3151f6bbf208b
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", 'r',encoding="utf-8") as f:
long_description = f.read()
setup(
name='LineBot',
version='0.1.0',
description='Simple-LINELIB',
long_description=long_description,
author='Tolg KR',
author_email='tolgkr@cybertkr.com',
url='https://github.com/CyberTKR/Simple-LINELIB',
packages=find_packages(include=['CyberTK', 'CyberTK.*']),
install_requires=[
'httpx==0.19.0',
'requests',
'thrift',
'CyberTKAPI'
],
extras_require={'httpx': ['http2']}
)
| 25.391304
| 61
| 0.628425
| 68
| 584
| 5.279412
| 0.705882
| 0.125348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01919
| 0.196918
| 584
| 22
| 62
| 26.545455
| 0.746269
| 0
| 0
| 0
| 0
| 0
| 0.294521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b64cfd2fd1152628636d4313ba611c18b0ee8d
| 4,552
|
py
|
Python
|
lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/SeparateDriver/CgwshDeviceDriverSetParameterECDB.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | 1
|
2020-04-02T01:17:43.000Z
|
2020-04-02T01:17:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: CgwshDeviceDriverSetParameterECDB.py
'''
Parameter module for Cgwsh driver configuration
'''
import GlobalModule
from EmCommonLog import decorater_log
from DriverSetParameterECDB import DriverSetParameterECDB
class CgwshDeviceDriverSetParameterECDB(DriverSetParameterECDB):
'''
Parameter class for Cgwsh driver configuration
'''
@decorater_log
def __init__(self,
device_name=None,
ec_message=None,
db_info=None):
'''
Constructor
'''
super(CgwshDeviceDriverSetParameterECDB, self).__init__(device_name,
ec_message,
db_info)
self.ec_message = self.ec_message["device"]
@decorater_log
def get_service_info(self):
'''
Service information is acquired.
'''
pass
@decorater_log
def get_management_info(self):
'''
Management information is acquired.
'''
get_info = {}
get_info["device_name"] = self.ec_message.get("name")
GlobalModule.EM_LOGGER.debug("get management_info = %s" % (get_info,))
return get_info
@decorater_log
def get_static_route_info(self):
'''
Static route information is acquired.
acquired dict:
{
static_route:[{
ip_address:str,
subnet_mask:str,
gateway_address:str
}]
}
'''
get_info = {}
tmp_list = []
routes = self.ec_message.get("serviceInfo", {}).get("staticRoute", ())
for route in routes:
tmp_item = {}
tmp_item["ip_address"] = route.get("ipAddress")
tmp_item["subnet_mask"] = route.get("subnetMask")
tmp_item["gateway_address"] = route.get("gatewayIpAddress")
tmp_list.append(tmp_item)
get_info["static_route"] = tmp_list
GlobalModule.EM_LOGGER.debug("get static_route = %s" % (get_info,))
return get_info
@decorater_log
def get_tunnel_if_info(self):
'''
Tunnel interface information is acquired.
acquired dict:
{
tunnel_if:[{
vrf_name:str,
if_name:str,
uni_if_name:str,
uni_vlan_id:str,
tunnel_source:str,
}]
}
'''
get_info = {}
tmp_list = []
tunnel_uni = self.ec_message.get("serviceInfo", {}).get("uni", ())
tunnel_officeInfo = self.ec_message.get(
"serviceInfo", {}).get("officeInfo", ())
vrf_name = tunnel_uni.get("vrfName")
uni_if_name = tunnel_uni.get("ifName")
uni_vlan_id = tunnel_uni.get("vlanId")
for tunnel in tunnel_officeInfo:
tmp_item = {}
tmp_item["vrf_name"] = vrf_name
tmp_item["if_name"] = tunnel.get("tunnelIfName")
tmp_item["uni_if_name"] = uni_if_name
tmp_item["uni_vlan_id"] = uni_vlan_id
tmp_item["tunnel_source"] = tunnel.get(
"tunnelSrcIpAddress")
tmp_list.append(tmp_item)
get_info["tunnel_if"] = tmp_list
GlobalModule.EM_LOGGER.debug("get tunnel_if = %s" % (get_info,))
return get_info
@decorater_log
def get_pppoe_info(self):
'''
PPPoE information is acquired.
acquired dict:
{
pppoe:[{
username:str,
password:str,
tenant:str,
pp_no:str
}]
}
'''
get_info = {}
tmp_list = []
ppp_infos = self.ec_message.get("serviceInfo", {}).get("pppInfo", ())
for ppp_info in ppp_infos:
tmp_item = {}
tmp_item["username"] = ppp_info.get("connectionId")
tmp_item["password"] = ppp_info.get("connectionPassword")
tmp_item["tenant"] = ppp_info.get("corporationId")
tmp_item["pp_no"] = ppp_info.get("ppId")
tmp_list.append(tmp_item)
get_info["pppoe"] = tmp_list
GlobalModule.EM_LOGGER.debug("get pppoe = %s" % (get_info,))
return get_info
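# Hedged standalone sketch (not part of the original file) of the staticRoute
# extraction in get_static_route_info, applied to an invented EC-message fragment.
ec_device = {"serviceInfo": {"staticRoute": [
    {"ipAddress": "192.0.2.0", "subnetMask": "255.255.255.0",
     "gatewayIpAddress": "198.51.100.1"}]}}
routes = [{"ip_address": r.get("ipAddress"),
           "subnet_mask": r.get("subnetMask"),
           "gateway_address": r.get("gatewayIpAddress")}
          for r in ec_device["serviceInfo"]["staticRoute"]]
print({"static_route": routes})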
| 32.985507
| 79
| 0.530097
| 450
| 4,552
| 5.051111
| 0.228889
| 0.055433
| 0.040035
| 0.039595
| 0.273647
| 0.195337
| 0.133304
| 0.051474
| 0.051474
| 0.051474
| 0
| 0.00173
| 0.364895
| 4,552
| 137
| 80
| 33.226277
| 0.784504
| 0.185413
| 0
| 0.315068
| 0
| 0
| 0.138828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0.027397
| 0.041096
| 0
| 0.191781
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b73188b11fd452c2465eac91ddbc3efbb01c8c
| 13,767
|
py
|
Python
|
scripts/common_lib/build_lib.py
|
Bhaskers-Blu-Org1/wc-devops-utilities
|
d8131261cb3d67ce872b541c5e2d8ff22fcbf614
|
[
"Apache-2.0"
] | 15
|
2018-06-26T19:48:08.000Z
|
2021-01-18T13:29:16.000Z
|
scripts/common_lib/build_lib.py
|
Bhaskers-Blu-Org1/wc-devops-utilities
|
d8131261cb3d67ce872b541c5e2d8ff22fcbf614
|
[
"Apache-2.0"
] | 16
|
2018-05-29T08:12:38.000Z
|
2022-02-15T15:25:14.000Z
|
scripts/common_lib/build_lib.py
|
IBM/wc-devops-utilities
|
d8131261cb3d67ce872b541c5e2d8ff22fcbf614
|
[
"Apache-2.0"
] | 21
|
2018-05-29T11:54:05.000Z
|
2021-12-20T06:42:54.000Z
|
#!/usr/bin/env python3.6
import os
import subprocess
import json
import argparse
import zipfile
import shutil
import requests
import datetime
import re
import operator
import unicodedata
# global list of error messages, used to keep track of all errors across steps
errorMessages = []
"""
Collection of Common Functions used by Build Scripts
A collection of common functions shared by the individual build scripts.
"""
def get(url, usr, pwd):
"""
HTTP/HTTPS GET requests using external Python module requests
@param url the url of the REST call
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return a JSON response
"""
headers = {
'Accept': 'application/vnd.docker.distribution.manifest.v1+json',
}
# TEMP: Remove the suppressed verification once the docker cert location
# is figured out and we specify it in REQUESTS_CA_BUNDLE
return requests.get(url, auth=(usr, pwd), headers=headers, verify=False)
def get_latest_tag(registry_path, usr, pwd):
"""
    Retrieve the latest version of an image based on its tags: vX-YYYYMMDD-HHmm.
    The latest tag is the one with the highest version number (vX),
    with ties broken by the most recent timestamp (YYYYMMDD-HHmm).
@param registry_path docker registry path
@param usr the functional username for the docker registry
@param pwd the password for the docker registry functional user
@return the latest image tag
"""
tag_list_url = registry_path + '/tags/list'
request = get(tag_list_url, usr, pwd)
tag_list = json.loads(request.text)
    latest_version = None
    latest_timestamp = None
    latest_tag = None
    for tag in tag_list['tags']:
        if '-' not in tag:
            continue
        str_version, _, str_timestamp = tag.partition('-')
        tag_format = "%Y%m%d-%H%M"
        try:
            dt_timestamp = datetime.datetime.strptime(str_timestamp, tag_format)
        except ValueError:
            continue
        # Compare versions numerically: a plain string comparison would rank
        # 'v9' above 'v10'.
        try:
            num_version = int(str_version.lstrip('v'))
        except ValueError:
            continue
        if latest_tag is None or \
                (num_version, dt_timestamp) > (latest_version, latest_timestamp):
            latest_version = num_version
            latest_timestamp = dt_timestamp
            latest_tag = tag
    return latest_tag
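# --- Illustrative sketch (not part of the original library) -----------------
# Demonstrates the ordering rule documented above on invented tags: the
# version number wins first, then the timestamp breaks ties within a version.
def _example_tag_ordering():
    sample_tags = ["v1-20200101-0900", "v2-20190601-1200", "v2-20190601-1830"]
    parsed = [(int(tag.partition('-')[0].lstrip('v')),
               datetime.datetime.strptime(tag.partition('-')[2], "%Y%m%d-%H%M"),
               tag)
              for tag in sample_tags]
    # max() on the (version, timestamp) tuples picks 'v2-20190601-1830'
    return max(parsed)[2]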
def unzip(zip_file, to_dir):
"""
Generic unzip function for extracting zip files
@param zip_file the zip file to be extracted
@param to_dir the destination directory to extract the zip file to
"""
with zipfile.ZipFile(zip_file, "r") as zip_ref:
zip_ref.extractall(to_dir)
zip_ref.close()
def create_dockerfile(dockerfile_parent_dir, docker_url, image_namespace, image_name, image_tag_latest):
"""
Creates a dockerfile using the correct docker registry URL associated
with the datacenter this script is being run on
:param str dockerfile_parent_dir: path to the parent directory for the Dockerfile
:param str docker_url: the docker registry VIP accessible from the mesos slaves
    :param str image_namespace: the namespace of the image
:param str image_name: the name of the image
:param str image_tag_latest: the latest version tag of the base image
:returns: None
"""
    # Form the path for the Dockerfile based on the parent of the caller script
    dockerfile_path = os.path.join(dockerfile_parent_dir, "Dockerfile")
    # Format the FROM command
    dockerfile_from_cmd = "FROM " + docker_url + image_namespace + "/" + image_name + ":" + image_tag_latest
    # Write the FROM command to the Dockerfile; the with-block closes the file
    with open(dockerfile_path, "w+") as dockerfile:
        dockerfile.write(dockerfile_from_cmd)
def set_docker_client_timeout():
"""
Sets the DOCKER_CLIENT_TIMEOUT environment variable to 300
"""
os.environ['DOCKER_CLIENT_TIMEOUT'] = '300'
print("The timeout set for docker client: " + os.environ['DOCKER_CLIENT_TIMEOUT'] + " seconds")
# ======================= verify bundle Structure ===============================================
def openJSONfile(jsonFile):
"""
Function to open a JSON file
@param jsonFile path to the JSON file
@return the loaded JSON file
"""
try:
with open(jsonFile) as json_data_file:
data = json.load(json_data_file)
except:
addToErrorMessages("The specified JSON file is not valid: " + jsonFile)
raise
return data
def directoryToJSON(directory):
"""
Function to convert objects in a given directory into JSON form.
    The parent object is always a dict; it may contain children if type=directory.
A directory is composed of a list and may contain files and/or directories.
@param directory directory to convert
@return JSON representation of a directory
"""
d = {'name': os.path.basename(directory)} # the parent object is dict
if os.path.isdir(directory):
d['type'] = "directory"
# directory may have children
# the children in a directory is a list composed of more files/directories
d['children'] = [directoryToJSON(os.path.join(directory,x)) for x in os.listdir(directory)]
else:
d['type'] = "file"
return d
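# --- Illustrative sketch (not part of the original library) -----------------
# Shows the JSON shape directoryToJSON produces for a tiny, hypothetical
# layout: a 'bundle' directory holding a single 'deploy.json' file.
def _example_directory_to_json():
    import tempfile
    tmp = tempfile.mkdtemp()
    bundle_dir = os.path.join(tmp, "bundle")
    os.makedirs(bundle_dir)
    open(os.path.join(bundle_dir, "deploy.json"), "w").close()
    # Returns: {'name': 'bundle', 'type': 'directory',
    #           'children': [{'name': 'deploy.json', 'type': 'file'}]}
    result = directoryToJSON(bundle_dir)
    shutil.rmtree(tmp)
    return result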
def verifyBundleStructure(expected, actual, currentPath):
"""
Function to verify if an uploaded bundle follows IBM defined structure
@param expected the JSON representation of the IBM defined structure
@param actual the JSON representation of the actual structure of the uploaded bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@return True if structure of the uploaded bundle follows IBM defined structure. False otherwise.
"""
isMatched = True
if type(expected) is dict:
if matches(expected,actual): # a matching file or directory was found
if expected['type'] == 'directory':
currentPath = currentPath + actual['name'] + "/"
if expected['children'] == "_any":
isMatched = isMatched & True # if the contents of the directory can be anything then do no further checking
else:
isMatched = isMatched & verifyBundleStructure(expected['children'], actual['children'], currentPath) # do further checking
else: # a matching file or directory was not found
if expected['fail-if-not-found'] == "yes":
logBundleStructureErrorMessage(expected, currentPath)
return False
if type(expected) is list:
for k in range(0,len(expected)):
isMatched = isMatched & verifyActualContainsExpectedElement(actual, expected[k], currentPath, isMatched)
return isMatched
def logBundleStructureErrorMessage(expected, currentPath):
"""
    Function to add error messages to the global list.
@param expected the expected element
@param currentPath the current path we are on that has the missing file or directory
"""
addToErrorMessages("A "+ expected['type'] +" is missing from the path: \"" + currentPath + "\"")
addToErrorMessages(expected['error-message-if-fails'])
return
def matches(expectedElement, actualElement):
"""
Function to check if files/directories match. They must have the same name and must both be the same type.
@param expectedElement the expected element. May be defined by regular expression
@param actualElement the actual element
"""
ret = False
if re.fullmatch(expectedElement['name'], actualElement['name']) is not None and expectedElement['type'] == actualElement['type']:
ret = True
return ret
def verifyActualContainsExpectedElement(actual, expectedElement, currentPath, isMatched):
"""
Function to verify if an actual list of objects contains an expected element. Helper method to verifyBundleStructure.
@param actual list of the actual files and directories in the bundle
@param expectedElement the expected element to find in the bundle
@param currentPath the path currently being checked (used to build paths recursively for error msg)
@param isMatched (only used for recursive calls)
@return True if the list of actual objects contain the expected element
"""
# if actual is a dict then verify it and its children
if type(actual) is dict:
isMatched = isMatched & verifyBundleStructure(expectedElement,actual, currentPath)
# if actual is a list then find out if they match anywhere, if so get the matched position
elif type(actual) is list:
matchedPosition = -1
for i in range(0, len(actual)):
if matches(expectedElement,actual[i]):
matchedPosition = i
break
if matchedPosition != -1: # if they match then verify their children too
isMatched = isMatched & verifyBundleStructure(expectedElement, actual[matchedPosition] , currentPath)
else : # if they don't match then log the error msg and return false
if expectedElement['fail-if-not-found'] == "yes": # log error msg and return false if needed
isMatched = False
logBundleStructureErrorMessage(expectedElement, currentPath)
return isMatched
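# --- Illustrative sketch (not part of the original library) -----------------
# A hypothetical bundle-definition fragment in the shape verifyBundleStructure
# consumes: 'name' may be a regular expression (matched via re.fullmatch),
# 'children' may be the literal string "_any", and 'fail-if-not-found' /
# 'error-message-if-fails' control error reporting.
EXAMPLE_EXPECTED_STRUCTURE = {
    "name": r"bundle.*",
    "type": "directory",
    "fail-if-not-found": "yes",
    "error-message-if-fails": "The bundle root directory is missing.",
    "children": [
        {"name": r"deploy\.json",
         "type": "file",
         "fail-if-not-found": "yes",
         "error-message-if-fails": "deploy.json is required."},
        {"name": r"scripts",
         "type": "directory",
         "fail-if-not-found": "no",
         "error-message-if-fails": "",
         "children": "_any"},
    ],
}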
def addToErrorMessages(errorMessage):
"""
Function to add error messages to the global list of errorMessages
@param errorMessage the error message to add
"""
print(errorMessage)
    global errorMessages
errorMessages.extend([errorMessage])
return
def unzipRecursively(zipFileName, directoryToUnzipTo):
"""
Function to unzip a ZIP file recursively
@param zipFileName the zip file to be extracted
@param directoryToUnzipTo the destination directory to extract the zip file to
"""
    if zipFileName.endswith(".zip"):  # only extract .zip archives
unzip(zipFileName,directoryToUnzipTo)
os.remove(zipFileName)
for x in os.listdir(directoryToUnzipTo):
subdirectory = os.path.join(directoryToUnzipTo, os.path.splitext(x)[0])
subfile = os.path.join(directoryToUnzipTo, x )
unzipRecursively(subfile, subdirectory)
return
def zipFileIsGood(filePath):
"""
Function to test if a ZIP file is good or bad
@param filePath the zip file to be tested
@return True if the ZIP file is good. False otherwise.
"""
ret = True
try:
the_zip_file = zipfile.ZipFile(filePath)
badFile = the_zip_file.testzip()
if badFile is not None:
ret = False
else:
ret = True
except:
ret = False
return ret
def verifyZipFile(zipDirectory, nameOfBundle):
"""
Function to verify if an uploaded bundle is:
1) a valid zip file
2) follows IBM defined structure
@param zipDirectory where the bundle ZIP is located
@param nameOfBundle name of the bundle ZIP file
"""
print ('Validating bundle structure...')
bundleIsGood = True
bundleZip = os.path.join(zipDirectory, nameOfBundle)
if zipFileIsGood(bundleZip):
try:
# copy bundle into new working directory -----------------------------------------------------------
directoryToUnzipTo = os.path.join(zipDirectory, "temp")
if not os.path.exists(directoryToUnzipTo):
os.makedirs(directoryToUnzipTo)
shutil.copy(bundleZip, os.path.join(directoryToUnzipTo, nameOfBundle))
# unzip the bundle ----------------------------------------------------------------------------------
unzipRecursively(os.path.join(directoryToUnzipTo, nameOfBundle), os.path.join(directoryToUnzipTo, os.path.splitext(nameOfBundle)[0]))
# verify structure of bundle ------------------------------------------------------------------------
            # check package structure
expectedPackageStructure = openJSONfile(os.path.join(zipDirectory, "bundle-definition.json"))
actualBundleStructure = directoryToJSON(directoryToUnzipTo) # convert the unzipped directory to JSON file
bundleIsGood = verifyBundleStructure(expectedPackageStructure, actualBundleStructure, "")
if not bundleIsGood:
addToErrorMessages("The uploaded bundle does not meet predefined structure. Could not proceed with deployment.")
# clean up unzipped stuff and package structure Json -------------------------------------------------
shutil.rmtree(directoryToUnzipTo)
except:
addToErrorMessages("Exception occurred while verifying bundle structure. Could not proceed with deployment.")
bundleIsGood = False
else:
bundleIsGood = False
addToErrorMessages("The uploaded bundle could not be unzipped. Could not proceed with deployment.")
    # output the report value, joining all the messages together
    print("report=[" + ". ".join(str(x) for x in errorMessages) + "]")
return bundleIsGood
| 36.81016
| 145
| 0.648217
| 1,591
| 13,767
| 5.547454
| 0.226273
| 0.011897
| 0.01133
| 0.006798
| 0.220032
| 0.143666
| 0.116021
| 0.088149
| 0.081351
| 0.060956
| 0
| 0.001673
| 0.261713
| 13,767
| 373
| 146
| 36.908847
| 0.866686
| 0.39108
| 0
| 0.242775
| 0
| 0
| 0.09839
| 0.017772
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086705
| false
| 0
| 0.063584
| 0
| 0.225434
| 0.023121
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b7df8dc09a874c18b0de4987e789a5a8c1dfcd
| 10,035
|
py
|
Python
|
src/static_grasp_kt.py
|
ivalab/GraspKpNet
|
d4b6186d74ac82a745d778892742d52a204bd1cf
|
[
"MIT"
] | 16
|
2021-05-04T23:08:47.000Z
|
2022-01-19T08:33:14.000Z
|
src/static_grasp_kt.py
|
ivalab/GraspKpNet
|
d4b6186d74ac82a745d778892742d52a204bd1cf
|
[
"MIT"
] | 2
|
2021-06-22T22:54:44.000Z
|
2021-10-04T19:23:35.000Z
|
src/static_grasp_kt.py
|
ivalab/GraspKpNet
|
d4b6186d74ac82a745d778892742d52a204bd1cf
|
[
"MIT"
] | 2
|
2021-07-10T12:51:29.000Z
|
2022-02-17T06:45:54.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import json
import cv2
import cv2.aruco as aruco
import numpy as np
import sys
import rospy
from std_msgs.msg import Bool
from std_msgs.msg import Float64MultiArray
from sensor_msgs.msg import Image, CameraInfo
from cv_bridge import CvBridge, CvBridgeError
import message_filters
import torch
from external.nms import soft_nms
from opts import opts
from logger import Logger
from utils.utils import AverageMeter
from datasets.dataset_factory import dataset_factory
from detectors.detector_factory import detector_factory
# transformation from the robot base to aruco tag
M_BL = np.array([[1., 0., 0., 0.30000],
[0., 1., 0., 0.32000],
[0., 0., 1., -0.0450],
[0., 0., 0., 1.00000]])
# default transformation from the camera to aruco tag
default_M_CL = np.array([[-0.07134498, -0.99639369, 0.0459293, -0.13825178],
[-0.8045912, 0.03027403, -0.59305689, 0.08434352],
[ 0.58952768, -0.07926594, -0.8038495, 0.66103522],
[ 0., 0., 0., 1. ]]
)
# camera intrinsic matrix of Realsense D435
cameraMatrix = np.array([[607.47165, 0.0, 325.90064],
[0.0, 606.30420, 240.91934],
[0.0, 0.0, 1.0]])
# distortion of Realsense D435
distCoeffs = np.array([0.08847, -0.04283, 0.00134, -0.00102, 0.0])
# initialize GKNet Detector
opt = opts().parse()
Dataset = dataset_factory[opt.dataset]
opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
print(opt)
Detector = detector_factory[opt.task]
detector = Detector(opt)
# Publisher of perception result
pub_res = rospy.Publisher('/result', Float64MultiArray, queue_size=10)
def get_M_CL_info(gray, image_init, visualize=False):
# parameters
markerLength_CL = 0.093
aruco_dict_CL = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
# aruco_dict_CL = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners_CL, ids_CL, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict_CL, parameters=parameters)
# for the first frame, it may contain nothing
if ids_CL is None:
return default_M_CL, None
rvec_CL, tvec_CL, _objPoints_CL = aruco.estimatePoseSingleMarkers(corners_CL[0], markerLength_CL,
cameraMatrix, distCoeffs)
dst_CL, jacobian_CL = cv2.Rodrigues(rvec_CL)
M_CL = np.zeros((4, 4))
M_CL[:3, :3] = dst_CL
M_CL[:3, 3] = tvec_CL
M_CL[3, :] = np.array([0, 0, 0, 1])
if visualize:
# print('aruco is located at mean position (%d, %d)' %(mean_x ,mean_y))
aruco.drawAxis(image_init, cameraMatrix, distCoeffs, rvec_CL, tvec_CL, markerLength_CL)
return M_CL, corners_CL[0][0, :, :]
def aruco_tag_remove(rgb_image, corners):
    img_out = rgb_image.copy()
    # find the top-left and bottom-right corners
    min_sum = sys.maxsize
    max_sum = -sys.maxsize
    tl_pxl = None
    br_pxl = None
    for corner in corners:
        if corner[0] + corner[1] < min_sum:
            min_sum = corner[0] + corner[1]
            tl_pxl = [int(corner[0]), int(corner[1])]
        if corner[0] + corner[1] > max_sum:
            max_sum = corner[0] + corner[1]
            br_pxl = [int(corner[0]), int(corner[1])]
    # get the replacement pixel value (numpy indexes as [row, column])
    rep_color = img_out[tl_pxl[1] - 10, tl_pxl[0] - 10, :]
for h in range(tl_pxl[1] - 45, br_pxl[1] + 46):
for w in range(tl_pxl[0] - 45, br_pxl[0] + 46):
img_out[h, w, :] = rep_color
return img_out
def project(pixel, depth_image, M_CL, M_BL, cameraMatrix):
    '''
    Project a 2d pixel on the image to 3d using depth info.
    :param pixel: (x, y) pixel coordinate
    :param depth_image: depth image
    :param M_CL: transform from the camera to the aruco tag
    :param M_BL: transform from the robot base to the aruco tag
    :param cameraMatrix: camera intrinsic matrix
    :return:
        q_B: 3d coordinate of the pixel with respect to the base frame
    '''
depth = depth_image[pixel[1], pixel[0]]
    # if the depth of the detected pixel is 0, check the depth of its
    # neighbors over progressively larger windows (counter-clockwise scan)
nei_range = 1
while depth == 0:
for delta_x in range(-nei_range, nei_range + 1):
for delta_y in range(-nei_range, nei_range + 1):
nei = [pixel[0] + delta_x, pixel[1] + delta_y]
depth = depth_image[nei[1], nei[0]]
if depth != 0:
break
if depth != 0:
break
nei_range += 1
pxl = np.linalg.inv(cameraMatrix).dot(
np.array([pixel[0] * depth, pixel[1] * depth, depth]))
q_C = np.array([pxl[0], pxl[1], pxl[2], 1])
q_L = np.linalg.inv(M_CL).dot(q_C)
q_B = M_BL.dot(q_L)
return q_B
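# --- Illustrative sketch (not part of the original script) ------------------
# The math used by project() above on an invented pixel: back-project through
# the intrinsics, then chain the camera->tag and base->tag transforms.
def _example_back_projection():
    pixel, depth = (320, 240), 500.0  # hypothetical pixel and raw depth value
    p_cam = np.linalg.inv(cameraMatrix).dot(
        np.array([pixel[0] * depth, pixel[1] * depth, depth]))
    q_C = np.array([p_cam[0], p_cam[1], p_cam[2], 1.0])
    q_L = np.linalg.inv(default_M_CL).dot(q_C)  # camera frame -> tag frame
    return M_BL.dot(q_L)                        # tag frame -> robot base frame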
def pre_process(rgb_img, depth_img):
inp_image = rgb_img
inp_image[:, :, 0] = depth_img
inp_image = cv2.resize(inp_image, (256, 256))
return inp_image
def kinect_rgbd_callback(rgb_data, depth_data):
"""
Save raw RGB and depth input from Kinect V1
:param rgb_data: RGB image
:param depth_data: raw depth image
:return: None
"""
try:
cv_rgb = cv_bridge.imgmsg_to_cv2(rgb_data, "bgr8")
cv_depth = cv_bridge.imgmsg_to_cv2(depth_data, "32FC1")
cv_rgb_arr = np.array(cv_rgb, dtype=np.uint8)
cv_depth_arr = np.array(cv_depth, dtype=np.float32)
# cv_depth_arr = np.nan_to_num(cv_depth_arr)
cv2.imshow("Depth", cv_depth)
cv2.imshow("RGB", cv_rgb)
img = cv_rgb_arr.copy()
depth_raw = cv_depth_arr.copy()
gray = img.astype(np.uint8)
depth = (depth_raw * 1000).astype(np.uint8)
# get the current transformation from the camera to aruco tag
M_CL, corners = get_M_CL_info(gray, img, False)
# remove aruco tag from input image to avoid mis-detection
        # fall back to the raw image when no tag corners were detected
        img_wo_at = img
        if corners is not None:
            img_wo_at = aruco_tag_remove(img, corners)
# replace blue channel with the depth channel
inp_image = pre_process(img_wo_at, depth)
# pass the image into the network
ret = detector.run(inp_image[:, :, :])
ret = ret["results"]
loc_ori = KpsToGrasppose(ret, img, depth_raw, M_CL, M_BL, cameraMatrix)
pub_res.publish(loc_ori)
except CvBridgeError as e:
print(e)
def isWithinRange(pxl, w, h):
x, y = pxl[:]
return w/12. <= x <= 11*w/12 and h/12. <= y <= 11*h/12
def KpsToGrasppose(net_output, rgb_img, depth_map, M_CL, M_BL, cameraMatrix, visualize=True):
kps_pr = []
for category_id, preds in net_output.items():
if len(preds) == 0:
continue
for pred in preds:
kps = pred[:4]
score = pred[-1]
kps_pr.append([kps[0], kps[1], kps[2], kps[3], score])
# no detection
if len(kps_pr) == 0:
return [0, 0, 0, 0]
# sort by the confidence score
kps_pr = sorted(kps_pr, key=lambda x: x[-1], reverse=True)
# select the top 1 grasp prediction within the workspace
res = None
for kp_pr in kps_pr:
f_w, f_h = 640. / 512., 480. / 512.
kp_lm = (int(kp_pr[0] * f_w), int(kp_pr[1] * f_h))
kp_rm = (int(kp_pr[2] * f_w), int(kp_pr[3] * f_h))
if isWithinRange(kp_lm, 640, 480) and isWithinRange(kp_rm, 640, 480):
res = kp_pr
break
if res is None:
return [0, 0, 0, 0]
f_w, f_h = 640./512., 480./512.
kp_lm = (int(res[0]*f_w), int(res[1]*f_h))
kp_rm = (int(res[2]*f_w), int(res[3]*f_h))
center = (int((kp_lm[0]+kp_rm[0])/2), int((kp_lm[1]+kp_rm[1])/2))
kp_lm_3d = project(kp_lm, depth_map, M_CL, M_BL, cameraMatrix)
kp_rm_3d = project(kp_rm, depth_map, M_CL, M_BL, cameraMatrix)
center_3d = project(center, depth_map, M_CL, M_BL, cameraMatrix)
orientation = np.arctan2(kp_rm_3d[1] - kp_lm_3d[1], kp_rm_3d[0] - kp_lm_3d[0])
# motor 7 is clockwise
if orientation > np.pi / 2:
orientation = np.pi - orientation
elif orientation < -np.pi / 2:
orientation = -np.pi - orientation
else:
orientation = -orientation
# compute the open width
dist = np.linalg.norm(kp_lm_3d[:2] - kp_rm_3d[:2])
# draw arrow for left-middle and right-middle key-points
lm_ep = (int(kp_lm[0] + (kp_rm[0] - kp_lm[0]) / 5.), int(kp_lm[1] + (kp_rm[1] - kp_lm[1]) / 5.))
rm_ep = (int(kp_rm[0] + (kp_lm[0] - kp_rm[0]) / 5.), int(kp_rm[1] + (kp_lm[1] - kp_rm[1]) / 5.))
rgb_img = cv2.arrowedLine(rgb_img, kp_lm, lm_ep, (0, 0, 0), 2)
rgb_img = cv2.arrowedLine(rgb_img, kp_rm, rm_ep, (0, 0, 0), 2)
# draw left-middle, right-middle and center key-points
rgb_img = cv2.circle(rgb_img, (int(kp_lm[0]), int(kp_lm[1])), 2, (0, 0, 255), 2)
rgb_img = cv2.circle(rgb_img, (int(kp_rm[0]), int(kp_rm[1])), 2, (0, 0, 255), 2)
rgb_img = cv2.circle(rgb_img, (int(center[0]), int(center[1])), 2, (0, 0, 255), 2)
if visualize:
cv2.namedWindow('visual', cv2.WINDOW_AUTOSIZE)
cv2.imshow('visual', rgb_img)
return [center_3d[0], center_3d[1], center_3d[2], orientation, dist]
if __name__ == '__main__':
# initialize ros node
rospy.init_node("Static_grasping")
# Bridge to convert ROS Image type to OpenCV Image type
cv_bridge = CvBridge()
cv2.WITH_QT = False
# Get camera calibration parameters
cam_param = rospy.wait_for_message('/camera/rgb/camera_info', CameraInfo, timeout=None)
# Subscribe to rgb and depth channel
image_sub = message_filters.Subscriber("/camera/rgb/image_rect_color", Image)
depth_sub = message_filters.Subscriber("/camera/depth_registered/image", Image)
ts = message_filters.ApproximateTimeSynchronizer([image_sub, depth_sub], 1, 0.1)
ts.registerCallback(kinect_rgbd_callback)
rospy.spin()
| 34.249147
| 107
| 0.623019
| 1,558
| 10,035
| 3.801027
| 0.22914
| 0.010132
| 0.006079
| 0.006079
| 0.174941
| 0.125971
| 0.114488
| 0.050321
| 0.018575
| 0.018575
| 0
| 0.066293
| 0.252915
| 10,035
| 293
| 108
| 34.249147
| 0.723623
| 0.163528
| 0
| 0.059459
| 0
| 0
| 0.017741
| 0.009776
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037838
| false
| 0
| 0.124324
| 0
| 0.210811
| 0.016216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b8dc2efbc6e5399774e3bdb1583b1ec3d22dca
| 13,278
|
py
|
Python
|
source/utils/augmentations.py
|
dovietchinh/multi-task-classification
|
23a70300a7a800bc982f87902b6aa1faaf91b489
|
[
"RSA-MD"
] | null | null | null |
source/utils/augmentations.py
|
dovietchinh/multi-task-classification
|
23a70300a7a800bc982f87902b6aa1faaf91b489
|
[
"RSA-MD"
] | null | null | null |
source/utils/augmentations.py
|
dovietchinh/multi-task-classification
|
23a70300a7a800bc982f87902b6aa1faaf91b489
|
[
"RSA-MD"
] | null | null | null |
import numpy as np
import cv2
import random
def preprocess(img,img_size,padding=True):
"""[summary]
Args:
img (np.ndarray): images
img_size (int,list,tuple): target size. eg: 224 , (224,224) or [224,224]
        padding (bool): pad the image square before resizing, to prevent distortion. Defaults to True.
Returns:
images (np.ndarray): images in target size
"""
if padding:
height,width,_ = img.shape
delta = height - width
if delta > 0:
img = np.pad(img,[[0,0],[delta//2,delta//2],[0,0]], mode='constant',constant_values =255)
else:
img = np.pad(img,[[-delta//2,-delta//2],[0,0],[0,0]], mode='constant',constant_values =255)
if isinstance(img_size,int):
img_size = (img_size,img_size)
return cv2.resize(img,img_size)
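# --- Illustrative sketch (not part of the original module) ------------------
# preprocess() pads the shorter side with white pixels before resizing, so a
# hypothetical 100x50 image reaches 224x224 without distortion.
def _example_preprocess():
    tall = np.full((100, 50, 3), 128, dtype='uint8')
    out = preprocess(tall, 224)  # padded to 100x100, then resized
    assert out.shape == (224, 224, 3)
    return out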
class RandAugment:
def __init__(self, augment_params):
self.num_layers = augment_params['num_layers']
self.AUGMENT_FUNCTION = {
'fliplr' : RandAugment.augment_fliplr if augment_params.get('fliplr') else None,
'augment_hsv' : RandAugment.augment_hsv if augment_params.get('augment_hsv') else None,
'hist_equalize' : RandAugment.hist_equalize if augment_params.get('hist_equalize') else None,
'solarize' : RandAugment.solarize if augment_params.get('solarize') else None,
'posterize': RandAugment.posterize if augment_params.get('posterize') else None,
'adjust_brightness': RandAugment.adjust_brightness if augment_params.get('adjust_brightness') else None,
'invert' : RandAugment.invert if augment_params.get('invert') else None,
'contrast': RandAugment.contrast if augment_params.get('contrast') else None,
'shearX' : RandAugment.shear_x if augment_params.get('shearX') else None,
'shearY' : RandAugment.shear_y if augment_params.get('shearY') else None,
'translateX' : RandAugment.translate_x if augment_params.get('translateX') else None,
'translateY' : RandAugment.translate_y if augment_params.get('translateY') else None,
'sharpness' : RandAugment.sharpness if augment_params.get('sharpness') else None,
'cutout' : RandAugment.cutout if augment_params.get('cutout') else None,
'rotate' : RandAugment.rotate if augment_params.get('rotate') else None,
'cut_25_left' : RandAugment.cut_25_left if augment_params.get('cut_25_left') else None,
'cut_25_right': RandAugment.cut_25_right if augment_params.get('cut_25_right') else None,
'cut_25_above': RandAugment.cut_25_above if augment_params.get('cut_25_above') else None,
'cut_25_under': RandAugment.cut_25_under if augment_params.get('cut_25_under') else None,
# 'random_crop':random_crop
}
self.ARGS_LIMIT = {
'fliplr' : augment_params.get('fliplr'),
'augment_hsv': augment_params.get('augment_hsv'),
'hist_equalize' : augment_params.get('hist_equalize'),
'solarize' : augment_params.get('solarize'),
'posterize': augment_params.get('posterize'),
'adjust_brightness': augment_params.get('adjust_brightness'),
'invert' : augment_params.get('invert'),
'contrast': augment_params.get('contrast'),
'shearX' : augment_params.get('shearX'),
'shearY' : augment_params.get('shearY'),
'translateX' : augment_params.get('translateX'),
'translateY' : augment_params.get('translateY'),
'sharpness' : augment_params.get('sharpness'),
'cutout' : augment_params.get('cutout'),
'rotate' : augment_params.get('rotate'),
'cut_25_left' : augment_params.get('cut_25_left'),
'cut_25_right': augment_params.get('cut_25_right'),
'cut_25_above': augment_params.get('cut_25_above'),
'cut_25_under': augment_params.get('cut_25_under')
# 'random_crop':random_crop
}
self.policy = list(k for k,v in self.AUGMENT_FUNCTION.items() if v)
# print(self.policy)
def mixup(img1,img2,factor):
img = img1.astype('float')* factor + img2.astype('float') * (1-factor)
img = np.clip(img, 0,255)
img = img.astype('uint8')
return img
def augment_fliplr(img,level):
if random.random() < level:
return np.fliplr(img)
return img
def augment_hsv(im, level=None, hgain=0.015, sgain=0.7, vgain=0.4):
im = im.copy()
# HSV color-space augmentation
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
return im_hsv
def hist_equalize(im, level=None,clahe=True, bgr=True):
im = im.copy()
# Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def solarize(image, level=128):
threshold = level
image = image.copy()
# For each pixel in the image, select the pixel
# if the value is less than the threshold.
# Otherwise, subtract 255 from the pixel.
return np.where(image <= threshold, image, 255 - image)
def posterize(img, level=3):
bits = level
shift = 8 - bits
# img = img >> shift
img = np.left_shift(img,shift)
img = np.right_shift(img,shift)
return img.astype('uint8')
def adjust_brightness(img,level=0.5):
factor = level
degenerate = np.zeros(img.shape,dtype='uint8')
img = RandAugment.mixup(img,degenerate,factor)
return img
def invert(img,level=None):
return 255-img
def contrast(img,factor=0.5):
degenerate = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
return RandAugment.mixup(img,degenerate,factor)
def shear_x(img,level=0.4,mode='reflect'):
M = np.array([[1, level, 0],
[0, 1 , 0],
[0, 0 , 1]],dtype='float')
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
sheared_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return sheared_img
def shear_y(img,level=0.4,mode='reflect'):
M = np.array([[1, 0 , 0],
[level, 1 , 0],
[0, 0 , 1]],dtype='float')
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
sheared_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return sheared_img
def translate_x(img,level,mode='reflect'):
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
        translate_pixel = int(width * level)
        # pure horizontal translation (the stray 'level' shear term in the
        # original second row has been dropped)
        M = np.array([[1, 0, translate_pixel],
                      [0, 1, 0],
                      [0, 0, 1]], dtype='float')
translate_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return translate_img
def translate_y(img,level,mode='reflect'):
height,width,_ = img.shape
option_mode ={
'reflect' : cv2.BORDER_REPLICATE,
'constant' : cv2.BORDER_CONSTANT
}
mode = option_mode[mode]
        translate_pixel = int(height * level)
        # vertical shift scaled by image height; the stray 'level' shear term
        # from the original matrix has been dropped
        M = np.array([[1, 0, 0],
                      [0, 1, translate_pixel],
                      [0, 0, 1]], dtype='float')
translate_img = cv2.warpPerspective(img, M, (width, height), borderMode=mode)
return translate_img
    def sharpness(img, level=0.5):
        # Working version of the sharpness augmenter that was left commented
        # out; assumption: smooth with the 3x3 kernel sketched in the original
        # comment, then blend back into the source via mixup, mirroring the
        # other augmenters.
        kernel = np.array([[1, 1, 1],
                           [1, 5, 1],
                           [1, 1, 1]], dtype='float32') / 13.
        degenerate = cv2.filter2D(img, -1, kernel)
        return RandAugment.mixup(img, degenerate, level)
    def cutout(img, level, **kwargs):
        img = img.copy()
        height, width, _ = img.shape
        padding_size = int(height * level), int(width * level)
        # fill value defaults to white when not supplied
        value = kwargs.get('value', 255)
        coordinate_h = np.random.randint(0, height - padding_size[0])
        coordinate_w = np.random.randint(0, width - padding_size[1])
        img[coordinate_h:coordinate_h + padding_size[0],
            coordinate_w:coordinate_w + padding_size[1], :] = value
        return img
def rotate(image, level=45, center = None, scale = 1.0):
angle=level
(h, w) = image.shape[:2]
if center is None:
center = (w / 2, h / 2)
# Perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
rotated = cv2.warpAffine(image, M, (w, h),borderMode=cv2.BORDER_REPLICATE)
return rotated
def cut_25_under(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_height = int((1-ratio)*height)
img_ = img[:new_height,:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_above(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_height = int(ratio*height)
img_ = img[new_height:,:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_right(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_width = int((1-ratio)*width)
img_ = img[:,:new_width,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def cut_25_left(img,level=0.25):
ratio = level
height,width,_ = img.shape
new_width = int(ratio*width)
img_ = img[:,new_width:,:]
height,width,_ = img_.shape
if height > width :
img2 = np.pad(img_,[[0,0],[(height-width)//2,(height-width)//2],[0,0]],mode='constant',constant_values=255)
else:
img2 = np.pad(img_,[[(width-height)//2,(width-height)//2],[0,0],[0,0]],mode='constant',constant_values=255)
img2 = cv2.resize(img2,(224,224))
return img2
def __call__(self,img):
augmenters = random.choices(self.policy, k=self.num_layers)
for augmenter in augmenters:
level = random.random()
# try:
min_arg,max_arg = self.ARGS_LIMIT[augmenter]
level = min_arg + (max_arg - min_arg) * level
img = self.AUGMENT_FUNCTION[augmenter](img,level=level)
# except:
# print(augmenter)
return img
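# --- Illustrative sketch (not part of the original module) ------------------
# __call__ above scales a random level into a (min, max) pair per enabled
# augmentation, so a hypothetical config could look like:
#   augment_params = {'num_layers': 2,
#                     'fliplr': (0.0, 1.0),
#                     'rotate': (-30, 30),
#                     'cutout': (0.1, 0.3)}
#   augmenter = RandAugment(augment_params)
#   augmented = augmenter(img)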
def augmentation_test():
img_org = cv2.imread('test.jpg')
import yaml
augment_params = yaml.safe_load(open('config/default/train_config.yaml')).get('augment_params')
augmenter = RandAugment(augment_params=augment_params)#(num_layers=1)
for _ in range(10000):
img_aug = augmenter(img_org)
img_pad = preprocess(img_aug,224)
# cv2.imshow('a',img_org)
# cv2.imshow('b',img_aug)
# cv2.imshow('c',img_pad)
# if cv2.waitKey(0)==ord('q'):
# exit()
if __name__ =='__main__':
augmentation_test()
| 42.694534
| 131
| 0.581865
| 1,689
| 13,278
| 4.407934
| 0.141504
| 0.07683
| 0.081666
| 0.045937
| 0.391135
| 0.324513
| 0.292814
| 0.292814
| 0.289993
| 0.285158
| 0
| 0.043883
| 0.279184
| 13,278
| 311
| 132
| 42.694534
| 0.733988
| 0.076141
| 0
| 0.348361
| 0
| 0
| 0.082111
| 0.002622
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094262
| false
| 0
| 0.016393
| 0.004098
| 0.204918
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13b9d127851e263bb83cf946e93cc967e190ce5a
| 453
|
py
|
Python
|
CalculatingPi/pi_linear_plot.py
|
davidmallasen/Hello_MPI
|
8a5b5694ffc1515d2bb2dee45355f92f1b68fbed
|
[
"MIT"
] | null | null | null |
CalculatingPi/pi_linear_plot.py
|
davidmallasen/Hello_MPI
|
8a5b5694ffc1515d2bb2dee45355f92f1b68fbed
|
[
"MIT"
] | null | null | null |
CalculatingPi/pi_linear_plot.py
|
davidmallasen/Hello_MPI
|
8a5b5694ffc1515d2bb2dee45355f92f1b68fbed
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
# Read data
size = []
time = []
with open("pi_linear.txt") as file:
for line in file.readlines():
x, y = line.split(',')
size.append(int(x.strip()))
time.append(float(y.strip()))
# Plot data
fig, ax = plt.subplots()
ax.plot(size, time)
ax.set(xlabel='Num. processes', ylabel='Time (s)',
title='Pi linear')
#ax.grid()
fig.savefig("pi_linear.png")
plt.show()
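# --- Illustrative note (not part of the original script) --------------------
# The script expects pi_linear.txt to hold one "num_processes,seconds" pair
# per line, e.g. (invented timings):
#   1,3.14
#   2,1.60
#   4,0.84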
| 19.695652
| 50
| 0.611479
| 69
| 453
| 3.985507
| 0.623188
| 0.087273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.207506
| 453
| 22
| 51
| 20.590909
| 0.766017
| 0.06181
| 0
| 0
| 0
| 0
| 0.137767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13bd80e7104701ce224a3004f95e9aa8f8c681e9
| 2,293
|
py
|
Python
|
myapp/processes/plotter.py
|
cp4cds/cp4cds-wps-template
|
ed170fcee72146dc07c64f76ec71cc289672fd32
|
[
"Apache-2.0"
] | null | null | null |
myapp/processes/plotter.py
|
cp4cds/cp4cds-wps-template
|
ed170fcee72146dc07c64f76ec71cc289672fd32
|
[
"Apache-2.0"
] | null | null | null |
myapp/processes/plotter.py
|
cp4cds/cp4cds-wps-template
|
ed170fcee72146dc07c64f76ec71cc289672fd32
|
[
"Apache-2.0"
] | null | null | null |
from pywps import Process, LiteralInput, ComplexInput, ComplexOutput
from pywps import Format
import logging
LOGGER = logging.getLogger('PYWPS')
import matplotlib
# no X11 server ... must be run first
# https://github.com/matplotlib/matplotlib/issues/3466/
matplotlib.use('Agg')
import matplotlib.pylab as plt
import cartopy.crs as ccrs
from netCDF4 import Dataset
AIR_DS = 'https://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.derived/surface/air.mon.ltm.nc'
def simple_plot(resource, variable=None, output=None):
output = output or 'plot.png'
ds = Dataset(resource)
values = ds.variables[variable]
fig = plt.figure(figsize=(20, 10))
ax = plt.axes(projection=ccrs.PlateCarree())
plt.contourf(values[0, :, :])
ax.stock_img()
ax.coastlines()
plt.colorbar()
fig.savefig(output)
plt.close()
return output
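# --- Illustrative usage sketch (not part of the original module) ------------
# simple_plot() can be exercised outside the WPS wrapper, assuming a local
# NetCDF copy of the AIR_DS dataset (hypothetical file name):
#   simple_plot(resource='air.mon.ltm.nc', variable='air', output='air.png')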
class SimplePlot(Process):
def __init__(self):
inputs = [
ComplexInput('dataset', 'Dataset', supported_formats=[Format('application/x-netcdf')],
default=AIR_DS,
abstract='Example: {0}'.format(AIR_DS)),
LiteralInput('variable', 'Variable', data_type='string',
default='air',
abstract='Please enter the variable name to be plotted, example: air'),
]
outputs = [
ComplexOutput('output', 'Simple Plot', supported_formats=[Format('image/png')],
as_reference=True),
]
super(SimplePlot, self).__init__(
self._handler,
identifier='simple_plot',
title='Simple Plot',
abstract='Returns a nice and simple plot.',
version='1.0',
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
variable = request.inputs['variable'][0].data
output = simple_plot(
resource=request.inputs['dataset'][0].file,
variable=variable)
LOGGER.info("produced output: %s", output)
response.outputs['output'].file = output
response.update_status("simple_plot done", 100)
return response
| 32.295775
| 115
| 0.613171
| 250
| 2,293
| 5.524
| 0.508
| 0.050688
| 0.021723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.267335
| 2,293
| 70
| 116
| 32.757143
| 0.810119
| 0.038814
| 0
| 0
| 0
| 0.017544
| 0.175909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.122807
| 0
| 0.22807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13beebf4acd9b21bb28b852b68ff91457137cd72
| 9,767
|
py
|
Python
|
backend/social_quiz.py
|
jmigual/socialQuiz
|
3d9d0980961619b555732899121d8ce6366fa96f
|
[
"MIT"
] | null | null | null |
backend/social_quiz.py
|
jmigual/socialQuiz
|
3d9d0980961619b555732899121d8ce6366fa96f
|
[
"MIT"
] | null | null | null |
backend/social_quiz.py
|
jmigual/socialQuiz
|
3d9d0980961619b555732899121d8ce6366fa96f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
import os.path
import random
import re
from flask import Flask, send_from_directory
from flask import request, abort
from flaskrun.flaskrun import flask_run
import datab.social_database as db
app = Flask(__name__)
# Regular expression to only accept certain files
fileChecker = re.compile(r"(.*\.js|.*\.html|.*\.png|.*\.css|.*\.map)$")
numberOfAnswers = 4
random.seed(7)
def root_dir(): # pragma: no cover
return os.path.abspath(os.path.dirname(__file__))
@app.route('/')
def root():
return index("index2.html")
@app.route('/<path:filename>')
def index(filename):
if fileChecker.match(filename):
return send_from_directory(os.path.join(root_dir(), 'static'), filename)
abort(403)
@app.route('/register')
def register():
    # obtain the email address from the query string
email = request.args.get('email')
print(email)
if email is None:
return json.dumps({})
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/join_room')
def join_room():
room_id = request.args.get('room_id')
email = request.args.get('email')
user_id = db.register_or_get_email(email)
db.exec_query("REPLACE INTO room_members (room_id, user_id) VALUES (%s,%s)", [room_id, user_id])
return json.dumps({"id": user_id})
@app.route('/answered_room')
def answered_room():
room_id = request.args.get('room_id')
user_id = request.args.get('user_id')
values = db.exec_query("SELECT a.id "
"FROM answer a INNER JOIN question q "
"WHERE a.question_id = q.id AND q.room_id = %s AND a.user_id= %s",
[room_id, user_id])
return json.dumps({"answered": len(values) > 0})
@app.route('/get_user_id')
def get_user_id():
email = request.args.get('email')
id_user = db.register_or_get_email(email)
return json.dumps({"id": id_user})
@app.route('/create_room')
def create_room():
user_id = request.args.get('user_id')
room_id = db.exec_query("INSERT INTO room (creator) VALUES (%s)", [user_id])
return json.dumps({"id": room_id})
@app.route('/get_rooms')
def get_rooms():
user_id = request.args.get('user_id')
values = db.exec_query("SELECT r.id, r.status FROM room r WHERE r.creator=%s", [user_id])
response = []
for val in values:
response.append({"id": val[0], "status": val[1]})
return json.dumps(response)
@app.route('/fill_room', methods=['POST'])
def fill_room():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"})
else:
room_id = json_data["room_id"]
questions = json_data["question"]
for q in questions:
db.exec_query("INSERT INTO question (room_id, question) VALUES (%s, %s)", [room_id, q])
return json.dumps({"info": "Data received"})
@app.route('/open_room')
def open_room():
room_id = request.args.get('room_id')
print(room_id)
db.exec_query("UPDATE room r SET r.status='started' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been opened successfully", "status": "started"})
@app.route('/close_room')
def close_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='closed' WHERE r.id = %s", [room_id])
return json.dumps({"info": "The room has been closed successfully", "status": "closed"})
@app.route('/finish_room')
def finish_room():
room_id = request.args.get('room_id')
db.exec_query("UPDATE room r SET r.status='finished' WHERE r.id = %s", [room_id])
values = db.exec_query("SELECT u.email , COUNT(qq.id) "
"FROM quiz_question qq "
"INNER JOIN users u ON (qq.asked_user_id = u.id) "
"INNER JOIN room_members rm ON (u.id = rm.user_id) "
"WHERE qq.correct_answer_id = qq.answered_id AND rm.room_id = %s "
"GROUP BY u.email "
"ORDER BY COUNT(qq.id) DESC",
[room_id])
ranking = []
for row in values:
ranking.append({"email": row[0], "correct": row[1]})
return json.dumps({"ranking": ranking})
@app.route('/room_status')
def status_room():
room_id = request.args.get('room_id')
# SELECT status FROM Room WHERE id = 1
values = db.exec_query("SELECT status FROM room WHERE id = %s", [room_id])
return json.dumps({
"status": values[0][0]
})
@app.route('/get_room_questions')
def get_room_question():
room_id = request.args.get('room_id')
values = db.exec_query("SELECT id, question FROM question WHERE room_id = %s", [room_id])
response = []
for val in values:
response.append({"id": val[0], "text": val[1]})
return json.dumps({"questions": response})
@app.route('/post_room_answers', methods=['POST'])
def post_room_answers():
json_data = request.get_json()
if json_data is None:
return json.dumps({"error": "no JSON found"}), 404
user_id = json_data["user_id"]
values = []
for a in json_data["answers"]:
values.append((a["id"], user_id, a["text"]))
print(values[len(values) - 1])
db.exec_many_query("INSERT INTO answer (question_id, user_id, answer) VALUES(%s,%s,%s)", values)
return json.dumps({"info": "Data received"})
@app.route('/get_quiz_question')
def get_question():
room_id = int(request.args.get('room_id'))
user_id = int(request.args.get('user_id'))
possible_questions = db.get_non_answered_questions(room_id, user_id)
possible_users_to_ask = db.get_non_answered_people(room_id, user_id)
question_id = []
asked_about_id = []
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
else:
possible_questions = db.get_all_questions(room_id)
if len(possible_questions) > 0:
question_id = random.sample(possible_questions, 1)
if len(possible_users_to_ask) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
else:
possible_users_to_ask = db.get_all_different_people(room_id, user_id)
if len(possible_questions) > 0:
asked_about_id = random.sample(possible_users_to_ask, 1)
    if len(question_id) > 0 and len(asked_about_id) > 0:
quiz_question_id = db.insert_quiz_question(user_id, asked_about_id[0], question_id[0])
other_users = db.get_all_different_people(room_id, asked_about_id[0])
random.shuffle(other_users)
answers = []
(answer_id, text_id) = db.get_answer(question_id[0], asked_about_id[0])
db.exec_query("UPDATE quiz_question SET correct_answer_id=%s WHERE id = %s", [answer_id, quiz_question_id])
answers.append((answer_id, text_id))
if min(numberOfAnswers - 1, len(other_users)) > 0:
for i in range(min(numberOfAnswers - 1, len(other_users))):
(answer_id, text_id) = db.get_answer(question_id[0], other_users[i])
answers.append((answer_id, text_id))
# if commented the first answer will be the correct one
random.shuffle(answers)
answer_json = []
for (answer_id, text_id) in answers:
answer_json.append({"id": answer_id, "text": text_id})
print(quiz_question_id)
# SELECT 'question' FROM 'Question' WHERE 'id' = 3
value = db.exec_query("SELECT id "
"FROM quiz_question "
"WHERE asked_user_id = %s AND about_user_id = %s AND question_id = %s",
[user_id, asked_about_id[0], question_id[0]])
quiz_question_id = value[0][0]
value = db.exec_query("SELECT q.question "
"FROM question q "
"WHERE q.id = %s",
[question_id[0]])
question_text = value[0][0]
value = db.exec_query("SELECT u.email "
"FROM users u "
"WHERE u.id=%s",
[asked_about_id[0]])
user_name = value[0][0]
question_text = "What did %s answer to '%s' ?" % (user_name, question_text)
return json.dumps({
"id": quiz_question_id,
"question": question_text,
"answers": answer_json
})
else:
return json.dumps({"error": "Not available questions for this user in this room"})
@app.route('/post_quiz_answer')
def post_answer():
quiz_question_id = request.args.get('quiz_question_id')
quiz_answer_id = request.args.get('quiz_answer_id')
db.exec_query("UPDATE quiz_question SET answered_id = %s WHERE id = %s", [quiz_answer_id, quiz_question_id])
value = db.exec_query("SELECT qq.answered_id, qq.correct_answer_id, qq.question_id "
"FROM quiz_question qq "
"WHERE qq.id = %s", [quiz_question_id])
answered_id = value[0][0]
correct_answer_id = value[0][1]
question_id = value[0][2]
value = db.exec_query("SELECT a.answer FROM answer a WHERE a.id = %s ", [correct_answer_id])
if len(value) > 0:
text = value[0][0]
else:
text = "something when wrong"
if value is None:
return json.dumps({"error": "Internal server error"})
return json.dumps({
"correct": answered_id == correct_answer_id,
"question": question_id,
"correct_answer": {"id": correct_answer_id, "text": text}
})
if __name__ == '__main__':
flask_run(app)
| 33.221088
| 115
| 0.61288
| 1,377
| 9,767
| 4.109659
| 0.130719
| 0.04241
| 0.053013
| 0.033928
| 0.449549
| 0.35766
| 0.291571
| 0.241739
| 0.186605
| 0.176003
| 0
| 0.008184
| 0.249411
| 9,767
| 293
| 116
| 33.334471
| 0.763743
| 0.031535
| 0
| 0.228972
| 0
| 0.004673
| 0.233887
| 0.006667
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084112
| false
| 0
| 0.037383
| 0.009346
| 0.228972
| 0.018692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13bef8558df71652db939d620d20eb4457b48c53
| 10,282
|
py
|
Python
|
astacus/node/snapshotter.py
|
aiven/astacus
|
2d64e1f33e01d50a41127f41d9da3d1ab0ce0387
|
[
"Apache-2.0"
] | 19
|
2020-06-22T12:17:59.000Z
|
2022-02-18T00:12:17.000Z
|
astacus/node/snapshotter.py
|
aiven/astacus
|
2d64e1f33e01d50a41127f41d9da3d1ab0ce0387
|
[
"Apache-2.0"
] | 7
|
2020-06-24T05:16:20.000Z
|
2022-02-28T07:35:31.000Z
|
astacus/node/snapshotter.py
|
aiven/astacus
|
2d64e1f33e01d50a41127f41d9da3d1ab0ce0387
|
[
"Apache-2.0"
] | 2
|
2020-09-05T21:23:08.000Z
|
2022-02-17T15:02:37.000Z
|
"""
Copyright (c) 2020 Aiven Ltd
See LICENSE for details
"""
from astacus.common import magic, utils
from astacus.common.ipc import SnapshotFile, SnapshotHash, SnapshotState
from astacus.common.progress import increase_worth_reporting, Progress
from pathlib import Path
from typing import Optional
import base64
import hashlib
import logging
import os
import threading
logger = logging.getLogger(__name__)
_hash = hashlib.blake2s
def hash_hexdigest_readable(f, *, read_buffer=1_000_000):
h = _hash()
while True:
data = f.read(read_buffer)
if not data:
break
h.update(data)
return h.hexdigest()
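# --- Illustrative sketch (not part of the original module) ------------------
# hash_hexdigest_readable() accepts any binary file-like object; an in-memory
# stream is enough to exercise it.
def _example_hash_stream():
    import io
    return hash_hexdigest_readable(io.BytesIO(b"astacus"))  # 64-char blake2s hex digest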
class Snapshotter:
"""Snapshotter keeps track of files on disk, and their hashes.
    The hash on disk MAY change, which may require a subsequent
    incremental snapshot and/or ignoring the files which have changed.
The output to outside is just root object's hash, as well as list
of other hashes which correspond to files referred to within the
file list contained in root object.
Note that any call to public API MUST be made with
snapshotter.lock held. This is because Snapshotter is process-wide
utility that is shared across operations, possibly used from
multiple threads, and the single-operation-only mode of operation
is not exactly flawless (the 'new operation can be started with
    old running' is an intentional feature, but the new operation should
eventually replace the old). The lock itself might not need to be
built-in to Snapshotter, but having it there enables asserting its
state during public API calls.
"""
def __init__(self, *, src, dst, globs, parallel):
assert globs # model has empty; either plugin or configuration must supply them
self.src = Path(src)
self.dst = Path(dst)
self.globs = globs
self.relative_path_to_snapshotfile = {}
self.hexdigest_to_snapshotfiles = {}
self.parallel = parallel
self.lock = threading.Lock()
def _list_files(self, basepath: Path):
result_files = set()
for glob in self.globs:
for path in basepath.glob(glob):
if not path.is_file() or path.is_symlink():
continue
relpath = path.relative_to(basepath)
for parent in relpath.parents:
if parent.name == magic.ASTACUS_TMPDIR:
break
else:
result_files.add(relpath)
return sorted(result_files)
def _list_dirs_and_files(self, basepath: Path):
files = self._list_files(basepath)
dirs = {p.parent for p in files}
return sorted(dirs), files
def _add_snapshotfile(self, snapshotfile: SnapshotFile):
old_snapshotfile = self.relative_path_to_snapshotfile.get(snapshotfile.relative_path, None)
if old_snapshotfile:
self._remove_snapshotfile(old_snapshotfile)
self.relative_path_to_snapshotfile[snapshotfile.relative_path] = snapshotfile
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles.setdefault(snapshotfile.hexdigest, []).append(snapshotfile)
def _remove_snapshotfile(self, snapshotfile: SnapshotFile):
assert self.relative_path_to_snapshotfile[snapshotfile.relative_path] == snapshotfile
del self.relative_path_to_snapshotfile[snapshotfile.relative_path]
if snapshotfile.hexdigest:
self.hexdigest_to_snapshotfiles[snapshotfile.hexdigest].remove(snapshotfile)
def _snapshotfile_from_path(self, relative_path):
src_path = self.src / relative_path
st = src_path.stat()
return SnapshotFile(relative_path=relative_path, mtime_ns=st.st_mtime_ns, file_size=st.st_size)
def _get_snapshot_hash_list(self, relative_paths):
same = 0
lost = 0
for relative_path in relative_paths:
old_snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
try:
snapshotfile = self._snapshotfile_from_path(relative_path)
except FileNotFoundError:
lost += 1
if increase_worth_reporting(lost):
logger.debug("#%d. lost - %s disappeared before stat, ignoring", lost, self.src / relative_path)
continue
if old_snapshotfile:
snapshotfile.hexdigest = old_snapshotfile.hexdigest
snapshotfile.content_b64 = old_snapshotfile.content_b64
if old_snapshotfile == snapshotfile:
same += 1
if increase_worth_reporting(same):
logger.debug("#%d. same - %r in %s is same", same, old_snapshotfile, relative_path)
continue
yield snapshotfile
def get_snapshot_hashes(self):
assert self.lock.locked()
return [
SnapshotHash(hexdigest=dig, size=sf[0].file_size) for dig, sf in self.hexdigest_to_snapshotfiles.items() if sf
]
def get_snapshot_state(self):
assert self.lock.locked()
return SnapshotState(root_globs=self.globs, files=sorted(self.relative_path_to_snapshotfile.values()))
def _snapshot_create_missing_directories(self, *, src_dirs, dst_dirs):
changes = 0
for i, relative_dir in enumerate(set(src_dirs).difference(dst_dirs), 1):
dst_path = self.dst / relative_dir
dst_path.mkdir(parents=True, exist_ok=True)
if increase_worth_reporting(i):
logger.debug("#%d. new directory: %r", i, relative_dir)
changes += 1
return changes
def _snapshot_remove_extra_files(self, *, src_files, dst_files):
changes = 0
for i, relative_path in enumerate(set(dst_files).difference(src_files), 1):
dst_path = self.dst / relative_path
snapshotfile = self.relative_path_to_snapshotfile.get(relative_path)
if snapshotfile:
self._remove_snapshotfile(snapshotfile)
dst_path.unlink()
if increase_worth_reporting(i):
logger.debug("#%d. extra file: %r", i, relative_path)
changes += 1
return changes
def _snapshot_add_missing_files(self, *, src_files, dst_files):
existing = 0
disappeared = 0
changes = 0
for i, relative_path in enumerate(set(src_files).difference(dst_files), 1):
src_path = self.src / relative_path
dst_path = self.dst / relative_path
try:
os.link(src=src_path, dst=dst_path, follow_symlinks=False)
except FileExistsError:
# This happens only if snapshot is started twice at
# same time. While it is technically speaking upstream
# error, we rather handle it here than leave
# exceptions not handled.
existing += 1
if increase_worth_reporting(existing):
logger.debug("#%d. %s already existed, ignoring", existing, src_path)
continue
except FileNotFoundError:
disappeared += 1
if increase_worth_reporting(disappeared):
logger.debug("#%d. %s disappeared before linking, ignoring", disappeared, src_path)
continue
if increase_worth_reporting(i - disappeared):
logger.debug("#%d. new file: %r", i - disappeared, relative_path)
changes += 1
return changes
def snapshot(self, *, progress: Optional[Progress] = None):
assert self.lock.locked()
if progress is None:
progress = Progress()
src_dirs, src_files = self._list_dirs_and_files(self.src)
progress.start(1)
if self.src == self.dst:
# The src=dst mode should be used if and only if it is
# known that files will not disappear between snapshot and
# upload steps (e.g. Astacus controls the lifecycle of the
# files within). In that case, there is little point in
# making extra symlinks and we can just use the src
# directory contents as-is.
dst_dirs, dst_files = src_dirs, src_files
else:
progress.add_total(3)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
# Create missing directories
changes = self._snapshot_create_missing_directories(src_dirs=src_dirs, dst_dirs=dst_dirs)
progress.add_success()
# Remove extra files
changes += self._snapshot_remove_extra_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# Add missing files
changes += self._snapshot_add_missing_files(src_files=src_files, dst_files=dst_files)
progress.add_success()
# We COULD also remove extra directories, but it is not
# probably really worth it and due to ignored files it
# actually might not even work.
# Then, create/update corresponding snapshotfile objects (old
# ones were already removed)
dst_dirs, dst_files = self._list_dirs_and_files(self.dst)
snapshotfiles = list(self._get_snapshot_hash_list(dst_files))
progress.add_total(len(snapshotfiles))
def _cb(snapshotfile):
# src may or may not be present; dst is present as it is in snapshot
with snapshotfile.open_for_reading(self.dst) as f:
if snapshotfile.file_size <= magic.EMBEDDED_FILE_SIZE:
snapshotfile.content_b64 = base64.b64encode(f.read()).decode()
else:
snapshotfile.hexdigest = hash_hexdigest_readable(f)
return snapshotfile
def _result_cb(*, map_in, map_out):
self._add_snapshotfile(map_out)
progress.add_success()
return True
changes += len(snapshotfiles)
utils.parallel_map_to(iterable=snapshotfiles, fun=_cb, result_callback=_result_cb, n=self.parallel)
# We initially started with 1 extra
progress.add_success()
return changes
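# --- Illustrative usage sketch (not part of the original module) ------------
# Minimal use under the locking discipline described in the class docstring;
# the paths are hypothetical and a real caller would reuse one Snapshotter.
def _example_snapshot_run(src="/tmp/astacus-src", dst="/tmp/astacus-dst"):
    snapshotter = Snapshotter(src=src, dst=dst, globs=["**/*"], parallel=1)
    with snapshotter.lock:
        changes = snapshotter.snapshot(progress=Progress())
        hashes = snapshotter.get_snapshot_hashes()
    return changes, hashes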
# ==== colcon_gradle/task/gradle/build.py | richiware/colcon-gradle @ 00b121def8c15abd1dca310d0ea4e1f34f98f4d1 | Apache-2.0 | 6,671 bytes | blob 13c016b99333655007d9a8cc82e9391a0d3526d8 ====
# Copyright 2018 Esteve Fernandez
# Licensed under the Apache License, Version 2.0
from distutils import dir_util
import glob
import os
from pathlib import Path
import shutil
from colcon_core.environment import create_environment_scripts
from colcon_core.logging import colcon_logger
from colcon_core.plugin_system import satisfies_version
from colcon_core.shell import create_environment_hook
from colcon_core.shell import get_command_environment
from colcon_core.task import run
from colcon_core.task import TaskExtensionPoint
from colcon_gradle.task.gradle import get_wrapper_executable
from colcon_gradle.task.gradle import GRADLE_EXECUTABLE
from colcon_gradle.task.gradle import has_wrapper_executable
logger = colcon_logger.getChild(__name__)
class GradleBuildTask(TaskExtensionPoint):
"""Build gradle packages."""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(TaskExtensionPoint.EXTENSION_POINT_VERSION, '^1.0')
def _build_file_tree(self, start_path):
out_dirnames = set()
out_filenames = set()
for dirname, dirnames, filenames in os.walk(start_path):
for subdirname in dirnames:
out_dirnames.add(
os.path.relpath(
os.path.join(dirname, subdirname), start=start_path))
for filename in filenames:
out_filenames.add(
os.path.relpath(
os.path.join(dirname, filename), start=start_path))
return (out_dirnames, out_filenames)
def add_arguments(self, *, parser): # noqa: D102
parser.add_argument(
'--gradle-args',
nargs='*', metavar='*', type=str.lstrip,
help='Pass arguments to Gradle projects. '
'Arguments matching other options must be prefixed by a space,\n'
'e.g. --gradle-args " --help"')
parser.add_argument(
'--gradle-task',
help='Run a specific task instead of the default task')
async def build( # noqa: D102
self, *, additional_hooks=None, skip_hook_creation=False
):
pkg = self.context.pkg
args = self.context.args
logger.info(
"Building Gradle package in '{args.path}'".format_map(locals()))
if additional_hooks is None:
additional_hooks = []
# add jars and classes to CLASSPATH with wildcards
# https://docs.oracle.com/javase/8/docs/technotes/tools/windows/classpath.html#A1100762
additional_hooks += create_environment_hook(
'classpath_jars', Path(args.install_base), pkg.name,
'CLASSPATH', os.path.join('share', pkg.name, 'java', '*'),
mode='prepend')
additional_hooks += create_environment_hook(
'classpath_classes', Path(args.install_base), pkg.name,
'CLASSPATH', os.path.join('share', pkg.name, 'java'),
mode='prepend')
try:
env = await get_command_environment(
'build', args.build_base, self.context.dependencies)
except RuntimeError as e:
logger.error(str(e))
return 1
rc = await self._build(args, env)
if rc and rc.returncode:
return rc.returncode
rc = await self._install(args, env)
if rc and rc.returncode:
return rc.returncode
if not skip_hook_creation:
create_environment_scripts(
pkg, args, additional_hooks=additional_hooks)
async def _build(self, args, env):
self.progress('build')
# remove anything on the destination tree but not in the source tree
src_package_src_dir = os.path.join(args.path, 'src')
dst_package_src_dir = os.path.join(args.build_base, 'src')
src_dirnames, src_filenames = self._build_file_tree(
src_package_src_dir)
dst_dirnames, dst_filenames = self._build_file_tree(
dst_package_src_dir)
prune_dirnames = dst_dirnames - src_dirnames
prune_filenames = dst_filenames - src_filenames
for prune_filename in prune_filenames:
os.remove(os.path.join(dst_package_src_dir, prune_filename))
        for prune_dirname in prune_dirnames:
            prune_path = os.path.join(dst_package_src_dir, prune_dirname)
            if os.path.exists(prune_path):
                shutil.rmtree(prune_path)
# copy files from the source directory to the build one to avoid
# polluting the latter during the build process
dir_util.copy_tree(args.path, args.build_base, update=1)
# Gradle Executable
if has_wrapper_executable(args):
cmd = [str(get_wrapper_executable(args).absolute())]
elif GRADLE_EXECUTABLE is not None:
cmd = [GRADLE_EXECUTABLE]
else:
raise RuntimeError(
"Could not find 'gradle' or 'wrapper' executable")
# Gradle Task (by default 'assemble')
if args.gradle_task:
cmd += [args.gradle_task]
else:
cmd += ['assemble']
# Gradle Arguments
cmd += (args.gradle_args or [])
cmd += ['--stacktrace']
# Add install_base to environment in GRADLE_INSTALL_PREFIX
env['GRADLE_INSTALL_PREFIX'] = args.install_base
# invoke build step
return await run(
self.context, cmd, cwd=args.build_base, env=env)
async def _install(self, args, env):
self.progress('install')
pkg = self.context.pkg
# remove anything on the destination tree but not in the build tree
bld_package_jar_dir = os.path.join(args.build_base, 'build', 'libs')
dst_package_jar_dir = os.path.join(
args.install_base, 'share', pkg.name, 'java')
os.makedirs(dst_package_jar_dir, exist_ok=True)
bld_dirnames, bld_filenames = self._build_file_tree(
bld_package_jar_dir)
dst_dirnames, dst_filenames = self._build_file_tree(
dst_package_jar_dir)
prune_dirnames = dst_dirnames - bld_dirnames
prune_filenames = dst_filenames - bld_filenames
for prune_filename in prune_filenames:
os.remove(os.path.join(dst_package_jar_dir, prune_filename))
        for prune_dirname in prune_dirnames:
            prune_path = os.path.join(dst_package_jar_dir, prune_dirname)
            if os.path.exists(prune_path):
                shutil.rmtree(prune_path)
for jar in glob.glob(os.path.join(bld_package_jar_dir, '*.jar')):
jar_filename = os.path.basename(jar)
shutil.copy2(jar, os.path.join(dst_package_jar_dir, jar_filename))
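# Illustrative sketch (hedged): _build() and _install() above share one
# pattern -- walk two file trees and prune anything that exists only in the
# destination. A standalone version of that pattern; `prune_tree` is an
# invented name for illustration.
import os
import shutil

def prune_tree(src_root, dst_root):
    """Delete files/dirs under dst_root that have no counterpart under src_root."""
    def tree(root):
        dirs, files = set(), set()
        for dirname, dirnames, filenames in os.walk(root):
            for d in dirnames:
                dirs.add(os.path.relpath(os.path.join(dirname, d), start=root))
            for f in filenames:
                files.add(os.path.relpath(os.path.join(dirname, f), start=root))
        return dirs, files
    src_dirs, src_files = tree(src_root)
    dst_dirs, dst_files = tree(dst_root)
    for rel in dst_files - src_files:
        os.remove(os.path.join(dst_root, rel))
    for rel in dst_dirs - src_dirs:
        full = os.path.join(dst_root, rel)
        if os.path.exists(full):   # a parent may already have been removed
            shutil.rmtree(full)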
# ==== model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py | eladc-git/model_optimization @ 46d1c893ca23e61d8ef7597184ad2ba6e2ae6e7a | Apache-2.0 | 5,092 bytes | blob 13c18896742aca9b72a3db6ff3b991575fad3170 ====
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Union
import tensorflow as tf
from model_compression_toolkit.common.constants import MIN_THRESHOLD, THRESHOLD
def ste_ceil(x: tf.Tensor) -> tf.Tensor:
"""
Return the ceil values of a tensor.
"""
error = tf.stop_gradient(tf.math.ceil(x) - x)
return error + x
def ste_round(x: tf.Tensor) -> tf.Tensor:
"""
Return the rounded values of a tensor.
"""
error = tf.stop_gradient(tf.math.round(x) - x)
return error + x
def log2(x: tf.Tensor) -> tf.Tensor:
"""
Compute log2 of a tensor.
"""
return tf.math.log(x) / tf.math.log(2.0)
def power_of_two_max(max_tensor: tf.Tensor) -> tf.Tensor:
"""
Compute the power of two threshold for a tensor.
"""
return tf.math.pow(2.0, ste_ceil(log2(tf.maximum(max_tensor, MIN_THRESHOLD))))
def calculate_delta(max_tensor: tf.Tensor,
num_bits: int,
signed: bool) -> tf.Tensor:
"""
Compute the step size for the quantization.
"""
return max_tensor / (2 ** (num_bits - int(signed)))
def adjustable_steps(x: tf.Variable, t: float) -> tf.Tensor:
"""
    A function to gradually quantize a float variable to one of the integer values [-1, 0, 1]
Args:
x: input float variable
t: temperature to control quantization
Returns:
semi-quantized variable
"""
return tf.sigmoid(tf.add(x, 1) / t) + tf.sigmoid(tf.add(x, -1) / t) - 1
def ste_clip(x: Union[tf.Tensor, tf.Variable], max_val=1, min_val=None) -> tf.Tensor:
"""
clip a variable between fixed values such that min_val<=output<=max_val
Args:
x: input variable
max_val: maximum value for clipping
min_val: minimum value for clipping (defaults to -max_val)
Returns:
clipped variable
"""
min_val = -max_val if min_val is None else min_val
return tf.stop_gradient(tf.math.minimum(tf.math.maximum(x, min_val), max_val) - x) + x
def symmetric_quantizer(input_tensor: tf.Tensor,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool) -> tf.Tensor:
"""
Quantize a tensor symmetrically.
Args:
input_tensor: Tensor to quantize.
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained or not.
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(input_tensor / delta)
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * tf.math.minimum(tf.math.maximum(tensor_q, min_int), max_int)
def symmetric_constrained_quantizer(input_tensor: tf.Tensor,
auxvar_tensor: tf.Variable,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool,
max_lsbs_change: int = 1) -> tf.Tensor:
"""
Quantize a tensor symmetrically with maximum LSBs shift.
Args:
input_tensor: Tensor to quantize. values of this tensor are not changed during gptq.
        auxvar_tensor: Tensor that manifests the bit shift of the weight due to gptq
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained or not.
max_lsbs_change: maximum number of LSBs that the auxvar is allowed to change
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(tf.stop_gradient(tf.round(input_tensor / delta)) + ste_clip(auxvar_tensor, max_val=max_lsbs_change))
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * ste_clip(tensor_q, max_val=max_int, min_val=min_int)
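# Illustrative sketch (hedged): a quick check of the straight-through
# estimator used above. ste_round() rounds in the forward pass but passes
# gradients through unchanged (d/dx == 1), which is what makes the
# quantizers trainable. This block is not part of the original module.
if __name__ == "__main__":
    x = tf.Variable([0.2, 0.7, 1.4])
    with tf.GradientTape() as tape:
        y = ste_round(x)
    print(y.numpy())                     # -> [0. 1. 1.]
    print(tape.gradient(y, x).numpy())   # -> [1. 1. 1.] (gradient flows through)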
# ==== pygments/lexers/tnt.py | btashton/pygments @ ceaad0372055ed0064121020fea032fdda429779 | BSD-2-Clause | 9,421 bytes | blob 13c2b5d7ceaee0819464ed2dba5f6801b590f3e0 | 1 star ====
# -*- coding: utf-8 -*-
"""
pygments.lexers.tnt
~~~~~~~~~~~~~~~~~~~
Lexer for Typographic Number Theory.
:copyright: Copyright 2019-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer
from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
Punctuation, Error
__all__ = ['TNTLexer']
class TNTLexer(Lexer):
"""
Lexer for Typographic Number Theory, as described in the book
Gödel, Escher, Bach, by Douglas R. Hofstadter,
or as summarized here:
https://github.com/Kenny2github/language-tnt/blob/master/README.md#summary-of-tnt
.. versionadded:: 2.7
"""
name = 'Typographic Number Theory'
aliases = ['tnt']
filenames = ['*.tnt']
cur = []
LOGIC = set('⊃→]&∧^|∨Vv')
OPERATORS = set('+.⋅*')
VARIABLES = set('abcde')
PRIMES = set("'′")
NEGATORS = set('~!')
QUANTIFIERS = set('AE∀∃')
NUMBERS = set('0123456789')
WHITESPACE = set('\t \v\n')
RULES = re.compile('''(?xi)
joining | separation | double-tilde | fantasy\\ rule
| carry[- ]over(?:\\ of)?(?:\\ line)?\\ ([0-9]+) | detachment
| contrapositive | De\\ Morgan | switcheroo
| specification | generalization | interchange
| existence | symmetry | transitivity
| add\\ S | drop\\ S | induction
| axiom\\ ([1-5]) | premise | push | pop
''')
LINENOS = re.compile(r'(?:[0-9]+)(?:(?:, ?|,? and )(?:[0-9]+))*')
COMMENT = re.compile(r'\[[^\n\]]+\]')
def whitespace(self, start, text, required=False):
"""Tokenize whitespace."""
end = start
try:
while text[end] in self.WHITESPACE:
end += 1
except IndexError:
end = len(text)
if required:
assert end != start
if end != start:
self.cur.append((start, Text, text[start:end]))
return end
def variable(self, start, text):
"""Tokenize a variable."""
assert text[start] in self.VARIABLES
end = start+1
while text[end] in self.PRIMES:
end += 1
self.cur.append((start, Name.Variable, text[start:end]))
return end
def term(self, start, text):
"""Tokenize a term."""
if text[start] == 'S': # S...S(...) or S...0
end = start+1
while text[end] == 'S':
end += 1
self.cur.append((start, Number.Integer, text[start:end]))
return self.term(end, text)
if text[start] == '0': # the singleton 0
self.cur.append((start, Number.Integer, text[start]))
return start+1
if text[start] in self.VARIABLES: # a''...
return self.variable(start, text)
if text[start] == '(': # (...+...)
self.cur.append((start, Punctuation, text[start]))
start = self.term(start+1, text)
assert text[start] in self.OPERATORS
self.cur.append((start, Operator, text[start]))
start = self.term(start+1, text)
assert text[start] == ')'
self.cur.append((start, Punctuation, text[start]))
return start+1
raise AssertionError # no matches
def formula(self, start, text):
"""Tokenize a formula."""
if text[start] in '[]': # fantasy push or pop
self.cur.append((start, Keyword, text[start]))
return start+1
if text[start] in self.NEGATORS: # ~<...>
end = start+1
while text[end] in self.NEGATORS:
end += 1
self.cur.append((start, Operator, text[start:end]))
return self.formula(end, text)
if text[start] in self.QUANTIFIERS: # Aa:<...>
self.cur.append((start, Keyword.Declaration, text[start]))
start = self.variable(start+1, text)
assert text[start] == ':'
self.cur.append((start, Punctuation, text[start]))
return self.formula(start+1, text)
if text[start] == '<': # <...&...>
self.cur.append((start, Punctuation, text[start]))
start = self.formula(start+1, text)
assert text[start] in self.LOGIC
self.cur.append((start, Operator, text[start]))
start = self.formula(start+1, text)
assert text[start] == '>'
self.cur.append((start, Punctuation, text[start]))
return start+1
# ...=...
start = self.term(start, text)
assert text[start] == '='
self.cur.append((start, Operator, text[start]))
start = self.term(start+1, text)
return start
def rule(self, start, text):
"""Tokenize a rule."""
match = self.RULES.match(text, start)
assert match is not None
groups = sorted(match.regs[1:]) # exclude whole match
for group in groups:
if group[0] >= 0: # this group matched
self.cur.append((start, Keyword, text[start:group[0]]))
self.cur.append((group[0], Number.Integer,
text[group[0]:group[1]]))
if group[1] != match.end():
self.cur.append((group[1], Keyword,
text[group[1]:match.end()]))
break
else:
self.cur.append((start, Keyword, text[start:match.end()]))
return match.end()
def lineno(self, start, text):
"""Tokenize a line marker."""
end = start
while text[end] not in self.NUMBERS:
end += 1
self.cur.append((start, Punctuation, text[start]))
self.cur.append((start+1, Text, text[start+1:end]))
start = end
match = self.LINENOS.match(text, start)
assert match is not None
assert text[match.end()] == ')'
self.cur.append((match.start(), Number.Integer, match.group(0)))
self.cur.append((match.end(), Punctuation, text[match.end()]))
return match.end() + 1
def error_till_line_end(self, start, text):
"""Mark everything from ``start`` to the end of the line as Error."""
end = start
try:
while text[end] != '\n': # there's whitespace in rules
end += 1
except IndexError:
end = len(text)
if end != start:
self.cur.append((start, Error, text[start:end]))
end = self.whitespace(end, text)
return end
def get_tokens_unprocessed(self, text):
"""Returns a list of TNT tokens."""
self.cur = []
start = end = self.whitespace(0, text)
while start <= end < len(text):
# try line number
while text[end] in self.NUMBERS:
end += 1
if end != start: # actual number present
self.cur.append((start, Number.Integer, text[start:end]))
# whitespace is required after a line number
orig = len(self.cur)
try:
start = end = self.whitespace(end, text, True)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(end, text)
continue
# at this point it could be a comment
match = self.COMMENT.match(text, start)
if match is not None:
self.cur.append((start, Comment, text[start:match.end()]))
start = end = match.end()
# anything after the closing bracket is invalid
start = end = self.error_till_line_end(start, text)
# do not attempt to process the rest
continue
del match
# one formula, possibly containing subformulae
orig = len(self.cur)
try:
start = end = self.formula(start, text)
except AssertionError: # not well-formed
del self.cur[orig:]
while text[end] not in self.WHITESPACE:
end += 1
self.cur.append((start, Error, text[start:end]))
start = end
# skip whitespace after formula
orig = len(self.cur)
try:
start = end = self.whitespace(end, text, True)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
# rule proving this formula a theorem
orig = len(self.cur)
try:
start = end = self.rule(start, text)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
# skip whitespace after rule
start = end = self.whitespace(end, text)
# line marker
if text[start] == '(':
orig = len(self.cur)
try:
start = end = self.lineno(start, text)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
start = end = self.whitespace(start, text)
return self.cur
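# Illustrative sketch (hedged; not part of the original module): feed one
# TNT derivation line -- a quantified formula, a rule, and a line marker --
# to the lexer and inspect the (index, token, text) triples it produces.
if __name__ == '__main__':
    lexer = TNTLexer()
    for index, token, text in lexer.get_tokens_unprocessed('Aa:a=a\tpremise (1)\n'):
        print(index, token, repr(text))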
# ==== contacts/urls.py | cheradenine/Django-CRM @ 692572ced050d314c1f880af8b4000c97cbf7440 | MIT | 1,233 bytes | blob 13c2bfaf0d362a6c304791ee3c1accf9f548727b | 2 stars ====
from django.urls import path
from contacts.views import (
ContactsListView, CreateContactView, ContactDetailView,
UpdateContactView, RemoveContactView,
GetContactsView, AddCommentView, UpdateCommentView,
DeleteCommentView, AddAttachmentsView, DeleteAttachmentsView)
app_name = 'contacts'
urlpatterns = [
path('list/', ContactsListView.as_view(), name='list'),
path('create/', CreateContactView.as_view(), name='add_contact'),
path('<int:pk>/view/', ContactDetailView.as_view(), name="view_contact"),
path('<int:pk>/edit/', UpdateContactView.as_view(), name="edit_contact"),
path('<int:pk>/delete/',
RemoveContactView.as_view(),
name="remove_contact"),
path('get/list/', GetContactsView.as_view(), name="get_contacts"),
path('comment/add/', AddCommentView.as_view(), name="add_comment"),
path('comment/edit/', UpdateCommentView.as_view(), name="edit_comment"),
path('comment/remove/',
DeleteCommentView.as_view(),
name="remove_comment"),
path('attachment/add/',
AddAttachmentsView.as_view(),
name="add_attachment"),
path('attachment/remove/', DeleteAttachmentsView.as_view(),
name="remove_attachment"),
]
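# Illustrative sketch (hedged): with app_name = 'contacts', these routes are
# reversed through the 'contacts:' namespace. The '/contacts/' prefix assumes
# the project urls.py includes this module via
# path('contacts/', include('contacts.urls')).
#
#   from django.urls import reverse
#   reverse('contacts:view_contact', kwargs={'pk': 7})  # -> '/contacts/7/view/'
#   reverse('contacts:list')                            # -> '/contacts/list/'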
# ==== LIM_scripts/func_curry.py | Bhare8972/LOFAR-LIM @ 89f25be8c02cb8980c2e237da3eaac279d40a06a | MIT | 4,269 bytes | blob 13c55ddf22e3a453950de6b6142214790512cd06 | 3 stars ====
#!/usr/bin/env python3
# Coded by Massimiliano Tomassoli, 2012.
#
# - Thanks to b49P23TIvg for suggesting that I should use a set operation
# instead of repeated membership tests.
# - Thanks to Ian Kelly for pointing out that
# - "minArgs = None" is better than "minArgs = -1",
# - "if args" is better than "if len(args)", and
# - I should use "isdisjoint".
#
def genCur(func, unique = True, minArgs = None):
""" Generates a 'curried' version of a function. """
def g(*myArgs, **myKwArgs):
def f(*args, **kwArgs):
if args or kwArgs: # some more args!
# Allocates data to assign to the next 'f'.
newArgs = myArgs + args
newKwArgs = dict.copy(myKwArgs)
# If unique is True, we don't want repeated keyword arguments.
if unique and not kwArgs.keys().isdisjoint(newKwArgs):
raise ValueError("Repeated kw arg while unique = True")
# Adds/updates keyword arguments.
newKwArgs.update(kwArgs)
# Checks whether it's time to evaluate func.
if minArgs is not None and minArgs <= len(newArgs) + len(newKwArgs):
return func(*newArgs, **newKwArgs) # time to evaluate func
else:
return g(*newArgs, **newKwArgs) # returns a new 'f'
else: # the evaluation was forced
return func(*myArgs, **myKwArgs)
return f
return g
def cur(f, minArgs = None):
return genCur(f, True, minArgs)
def curr(f, minArgs = None):
return genCur(f, False, minArgs)
if __name__ == "__main__":
# Simple Function.
def func(a, b, c, d, e, f, g = 100):
print(a, b, c, d, e, f, g)
# NOTE: '<====' means "this line prints to the screen".
# Example 1.
f = cur(func) # f is a "curried" version of func
c1 = f(1)
c2 = c1(2, d = 4) # Note that c is still unbound
c3 = c2(3)(f = 6)(e = 5) # now c = 3
c3() # () forces the evaluation <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(30)(f = 60)(e = 50) # now c = 30
c4() # () forces the evaluation <====
# it prints "1 2 30 4 50 60 100"
print("\n------\n")
# Example 2.
f = curr(func) # f is a "curried" version of func
# curr = cur with possibly repeated
# keyword args
c1 = f(1, 2)(3, 4)
    c2 = c1(e = 5)(f = 6)(e = 10)() # oops... we repeated 'e' because we <====
# changed our mind about it!
# again, () forces the evaluation
# it prints "1 2 3 4 10 6 100"
print("\n------\n")
# Example 3.
f = cur(func, 6) # forces the evaluation after 6 arguments
c1 = f(1, 2, 3) # num args = 3
c2 = c1(4, f = 6) # num args = 5
    c3 = c2(5)  # num args = 6 ==> evaluation <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(5, g = -1) # num args = 7 ==> evaluation <====
# we can specify more than 6 arguments, but
# 6 are enough to force the evaluation
# it prints "1 2 3 4 5 6 -1"
print("\n------\n")
# Example 4.
def printTree(func, level = None):
if level is None:
printTree(cur(func), 0)
elif level == 6:
func(g = '')() # or just func('')()
else:
printTree(func(0), level + 1)
printTree(func(1), level + 1)
printTree(func)
print("\n------\n")
def f2(*args):
print(", ".join(["%3d"%(x) for x in args]))
def stress(f, n):
if n: stress(f(n), n - 1)
else: f() # enough is enough
stress(cur(f2), 100)
# ==== Prediction.py | khayam-hafezi/CRNN-keras-persian @ 3f99838e5b3b0e0ca79899e25b0648940b7fdfac | MIT | 3,572 bytes | blob 13c625629058a335547038a4cdc3550a5d9f78a2 ====
import cv2
import itertools, os, time
import numpy as np
from Model import get_Model
from parameter import letters
import argparse
from keras import backend as K
K.set_learning_phase(0)
Region = {"A": "서울 ", "B": "경기 ", "C": "인천 ", "D": "강원 ", "E": "충남 ", "F": "대전 ",
"G": "충북 ", "H": "부산 ", "I": "울산 ", "J": "대구 ", "K": "경북 ", "L": "경남 ",
"M": "전남 ", "N": "광주 ", "O": "전북 ", "P": "제주 "}
Hangul = {"dk": "아", "dj": "어", "dh": "오", "dn": "우", "qk": "바", "qj": "버", "qh": "보", "qn": "부",
"ek": "다", "ej": "더", "eh": "도", "en": "두", "rk": "가", "rj": "거", "rh": "고", "rn": "구",
"wk": "자", "wj": "저", "wh": "조", "wn": "주", "ak": "마", "aj": "머", "ah": "모", "an": "무",
"sk": "나", "sj": "너", "sh": "노", "sn": "누", "fk": "라", "fj": "러", "fh": "로", "fn": "루",
"tk": "사", "tj": "서", "th": "소", "tn": "수", "gj": "허"}
def decode_label(out):
# out : (1, 32, 42)
out_best = list(np.argmax(out[0, 2:], axis=1)) # get max index -> len = 32
out_best = [k for k, g in itertools.groupby(out_best)] # remove overlap value
outstr = ''
for i in out_best:
if i < len(letters):
outstr += letters[i]
return outstr
def label_to_hangul(label): # eng -> hangul
region = label[0]
two_num = label[1:3]
hangul = label[3:5]
four_num = label[5:]
    try:
        region = Region[region] if region != 'Z' else ''
    except KeyError:
        pass
    try:
        hangul = Hangul[hangul]
    except KeyError:
        pass
return region + two_num + hangul + four_num
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--weight", help="weight file directory",
type=str, default="models/weights.best.hdf5")
parser.add_argument("-t", "--test_img", help="Test image directory",
type=str, default="./DB/test/")
args = parser.parse_args()
# Get CRNN model
model = get_Model(training=False)
try:
model.load_weights(args.weight)
print("...Previous weight data...")
except:
raise Exception("No weight file!")
test_dir = args.test_img
test_imgs = os.listdir(args.test_img)
total = 0
acc = 0
letter_total = 0
letter_acc = 0
start = time.time()
for test_img in test_imgs:
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = model.predict(img_pred)
pred_texts = decode_label(net_out_value)
for i in range(min(len(pred_texts), len(test_img[0:-4]))):
if pred_texts[i] == test_img[i]:
letter_acc += 1
letter_total += max(len(pred_texts), len(test_img[0:-4]))
predOk = "True"
if pred_texts == test_img[0:-4]:
acc += 1
else:
predOk = "False"
total += 1
# print('Predicted: %s / True: %s / net_out_value: %s / ' % (label_to_hangul(pred_texts), label_to_hangul(test_img[0:-4])))
print('Predicted: %s / True: %s / predOk: %s ' % (pred_texts, test_img[0:-4], predOk ))
# cv2.rectangle(img, (0,0), (150, 30), (0,0,0), -1)
# cv2.putText(img, pred_texts, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255),2)
#cv2.imshow("q", img)
#if cv2.waitKey(0) == 27:
# break
#cv2.destroyAllWindows()
end = time.time()
total_time = (end - start)
print("Time : ",total_time / total)
print("ACC : ", acc / total)
print("letter ACC : ", letter_acc / letter_total)
# ==== torcharrow/_interop.py | OswinC/torcharrow @ 45a57c45afeffee488c51e3387179292b3504a6c | BSD-3-Clause | 8,038 bytes | blob 13c705bea50bc8d33e8f2c2e57d0e51683dbf67b ====
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
table,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
""" "
Convert arrow table to a torcharrow dataframe.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(table, pa.Table)
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
chunked_array = table.column(f.name)
pydata = chunked_array.to_pylist()
res[f.name] = scope.Column(pydata, f.dtype)
return scope.DataFrame(res, device=device)
else:
res = {}
table = table.select(columns) if columns is not None else table
for n in table.column_names:
chunked_array = table.column(n)
pydata = chunked_array.to_pylist()
res[n] = scope.Column(
pydata,
dtype=_arrowtype_to_dtype(
table.schema.field(n).type, table.column(n).null_count > 0
),
)
return scope.DataFrame(res, device=device)
def from_pandas_dataframe(
df,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
"""
Convert pandas dataframe to torcharrow dataframe (drops indices).
Parameters
----------
df : Pandas dataframe
dtype : dtype, default None
Data type to force, if None will automatically infer.
columns : array-like
List of column names to extract from df.
scope : Scope or None
Scope to use, or None for default scope.
device : str or ""
Device to use, or default if blank.
Examples
--------
>>> import pandas as pd
>>> import torcharrow as ta
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> gdf = ta.from_pandas_dataframe(pdf)
>>> gdf
index a b
------- --- ---
0 0 0.1
1 1 0.2
2 2
3 3 0.3
dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
"""
scope = scope or Scope.default
device = device or scope.device
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
            # this shows that Column should also construct Dataframes!
res[f.name] = from_pandas_series(
pd.Series(df[f.name]), f.dtype, scope=scope
)
return scope.Frame(res, dtype=dtype, device=device)
else:
res = {}
for n in df.columns:
if columns is None or n in columns:
res[n] = from_pandas_series(pd.Series(df[n]), scope=scope)
return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
""" "
Convert arrow array to a torcharrow column.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(array, pa.Array)
pydata = _arrow_scalar_to_py(array)
if dtype is not None:
assert not dt.is_struct(dtype)
return scope.Column(pydata, dtype, device=device)
else:
return scope.Column(
pydata,
dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
device=device,
)
def from_pandas_series(series, dtype=None, scope=None, device=""):
""" "
Convert pandas series array to a torcharrow column (drops indices).
"""
scope = scope or Scope.default
device = device or scope.device
return from_numpy(series.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
"""
Convert 1dim numpy array to a torcharrow column (zero copy).
"""
scope = scope or Scope.default
device = device or scope.device
if isinstance(array, ma.core.MaskedArray) and array.ndim == 1:
return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
elif isinstance(array, np.ndarray) and array.ndim == 1:
return _from_numpy_nd(array, dtype, scope, device)
else:
raise TypeError(f"cannot convert numpy array of type {array.dtype}")
def _is_not_str(s):
return not isinstance(s, str)
def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype).with_null()
else:
assert dt.is_primitive_type(dtype)
assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
# TODO if not, adopt the type or?
# Something like ma.array
# np.array([np.nan, np.nan, 3.]).astype(np.int64),
# mask = np.isnan([np.nan, np.nan, 3.]))
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
assert not np.all(np.isnan(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype) or dtype == "object":
assert np.all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")
def _from_numpy_nd(data, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype)
if dtype is None:
dtype = dt.string
else:
assert dt.is_primitive(dtype)
    # TODO Check why the following assert isn't the case
# assert dtype == dt.typeof_np_dtype(data.dtype)
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
mask = np.isnan(data)
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype):
mask = np.vectorize(_is_not_str)(data)
if np.any(mask):
dtype = dtype.with_null()
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError("can not convert numpy array of type {data.dtype,}")
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
def _arrow_scalar_to_py(array):
for i in array:
yield i.as_py()
def _pandatype_to_dtype(t, nullable):
return dt.typeof_nptype(t, nullable)
def _arrowtype_to_dtype(t, nullable):
if pa.types.is_boolean(t):
return dt.Boolean(nullable)
if pa.types.is_int8(t):
return dt.Int8(nullable)
if pa.types.is_int16(t):
return dt.Int16(nullable)
if pa.types.is_int32(t):
return dt.Int32(nullable)
if pa.types.is_int64(t):
return dt.Int64(nullable)
if pa.types.is_float32(t):
return dt.Float32(nullable)
if pa.types.is_float64(t):
return dt.Float64(nullable)
    if pa.types.is_list(t):
        return dt.List(_arrowtype_to_dtype(t.value_type, nullable), nullable)
if pa.types.is_struct(t):
return _pandatype_to_dtype(t.to_pandas_dtype(), True)
if pa.types.is_null(t):
return dt.Void()
if pa.types.is_string(t):
return dt.String(nullable)
if pa.types.is_map(t):
return dt.Map(t.item_type, t.key_type, nullable)
raise NotImplementedError("unsupported case")
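# Illustrative sketch (hedged): a round trip through the converters above,
# kept as comments since exact Column/DataFrame reprs depend on the
# torcharrow version installed:
#
#   import pyarrow as pa
#   t = pa.table({'a': [1, 2, None], 'b': ['x', 'y', 'z']})
#   df = from_arrow_table(t)                  # 'a' -> Int64(nullable=True)
#   col = from_arrow_array(pa.array([1.0, None, 3.0]))
#   ser = from_pandas_series(pd.Series([0.1, 0.2]))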
# ==== research/gan/image_compression/eval.py | jdavidagudelo/tensorflow-models @ 6f019beec73b01861363bf717706e27f4210b979 | Apache-2.0 | 3,883 bytes | blob 13c7d55115d132308c18e527238726863764f8de | 1 star ====
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a TFGAN trained compression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from research.gan.image_compression import data_provider
from research.gan.image_compression import networks
from research.gan.image_compression import summaries
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', 'testdata', 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
images = data_provider.provide_data(
'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
patch_size=FLAGS.patch_size)
# In order for variables to load, use the same variable scope as in the
# train job.
with tf.variable_scope('generator'):
reconstructions, _, prebinary = networks.compression_model(
images,
num_bits=FLAGS.bits_per_patch,
depth=FLAGS.model_depth,
is_training=False)
summaries.add_reconstruction_summaries(images, reconstructions, prebinary)
# Visualize losses.
pixel_loss_per_example = tf.reduce_mean(
tf.abs(images - reconstructions), axis=[1, 2, 3])
pixel_loss = tf.reduce_mean(pixel_loss_per_example)
tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
tf.summary.scalar('pixel_l1_loss', pixel_loss)
# Create ops to write images to disk.
uint8_images = data_provider.float_image_to_uint8(images)
uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
image_write_ops = tf.write_file(
'%s/%s' % (FLAGS.eval_dir, 'compression.png'),
tf.image.encode_png(uint8_reshaped[0]))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
master=FLAGS.master,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
if __name__ == '__main__':
    app.run(main)
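# Illustrative sketch (hedged): typical invocation, assuming a training job
# has already written checkpoints to /tmp/compression/ (flag names as
# defined above):
#
#   python eval.py --checkpoint_dir=/tmp/compression/ \
#                  --eval_dir=/tmp/compression/ \
#                  --dataset_dir=testdata --batch_size=32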
# ==== kivy/loader.py | geojeff/kivy @ 25ab20e5b0e87269531abe1f8cc76bf270bcc755 | MIT | 16,159 bytes | blob 13ca0867e3b5094c9f6e2eb05d9af7e3c93bd96a | 1 star ====
'''
Asynchronous data loader
========================
This is the Asynchronous Loader. You can use it to load an image
and use it, even if data are not yet available. You must specify a default
loading image when using such a loader::
from kivy import *
image = Loader.image('mysprite.png')
You can also load image from url::
image = Loader.image('http://mysite.com/test.png')
If you want to change the default loading image, you can do::
Loader.loading_image = Image('another_loading.png')
Tweaking the asynchronous loader
--------------------------------
.. versionadded:: 1.6.0
You can now tweak the loader to have a better user experience or more
performance, depending on the images you're going to load. Take a look at the
parameters:
- :data:`Loader.num_workers` - define the number of threads to start for
loading images
- :data:`Loader.max_upload_per_frame` - define the maximum image uploads in
GPU to do per frames.
'''
__all__ = ('Loader', 'LoaderBase', 'ProxyImage')
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.cache import Cache
from kivy.core.image import ImageLoader, Image
from kivy.compat import PY2, string_types
from collections import deque
from time import sleep
from os.path import join
from os import write, close, unlink, environ
import threading
# Register a cache for loader
Cache.register('kv.loader', limit=500, timeout=60)
class ProxyImage(Image):
'''Image returned by the Loader.image() function.
:Properties:
`loaded`: bool, default to False
It can be True if the image is already cached
:Events:
`on_load`
Fired when the image is loaded and changed
'''
__events__ = ('on_load', )
def __init__(self, arg, **kwargs):
kwargs.setdefault('loaded', False)
super(ProxyImage, self).__init__(arg, **kwargs)
self.loaded = kwargs.get('loaded')
def on_load(self):
pass
class LoaderBase(object):
'''Common base for Loader and specific implementation.
By default, Loader will be the best available loader implementation.
    The _update() function is called every 1 / 25 s, or every frame if we
    have less than 25 FPS.
'''
def __init__(self):
self._loading_image = None
self._error_image = None
self._num_workers = 2
self._max_upload_per_frame = 2
self._paused = False
self._resume_cond = threading.Condition()
self._q_load = deque()
self._q_done = deque()
self._client = []
self._running = False
self._start_wanted = False
self._trigger_update = Clock.create_trigger(self._update)
def __del__(self):
try:
Clock.unschedule(self._update)
except Exception:
pass
def _set_num_workers(self, num):
if num < 2:
raise Exception('Must have at least 2 workers')
self._num_workers = num
def _get_num_workers(self):
return self._num_workers
num_workers = property(_get_num_workers, _set_num_workers)
    '''Number of workers to use while loading (used only if the loader
    implementation supports it). This setting impacts the loader only at the
    beginning. Once the loader is started, the setting has no impact::
from kivy.loader import Loader
Loader.num_workers = 4
The default value is 2 for giving a smooth user experience. You could
increase the number of workers, then all the images will be loaded faster,
    but the user will not be able to use the application while loading.
    Prior to 1.6.0, the default number was 20, and loading many full-hd images
    completely blocked the application.
.. versionadded:: 1.6.0
'''
def _set_max_upload_per_frame(self, num):
if num is not None and num < 1:
raise Exception('Must have at least 1 image processing per image')
self._max_upload_per_frame = num
def _get_max_upload_per_frame(self):
return self._max_upload_per_frame
max_upload_per_frame = property(_get_max_upload_per_frame,
_set_max_upload_per_frame)
    '''Number of images to upload per frame. By default, we'll upload only 2
    images to the GPU per frame. If you are uploading many tiny images, you can
easily increase this parameter to 10, or more.
    If you are loading multiple Full-HD images, the upload time can be
    significant and can stall the application during the upload. If you want a
    smooth experience, keep the default.
    As a matter of fact, a Full-HD RGB image will take ~6MB in memory, so it
    will take time. If you have also activated mipmap=True, then the GPU must
    calculate the mipmaps of these big images too, in real time. Then it can be
    smart to reduce :data:`max_upload_per_frame` to 1 or 2. If you want to get
    rid of that (or reduce it a lot), take a look at the DDS format.
.. versionadded:: 1.6.0
'''
def _get_loading_image(self):
if not self._loading_image:
loading_png_fn = join(kivy_data_dir, 'images', 'image-loading.gif')
self._loading_image = ImageLoader.load(filename=loading_png_fn)
return self._loading_image
def _set_loading_image(self, image):
        if isinstance(image, string_types):
self._loading_image = ImageLoader.load(filename=image)
else:
self._loading_image = image
loading_image = property(_get_loading_image, _set_loading_image)
'''Image used for loading.
You can change it by doing::
Loader.loading_image = 'loading.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def _get_error_image(self):
if not self._error_image:
error_png_fn = join(
'atlas://data/images/defaulttheme/image-missing')
self._error_image = ImageLoader.load(filename=error_png_fn)
return self._error_image
def _set_error_image(self, image):
        if isinstance(image, string_types):
self._error_image = ImageLoader.load(filename=image)
else:
self._error_image = image
error_image = property(_get_error_image, _set_error_image)
'''Image used for error.
You can change it by doing::
Loader.error_image = 'error.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def start(self):
'''Start the loader thread/process'''
self._running = True
def run(self, *largs):
'''Main loop for the loader.'''
pass
def stop(self):
'''Stop the loader thread/process'''
self._running = False
def pause(self):
'''Pause the loader, can be useful during interactions
.. versionadded:: 1.6.0
'''
self._paused = True
def resume(self):
'''Resume the loader, after a :meth:`pause`.
.. versionadded:: 1.6.0
'''
self._paused = False
self._resume_cond.acquire()
self._resume_cond.notify_all()
self._resume_cond.release()
def _wait_for_resume(self):
while self._running and self._paused:
self._resume_cond.acquire()
self._resume_cond.wait(0.25)
self._resume_cond.release()
def _load(self, kwargs):
'''(internal) Loading function, called by the thread.
Will call _load_local() if the file is local,
or _load_urllib() if the file is on Internet
'''
while len(self._q_done) >= (
self.max_upload_per_frame * self._num_workers):
sleep(0.1)
self._wait_for_resume()
filename = kwargs['filename']
load_callback = kwargs['load_callback']
post_callback = kwargs['post_callback']
try:
proto = filename.split(':', 1)[0]
except:
#if blank filename then return
return
if load_callback is not None:
data = load_callback(filename)
elif proto in ('http', 'https', 'ftp', 'smb'):
data = self._load_urllib(filename, kwargs['kwargs'])
else:
data = self._load_local(filename, kwargs['kwargs'])
if post_callback:
data = post_callback(data)
self._q_done.appendleft((filename, data))
self._trigger_update()
def _load_local(self, filename, kwargs):
'''(internal) Loading a local file'''
        # With recent changes to CoreImage, we must keep the data;
        # otherwise, we might be unable to recreate the texture afterwards.
return ImageLoader.load(filename, keep_data=True, **kwargs)
def _load_urllib(self, filename, kwargs):
'''(internal) Loading a network file. First download it, save it to a
temporary file, and pass it to _load_local()'''
if PY2:
import urllib2 as urllib_request
else:
import urllib.request as urllib_request
proto = filename.split(':', 1)[0]
if proto == 'smb':
try:
                # note: it's important to load SMBHandler every time;
                # otherwise the data is occasionally not loaded
from smb.SMBHandler import SMBHandler
except ImportError:
Logger.warning(
'Loader: can not load PySMB: make sure it is installed')
return
import tempfile
data = fd = _out_osfd = None
try:
_out_filename = ''
suffix = '.%s' % (filename.split('.')[-1])
_out_osfd, _out_filename = tempfile.mkstemp(
prefix='kivyloader', suffix=suffix)
if proto == 'smb':
# read from samba shares
fd = urllib_request.build_opener(SMBHandler).open(filename)
else:
# read from internet
fd = urllib_request.urlopen(filename)
idata = fd.read()
fd.close()
fd = None
# write to local filename
write(_out_osfd, idata)
close(_out_osfd)
_out_osfd = None
# load data
data = self._load_local(_out_filename, kwargs)
# FIXME create a clean API for that
for imdata in data._data:
imdata.source = filename
except Exception:
Logger.exception('Failed to load image <%s>' % filename)
# close file when remote file not found or download error
try:
close(_out_osfd)
except OSError:
pass
return self.error_image
finally:
if fd:
fd.close()
if _out_osfd:
close(_out_osfd)
if _out_filename != '':
unlink(_out_filename)
return data
def _update(self, *largs):
'''(internal) Check if a data is loaded, and pass to the client'''
# want to start it ?
if self._start_wanted:
if not self._running:
self.start()
self._start_wanted = False
# in pause mode, don't unqueue anything.
if self._paused:
self._trigger_update()
return
for x in range(self.max_upload_per_frame):
try:
filename, data = self._q_done.pop()
except IndexError:
return
# create the image
image = data # ProxyImage(data)
if not image.nocache:
Cache.append('kv.loader', filename, image)
# update client
for c_filename, client in self._client[:]:
if filename != c_filename:
continue
# got one client to update
client.image = image
client.loaded = True
client.dispatch('on_load')
self._client.remove((c_filename, client))
self._trigger_update()
def image(self, filename, load_callback=None, post_callback=None, **kwargs):
'''Load a image using the Loader. A ProxyImage is returned with a
loading image. You can use it as follows::
from kivy.app import App
from kivy.uix.image import Image
from kivy.loader import Loader
class TestApp(App):
def _image_loaded(self, proxyImage):
if proxyImage.image.texture:
self.image.texture = proxyImage.image.texture
def build(self):
proxyImage = Loader.image("myPic.jpg")
proxyImage.bind(on_load=self._image_loaded)
self.image = Image()
return self.image
TestApp().run()
In order to cancel all background loading, call *Loader.stop()*.
'''
data = Cache.get('kv.loader', filename)
if data not in (None, False):
# found image, if data is not here, need to reload.
return ProxyImage(data,
loading_image=self.loading_image,
loaded=True, **kwargs)
client = ProxyImage(self.loading_image,
loading_image=self.loading_image, **kwargs)
self._client.append((filename, client))
if data is None:
# if data is None, this is really the first time
self._q_load.appendleft({
'filename': filename,
'load_callback': load_callback,
'post_callback': post_callback,
'kwargs': kwargs})
if not kwargs.get('nocache', False):
Cache.append('kv.loader', filename, False)
self._start_wanted = True
self._trigger_update()
else:
# already queued for loading
pass
return client
#
# Loader implementation
#
if 'KIVY_DOC' in environ:
Loader = None
else:
#
# Try to use pygame as our first choice for loader
#
from kivy.compat import queue
from threading import Thread
class _Worker(Thread):
'''Thread executing tasks from a given tasks queue
'''
def __init__(self, pool, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.pool = pool
self.start()
def run(self):
while self.pool.running:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
print(e)
self.tasks.task_done()
class _ThreadPool(object):
'''Pool of threads consuming tasks from a queue
'''
def __init__(self, num_threads):
super(_ThreadPool, self).__init__()
self.running = True
self.tasks = queue.Queue()
for _ in range(num_threads):
_Worker(self, self.tasks)
def add_task(self, func, *args, **kargs):
'''Add a task to the queue
'''
self.tasks.put((func, args, kargs))
def stop(self):
self.running = False
self.tasks.join()
class LoaderThreadPool(LoaderBase):
def __init__(self):
super(LoaderThreadPool, self).__init__()
self.pool = None
def start(self):
super(LoaderThreadPool, self).start()
self.pool = _ThreadPool(self._num_workers)
Clock.schedule_interval(self.run, 0)
def stop(self):
super(LoaderThreadPool, self).stop()
Clock.unschedule(self.run)
self.pool.stop()
def run(self, *largs):
while self._running:
try:
parameters = self._q_load.pop()
except:
return
self.pool.add_task(self._load, parameters)
Loader = LoaderThreadPool()
Logger.info('Loader: using a thread pool of {} workers'.format(
Loader.num_workers))
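# Illustrative sketch (hedged): pause/resume usage for the API above.
# Pausing stops _update() from dequeuing finished images, which keeps
# interactions (e.g. scrolling) smooth while downloads continue in the
# worker threads. Kept as comments; names are as defined in this module.
#
#   from kivy.loader import Loader
#   image = Loader.image('http://mysite.com/test.png')
#   Loader.pause()    # defer GPU uploads while the user interacts
#   # ... later ...
#   Loader.resume()   # uploads continue; on_load fires as images arrive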
# ==== buildsettings.py | randomizax/polygon-label @ 5091bd54aee5166d418b240f34d7a5c336685c06 | MIT | 2,327 bytes | blob 13caf57909dc254d637b57702b6b442c435e3b48 ====
# settings file for builds.
# if you want to have custom builds, copy this file to "localbuildsettings.py" and make changes there.

# possible fields:
# resourceUrlBase - optional - the URL base for external resources (all resources embedded in standard IITC)
# distUrlBase - optional - the base URL to use for update checks
# buildMobile - optional - if set, mobile builds are built with 'ant'. Requires the Android SDK and an appropriate mobile/local.properties file configured.
# preBuild - optional - an array of strings to run as commands, via os.system, before building the scripts
# postBuild - optional - an array of strings to run as commands, via os.system, after all builds are complete

buildSettings = {
    # local: use this build if you're not modifying external resources
    # no external resources allowed - they're not needed any more
    'randomizax': {
        'resourceUrlBase': None,
        'distUrlBase': 'https://randomizax.github.io/polygon-label',
    },

    # local8000: if you need to modify external resources, this build will load them from
    # the web server at http://0.0.0.0:8000/dist
    # (This shouldn't be required any more - all resources are embedded. But it remains just in case some new feature
    # needs external resources.)
    'local8000': {
        'resourceUrlBase': 'http://0.0.0.0:8000/dist',
        'distUrlBase': None,
    },

    # mobile: default entry that also builds the mobile .apk
    # you will need to have the android-sdk installed, and the file mobile/local.properties created as required
    'mobile': {
        'resourceUrlBase': None,
        'distUrlBase': None,
        'buildMobile': 'debug',
    },

    # if you want to publish your own fork of the project and host it on your own web site,
    # create a localbuildsettings.py file containing something similar to this.
    # note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on plain "http" URLs
    #'example': {
    #    'resourceUrlBase': 'http://www.example.com/iitc/dist',
    #    'distUrlBase': 'https://secure.example.com/iitc/dist',
    #},
}

# defaultBuild - the name of the default build to use if none is specified on the build.py command line
# (in here as an example - it only works in localbuildsettings.py)
#defaultBuild = 'local'
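
A matching localbuildsettings.py override (the mechanism the comments above describe) can be as small as this sketch; the build name and URL are hypothetical:

buildSettings = {
    'myfork': {
        'resourceUrlBase': None,
        'distUrlBase': 'https://example.github.io/my-iitc-fork',
    },
}
defaultBuild = 'myfork'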
| avg_line_length 42.309091 | max_line_length 151 | alphanum_fraction 0.701762 | 2,327 chars in 54 lines | ... (remaining quality signals)
13cc4a79cdbfb09ff64440ffca1bacc5cc651798 | 4,192 | py | Python | thesis/pettingzoo/butterfly/cooperative_pong/cake_paddle.py | heavenlysf/thesis | 646553c45860f337c91a48ab7f666a174784472f | ["MIT"] | stars: null | issues: null | forks: null
import os

os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame

RENDER_RATIO = 2


class CakePaddle(pygame.sprite.Sprite):
    def __init__(self, speed=12):
        # initialize the sprite base class
        pygame.sprite.Sprite.__init__(self)
        # surf is the right-most (largest) tier of the cake
        self.surf = pygame.Surface((30 // RENDER_RATIO, 120 // RENDER_RATIO))
        self.rect = self.surf.get_rect()
        self.surf2 = pygame.Surface((30 // RENDER_RATIO, 80 // RENDER_RATIO))
        self.rect2 = self.surf2.get_rect()
        self.surf3 = pygame.Surface((30 // RENDER_RATIO, 40 // RENDER_RATIO))
        self.rect3 = self.surf3.get_rect()
        self.surf4 = pygame.Surface((30 // RENDER_RATIO, 10 // RENDER_RATIO))
        self.rect4 = self.surf4.get_rect()
        self.speed = speed

    def reset(self):
        # self.rect is set from envs class
        self.rect2.midright = self.rect.midleft
        self.rect3.midright = self.rect2.midleft
        self.rect4.midright = self.rect3.midleft

    def draw(self, screen):
        pygame.draw.rect(screen, (255, 255, 255), self.rect)
        pygame.draw.rect(screen, (255, 255, 255), self.rect2)
        pygame.draw.rect(screen, (255, 255, 255), self.rect3)
        pygame.draw.rect(screen, (255, 255, 255), self.rect4)

    def update(self, area, action):
        # action: 1 - up, 2 - down
        movepos = [0, 0]
        if action == 1:
            movepos[1] = movepos[1] - self.speed
        elif action == 2:
            movepos[1] = movepos[1] + self.speed
        newpos = self.rect.move(movepos)
        if area.contains(newpos):
            self.rect = newpos
            # move the other rects too
            self.rect2 = self.rect2.move(movepos)
            self.rect3 = self.rect3.move(movepos)
            self.rect4 = self.rect4.move(movepos)

    def process_collision(self, b_rect, dx, dy, b_speed, paddle_type):
        """
        Parameters
        ----------
        b_rect : Ball rect
        dx, dy : Ball speed along a single axis
        b_speed : Ball speed
        paddle_type : ignored
        Returns
        -------
        is_collision: True if the ball collides with the paddle
        b_rect: new ball rect
        b_speed: new ball speed
        """
        if self.rect4.colliderect(b_rect):
            is_collision = True
            if dx > 0:
                b_rect.right = self.rect4.left
                b_speed[0] = -b_speed[0]
            # top or bottom edge
            elif dy > 0:
                b_rect.bottom = self.rect4.top
                b_speed[1] = -b_speed[1]
            elif dy < 0:
                b_rect.top = self.rect4.bottom
                b_speed[1] = -b_speed[1]
            return is_collision, b_rect, b_speed
        elif self.rect3.colliderect(b_rect):
            is_collision = True
            if dx > 0:
                b_rect.right = self.rect3.left
                b_speed[0] = -b_speed[0]
            # top or bottom edge
            elif dy > 0:
                b_rect.bottom = self.rect3.top
                b_speed[1] = -b_speed[1]
            elif dy < 0:
                b_rect.top = self.rect3.bottom
                b_speed[1] = -b_speed[1]
            return is_collision, b_rect, b_speed
        elif self.rect2.colliderect(b_rect):
            is_collision = True
            if dx > 0:
                b_rect.right = self.rect2.left
                b_speed[0] = -b_speed[0]
            # top or bottom edge
            elif dy > 0:
                b_rect.bottom = self.rect2.top
                b_speed[1] = -b_speed[1]
            elif dy < 0:
                b_rect.top = self.rect2.bottom
                b_speed[1] = -b_speed[1]
            return is_collision, b_rect, b_speed
        elif self.rect.colliderect(b_rect):
            is_collision = True
            if dx > 0:
                b_rect.right = self.rect.left
                b_speed[0] = -b_speed[0]
            # top or bottom edge
            elif dy > 0:
                b_rect.bottom = self.rect.top
                b_speed[1] = -b_speed[1]
            elif dy < 0:
                b_rect.top = self.rect.bottom
                b_speed[1] = -b_speed[1]
            return is_collision, b_rect, b_speed
        return False, b_rect, b_speed
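
A minimal sketch of driving CakePaddle outside the environment; the play-area size and starting position are illustrative assumptions (the env class normally assigns self.rect):

if __name__ == "__main__":
    pygame.init()
    area = pygame.Rect(0, 0, 960 // RENDER_RATIO, 480 // RENDER_RATIO)
    paddle = CakePaddle(speed=12)
    paddle.rect.midright = area.midright  # assumed placement; set by the env in practice
    paddle.reset()
    paddle.update(area, 1)  # action 1 moves the paddle up by `speed` pixels
    print(paddle.rect, paddle.rect2, paddle.rect3, paddle.rect4)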
| avg_line_length 34.644628 | max_line_length 77 | alphanum_fraction 0.530057 | 4,192 chars in 120 lines | ... (remaining quality signals)
13cd2bb4addf837c7a09ba721bc230b691ca3e1b | 2,080 | py | Python | src/internal_representation_analysis/decoder/StateDataset.py | aidkilda/understanding-drl-navigation | 0d637c2390a935ec1182d4f2d5165644d98d6404 | ["MIT"] | stars: null | issues: null | forks: null
import random

from internal_representation_analysis.network import ActorCriticFFNetwork
from internal_representation_analysis.scene_loader import THORDiscreteEnvironment as Environment
from internal_representation_analysis.constants import MINI_BATCH_SIZE


class StateDataset(object):
    def __init__(self, states):
        self.all_states = states
        self.train_set = None
        self.validation_set = None
        self.test_set = None

    def __eq__(self, other):
        return self.all_states == other.all_states

    def split_datasets(self, seed, all_targets=False, test_target_eq_obs=False):
        all_states = self.all_states[:]
        random.seed(seed)
        random.shuffle(all_states)

        if test_target_eq_obs:
            for s in all_states:
                s.embedding = s.target_eq_obs

        if not all_targets:
            self.train_set = all_states[0:int(0.6 * len(all_states))]
            self.validation_set = all_states[int(0.6 * len(all_states)):int(
                0.8 * len(all_states))]
            self.test_set = all_states[int(0.8 * len(all_states)):]
        else:
            unique_state_ids = list(set([s.state_id for s in all_states]))
            random.shuffle(unique_state_ids)
            train_ids = set(unique_state_ids[0:int(0.6 * len(unique_state_ids))])
            val_ids = set(unique_state_ids[int(0.6 * len(unique_state_ids)):int(
                0.8 * len(unique_state_ids))])
            test_ids = set(unique_state_ids[int(0.8 * len(unique_state_ids)):])
            self.train_set = [s for s in all_states if s.state_id in train_ids]
            self.validation_set = [s for s in all_states if s.state_id in val_ids]
            self.test_set = [s for s in all_states if s.state_id in test_ids]

    def shuffle_train_set(self):
        random.shuffle(self.train_set)

    def get_train_mini_batch(self, start_index):
        return self.train_set[start_index:start_index + MINI_BATCH_SIZE]

    def filter_by_indexes(self, indexList):
        self.all_states = [self.all_states[i] for i in indexList]
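
A small usage sketch; the state objects below are stand-ins carrying only the attributes the class touches (state_id, embedding, target_eq_obs), and it assumes the package imports above resolve:

if __name__ == '__main__':
    class _FakeState(object):
        def __init__(self, i):
            self.state_id = i % 10
            self.embedding = [float(i)]
            self.target_eq_obs = [float(-i)]

    dataset = StateDataset([_FakeState(i) for i in range(100)])
    dataset.split_datasets(seed=42, all_targets=True)
    print(len(dataset.train_set), len(dataset.validation_set), len(dataset.test_set))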
| avg_line_length 38.518519 | max_line_length 96 | alphanum_fraction 0.669712 | 2,080 chars in 53 lines | ... (remaining quality signals)
13cd2ed4d981d4b892a318dfe3960eb2c118e4ce | 3,147 | py | Python | test_dataset_model.py | ferrine/PerceptualSimilarity | 2ff66e86b12dbfbc337991def71b09e3b86d4b12 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
import numpy as np
from models import dist_model as dm
from data import data_loader as dl
import argparse
from IPython import embed

parser = argparse.ArgumentParser()
parser.add_argument("--dataset_mode", type=str, default="2afc", help="[2afc,jnd]")
parser.add_argument(
    "--datasets",
    type=str,
    nargs="+",
    default=[
        "val/traditional",
        "val/cnn",
        "val/superres",
        "val/deblur",
        "val/color",
        "val/frameinterp",
    ],
    help="datasets to test - for jnd mode: [val/traditional],[val/cnn]; for 2afc mode: [train/traditional],[train/cnn],[train/mix],[val/traditional],[val/cnn],[val/color],[val/deblur],[val/frameinterp],[val/superres]",
)
parser.add_argument(
    "--model",
    type=str,
    default="net-lin",
    help="distance model type [net-lin] for linearly calibrated net, [net] for off-the-shelf network, [l2] for euclidean distance, [ssim] for Structured Similarity Image Metric",
)
parser.add_argument(
    "--net",
    type=str,
    default="alex",
    help="[squeeze], [alex], or [vgg] for network architectures",
)
parser.add_argument(
    "--colorspace",
    type=str,
    default="Lab",
    help="[Lab] or [RGB] for colorspace to use for l2, ssim model types",
)
parser.add_argument(
    "--batch_size", type=int, default=50, help="batch size to test image patches in"
)
parser.add_argument("--use_gpu", action="store_true", help="turn on flag to use GPU")
parser.add_argument(
    "--model_path",
    type=str,
    default=None,
    help="location of model, will default to ./weights/v[version]/[net_name].pth",
)
parser.add_argument(
    "--from_scratch", action="store_true", help="model was initialized from scratch"
)
parser.add_argument(
    "--train_trunk", action="store_true", help="model trunk was trained/tuned"
)
parser.add_argument(
    "--version",
    type=str,
    default="0.1",
    help="v0.1 is latest, v0.0 was original release",
)
opt = parser.parse_args()

if opt.model in ["l2", "ssim"]:
    opt.batch_size = 1

# initialize model
model = dm.DistModel()
# model.initialize(model=opt.model, net=opt.net, colorspace=opt.colorspace, model_path=opt.model_path, use_gpu=opt.use_gpu)
model.initialize(
    model=opt.model,
    net=opt.net,
    colorspace=opt.colorspace,
    model_path=opt.model_path,
    use_gpu=opt.use_gpu,
    pnet_rand=opt.from_scratch,
    pnet_tune=opt.train_trunk,
    version=opt.version,
)
if opt.model in ["net-lin", "net"]:
    print("Testing model [%s]-[%s]" % (opt.model, opt.net))
elif opt.model in ["l2", "ssim"]:
    print("Testing model [%s]-[%s]" % (opt.model, opt.colorspace))

# embed()

# initialize data loader
for dataset in opt.datasets:
    data_loader = dl.CreateDataLoader(
        dataset, dataset_mode=opt.dataset_mode, batch_size=opt.batch_size
    )

    # evaluate model on data
    if opt.dataset_mode == "2afc":
        (score, results_verbose) = dm.score_2afc_dataset(data_loader, model.forward)
    elif opt.dataset_mode == "jnd":
        (score, results_verbose) = dm.score_jnd_dataset(data_loader, model.forward)

    # print results
    print("  Dataset [%s]: %.2f" % (dataset, 100.0 * score))
| avg_line_length 30.553398 | max_line_length 218 | alphanum_fraction 0.67048 | 3,147 chars in 102 lines | ... (remaining quality signals)
13cde4f4ec9916ff8a799bc071fde32fd8bf29b3 | 2,461 | py | Python | plotter.py | StrangeTcy/pathnet-pytorch | 58c8088b992ad2f36b843186d93edc872d547c7b | ["BSD-3-Clause"] | stars: 86 (2017-04-05 to 2022-03-28) | issues: 7 (2017-04-30 to 2019-02-09) | forks: 21 (2017-04-05 to 2021-11-17)
import argparse
import os
import pickle

import numpy as np
import matplotlib.pyplot as plt

plt.style.use('ggplot')

parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
                    help='open mnist result')
args = parser.parse_args()


def subplot(subplot, data_first, data_second, title):
    plt.subplot(subplot)
    if args.mnist:
        x = np.arange(0, 100)
    else:
        x = np.arange(0, 500)
    y_first = np.mean(data_first, axis=0)
    y_second = np.mean(data_second, axis=0)
    y_first_err = np.std(data_first, axis=0) / 2.
    y_second_err = np.std(data_second, axis=0) / 2.
    plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
    plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
    plt.plot(x, y_first, color='r', label='Task A')
    plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
    plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
    axes = plt.gca()
    if args.mnist:
        axes.set_xlim([0, 100])
        axes.set_ylim([0, 1.2])
    else:
        axes.set_xlim([0, 500])
        axes.set_ylim([0, 0.6])
    plt.title(title, fontsize=20, y=0.9)
    plt.ylabel('Accuracy', fontsize=15)
    plt.xlabel('Generations', fontsize=15)
    plt.grid(True)


try:
    if args.mnist:
        # pickle files must be opened in binary mode
        f = open(os.path.join('./result/result_mnist.pickle'), 'rb')
        result = pickle.load(f)
        f.close()
        pathnet_first = []
        pathnet_second = []
        for res in result:
            pathnet_first.append(res[2])
            pathnet_second.append(res[3])
        subplot(111, pathnet_first, pathnet_second, 'MNIST')
        plt.show()
    else:
        f = open(os.path.join('./result/result_cifar_svhn.pickle'), 'rb')
        result = pickle.load(f)
        f.close()
        cifar_first = []
        cifar_second = []
        svhn_first = []
        svhn_second = []
        for res in result:
            if res[0] == 'pathnet_cifar_first':
                cifar_first.append(res[2])
                svhn_second.append(res[3])
            else:
                svhn_first.append(res[2])
                cifar_second.append(res[3])
        subplot(211, cifar_first, cifar_second, 'CIFAR-10')
        subplot(212, svhn_first, svhn_second, 'cSVHN')
        plt.show()
except IOError:
    print("Result file does not exist")
| avg_line_length 28.952941 | max_line_length 95 | alphanum_fraction 0.603007 | 2,461 chars in 84 lines | ... (remaining quality signals)
13ce147cc376d9100195efcbf75606622c35be95 | 2,805 | py | Python | platypus/tests/test_operators.py | sctiwari/EZFF_ASE | 94710d4cf778ff2db5e6df0cd6d10d92e1b98afe | ["MIT"] | stars: 2 (2021-05-10 to 2021-12-15) | issues: null | forks: null
# Copyright 2015-2018 David Hadka
#
# This file is part of Platypus, a Python module for designing and using
# evolutionary algorithms (EAs) and multiobjective evolutionary algorithms
# (MOEAs).
#
# Platypus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Platypus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Platypus.  If not, see <http://www.gnu.org/licenses/>.
import unittest

from mock import patch

from ..core import Problem, Solution
from ..types import Permutation
from ..operators import Swap


class TestSwap(unittest.TestCase):

    def test_swap10(self):
        problem = Problem(1, 0)
        problem.types[0] = Permutation(range(10))

        solution = Solution(problem)
        solution.variables[0] = list(range(10))

        with patch('random.randrange', side_effect=[2, 4]):
            result = Swap(1.0).mutate(solution)

        self.assertEqual(result.variables[0][2], 4)
        self.assertEqual(result.variables[0][4], 2)
        self.assertEqual(solution.variables[0][2], 2)
        self.assertEqual(solution.variables[0][4], 4)

    def test_swap2a(self):
        problem = Problem(1, 0)
        problem.types[0] = Permutation(range(2))

        solution = Solution(problem)
        solution.variables[0] = list(range(2))

        with patch('random.randrange', side_effect=[0, 1]):
            result = Swap(1.0).mutate(solution)

        self.assertEqual(result.variables[0][0], 1)
        self.assertEqual(result.variables[0][1], 0)

    def test_swap2b(self):
        problem = Problem(1, 0)
        problem.types[0] = Permutation(range(2))

        solution = Solution(problem)
        solution.variables[0] = list(range(2))

        with patch('random.randrange', side_effect=[1, 1, 0]):
            result = Swap(1.0).mutate(solution)

        self.assertEqual(result.variables[0][0], 1)
        self.assertEqual(result.variables[0][1], 0)

    def test_swap1(self):
        problem = Problem(1, 0)
        problem.types[0] = Permutation(range(1))

        solution = Solution(problem)
        solution.variables[0] = list(range(1))

        with patch('random.randrange', side_effect=[0, 0]):
            result = Swap(1.0).mutate(solution)

        self.assertEqual(result.variables[0][0], 0)
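
The cases run under the standard unittest runner; appending the usual entry point (not part of the original file) lets the module be executed directly:

if __name__ == '__main__':
    unittest.main()

# or, from the package root: python -m unittest platypus.tests.test_operators -v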
| avg_line_length 35.961538 | max_line_length 74 | alphanum_fraction 0.636364 | 2,805 chars in 78 lines | ... (remaining quality signals)
13cea3eb1b8257abf1a1958d34086a311a9082d4 | 6,241 | py | Python | fusion_net/bilinear_sampler.py | ClovisChen/LearningCNN | cd9102a3d71f602024558d818039f5b759c92fa5 | ["Apache-2.0"] | stars: null | issues: null | forks: null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf


def bilinear_sampler_1d_h(input_images, x_offset, wrap_mode='border', name='bilinear_sampler', **kwargs):
    '''
    One-dimensional bilinear sampling: x_offset is a map of X offsets over the input.
    _repeat: append an extra dimension to a 1-D x, copy each value along it, then
    flatten back to 1-D, e.g. [1, 2, 3] --> [1, 1, 2, 2, 3, 3]
    '''
    def _repeat(x, n_repeats):
        with tf.variable_scope('_repeat'):
            rep = tf.tile(tf.expand_dims(x, 1), [1, n_repeats])
            return tf.reshape(rep, [-1])

    def _interpolate(im, x, y):  # interpolation helper
        with tf.variable_scope('_interpolate'):
            # handle both texture border types
            _edge_size = 0
            # with 'border' wrapping, the edge size is 1: pad a row of zeros on
            # both sides of the h and w dimensions
            if _wrap_mode == 'border':
                _edge_size = 1
                im = tf.pad(im, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT')
                x = x + _edge_size
                y = y + _edge_size
            elif _wrap_mode == 'edge':
                _edge_size = 0
            else:
                return None

            # clip the offset x to [0, width - 1 + 2 * edge_size]
            # (the offset must stay within the padded image)
            x = tf.clip_by_value(x, 0.0, _width_f - 1 + 2 * _edge_size)

            # floor x and y, then take x0 + 1 as the ceiling of x
            x0_f = tf.floor(x)
            y0_f = tf.floor(y)
            x1_f = x0_f + 1

            # cast the floored x, y to integers; the ceiled x may not exceed
            # the padded width minus 1 (cast: type conversion)
            x0 = tf.cast(x0_f, tf.int32)
            y0 = tf.cast(y0_f, tf.int32)
            x1 = tf.cast(tf.minimum(x1_f, _width_f - 1 + 2 * _edge_size), tf.int32)

            # the second (width) dimension spans the padded width
            dim2 = (_width + 2 * _edge_size)
            # the first (image) dimension spans the padded resolution
            dim1 = (_width + 2 * _edge_size) * (_height + 2 * _edge_size)
            # build the index base: start from [0, 1, 2, ..., batch], multiply
            # by dim1 to get [0, dim1, 2*dim1, ..., batch*dim1], then repeat each
            # entry once per pixel:
            # [0, 0, ..., 0, dim1, dim1, ..., dim1, 2*dim1, ..., batch*dim1]
            # giving one base offset per image in the batch
            base = _repeat(tf.range(_num_batch) * dim1, _height * _width)
            # multiply the y offset by dim2 (the width) to get the row base;
            # y0 is [0, 0, ..., 0, 1, 1, ..., 1, ..., h + 2*e, ..., h + 2*e],
            # so y0 * dim2 becomes
            # [0, 0, ..., 0, w+2*e, w+2*e, ..., w+2*e, ..., (h+2*e)*(w+2*e), ...]
            # and adding base yields an index that accounts for batch and height
            base_y0 = base + y0 * dim2
            # adding the floored / ceiled x gives the indices of the points to
            # the left and to the right of the sample location
            idx_l = base_y0 + x0
            idx_r = base_y0 + x1

            # reshape the image to [batch*w*h, channel]
            im_flat = tf.reshape(im, tf.stack([-1, _num_channels]))

            # gather with the left/right indices to obtain the rearranged pixels
            pix_l = tf.gather(im_flat, idx_l)
            pix_r = tf.gather(im_flat, idx_r)

            # bilinear interpolation coefficients x1 - x and x - x0
            weight_l = tf.expand_dims(x1_f - x, 1)
            weight_r = tf.expand_dims(x - x0_f, 1)

            # combine the two pixels with the bilinear weights
            return weight_l * pix_l + weight_r * pix_r

    # after get_disp produces a disparity map, the interpolation is applied to
    # obtain a better image
    def _transform(input_images, x_offset):
        '''
        The transform first calls meshgrid to generate index grids for the X and Y axes.
        Example:
        with _width=3, linspace(0.0, _width_f-1.0, _width) gives [0., 1., 2.]; height works the same way.
        >>> x = tf.linspace(0.0, 2.0, 3)
        >>> sess.run(x)
        array([0., 1., 2.], dtype=float32)
        >>> x = tf.linspace(0.0, 2.0, 3)
        >>> y = tf.linspace(0.0, 4.0, 5)
        >>> x_t, y_t = tf.meshgrid(x, y)
        >>> sess.run(x_t)
        array([[0., 1., 2.],
               [0., 1., 2.],
               [0., 1., 2.],
               [0., 1., 2.],
               [0., 1., 2.]], dtype=float32)
        >>> sess.run(y_t)
        array([[0., 0., 0.],
               [1., 1., 1.],
               [2., 2., 2.],
               [3., 3., 3.],
               [4., 4., 4.]], dtype=float32)
        >>> x_t_flat = tf.reshape(x_t, (1, -1))
        >>> y_t_flat = tf.reshape(y_t, (1, -1))
        >>> sess.run(x_t_flat)
        array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
        >>> sess.run(y_t_flat)
        array([[0., 0., 0., 1., 1., 1., 2., 2., 2., 3., 3., 3., 4., 4., 4.]], dtype=float32)
        >>> x_t_flat = tf.tile(x_t_flat, tf.stack([2, 1]))
        >>> sess.run(x_t_flat)
        array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.], [0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
        >>> x_t_flat = tf.reshape(x_t_flat, (1, -1))
        >>> sess.run(x_t_flat)
        array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
        '''
        with tf.variable_scope('transform'):
            # grid of (x_t, y_t, 1), eq (1) in ref [1]
            x_t, y_t = tf.meshgrid(tf.linspace(0.0, _width_f - 1.0, _width),
                                   tf.linspace(0.0, _height_f - 1.0, _height))
            x_t_flat = tf.reshape(x_t, (1, -1))
            y_t_flat = tf.reshape(y_t, (1, -1))
            x_t_flat = tf.tile(x_t_flat, tf.stack([_num_batch, 1]))
            y_t_flat = tf.tile(y_t_flat, tf.stack([_num_batch, 1]))
            x_t_flat = tf.reshape(x_t_flat, [-1])
            y_t_flat = tf.reshape(y_t_flat, [-1])
            x_t_flat = x_t_flat + tf.reshape(x_offset, [-1]) * _width_f
            input_transformed = _interpolate(input_images, x_t_flat, y_t_flat)
            output = tf.reshape(
                input_transformed, tf.stack([_num_batch, _height, _width, _num_channels]))
            return output

    with tf.variable_scope(name):
        '''
        input_images is [num_batch, height, width, num_channels]
        '''
        _num_batch = tf.shape(input_images)[0]
        _height = tf.shape(input_images)[1]
        _width = tf.shape(input_images)[2]
        _num_channels = tf.shape(input_images)[3]
        _height_f = tf.cast(_height, tf.float32)
        _width_f = tf.cast(_width, tf.float32)
        _wrap_mode = wrap_mode
        output = _transform(input_images, x_offset)
        return output
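
A graph-mode usage sketch (TF1 API; the batch and image sizes are assumed): warping a batch of images horizontally by a per-pixel offset map.

left = tf.placeholder(tf.float32, [4, 256, 512, 3])   # images
disp = tf.placeholder(tf.float32, [4, 256, 512, 1])   # x offsets, as fractions of width
warped = bilinear_sampler_1d_h(left, disp)            # same shape as `left`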
| avg_line_length 41.331126 | max_line_length 155 | alphanum_fraction 0.497196 | 6,241 chars in 150 lines | ... (remaining quality signals)
13cebdf8d097569317951da787d81aebd898d39b | 7,125 | py | Python | Supernovae.py | adamamiller/iptf16hvw-1 | d674114e94b5b20398d2e4208b55eb8e2394dce9 | ["MIT"] | stars: null | issues: null | forks: 1 (2018-08-21)
# import relevant libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import ascii
import json
from IPython.display import display, Image
from specutils import Spectrum1D
from astropy import units
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import scipy.integrate as integrate
from astropy.time import Time
from Supernovae import *

# speed of light (km/s)
c = 3e5


# Define class to hold relevant information for spectra data
class Spectra:

    # Initialization function
    def __init__(self, Spectra, epoch, z, MJD_max):
        '''
        Spectra (string) - path to JSON formatted spectra file
        epoch (float) - MJD date
        z (float) - redshift of corresponding SN
        MJD_max (float) - date of B band maximum brightness for SN in MJD
        '''
        # correct flux for redshift, change wavelength to SN restframe, normalize flux and store in Spectra
        self.data = Unpack_Spectra(Spectra, z)
        # store epoch of observation
        self.epoch = float(epoch)
        # store phase of observation
        self.phase = float(epoch) - float(MJD_max)


class Lightcurve():
    def __init__(self, times, fluxes, error, band):
        self.band = band
        self.data = pd.DataFrame(list(zip(times, fluxes, error)), columns=['times', 'flux', 'err'])


# Create Supernovae class to store Spectral objects
class Supernovae(object):

    # Initialization function
    def __init__(self, name, redshift, maximum):
        '''
        name (str) - String of SN name
        redshift (float) - redshift of SN
        maximum (float) - date of B band maximum in MJD
        '''
        # Store name of SN
        self.name = name
        # Store redshift of SN
        self.redshift = redshift
        # Store date of B band maximum brightness
        self.maximum = maximum
        # initiate empty list to hold Spectra objects
        self.spectra = []
        self.lightcurves = []

    # define function to return spectra closest to given phase
    def find_spectra(self, phase1):
        '''
        Args:
            phase1 (float) - phase of interest
        Returns:
            Spectra object - Spectra object with phase closest to phase1
        '''
        index = np.argmin([abs(x.phase - phase1) for x in self.spectra])
        return self.spectra[index]

    # define function to store new spectra
    def store_spectra(self, spectra_object):
        '''
        Args:
            spectra_object (Spectra) - Spectra object to store
        '''
        # Make sure there are no duplicates and that spectra are sorted by date
        if spectra_object in self.spectra:
            self.spectra.sort(key=lambda x: x.phase)
            print('already exists')
        elif spectra_object.epoch in [x.epoch for x in self.spectra]:
            self.spectra.sort(key=lambda x: x.phase)
            pass
        else:
            self.spectra.append(spectra_object)
            self.spectra.sort(key=lambda x: x.phase)

    # define function to store lightcurve
    def store_lightcurve(self, lightcurve_object):
        if lightcurve_object in self.lightcurves:
            print('already exists')
        else:
            self.lightcurves.append(lightcurve_object)


# define function that converts wavelengths to restframe, corrects flux for redshift, and normalizes flux
def Unpack_Spectra(Spectra, z, normalization=[5000, 6000]):
    '''
    Args:
        Spectra - one epoch of spectral data in JSON format from OSN
        z (float) - redshift of SN
        normalization (list) - 2 item list containing boundaries of region used for normalization
    Returns:
        Pandas DataFrame - 2 column dataframe: wavelength and flux
            Flux is corrected for redshift and normalized
            Wavelength is converted to SN restframe
    '''
    # Extract wavelengths
    wavelengths = [float(x[0]) for x in Spectra]
    # Extract fluxes
    fluxes = [float(x[1]) for x in Spectra]
    # correct fluxes for redshift
    fluxes = [correct_flux(flux, z) for flux in fluxes]
    # Extract fluxes in normalization range
    rel_flux_range = [x for x in Spectra if (float(x[0]) > normalization[0]) & (float(x[0]) < normalization[1])]
    # Make sure rel_flux_range isn't empty
    if len(rel_flux_range) == 0:
        # print('No wavelengths in normalization region, not including spectra')
        return None
    # Calculate average flux in this range
    flux_sum = 0
    for x in rel_flux_range:
        flux_sum += float(x[1])
    average_flux = flux_sum / float(len(rel_flux_range))
    # Normalize flux
    fluxes = [float(flux) / average_flux for flux in fluxes]
    # convert wavelength to restframe
    wavelengths = [wavelength / float(1 + z) for wavelength in wavelengths]
    # store in pandas dataframe
    df = pd.DataFrame()
    df['Flux'] = fluxes
    df['Wavelength'] = wavelengths
    return df


def correct_flux(flux_obs, z):
    '''
    Args:
        flux_obs (int) - observed flux
        z (int) - redshift
    Returns:
        int - redshift corrected flux
    '''
    flux_emit = (z * flux_obs) + flux_obs
    return flux_emit


# Define function to get relevant spectra from OSN JSON data file
def create_SN_object(JSON, MJD_max, z):
    '''
    Function to create Supernovae object for given JSON data file from OSN
    Args:
        JSON (str) - path to OSN JSON file of interest
        MJD_max (int) - number of days past maximum brightness
        phase (int) - phase for spectra of interest
    Returns:
        Supernovae - Supernovae object with spectra list filled
    '''
    supernovae = Supernovae(str(JSON[0:-5]), z, MJD_max)
    # Load OSN json data
    file = open('../Data/OSN_data/' + str(JSON))
    json_data = json.load(file)
    spectra_data = json_data[JSON[0:-5]]['spectra']
    spectra_data = np.array(spectra_data)
    for i in range(len(spectra_data)):
        spectra = Spectra(spectra_data[i]['data'], float(spectra_data[i]['time']) / (1 + z), z, MJD_max)
        if spectra.data is None:
            continue
        else:
            supernovae.store_spectra(spectra)
    return supernovae


# Define function to convert calendar date to MJD
def convert_date_toMJD(date):
    '''
    Args:
        date (str) - string of calendar date (e.g. '2002-8-17')
    Returns:
        float - MJD value of given calendar date
    '''
    t = Time(date)
    t.format = 'mjd'
    return t.value


# Define function to calculate absorption velocities
def calc_abs_velc(restframe, dopplershifted):
    '''
    Args:
        restframe (float) - restframe wavelength of absorption
        dopplershifted (float) - dopplershifted wavelength of absorption
    Returns:
        float - corresponding absorption velocity
    '''
    velocity = ((restframe - dopplershifted) / float(restframe)) * c
    return velocity
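
Quick self-contained checks of the two standalone helpers (values illustrative):

print(convert_date_toMJD('2002-08-17'))  # -> 52503.0
print(calc_abs_velc(6563.0, 6400.0))     # H-alpha blueshifted by roughly 7450 km/s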
| avg_line_length 28.27381 | max_line_length 108 | alphanum_fraction 0.629474 | 7,125 chars in 251 lines | ... (remaining quality signals)
13d20c58618bae1fd9184241e64ff9b913dd727d | 9,313 | py | Python | connector/ADBConnector.py | qiutongxue/ArknightsAutoHelper | 6b97b289e9ea4e5e3f39561ef8c2217657f6ff60 | ["MIT"] | stars: 1 (2020-12-16) | issues: null | forks: null
import os
import logging.config
from random import randint
import zlib
import struct
import socket
import time

from PIL import Image

import config
# from config import ADB_ROOT, ADB_HOST, SCREEN_SHOOT_SAVE_PATH, ShellColor, CONFIG_PATH, enable_adb_host_auto_detect, ADB_SERVER
from .ADBClientSession import ADBClientSession
from util.socketutil import recvall
from . import revconn

# from numpy import average, dot, linalg

logger = logging.getLogger(__name__)


def _screencap_to_image(cap):
    w, h, pixels = cap
    return Image.frombytes('RGBA', (w, h), pixels)


def _ensure_pil_image(imgorfile):
    if isinstance(imgorfile, Image.Image):
        return imgorfile
    return Image.open(imgorfile)


def check_adb_alive():
    try:
        sess = ADBClientSession(config.ADB_SERVER)
        version = int(sess.service('host:version').read_response().decode(), 16)
        logger.debug('ADB server version %d', version)
        return True
    except ConnectionRefusedError:
        return False
    except RuntimeError:
        return False


def ensure_adb_alive():
    if check_adb_alive():
        return
    logger.info('trying to start adb server')
    import subprocess
    adbbin = config.get('device/adb_binary', None)
    if adbbin is None:
        adb_binaries = ['adb', os.path.join(config.ADB_ROOT, 'adb')]
    else:
        adb_binaries = [adbbin]
    for adbbin in adb_binaries:
        try:
            logger.debug('trying %r', adbbin)
            subprocess.run([adbbin, 'start-server'], check=True)
            return True
        except FileNotFoundError:
            pass
        except subprocess.CalledProcessError:
            pass
    raise OSError("can't start adb server")


class ADBConnector:
    def __init__(self, adb_serial=None):
        # os.chdir(ADB_ROOT)
        self.ADB_ROOT = config.ADB_ROOT
        self.adb_serial = adb_serial
        self.host_session_factory = lambda: ADBClientSession(config.ADB_SERVER)
        self.rch = None
        if self.adb_serial is None:
            self.adb_serial = self.__adb_device_name_detector()
        self.device_session_factory = lambda: self.host_session_factory().device(self.adb_serial)
        self.cache_screenshot = config.get('device/cache_screenshot', True)
        self.last_screenshot_timestamp = 0
        self.last_screenshot_duration = 0
        self.last_screenshot = None
        if config.get('device/try_emulator_enhanced_mode', True):
            loopbacks = self._detect_loopbacks()
            if len(loopbacks):
                logger.debug('possible loopback addresses: %s', repr(loopbacks))
                self.rch = revconn.ReverseConnectionHost()
                self.rch.start()
                if self._test_reverse_connection(loopbacks):
                    logger.info('using emulator-optimized mode')
                    self.screencap = self._reverse_connection_screencap
                else:
                    self.rch.stop()
        else:
            self.loopback = None

    def __del__(self):
        if self.rch and self.rch.is_alive():
            self.rch.stop()

    def __adb_device_name_detector(self):
        devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
        if len(devices) == 0:
            auto_connect = config.get('device/adb_auto_connect', None)
            if auto_connect is not None:
                logger.info('no device connected, trying to connect to %s', auto_connect)
                try:
                    self.host_session_factory().disconnect(auto_connect)
                except Exception:
                    pass
                self.host_session_factory().connect(auto_connect)
            else:
                raise RuntimeError('no usable device found')
            devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
        always_use_device = config.get('device/adb_always_use_device', None)
        if always_use_device is not None:
            if always_use_device not in (x[0] for x in devices):
                raise RuntimeError('device %s is not connected' % always_use_device)
            return always_use_device
        if len(devices) == 1:
            device_name = devices[0][0]
        elif len(devices) > 1:
            logger.info("multiple devices detected")
            num = 0
            while True:
                try:
                    num = int(input("enter a number to select a device: "))
                    if not 0 <= num < len(devices):
                        raise ValueError()
                    break
                except ValueError:
                    logger.error("invalid input, please try again")
            device_name = devices[num][0]
        else:
            raise RuntimeError('no usable device found')
        logger.info("using device: " + device_name)
        return device_name

    def run_device_cmd(self, cmd, DEBUG_LEVEL=2):
        output = self.device_session_factory().exec(cmd)
        logger.debug("command: %s", cmd)
        logger.debug("output: %s", repr(output))
        return output

    def get_sub_screen(self, image, screen_range):
        return image.crop(
            (
                screen_range[0][0],
                screen_range[0][1],
                screen_range[0][0] + screen_range[1][0],
                screen_range[0][1] + screen_range[1][1]
            )
        )

    def _detect_loopbacks(self):
        board = self.device_session_factory().exec('getprop ro.product.board')
        if b'goldfish' in board:
            return ['10.0.2.2']
        modules = self.device_session_factory().exec('grep -o vboxguest /proc/modules')
        if b'vboxguest' in modules:
            arp = self.device_session_factory().exec('cat /proc/net/arp')
            return [x[:x.find(b' ')].decode() for x in arp.splitlines()[1:]]
        return []

    def _test_reverse_connection(self, loopbacks):
        for addr in loopbacks:
            logger.debug('testing loopback address %s', addr)
            future = self.rch.register_cookie()
            with future:
                cmd = 'echo -n %sOKAY | nc -w 1 %s %d' % (future.cookie.decode(), addr, self.rch.port)
                logger.debug(cmd)
                control_sock = self.device_session_factory().exec_stream(cmd)
                with control_sock:
                    conn = future.get(2)
                    if conn is not None:
                        data = recvall(conn)
                        conn.close()
                        if data == b'OKAY':
                            self.loopback = addr
                            logger.debug('found loopback address %s', addr)
                            return True
        return False

    def screencap_png(self):
        """returns PNG bytes"""
        s = self.device_session_factory().exec_stream('screencap -p')
        data = recvall(s, 4194304)
        return data

    def screencap(self):
        """returns (width, height, pixels)
        pixels in RGBA/RGBX format"""
        s = self.device_session_factory().exec_stream('screencap|gzip -1')
        data = recvall(s, 4194304)
        s.close()
        data = zlib.decompress(data, zlib.MAX_WBITS | 16, 8388608)
        w, h, f = struct.unpack_from('III', data, 0)
        assert (f == 1)
        return (w, h, data[12:])

    def _reverse_connection_screencap(self):
        """returns (width, height, pixels)
        pixels in RGBA/RGBX format"""
        future = self.rch.register_cookie()
        with future:
            control_sock = self.device_session_factory().exec_stream('(echo -n %s; screencap) | nc %s %d' % (future.cookie.decode(), self.loopback, self.rch.port))
            with control_sock:
                with future.get() as conn:
                    data = recvall(conn, 8388608, True)
        w, h, f = struct.unpack_from('III', data, 0)
        assert (f == 1)
        return (w, h, data[12:].tobytes())

    def screenshot(self, cached=True):
        t0 = time.monotonic()
        if cached and self.cache_screenshot:
            if self.last_screenshot is not None and t0 - self.last_screenshot_timestamp < self.last_screenshot_duration:
                return self.last_screenshot
        rawcap = self.screencap()
        img = _screencap_to_image(rawcap)
        t1 = time.monotonic()
        self.last_screenshot_timestamp = t1
        self.last_screenshot_duration = t1 - t0
        self.last_screenshot = img
        return img

    def touch_swipe2(self, origin, movement, duration=None):
        # sleep(1)
        x1, y1, x2, y2 = origin[0], origin[1], origin[0] + movement[0], origin[1] + movement[1]
        logger.debug("swipe from ({},{}) with movement dX: {}, dY: {}".format(*origin, *movement))
        command = "input swipe {} {} {} {} ".format(x1, y1, x2, y2)
        if duration is not None:
            command += str(int(duration))
        self.run_device_cmd(command)

    def touch_tap(self, XY=None, offsets=None):
        # sleep(10)
        # sleep(0.5)
        if offsets is not None:
            final_X = XY[0] + randint(-offsets[0], offsets[0])
            final_Y = XY[1] + randint(-offsets[1], offsets[1])
        else:
            final_X = XY[0] + randint(-1, 1)
            final_Y = XY[1] + randint(-1, 1)
        # if you run into problems, print this output and share the log for debugging
        logger.debug("tap at ({},{})".format(final_X, final_Y))
        command = "input tap {} {}".format(final_X,
                                           final_Y)
        self.run_device_cmd(command)
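
A minimal usage sketch (assumes a running adb server and one connected device):

if __name__ == '__main__':
    connector = ADBConnector()           # auto-detects the device serial
    screen = connector.screenshot()      # PIL.Image of the current frame
    connector.touch_tap((100, 200), offsets=(5, 5))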
| avg_line_length 36.521569 | max_line_length 163 | alphanum_fraction 0.57962 | 9,313 chars in 254 lines | ... (remaining quality signals)
13d2e35293cf4b36df0d8aa584ec383e80a8a174 | 263 | py | Python | main.py | Gloriel621/MgallManager | 7d5c02ab6bdc2f6c6922d4a7e021faef33d868bb | ["MIT"] | stars: 9 (2021-12-22 to 2022-03-09) | issues: 4 (2021-12-16 to 2022-02-16) | forks: 1 (2021-12-22)
import sys

from PyQt5.QtWidgets import QApplication

from gui import MgallManager


def main():
    app = QApplication(sys.argv)
    ex = MgallManager()
    app.aboutToQuit.connect(ex.ExitHandler)
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
| avg_line_length 16.4375 | max_line_length 43 | alphanum_fraction 0.69962 | 263 chars in 15 lines | ... (remaining quality signals)
13d3859368c4908cf2a507d1bd62d989795acc1a | 54,009 | py | Python | pysc2/lib/actions.py | javierrcc522/starcraft2_api_machineLear | 5833ba1344ab5445c4f09fafc33e6058070ebe6c | ["Apache-2.0"] | stars: 2 (2020-04-30 to 2021-03-21) | issues: null | forks: null
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of types and actions for SC2."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import collections
import numbers
import six

from pysc2.lib import point

from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui


def no_op(action):
  del action


def move_camera(action, minimap):
  """Move the camera."""
  minimap.assign_to(action.action_feature_layer.camera_move.center_minimap)


def select_point(action, select_point_act, screen):
  """Select a unit at a point."""
  select = action.action_feature_layer.unit_selection_point
  screen.assign_to(select.selection_screen_coord)
  select.type = select_point_act


def select_rect(action, select_add, screen, screen2):
  """Select units within a rectangle."""
  select = action.action_feature_layer.unit_selection_rect
  out_rect = select.selection_screen_coord.add()
  screen_rect = point.Rect(screen, screen2)
  screen_rect.tl.assign_to(out_rect.p0)
  screen_rect.br.assign_to(out_rect.p1)
  select.selection_add = bool(select_add)


def select_idle_worker(action, select_worker):
  """Select an idle worker."""
  action.action_ui.select_idle_worker.type = select_worker


def select_army(action, select_add):
  """Select the entire army."""
  action.action_ui.select_army.selection_add = select_add


def select_warp_gates(action, select_add):
  """Select all warp gates."""
  action.action_ui.select_warp_gates.selection_add = select_add


def select_larva(action):
  """Select all larva."""
  action.action_ui.select_larva.SetInParent()  # Adds the empty proto field.


def select_unit(action, select_unit_act, select_unit_id):
  """Select a specific unit from the multi-unit selection."""
  select = action.action_ui.multi_panel
  select.type = select_unit_act
  select.unit_index = select_unit_id


def control_group(action, control_group_act, control_group_id):
  """Act on a control group, selecting, setting, etc."""
  select = action.action_ui.control_group
  select.action = control_group_act
  select.control_group_index = control_group_id


def unload(action, unload_id):
  """Unload a unit from a transport/bunker/nydus/etc."""
  action.action_ui.cargo_panel.unit_index = unload_id


def build_queue(action, build_queue_id):
  """Cancel a unit in the build queue."""
  action.action_ui.production_panel.unit_index = build_queue_id


def cmd_quick(action, ability_id, queued):
  """Do a quick command like 'Stop' or 'Stim'."""
  action_cmd = action.action_feature_layer.unit_command
  action_cmd.ability_id = ability_id
  action_cmd.queue_command = queued


def cmd_screen(action, ability_id, queued, screen):
  """Do a command that needs a point on the screen."""
  action_cmd = action.action_feature_layer.unit_command
  action_cmd.ability_id = ability_id
  action_cmd.queue_command = queued
  screen.assign_to(action_cmd.target_screen_coord)


def cmd_minimap(action, ability_id, queued, minimap):
  """Do a command that needs a point on the minimap."""
  action_cmd = action.action_feature_layer.unit_command
  action_cmd.ability_id = ability_id
  action_cmd.queue_command = queued
  minimap.assign_to(action_cmd.target_minimap_coord)


def autocast(action, ability_id):
  """Toggle autocast."""
  action.action_ui.toggle_autocast.ability_id = ability_id
class ArgumentType(collections.namedtuple(
    "ArgumentType", ["id", "name", "sizes", "fn"])):
  """Represents a single argument type.

  Attributes:
    id: The argument id. This is unique.
    name: The name of the argument, also unique.
    sizes: The max+1 of each of the dimensions this argument takes.
    fn: The function to convert the list of integers into something more
        meaningful to be set in the protos to send to the game.
  """
  __slots__ = ()

  def __str__(self):
    return "%s/%s %s" % (self.id, self.name, list(self.sizes))

  @classmethod
  def enum(cls, options):
    """Create an ArgumentType where you choose one of a set of known values."""
    return cls(-1, "<none>", (len(options),), lambda a: options[a[0]])

  @classmethod
  def scalar(cls, value):
    """Create an ArgumentType with a single scalar in range(value)."""
    return cls(-1, "<none>", (value,), lambda a: a[0])

  @classmethod
  def point(cls):  # No range because it's unknown at this time.
    """Create an ArgumentType that is represented by a point.Point."""
    return cls(-1, "<none>", (0, 0), lambda a: point.Point(*a).floor())

  @classmethod
  def spec(cls, id_, name, sizes):
    """Create an ArgumentType to be used in ValidActions."""
    return cls(id_, name, sizes, None)
class Arguments(collections.namedtuple("Arguments", [
    "screen", "minimap", "screen2", "queued", "control_group_act",
    "control_group_id", "select_point_act", "select_add", "select_unit_act",
    "select_unit_id", "select_worker", "build_queue_id", "unload_id"])):
  """The full list of argument types.

  Take a look at TYPES and FUNCTION_TYPES for more details.

  Attributes:
    screen: A point on the screen.
    minimap: A point on the minimap.
    screen2: The second point for a rectangle. This is needed so that no
        function takes the same type twice.
    queued: Whether the action should be done now or later.
    control_group_act: What to do with the control group.
    control_group_id: Which control group to do it with.
    select_point_act: What to do with the unit at the point.
    select_add: Whether to add the unit to the selection or replace it.
    select_unit_act: What to do when selecting a unit by id.
    select_unit_id: Which unit to select by id.
    select_worker: What to do when selecting a worker.
    build_queue_id: Which build queue index to target.
    unload_id: Which unit to target in a transport/nydus/command center.
  """
  __slots__ = ()

  @classmethod
  def types(cls, **kwargs):
    """Create an Arguments of the possible Types."""
    named = {name: type_._replace(id=Arguments._fields.index(name), name=name)
             for name, type_ in six.iteritems(kwargs)}
    return cls(**named)
# The list of known types.
TYPES = Arguments.types(
    screen=ArgumentType.point(),
    minimap=ArgumentType.point(),
    screen2=ArgumentType.point(),
    queued=ArgumentType.enum([False, True]),  # (now vs add to queue)
    control_group_act=ArgumentType.enum([
        sc_ui.ActionControlGroup.Recall,
        sc_ui.ActionControlGroup.Set,
        sc_ui.ActionControlGroup.Append,
        sc_ui.ActionControlGroup.SetAndSteal,
        sc_ui.ActionControlGroup.AppendAndSteal,
    ]),
    control_group_id=ArgumentType.scalar(10),
    select_point_act=ArgumentType.enum([
        sc_spatial.ActionSpatialUnitSelectionPoint.Select,
        sc_spatial.ActionSpatialUnitSelectionPoint.Toggle,
        sc_spatial.ActionSpatialUnitSelectionPoint.AllType,
        sc_spatial.ActionSpatialUnitSelectionPoint.AddAllType,
    ]),
    select_add=ArgumentType.enum([False, True]),  # (select vs select_add)
    select_unit_act=ArgumentType.enum([
        sc_ui.ActionMultiPanel.SingleSelect,
        sc_ui.ActionMultiPanel.DeselectUnit,
        sc_ui.ActionMultiPanel.SelectAllOfType,
        sc_ui.ActionMultiPanel.DeselectAllOfType,
    ]),
    select_unit_id=ArgumentType.scalar(500),  # Depends on current selection.
    select_worker=ArgumentType.enum([
        sc_ui.ActionSelectIdleWorker.Set,
        sc_ui.ActionSelectIdleWorker.Add,
        sc_ui.ActionSelectIdleWorker.All,
        sc_ui.ActionSelectIdleWorker.AddAll,
    ]),
    build_queue_id=ArgumentType.scalar(10),  # Depends on current build queue.
    unload_id=ArgumentType.scalar(500),  # Depends on the current loaded units.
)

# Which argument types do each function need?
FUNCTION_TYPES = {
    no_op: [],
    move_camera: [TYPES.minimap],
    select_point: [TYPES.select_point_act, TYPES.screen],
    select_rect: [TYPES.select_add, TYPES.screen, TYPES.screen2],
    select_unit: [TYPES.select_unit_act, TYPES.select_unit_id],
    control_group: [TYPES.control_group_act, TYPES.control_group_id],
    select_idle_worker: [TYPES.select_worker],
    select_army: [TYPES.select_add],
    select_warp_gates: [TYPES.select_add],
    select_larva: [],
    unload: [TYPES.unload_id],
    build_queue: [TYPES.build_queue_id],
    cmd_quick: [TYPES.queued],
    cmd_screen: [TYPES.queued, TYPES.screen],
    cmd_minimap: [TYPES.queued, TYPES.minimap],
    autocast: [],
}

# Which ones need an ability?
ABILITY_FUNCTIONS = {cmd_quick, cmd_screen, cmd_minimap, autocast}

# Which ones require a point?
POINT_REQUIRED_FUNCS = {
    False: {cmd_quick, autocast},
    True: {cmd_screen, cmd_minimap, autocast}}

always = lambda _: True
class Function(collections.namedtuple(
    "Function", ["id", "name", "ability_id", "general_id", "function_type",
                 "args", "avail_fn"])):
  """Represents a function action.

  Attributes:
    id: The function id, which is what the agent will use.
    name: The name of the function. Should be unique.
    ability_id: The ability id to pass to sc2.
    general_id: 0 for normal abilities, and the ability_id of another ability if
        it can be represented by a more general action.
    function_type: One of the functions in FUNCTION_TYPES for how to construct
        the sc2 action proto out of python types.
    args: A list of the types of args passed to function_type.
    avail_fn: For non-abilities, this function returns whether the function is
        valid.
  """
  __slots__ = ()

  @classmethod
  def ui_func(cls, id_, name, function_type, avail_fn=always):
    """Define a function representing a ui action."""
    return cls(id_, name, 0, 0, function_type, FUNCTION_TYPES[function_type],
               avail_fn)

  @classmethod
  def ability(cls, id_, name, function_type, ability_id, general_id=0):
    """Define a function represented as a game ability."""
    assert function_type in ABILITY_FUNCTIONS
    return cls(id_, name, ability_id, general_id, function_type,
               FUNCTION_TYPES[function_type], None)

  @classmethod
  def spec(cls, id_, name, args):
    """Create a Function to be used in ValidActions."""
    return cls(id_, name, None, None, None, args, None)

  def __hash__(self):  # So it can go in a set().
    return self.id

  def __str__(self):
    return self.str()

  def str(self, space=False):
    """String version. Set space=True to line them all up nicely."""
    return "%s/%s (%s)" % (str(self.id).rjust(space and 4),
                           self.name.ljust(space and 50),
                           "; ".join(str(a) for a in self.args))


class Functions(object):
  """Represents the full set of functions.

  Can't use namedtuple since python3 has a limit of 255 function arguments, so
  build something similar.
  """

  def __init__(self, functions):
    self._func_list = functions
    self._func_dict = {f.name: f for f in functions}
    if len(self._func_dict) != len(self._func_list):
      raise ValueError("Function names must be unique.")

  def __getattr__(self, name):
    return self._func_dict[name]

  def __getitem__(self, key):
    if isinstance(key, numbers.Number):
      return self._func_list[key]
    return self._func_dict[key]

  def __iter__(self):
    return iter(self._func_list)

  def __len__(self):
    return len(self._func_list)
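
# Functions supports lookup by attribute, name, or index once FUNCTIONS
# (below) is built: FUNCTIONS.move_camera, FUNCTIONS["move_camera"] and
# FUNCTIONS[1] all resolve to the same entry.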
# pylint: disable=line-too-long
FUNCTIONS = Functions([
    Function.ui_func(0, "no_op", no_op),
    Function.ui_func(1, "move_camera", move_camera),
    Function.ui_func(2, "select_point", select_point),
    Function.ui_func(3, "select_rect", select_rect),
    Function.ui_func(4, "select_control_group", control_group),
    Function.ui_func(5, "select_unit", select_unit,
                     lambda obs: obs.ui_data.HasField("multi")),
    Function.ui_func(6, "select_idle_worker", select_idle_worker,
                     lambda obs: obs.player_common.idle_worker_count > 0),
    Function.ui_func(7, "select_army", select_army,
                     lambda obs: obs.player_common.army_count > 0),
    Function.ui_func(8, "select_warp_gates", select_warp_gates,
                     lambda obs: obs.player_common.warp_gate_count > 0),
    Function.ui_func(9, "select_larva", select_larva,
                     lambda obs: obs.player_common.larva_count > 0),
    Function.ui_func(10, "unload", unload,
                     lambda obs: obs.ui_data.HasField("cargo")),
    Function.ui_func(11, "build_queue", build_queue,
                     lambda obs: obs.ui_data.HasField("production")),
    # Everything below here is generated with gen_actions.py
    Function.ability(12, "Attack_screen", cmd_screen, 3674),
    Function.ability(13, "Attack_minimap", cmd_minimap, 3674),
    Function.ability(14, "Attack_Attack_screen", cmd_screen, 23, 3674),
    Function.ability(15, "Attack_Attack_minimap", cmd_minimap, 23, 3674),
    Function.ability(16, "Attack_AttackBuilding_screen", cmd_screen, 2048, 3674),
    Function.ability(17, "Attack_AttackBuilding_minimap", cmd_minimap, 2048, 3674),
    Function.ability(18, "Attack_Redirect_screen", cmd_screen, 1682, 3674),
    Function.ability(19, "Scan_Move_screen", cmd_screen, 19, 3674),
    Function.ability(20, "Scan_Move_minimap", cmd_minimap, 19, 3674),
    Function.ability(21, "Behavior_BuildingAttackOff_quick", cmd_quick, 2082),
    Function.ability(22, "Behavior_BuildingAttackOn_quick", cmd_quick, 2081),
    Function.ability(23, "Behavior_CloakOff_quick", cmd_quick, 3677),
    Function.ability(24, "Behavior_CloakOff_Banshee_quick", cmd_quick, 393, 3677),
    Function.ability(25, "Behavior_CloakOff_Ghost_quick", cmd_quick, 383, 3677),
    Function.ability(26, "Behavior_CloakOn_quick", cmd_quick, 3676),
    Function.ability(27, "Behavior_CloakOn_Banshee_quick", cmd_quick, 392, 3676),
    Function.ability(28, "Behavior_CloakOn_Ghost_quick", cmd_quick, 382, 3676),
    Function.ability(29, "Behavior_GenerateCreepOff_quick", cmd_quick, 1693),
    Function.ability(30, "Behavior_GenerateCreepOn_quick", cmd_quick, 1692),
    Function.ability(31, "Behavior_HoldFireOff_quick", cmd_quick, 3689),
    Function.ability(32, "Behavior_HoldFireOff_Ghost_quick", cmd_quick, 38, 3689),
    Function.ability(33, "Behavior_HoldFireOff_Lurker_quick", cmd_quick, 2552, 3689),
    Function.ability(34, "Behavior_HoldFireOn_quick", cmd_quick, 3688),
    Function.ability(35, "Behavior_HoldFireOn_Ghost_quick", cmd_quick, 36, 3688),
    Function.ability(36, "Behavior_HoldFireOn_Lurker_quick", cmd_quick, 2550, 3688),
    Function.ability(37, "Behavior_PulsarBeamOff_quick", cmd_quick, 2376),
    Function.ability(38, "Behavior_PulsarBeamOn_quick", cmd_quick, 2375),
    Function.ability(39, "Build_Armory_screen", cmd_screen, 331),
    Function.ability(40, "Build_Assimilator_screen", cmd_screen, 882),
    Function.ability(41, "Build_BanelingNest_screen", cmd_screen, 1162),
    Function.ability(42, "Build_Barracks_screen", cmd_screen, 321),
    Function.ability(43, "Build_Bunker_screen", cmd_screen, 324),
    Function.ability(44, "Build_CommandCenter_screen", cmd_screen, 318),
    Function.ability(45, "Build_CreepTumor_screen", cmd_screen, 3691),
    Function.ability(46, "Build_CreepTumor_Queen_screen", cmd_screen, 1694, 3691),
    Function.ability(47, "Build_CreepTumor_Tumor_screen", cmd_screen, 1733, 3691),
    Function.ability(48, "Build_CyberneticsCore_screen", cmd_screen, 894),
    Function.ability(49, "Build_DarkShrine_screen", cmd_screen, 891),
    Function.ability(50, "Build_EngineeringBay_screen", cmd_screen, 322),
    Function.ability(51, "Build_EvolutionChamber_screen", cmd_screen, 1156),
    Function.ability(52, "Build_Extractor_screen", cmd_screen, 1154),
    Function.ability(53, "Build_Factory_screen", cmd_screen, 328),
    Function.ability(54, "Build_FleetBeacon_screen", cmd_screen, 885),
    Function.ability(55, "Build_Forge_screen", cmd_screen, 884),
    Function.ability(56, "Build_FusionCore_screen", cmd_screen, 333),
    Function.ability(57, "Build_Gateway_screen", cmd_screen, 883),
    Function.ability(58, "Build_GhostAcademy_screen", cmd_screen, 327),
    Function.ability(59, "Build_Hatchery_screen", cmd_screen, 1152),
    Function.ability(60, "Build_HydraliskDen_screen", cmd_screen, 1157),
    Function.ability(61, "Build_InfestationPit_screen", cmd_screen, 1160),
    Function.ability(62, "Build_Interceptors_quick", cmd_quick, 1042),
    Function.ability(63, "Build_Interceptors_autocast", autocast, 1042),
    Function.ability(64, "Build_MissileTurret_screen", cmd_screen, 323),
    Function.ability(65, "Build_Nexus_screen", cmd_screen, 880),
    Function.ability(66, "Build_Nuke_quick", cmd_quick, 710),
    Function.ability(67, "Build_NydusNetwork_screen", cmd_screen, 1161),
    Function.ability(68, "Build_NydusWorm_screen", cmd_screen, 1768),
    Function.ability(69, "Build_PhotonCannon_screen", cmd_screen, 887),
    Function.ability(70, "Build_Pylon_screen", cmd_screen, 881),
    Function.ability(71, "Build_Reactor_quick", cmd_quick, 3683),
    Function.ability(72, "Build_Reactor_screen", cmd_screen, 3683),
    Function.ability(73, "Build_Reactor_Barracks_quick", cmd_quick, 422, 3683),
    Function.ability(74, "Build_Reactor_Barracks_screen", cmd_screen, 422, 3683),
    Function.ability(75, "Build_Reactor_Factory_quick", cmd_quick, 455, 3683),
    Function.ability(76, "Build_Reactor_Factory_screen", cmd_screen, 455, 3683),
    Function.ability(77, "Build_Reactor_Starport_quick", cmd_quick, 488, 3683),
    Function.ability(78, "Build_Reactor_Starport_screen", cmd_screen, 488, 3683),
    Function.ability(79, "Build_Refinery_screen", cmd_screen, 320),
    Function.ability(80, "Build_RoachWarren_screen", cmd_screen, 1165),
    Function.ability(81, "Build_RoboticsBay_screen", cmd_screen, 892),
    Function.ability(82, "Build_RoboticsFacility_screen", cmd_screen, 893),
    Function.ability(83, "Build_SensorTower_screen", cmd_screen, 326),
    Function.ability(84, "Build_SpawningPool_screen", cmd_screen, 1155),
    Function.ability(85, "Build_SpineCrawler_screen", cmd_screen, 1166),
    Function.ability(86, "Build_Spire_screen", cmd_screen, 1158),
    Function.ability(87, "Build_SporeCrawler_screen", cmd_screen, 1167),
    Function.ability(88, "Build_Stargate_screen", cmd_screen, 889),
    Function.ability(89, "Build_Starport_screen", cmd_screen, 329),
    Function.ability(90, "Build_StasisTrap_screen", cmd_screen, 2505),
    Function.ability(91, "Build_SupplyDepot_screen", cmd_screen, 319),
Function.ability(92, "Build_TechLab_quick", cmd_quick, 3682),
Function.ability(93, "Build_TechLab_screen", cmd_screen, 3682),
Function.ability(94, "Build_TechLab_Barracks_quick", cmd_quick, 421, 3682),
Function.ability(95, "Build_TechLab_Barracks_screen", cmd_screen, 421, 3682),
Function.ability(96, "Build_TechLab_Factory_quick", cmd_quick, 454, 3682),
Function.ability(97, "Build_TechLab_Factory_screen", cmd_screen, 454, 3682),
Function.ability(98, "Build_TechLab_Starport_quick", cmd_quick, 487, 3682),
Function.ability(99, "Build_TechLab_Starport_screen", cmd_screen, 487, 3682),
Function.ability(100, "Build_TemplarArchive_screen", cmd_screen, 890),
Function.ability(101, "Build_TwilightCouncil_screen", cmd_screen, 886),
Function.ability(102, "Build_UltraliskCavern_screen", cmd_screen, 1159),
Function.ability(103, "BurrowDown_quick", cmd_quick, 3661),
Function.ability(104, "BurrowDown_Baneling_quick", cmd_quick, 1374, 3661),
Function.ability(105, "BurrowDown_Drone_quick", cmd_quick, 1378, 3661),
Function.ability(106, "BurrowDown_Hydralisk_quick", cmd_quick, 1382, 3661),
Function.ability(107, "BurrowDown_Infestor_quick", cmd_quick, 1444, 3661),
Function.ability(108, "BurrowDown_InfestorTerran_quick", cmd_quick, 1394, 3661),
Function.ability(109, "BurrowDown_Lurker_quick", cmd_quick, 2108, 3661),
Function.ability(110, "BurrowDown_Queen_quick", cmd_quick, 1433, 3661),
Function.ability(111, "BurrowDown_Ravager_quick", cmd_quick, 2340, 3661),
Function.ability(112, "BurrowDown_Roach_quick", cmd_quick, 1386, 3661),
Function.ability(113, "BurrowDown_SwarmHost_quick", cmd_quick, 2014, 3661),
Function.ability(114, "BurrowDown_Ultralisk_quick", cmd_quick, 1512, 3661),
Function.ability(115, "BurrowDown_WidowMine_quick", cmd_quick, 2095, 3661),
Function.ability(116, "BurrowDown_Zergling_quick", cmd_quick, 1390, 3661),
Function.ability(117, "BurrowUp_quick", cmd_quick, 3662),
Function.ability(118, "BurrowUp_autocast", autocast, 3662),
Function.ability(119, "BurrowUp_Baneling_quick", cmd_quick, 1376, 3662),
Function.ability(120, "BurrowUp_Baneling_autocast", autocast, 1376, 3662),
Function.ability(121, "BurrowUp_Drone_quick", cmd_quick, 1380, 3662),
Function.ability(122, "BurrowUp_Hydralisk_quick", cmd_quick, 1384, 3662),
Function.ability(123, "BurrowUp_Hydralisk_autocast", autocast, 1384, 3662),
Function.ability(124, "BurrowUp_Infestor_quick", cmd_quick, 1446, 3662),
Function.ability(125, "BurrowUp_InfestorTerran_quick", cmd_quick, 1396, 3662),
Function.ability(126, "BurrowUp_InfestorTerran_autocast", autocast, 1396, 3662),
Function.ability(127, "BurrowUp_Lurker_quick", cmd_quick, 2110, 3662),
Function.ability(128, "BurrowUp_Queen_quick", cmd_quick, 1435, 3662),
Function.ability(129, "BurrowUp_Queen_autocast", autocast, 1435, 3662),
Function.ability(130, "BurrowUp_Ravager_quick", cmd_quick, 2342, 3662),
Function.ability(131, "BurrowUp_Ravager_autocast", autocast, 2342, 3662),
Function.ability(132, "BurrowUp_Roach_quick", cmd_quick, 1388, 3662),
Function.ability(133, "BurrowUp_Roach_autocast", autocast, 1388, 3662),
Function.ability(134, "BurrowUp_SwarmHost_quick", cmd_quick, 2016, 3662),
Function.ability(135, "BurrowUp_Ultralisk_quick", cmd_quick, 1514, 3662),
Function.ability(136, "BurrowUp_Ultralisk_autocast", autocast, 1514, 3662),
Function.ability(137, "BurrowUp_WidowMine_quick", cmd_quick, 2097, 3662),
Function.ability(138, "BurrowUp_Zergling_quick", cmd_quick, 1392, 3662),
Function.ability(139, "BurrowUp_Zergling_autocast", autocast, 1392, 3662),
Function.ability(140, "Cancel_quick", cmd_quick, 3659),
Function.ability(141, "Cancel_AdeptPhaseShift_quick", cmd_quick, 2594, 3659),
Function.ability(142, "Cancel_AdeptShadePhaseShift_quick", cmd_quick, 2596, 3659),
Function.ability(143, "Cancel_BarracksAddOn_quick", cmd_quick, 451, 3659),
Function.ability(144, "Cancel_BuildInProgress_quick", cmd_quick, 314, 3659),
Function.ability(145, "Cancel_CreepTumor_quick", cmd_quick, 1763, 3659),
Function.ability(146, "Cancel_FactoryAddOn_quick", cmd_quick, 484, 3659),
Function.ability(147, "Cancel_GravitonBeam_quick", cmd_quick, 174, 3659),
Function.ability(148, "Cancel_LockOn_quick", cmd_quick, 2354, 3659),
Function.ability(149, "Cancel_MorphBroodlord_quick", cmd_quick, 1373, 3659),
Function.ability(150, "Cancel_MorphGreaterSpire_quick", cmd_quick, 1221, 3659),
Function.ability(151, "Cancel_MorphHive_quick", cmd_quick, 1219, 3659),
Function.ability(152, "Cancel_MorphLair_quick", cmd_quick, 1217, 3659),
Function.ability(153, "Cancel_MorphLurker_quick", cmd_quick, 2333, 3659),
Function.ability(154, "Cancel_MorphLurkerDen_quick", cmd_quick, 2113, 3659),
Function.ability(155, "Cancel_MorphMothership_quick", cmd_quick, 1848, 3659),
Function.ability(156, "Cancel_MorphOrbital_quick", cmd_quick, 1517, 3659),
Function.ability(157, "Cancel_MorphOverlordTransport_quick", cmd_quick, 2709, 3659),
Function.ability(158, "Cancel_MorphOverseer_quick", cmd_quick, 1449, 3659),
Function.ability(159, "Cancel_MorphPlanetaryFortress_quick", cmd_quick, 1451, 3659),
Function.ability(160, "Cancel_MorphRavager_quick", cmd_quick, 2331, 3659),
Function.ability(161, "Cancel_MorphThorExplosiveMode_quick", cmd_quick, 2365, 3659),
Function.ability(162, "Cancel_NeuralParasite_quick", cmd_quick, 250, 3659),
Function.ability(163, "Cancel_Nuke_quick", cmd_quick, 1623, 3659),
Function.ability(164, "Cancel_SpineCrawlerRoot_quick", cmd_quick, 1730, 3659),
Function.ability(165, "Cancel_SporeCrawlerRoot_quick", cmd_quick, 1732, 3659),
Function.ability(166, "Cancel_StarportAddOn_quick", cmd_quick, 517, 3659),
Function.ability(167, "Cancel_StasisTrap_quick", cmd_quick, 2535, 3659),
Function.ability(168, "Cancel_Last_quick", cmd_quick, 3671),
Function.ability(169, "Cancel_HangarQueue5_quick", cmd_quick, 1038, 3671),
Function.ability(170, "Cancel_Queue1_quick", cmd_quick, 304, 3671),
Function.ability(171, "Cancel_Queue5_quick", cmd_quick, 306, 3671),
Function.ability(172, "Cancel_QueueAddOn_quick", cmd_quick, 312, 3671),
Function.ability(173, "Cancel_QueueCancelToSelection_quick", cmd_quick, 308, 3671),
Function.ability(174, "Cancel_QueuePasive_quick", cmd_quick, 1831, 3671),
Function.ability(175, "Cancel_QueuePassiveCancelToSelection_quick", cmd_quick, 1833, 3671),
Function.ability(176, "Effect_Abduct_screen", cmd_screen, 2067),
Function.ability(177, "Effect_AdeptPhaseShift_screen", cmd_screen, 2544),
Function.ability(178, "Effect_AutoTurret_screen", cmd_screen, 1764),
Function.ability(179, "Effect_BlindingCloud_screen", cmd_screen, 2063),
Function.ability(180, "Effect_Blink_screen", cmd_screen, 3687),
Function.ability(181, "Effect_Blink_Stalker_screen", cmd_screen, 1442, 3687),
Function.ability(182, "Effect_ShadowStride_screen", cmd_screen, 2700, 3687),
Function.ability(183, "Effect_CalldownMULE_screen", cmd_screen, 171),
Function.ability(184, "Effect_CausticSpray_screen", cmd_screen, 2324),
Function.ability(185, "Effect_Charge_screen", cmd_screen, 1819),
Function.ability(186, "Effect_Charge_autocast", autocast, 1819),
Function.ability(187, "Effect_ChronoBoost_screen", cmd_screen, 261),
Function.ability(188, "Effect_Contaminate_screen", cmd_screen, 1825),
Function.ability(189, "Effect_CorrosiveBile_screen", cmd_screen, 2338),
Function.ability(190, "Effect_EMP_screen", cmd_screen, 1628),
Function.ability(191, "Effect_Explode_quick", cmd_quick, 42),
Function.ability(192, "Effect_Feedback_screen", cmd_screen, 140),
Function.ability(193, "Effect_ForceField_screen", cmd_screen, 1526),
Function.ability(194, "Effect_FungalGrowth_screen", cmd_screen, 74),
Function.ability(195, "Effect_GhostSnipe_screen", cmd_screen, 2714),
Function.ability(196, "Effect_GravitonBeam_screen", cmd_screen, 173),
Function.ability(197, "Effect_GuardianShield_quick", cmd_quick, 76),
Function.ability(198, "Effect_Heal_screen", cmd_screen, 386),
Function.ability(199, "Effect_Heal_autocast", autocast, 386),
Function.ability(200, "Effect_HunterSeekerMissile_screen", cmd_screen, 169),
Function.ability(201, "Effect_ImmortalBarrier_quick", cmd_quick, 2328),
Function.ability(202, "Effect_ImmortalBarrier_autocast", autocast, 2328),
Function.ability(203, "Effect_InfestedTerrans_screen", cmd_screen, 247),
Function.ability(204, "Effect_InjectLarva_screen", cmd_screen, 251),
Function.ability(205, "Effect_KD8Charge_screen", cmd_screen, 2588),
Function.ability(206, "Effect_LockOn_screen", cmd_screen, 2350),
Function.ability(207, "Effect_LocustSwoop_screen", cmd_screen, 2387),
Function.ability(208, "Effect_MassRecall_screen", cmd_screen, 3686),
Function.ability(209, "Effect_MassRecall_Mothership_screen", cmd_screen, 2368, 3686),
Function.ability(210, "Effect_MassRecall_MothershipCore_screen", cmd_screen, 1974, 3686),
Function.ability(211, "Effect_MedivacIgniteAfterburners_quick", cmd_quick, 2116),
Function.ability(212, "Effect_NeuralParasite_screen", cmd_screen, 249),
Function.ability(213, "Effect_NukeCalldown_screen", cmd_screen, 1622),
Function.ability(214, "Effect_OracleRevelation_screen", cmd_screen, 2146),
Function.ability(215, "Effect_ParasiticBomb_screen", cmd_screen, 2542),
Function.ability(216, "Effect_PhotonOvercharge_screen", cmd_screen, 2162),
Function.ability(217, "Effect_PointDefenseDrone_screen", cmd_screen, 144),
Function.ability(218, "Effect_PsiStorm_screen", cmd_screen, 1036),
Function.ability(219, "Effect_PurificationNova_screen", cmd_screen, 2346),
Function.ability(220, "Effect_Repair_screen", cmd_screen, 3685),
Function.ability(221, "Effect_Repair_autocast", autocast, 3685),
Function.ability(222, "Effect_Repair_Mule_screen", cmd_screen, 78, 3685),
Function.ability(223, "Effect_Repair_Mule_autocast", autocast, 78, 3685),
Function.ability(224, "Effect_Repair_SCV_screen", cmd_screen, 316, 3685),
Function.ability(225, "Effect_Repair_SCV_autocast", autocast, 316, 3685),
Function.ability(226, "Effect_Salvage_quick", cmd_quick, 32),
Function.ability(227, "Effect_Scan_screen", cmd_screen, 399),
Function.ability(228, "Effect_SpawnChangeling_quick", cmd_quick, 181),
Function.ability(229, "Effect_SpawnLocusts_screen", cmd_screen, 2704),
Function.ability(230, "Effect_Spray_screen", cmd_screen, 3684),
Function.ability(231, "Effect_Spray_Protoss_screen", cmd_screen, 30, 3684),
Function.ability(232, "Effect_Spray_Terran_screen", cmd_screen, 26, 3684),
Function.ability(233, "Effect_Spray_Zerg_screen", cmd_screen, 28, 3684),
Function.ability(234, "Effect_Stim_quick", cmd_quick, 3675),
Function.ability(235, "Effect_Stim_Marauder_quick", cmd_quick, 253, 3675),
Function.ability(236, "Effect_Stim_Marauder_Redirect_quick", cmd_quick, 1684, 3675),
Function.ability(237, "Effect_Stim_Marine_quick", cmd_quick, 380, 3675),
Function.ability(238, "Effect_Stim_Marine_Redirect_quick", cmd_quick, 1683, 3675),
Function.ability(239, "Effect_SupplyDrop_screen", cmd_screen, 255),
Function.ability(240, "Effect_TacticalJump_screen", cmd_screen, 2358),
Function.ability(241, "Effect_TimeWarp_screen", cmd_screen, 2244),
Function.ability(242, "Effect_Transfusion_screen", cmd_screen, 1664),
Function.ability(243, "Effect_ViperConsume_screen", cmd_screen, 2073),
Function.ability(244, "Effect_VoidRayPrismaticAlignment_quick", cmd_quick, 2393),
Function.ability(245, "Effect_WidowMineAttack_screen", cmd_screen, 2099),
Function.ability(246, "Effect_WidowMineAttack_autocast", autocast, 2099),
Function.ability(247, "Effect_YamatoGun_screen", cmd_screen, 401),
Function.ability(248, "Hallucination_Adept_quick", cmd_quick, 2391),
Function.ability(249, "Hallucination_Archon_quick", cmd_quick, 146),
Function.ability(250, "Hallucination_Colossus_quick", cmd_quick, 148),
Function.ability(251, "Hallucination_Disruptor_quick", cmd_quick, 2389),
Function.ability(252, "Hallucination_HighTemplar_quick", cmd_quick, 150),
Function.ability(253, "Hallucination_Immortal_quick", cmd_quick, 152),
Function.ability(254, "Hallucination_Oracle_quick", cmd_quick, 2114),
Function.ability(255, "Hallucination_Phoenix_quick", cmd_quick, 154),
Function.ability(256, "Hallucination_Probe_quick", cmd_quick, 156),
Function.ability(257, "Hallucination_Stalker_quick", cmd_quick, 158),
Function.ability(258, "Hallucination_VoidRay_quick", cmd_quick, 160),
Function.ability(259, "Hallucination_WarpPrism_quick", cmd_quick, 162),
Function.ability(260, "Hallucination_Zealot_quick", cmd_quick, 164),
Function.ability(261, "Halt_quick", cmd_quick, 3660),
Function.ability(262, "Halt_Building_quick", cmd_quick, 315, 3660),
Function.ability(263, "Halt_TerranBuild_quick", cmd_quick, 348, 3660),
Function.ability(264, "Harvest_Gather_screen", cmd_screen, 3666),
Function.ability(265, "Harvest_Gather_Drone_screen", cmd_screen, 1183, 3666),
Function.ability(266, "Harvest_Gather_Mule_screen", cmd_screen, 166, 3666),
Function.ability(267, "Harvest_Gather_Probe_screen", cmd_screen, 298, 3666),
Function.ability(268, "Harvest_Gather_SCV_screen", cmd_screen, 295, 3666),
Function.ability(269, "Harvest_Return_quick", cmd_quick, 3667),
Function.ability(270, "Harvest_Return_Drone_quick", cmd_quick, 1184, 3667),
Function.ability(271, "Harvest_Return_Mule_quick", cmd_quick, 167, 3667),
Function.ability(272, "Harvest_Return_Probe_quick", cmd_quick, 299, 3667),
Function.ability(273, "Harvest_Return_SCV_quick", cmd_quick, 296, 3667),
Function.ability(274, "HoldPosition_quick", cmd_quick, 18),
Function.ability(275, "Land_screen", cmd_screen, 3678),
Function.ability(276, "Land_Barracks_screen", cmd_screen, 554, 3678),
Function.ability(277, "Land_CommandCenter_screen", cmd_screen, 419, 3678),
Function.ability(278, "Land_Factory_screen", cmd_screen, 520, 3678),
Function.ability(279, "Land_OrbitalCommand_screen", cmd_screen, 1524, 3678),
Function.ability(280, "Land_Starport_screen", cmd_screen, 522, 3678),
Function.ability(281, "Lift_quick", cmd_quick, 3679),
Function.ability(282, "Lift_Barracks_quick", cmd_quick, 452, 3679),
Function.ability(283, "Lift_CommandCenter_quick", cmd_quick, 417, 3679),
Function.ability(284, "Lift_Factory_quick", cmd_quick, 485, 3679),
Function.ability(285, "Lift_OrbitalCommand_quick", cmd_quick, 1522, 3679),
Function.ability(286, "Lift_Starport_quick", cmd_quick, 518, 3679),
Function.ability(287, "Load_screen", cmd_screen, 3668),
Function.ability(288, "Load_Bunker_screen", cmd_screen, 407, 3668),
Function.ability(289, "Load_Medivac_screen", cmd_screen, 394, 3668),
Function.ability(290, "Load_NydusNetwork_screen", cmd_screen, 1437, 3668),
Function.ability(291, "Load_NydusWorm_screen", cmd_screen, 2370, 3668),
Function.ability(292, "Load_Overlord_screen", cmd_screen, 1406, 3668),
Function.ability(293, "Load_WarpPrism_screen", cmd_screen, 911, 3668),
Function.ability(294, "LoadAll_quick", cmd_quick, 3663),
Function.ability(295, "LoadAll_CommandCenter_quick", cmd_quick, 416, 3663),
Function.ability(296, "Morph_Archon_quick", cmd_quick, 1766),
Function.ability(297, "Morph_BroodLord_quick", cmd_quick, 1372),
Function.ability(298, "Morph_Gateway_quick", cmd_quick, 1520),
Function.ability(299, "Morph_GreaterSpire_quick", cmd_quick, 1220),
Function.ability(300, "Morph_Hellbat_quick", cmd_quick, 1998),
Function.ability(301, "Morph_Hellion_quick", cmd_quick, 1978),
Function.ability(302, "Morph_Hive_quick", cmd_quick, 1218),
Function.ability(303, "Morph_Lair_quick", cmd_quick, 1216),
Function.ability(304, "Morph_LiberatorAAMode_quick", cmd_quick, 2560),
Function.ability(305, "Morph_LiberatorAGMode_screen", cmd_screen, 2558),
Function.ability(306, "Morph_Lurker_quick", cmd_quick, 2332),
Function.ability(307, "Morph_LurkerDen_quick", cmd_quick, 2112),
Function.ability(308, "Morph_Mothership_quick", cmd_quick, 1847),
Function.ability(309, "Morph_OrbitalCommand_quick", cmd_quick, 1516),
Function.ability(310, "Morph_OverlordTransport_quick", cmd_quick, 2708),
Function.ability(311, "Morph_Overseer_quick", cmd_quick, 1448),
Function.ability(312, "Morph_PlanetaryFortress_quick", cmd_quick, 1450),
Function.ability(313, "Morph_Ravager_quick", cmd_quick, 2330),
Function.ability(314, "Morph_Root_screen", cmd_screen, 3680),
Function.ability(315, "Morph_SpineCrawlerRoot_screen", cmd_screen, 1729, 3680),
Function.ability(316, "Morph_SporeCrawlerRoot_screen", cmd_screen, 1731, 3680),
Function.ability(317, "Morph_SiegeMode_quick", cmd_quick, 388),
Function.ability(318, "Morph_SupplyDepot_Lower_quick", cmd_quick, 556),
Function.ability(319, "Morph_SupplyDepot_Raise_quick", cmd_quick, 558),
Function.ability(320, "Morph_ThorExplosiveMode_quick", cmd_quick, 2364),
Function.ability(321, "Morph_ThorHighImpactMode_quick", cmd_quick, 2362),
Function.ability(322, "Morph_Unsiege_quick", cmd_quick, 390),
Function.ability(323, "Morph_Uproot_quick", cmd_quick, 3681),
Function.ability(324, "Morph_SpineCrawlerUproot_quick", cmd_quick, 1725, 3681),
Function.ability(325, "Morph_SporeCrawlerUproot_quick", cmd_quick, 1727, 3681),
Function.ability(326, "Morph_VikingAssaultMode_quick", cmd_quick, 403),
Function.ability(327, "Morph_VikingFighterMode_quick", cmd_quick, 405),
Function.ability(328, "Morph_WarpGate_quick", cmd_quick, 1518),
Function.ability(329, "Morph_WarpPrismPhasingMode_quick", cmd_quick, 1528),
Function.ability(330, "Morph_WarpPrismTransportMode_quick", cmd_quick, 1530),
Function.ability(331, "Move_screen", cmd_screen, 16),
Function.ability(332, "Move_minimap", cmd_minimap, 16),
Function.ability(333, "Patrol_screen", cmd_screen, 17),
Function.ability(334, "Patrol_minimap", cmd_minimap, 17),
Function.ability(335, "Rally_Units_screen", cmd_screen, 3673),
Function.ability(336, "Rally_Units_minimap", cmd_minimap, 3673),
Function.ability(337, "Rally_Building_screen", cmd_screen, 195, 3673),
Function.ability(338, "Rally_Building_minimap", cmd_minimap, 195, 3673),
Function.ability(339, "Rally_Hatchery_Units_screen", cmd_screen, 212, 3673),
Function.ability(340, "Rally_Hatchery_Units_minimap", cmd_minimap, 212, 3673),
Function.ability(341, "Rally_Morphing_Unit_screen", cmd_screen, 199, 3673),
Function.ability(342, "Rally_Morphing_Unit_minimap", cmd_minimap, 199, 3673),
Function.ability(343, "Rally_Workers_screen", cmd_screen, 3690),
Function.ability(344, "Rally_Workers_minimap", cmd_minimap, 3690),
Function.ability(345, "Rally_CommandCenter_screen", cmd_screen, 203, 3690),
Function.ability(346, "Rally_CommandCenter_minimap", cmd_minimap, 203, 3690),
Function.ability(347, "Rally_Hatchery_Workers_screen", cmd_screen, 211, 3690),
Function.ability(348, "Rally_Hatchery_Workers_minimap", cmd_minimap, 211, 3690),
Function.ability(349, "Rally_Nexus_screen", cmd_screen, 207, 3690),
Function.ability(350, "Rally_Nexus_minimap", cmd_minimap, 207, 3690),
Function.ability(351, "Research_AdeptResonatingGlaives_quick", cmd_quick, 1594),
Function.ability(352, "Research_AdvancedBallistics_quick", cmd_quick, 805),
Function.ability(353, "Research_BansheeCloakingField_quick", cmd_quick, 790),
Function.ability(354, "Research_BansheeHyperflightRotors_quick", cmd_quick, 799),
Function.ability(355, "Research_BattlecruiserWeaponRefit_quick", cmd_quick, 1532),
Function.ability(356, "Research_Blink_quick", cmd_quick, 1593),
Function.ability(357, "Research_Burrow_quick", cmd_quick, 1225),
Function.ability(358, "Research_CentrifugalHooks_quick", cmd_quick, 1482),
Function.ability(359, "Research_Charge_quick", cmd_quick, 1592),
Function.ability(360, "Research_ChitinousPlating_quick", cmd_quick, 265),
Function.ability(361, "Research_CombatShield_quick", cmd_quick, 731),
Function.ability(362, "Research_ConcussiveShells_quick", cmd_quick, 732),
Function.ability(363, "Research_DrillingClaws_quick", cmd_quick, 764),
Function.ability(364, "Research_ExtendedThermalLance_quick", cmd_quick, 1097),
Function.ability(365, "Research_GlialRegeneration_quick", cmd_quick, 216),
Function.ability(366, "Research_GraviticBooster_quick", cmd_quick, 1093),
Function.ability(367, "Research_GraviticDrive_quick", cmd_quick, 1094),
Function.ability(368, "Research_GroovedSpines_quick", cmd_quick, 1282),
Function.ability(369, "Research_HiSecAutoTracking_quick", cmd_quick, 650),
Function.ability(370, "Research_HighCapacityFuelTanks_quick", cmd_quick, 804),
Function.ability(371, "Research_InfernalPreigniter_quick", cmd_quick, 761),
Function.ability(372, "Research_InterceptorGravitonCatapult_quick", cmd_quick, 44),
Function.ability(373, "Research_MagFieldLaunchers_quick", cmd_quick, 766),
Function.ability(374, "Research_MuscularAugments_quick", cmd_quick, 1283),
Function.ability(375, "Research_NeosteelFrame_quick", cmd_quick, 655),
Function.ability(376, "Research_NeuralParasite_quick", cmd_quick, 1455),
Function.ability(377, "Research_PathogenGlands_quick", cmd_quick, 1454),
Function.ability(378, "Research_PersonalCloaking_quick", cmd_quick, 820),
Function.ability(379, "Research_PhoenixAnionPulseCrystals_quick", cmd_quick, 46),
Function.ability(380, "Research_PneumatizedCarapace_quick", cmd_quick, 1223),
Function.ability(381, "Research_ProtossAirArmor_quick", cmd_quick, 3692),
Function.ability(382, "Research_ProtossAirArmorLevel1_quick", cmd_quick, 1565, 3692),
Function.ability(383, "Research_ProtossAirArmorLevel2_quick", cmd_quick, 1566, 3692),
Function.ability(384, "Research_ProtossAirArmorLevel3_quick", cmd_quick, 1567, 3692),
Function.ability(385, "Research_ProtossAirWeapons_quick", cmd_quick, 3693),
Function.ability(386, "Research_ProtossAirWeaponsLevel1_quick", cmd_quick, 1562, 3693),
Function.ability(387, "Research_ProtossAirWeaponsLevel2_quick", cmd_quick, 1563, 3693),
Function.ability(388, "Research_ProtossAirWeaponsLevel3_quick", cmd_quick, 1564, 3693),
Function.ability(389, "Research_ProtossGroundArmor_quick", cmd_quick, 3694),
Function.ability(390, "Research_ProtossGroundArmorLevel1_quick", cmd_quick, 1065, 3694),
Function.ability(391, "Research_ProtossGroundArmorLevel2_quick", cmd_quick, 1066, 3694),
Function.ability(392, "Research_ProtossGroundArmorLevel3_quick", cmd_quick, 1067, 3694),
Function.ability(393, "Research_ProtossGroundWeapons_quick", cmd_quick, 3695),
Function.ability(394, "Research_ProtossGroundWeaponsLevel1_quick", cmd_quick, 1062, 3695),
Function.ability(395, "Research_ProtossGroundWeaponsLevel2_quick", cmd_quick, 1063, 3695),
Function.ability(396, "Research_ProtossGroundWeaponsLevel3_quick", cmd_quick, 1064, 3695),
Function.ability(397, "Research_ProtossShields_quick", cmd_quick, 3696),
Function.ability(398, "Research_ProtossShieldsLevel1_quick", cmd_quick, 1068, 3696),
Function.ability(399, "Research_ProtossShieldsLevel2_quick", cmd_quick, 1069, 3696),
Function.ability(400, "Research_ProtossShieldsLevel3_quick", cmd_quick, 1070, 3696),
Function.ability(401, "Research_PsiStorm_quick", cmd_quick, 1126),
Function.ability(402, "Research_RavenCorvidReactor_quick", cmd_quick, 793),
Function.ability(403, "Research_RavenRecalibratedExplosives_quick", cmd_quick, 803),
Function.ability(404, "Research_ShadowStrike_quick", cmd_quick, 2720),
Function.ability(405, "Research_Stimpack_quick", cmd_quick, 730),
Function.ability(406, "Research_TerranInfantryArmor_quick", cmd_quick, 3697),
Function.ability(407, "Research_TerranInfantryArmorLevel1_quick", cmd_quick, 656, 3697),
Function.ability(408, "Research_TerranInfantryArmorLevel2_quick", cmd_quick, 657, 3697),
Function.ability(409, "Research_TerranInfantryArmorLevel3_quick", cmd_quick, 658, 3697),
Function.ability(410, "Research_TerranInfantryWeapons_quick", cmd_quick, 3698),
Function.ability(411, "Research_TerranInfantryWeaponsLevel1_quick", cmd_quick, 652, 3698),
Function.ability(412, "Research_TerranInfantryWeaponsLevel2_quick", cmd_quick, 653, 3698),
Function.ability(413, "Research_TerranInfantryWeaponsLevel3_quick", cmd_quick, 654, 3698),
Function.ability(414, "Research_TerranShipWeapons_quick", cmd_quick, 3699),
Function.ability(415, "Research_TerranShipWeaponsLevel1_quick", cmd_quick, 861, 3699),
Function.ability(416, "Research_TerranShipWeaponsLevel2_quick", cmd_quick, 862, 3699),
Function.ability(417, "Research_TerranShipWeaponsLevel3_quick", cmd_quick, 863, 3699),
Function.ability(418, "Research_TerranStructureArmorUpgrade_quick", cmd_quick, 651),
Function.ability(419, "Research_TerranVehicleAndShipPlating_quick", cmd_quick, 3700),
Function.ability(420, "Research_TerranVehicleAndShipPlatingLevel1_quick", cmd_quick, 864, 3700),
Function.ability(421, "Research_TerranVehicleAndShipPlatingLevel2_quick", cmd_quick, 865, 3700),
Function.ability(422, "Research_TerranVehicleAndShipPlatingLevel3_quick", cmd_quick, 866, 3700),
Function.ability(423, "Research_TerranVehicleWeapons_quick", cmd_quick, 3701),
Function.ability(424, "Research_TerranVehicleWeaponsLevel1_quick", cmd_quick, 855, 3701),
Function.ability(425, "Research_TerranVehicleWeaponsLevel2_quick", cmd_quick, 856, 3701),
Function.ability(426, "Research_TerranVehicleWeaponsLevel3_quick", cmd_quick, 857, 3701),
Function.ability(427, "Research_TunnelingClaws_quick", cmd_quick, 217),
Function.ability(428, "Research_WarpGate_quick", cmd_quick, 1568),
Function.ability(429, "Research_ZergFlyerArmor_quick", cmd_quick, 3702),
Function.ability(430, "Research_ZergFlyerArmorLevel1_quick", cmd_quick, 1315, 3702),
Function.ability(431, "Research_ZergFlyerArmorLevel2_quick", cmd_quick, 1316, 3702),
Function.ability(432, "Research_ZergFlyerArmorLevel3_quick", cmd_quick, 1317, 3702),
Function.ability(433, "Research_ZergFlyerAttack_quick", cmd_quick, 3703),
Function.ability(434, "Research_ZergFlyerAttackLevel1_quick", cmd_quick, 1312, 3703),
Function.ability(435, "Research_ZergFlyerAttackLevel2_quick", cmd_quick, 1313, 3703),
Function.ability(436, "Research_ZergFlyerAttackLevel3_quick", cmd_quick, 1314, 3703),
Function.ability(437, "Research_ZergGroundArmor_quick", cmd_quick, 3704),
Function.ability(438, "Research_ZergGroundArmorLevel1_quick", cmd_quick, 1189, 3704),
Function.ability(439, "Research_ZergGroundArmorLevel2_quick", cmd_quick, 1190, 3704),
Function.ability(440, "Research_ZergGroundArmorLevel3_quick", cmd_quick, 1191, 3704),
Function.ability(441, "Research_ZergMeleeWeapons_quick", cmd_quick, 3705),
Function.ability(442, "Research_ZergMeleeWeaponsLevel1_quick", cmd_quick, 1186, 3705),
Function.ability(443, "Research_ZergMeleeWeaponsLevel2_quick", cmd_quick, 1187, 3705),
Function.ability(444, "Research_ZergMeleeWeaponsLevel3_quick", cmd_quick, 1188, 3705),
Function.ability(445, "Research_ZergMissileWeapons_quick", cmd_quick, 3706),
Function.ability(446, "Research_ZergMissileWeaponsLevel1_quick", cmd_quick, 1192, 3706),
Function.ability(447, "Research_ZergMissileWeaponsLevel2_quick", cmd_quick, 1193, 3706),
Function.ability(448, "Research_ZergMissileWeaponsLevel3_quick", cmd_quick, 1194, 3706),
Function.ability(449, "Research_ZerglingAdrenalGlands_quick", cmd_quick, 1252),
Function.ability(450, "Research_ZerglingMetabolicBoost_quick", cmd_quick, 1253),
Function.ability(451, "Smart_screen", cmd_screen, 1),
Function.ability(452, "Smart_minimap", cmd_minimap, 1),
Function.ability(453, "Stop_quick", cmd_quick, 3665),
Function.ability(454, "Stop_Building_quick", cmd_quick, 2057, 3665),
Function.ability(455, "Stop_Redirect_quick", cmd_quick, 1691, 3665),
Function.ability(456, "Stop_Stop_quick", cmd_quick, 4, 3665),
Function.ability(457, "Train_Adept_quick", cmd_quick, 922),
Function.ability(458, "Train_Baneling_quick", cmd_quick, 80),
Function.ability(459, "Train_Banshee_quick", cmd_quick, 621),
Function.ability(460, "Train_Battlecruiser_quick", cmd_quick, 623),
Function.ability(461, "Train_Carrier_quick", cmd_quick, 948),
Function.ability(462, "Train_Colossus_quick", cmd_quick, 978),
Function.ability(463, "Train_Corruptor_quick", cmd_quick, 1353),
Function.ability(464, "Train_Cyclone_quick", cmd_quick, 597),
Function.ability(465, "Train_DarkTemplar_quick", cmd_quick, 920),
Function.ability(466, "Train_Disruptor_quick", cmd_quick, 994),
Function.ability(467, "Train_Drone_quick", cmd_quick, 1342),
Function.ability(468, "Train_Ghost_quick", cmd_quick, 562),
Function.ability(469, "Train_Hellbat_quick", cmd_quick, 596),
Function.ability(470, "Train_Hellion_quick", cmd_quick, 595),
Function.ability(471, "Train_HighTemplar_quick", cmd_quick, 919),
Function.ability(472, "Train_Hydralisk_quick", cmd_quick, 1345),
Function.ability(473, "Train_Immortal_quick", cmd_quick, 979),
Function.ability(474, "Train_Infestor_quick", cmd_quick, 1352),
Function.ability(475, "Train_Liberator_quick", cmd_quick, 626),
Function.ability(476, "Train_Marauder_quick", cmd_quick, 563),
Function.ability(477, "Train_Marine_quick", cmd_quick, 560),
Function.ability(478, "Train_Medivac_quick", cmd_quick, 620),
Function.ability(479, "Train_MothershipCore_quick", cmd_quick, 1853),
Function.ability(480, "Train_Mutalisk_quick", cmd_quick, 1346),
Function.ability(481, "Train_Observer_quick", cmd_quick, 977),
Function.ability(482, "Train_Oracle_quick", cmd_quick, 954),
Function.ability(483, "Train_Overlord_quick", cmd_quick, 1344),
Function.ability(484, "Train_Phoenix_quick", cmd_quick, 946),
Function.ability(485, "Train_Probe_quick", cmd_quick, 1006),
Function.ability(486, "Train_Queen_quick", cmd_quick, 1632),
Function.ability(487, "Train_Raven_quick", cmd_quick, 622),
Function.ability(488, "Train_Reaper_quick", cmd_quick, 561),
Function.ability(489, "Train_Roach_quick", cmd_quick, 1351),
Function.ability(490, "Train_SCV_quick", cmd_quick, 524),
Function.ability(491, "Train_Sentry_quick", cmd_quick, 921),
Function.ability(492, "Train_SiegeTank_quick", cmd_quick, 591),
Function.ability(493, "Train_Stalker_quick", cmd_quick, 917),
Function.ability(494, "Train_SwarmHost_quick", cmd_quick, 1356),
Function.ability(495, "Train_Tempest_quick", cmd_quick, 955),
Function.ability(496, "Train_Thor_quick", cmd_quick, 594),
Function.ability(497, "Train_Ultralisk_quick", cmd_quick, 1348),
Function.ability(498, "Train_VikingFighter_quick", cmd_quick, 624),
Function.ability(499, "Train_Viper_quick", cmd_quick, 1354),
Function.ability(500, "Train_VoidRay_quick", cmd_quick, 950),
Function.ability(501, "Train_WarpPrism_quick", cmd_quick, 976),
Function.ability(502, "Train_WidowMine_quick", cmd_quick, 614),
Function.ability(503, "Train_Zealot_quick", cmd_quick, 916),
Function.ability(504, "Train_Zergling_quick", cmd_quick, 1343),
Function.ability(505, "TrainWarp_Adept_screen", cmd_screen, 1419),
Function.ability(506, "TrainWarp_DarkTemplar_screen", cmd_screen, 1417),
Function.ability(507, "TrainWarp_HighTemplar_screen", cmd_screen, 1416),
Function.ability(508, "TrainWarp_Sentry_screen", cmd_screen, 1418),
Function.ability(509, "TrainWarp_Stalker_screen", cmd_screen, 1414),
Function.ability(510, "TrainWarp_Zealot_screen", cmd_screen, 1413),
Function.ability(511, "UnloadAll_quick", cmd_quick, 3664),
Function.ability(512, "UnloadAll_Bunker_quick", cmd_quick, 408, 3664),
Function.ability(513, "UnloadAll_CommandCenter_quick", cmd_quick, 413, 3664),
Function.ability(514, "UnloadAll_NydasNetwork_quick", cmd_quick, 1438, 3664),
Function.ability(515, "UnloadAll_NydusWorm_quick", cmd_quick, 2371, 3664),
Function.ability(516, "UnloadAllAt_screen", cmd_screen, 3669),
Function.ability(517, "UnloadAllAt_minimap", cmd_minimap, 3669),
Function.ability(518, "UnloadAllAt_Medivac_screen", cmd_screen, 396, 3669),
Function.ability(519, "UnloadAllAt_Medivac_minimap", cmd_minimap, 396, 3669),
Function.ability(520, "UnloadAllAt_Overlord_screen", cmd_screen, 1408, 3669),
Function.ability(521, "UnloadAllAt_Overlord_minimap", cmd_minimap, 1408, 3669),
Function.ability(522, "UnloadAllAt_WarpPrism_screen", cmd_screen, 913, 3669),
Function.ability(523, "UnloadAllAt_WarpPrism_minimap", cmd_minimap, 913, 3669),
])
# pylint: enable=line-too-long
# Some indexes to support features.py and action conversion.
ABILITY_IDS = collections.defaultdict(set)  # {ability_id: {funcs}}
for func in FUNCTIONS:
  if func.ability_id >= 0:
    ABILITY_IDS[func.ability_id].add(func)
ABILITY_IDS = {k: frozenset(v) for k, v in six.iteritems(ABILITY_IDS)}
FUNCTIONS_AVAILABLE = {f.id: f for f in FUNCTIONS if f.avail_fn}
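# How these indexes are typically consumed (the ids below come from the
# FUNCTIONS table above):
#
#   FUNCTIONS.select_army.id       # -> 7
#   FUNCTIONS[7].avail_fn(obs)     # True iff obs.player_common.army_count > 0
#   ABILITY_IDS[3674]              # every function backed by ability 3674 (Attack)
#   7 in FUNCTIONS_AVAILABLE       # select_army defines an avail_fn, so True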
class FunctionCall(collections.namedtuple(
    "FunctionCall", ["function", "arguments"])):
  """Represents a function call action.

  Attributes:
    function: The function id, e.g. 2 for select_point.
    arguments: The list of arguments for that function, each being a list of
        ints. For select_point this could be: [[0], [23, 38]].
  """
  __slots__ = ()

  @classmethod
  def all_arguments(cls, function, arguments):
    """Helper function for creating `FunctionCall`s with `Arguments`.

    Args:
      function: The value to store for the action function.
      arguments: The values to store for the arguments of the action. Can
          either be an `Arguments` object, a `dict`, or an iterable. If a
          `dict` or an iterable is provided, the values will be unpacked into
          an `Arguments` object.

    Returns:
      A new `FunctionCall` instance.
    """
    if isinstance(arguments, dict):
      arguments = Arguments(**arguments)
    elif not isinstance(arguments, Arguments):
      arguments = Arguments(*arguments)
    return cls(function, arguments)
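# The plain constructor is the common path, matching the docstring above:
#
#   FunctionCall(2, [[0], [23, 38]])   # select_point: act 0 at screen (23, 38)
#
# `all_arguments` covers the less common case where a value for every argument
# type is supplied at once, as an `Arguments` tuple, dict, or iterable.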
class ValidActions(collections.namedtuple(
    "ValidActions", ["types", "functions"])):
  """The set of types and functions that are valid for an agent to use.

  Attributes:
    types: A namedtuple of the types that the functions require. Unlike TYPES
        above, this includes the sizes for screen and minimap.
    functions: A namedtuple of all the functions.
  """
  __slots__ = ()
13d4be5ff6607a768ff6f008bc6f55355c95eab1 | 3,209 | py | Python | pywick/meters/aucmeter.py | ashishpatel26/pywick | 1afffd1c21c2b188836d3599e802146182757bb5 | ["MIT"] | 2 stars | 2020-11-28T07:56:09.000Z | 2021-11-08T09:30:39.000Z
import numbers
from . import meter
import numpy as np
import torch
class AUCMeter(meter.Meter):
"""
The AUCMeter measures the area under the receiver-operating characteristic
(ROC) curve for binary classification problems. The area under the curve (AUC)
can be interpreted as the probability that, given a randomly selected positive
example and a randomly selected negative example, the positive example is
assigned a higher score by the classification model than the negative example.
The AUCMeter is designed to operate on one-dimensional Tensors `output`
and `target`, where (1) the `output` contains model output scores that ought to
be higher when the model is more convinced that the example should be positively
labeled, and smaller when the model believes the example should be negatively
labeled (for instance, the output of a sigmoid function); and (2) the `target`
contains only values 0 (for negative examples) and 1 (for positive examples).
"""
def __init__(self):
super(AUCMeter, self).__init__()
self.reset()
def reset(self):
self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
self.targets = torch.LongTensor(torch.LongStorage()).numpy()
def add(self, output, target):
if torch.is_tensor(output):
output = output.cpu().squeeze().numpy()
if torch.is_tensor(target):
target = target.cpu().squeeze().numpy()
elif isinstance(target, numbers.Number):
target = np.asarray([target])
assert np.ndim(output) == 1, \
'wrong output size (1D expected)'
assert np.ndim(target) == 1, \
'wrong target size (1D expected)'
assert output.shape[0] == target.shape[0], \
'number of outputs and targets does not match'
assert np.all(np.add(np.equal(target, 1), np.equal(target, 0))), \
'targets should be binary (0, 1)'
self.scores = np.append(self.scores, output)
self.targets = np.append(self.targets, target)
def value(self):
# case when the number of added elements is 0
if self.scores.shape[0] == 0:
return 0.5
# sorting the arrays
scores, sortind = torch.sort(torch.from_numpy(self.scores), dim=0, descending=True)
scores = scores.numpy()
sortind = sortind.numpy()
# creating the roc curve
tpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
fpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
for i in range(1, scores.size + 1):
if self.targets[sortind[i - 1]] == 1:
tpr[i] = tpr[i - 1] + 1
fpr[i] = fpr[i - 1]
else:
tpr[i] = tpr[i - 1]
fpr[i] = fpr[i - 1] + 1
tpr /= (self.targets.sum() * 1.0)
fpr /= ((self.targets - 1.0).sum() * -1.0)
# calculating area under curve using trapezoidal rule
n = tpr.shape[0]
h = fpr[1:n] - fpr[0:n - 1]
sum_h = np.zeros(fpr.shape)
sum_h[0:n - 1] = h
sum_h[1:n] += h
area = (sum_h * tpr).sum() / 2.0
return (area, tpr, fpr)
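# A minimal smoke test (illustrative, not part of pywick):
if __name__ == "__main__":
    meter = AUCMeter()
    meter.add(torch.tensor([0.9, 0.2, 0.8, 0.1]), torch.tensor([1, 0, 1, 0]))
    area, tpr, fpr = meter.value()
    print(area)  # 1.0: every positive example outscores every negative one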
13d60376951a923005fa671870c17c7889bfb96b | 6,140 | py | Python | pf/queue.py | PiRAT4/py-pf | 7ffdd0a283d4a36fc4c473433d5f79a84eeb5d31 | ["BSD-3-Clause"]
"""Classes to represent Packet Filter's queueing schedulers and statistics."""
import pf._struct
from pf._base import PFObject
from pf.constants import *
from pf._utils import rate2str
__all__ = ["ServiceCurve",
"FlowQueue",
"PFQueue",
"PFQueueStats"]
class ServiceCurve(PFObject):
""" """
_struct_type = pf._struct.pf_queue_scspec
def __init__(self, bandwidth, burst=0, time=0):
""" """
if isinstance(bandwidth, pf._struct.pf_queue_scspec):
self._from_struct(bandwidth)
else:
self.bandwidth = bandwidth
self.burst = burst
self.time = time
def _from_struct(self, sc):
""" """
self.bandwidth = self._get_bandwidth(sc.m2)
self.burst = self._get_bandwidth(sc.m1)
self.time = sc.d
def _to_struct(self):
""" """
sc = pf._struct.pf_queue_scspec()
if (isinstance(self.bandwidth, basestring) and
self.bandwidth.endswith("%")):
sc.m2.percent = int(self.bandwidth[:-1])
else:
sc.m2.absolute = self.bandwidth
if (isinstance(self.burst, basestring) and
self.burst.endswith("%")):
sc.m1.percent = int(self.burst[:-1])
else:
sc.m1.absolute = self.burst
sc.d = self.time
return sc
def _get_bandwidth(self, bw):
""" """
return "{}%".format(bw.percent) if bw.percent else bw.absolute
def _str_bandwidth(self, bw):
""" """
return bw if isinstance(bw, basestring) else rate2str(bw)
def _to_string(self):
""" """
s = self._str_bandwidth(self.bandwidth)
if self.time:
s += " burst {}".format(self._str_bandwidth(self.burst))
s += " for {.time}ms".format(self)
return s
class FlowQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queue_fqspec
def __init__(self, flows, quantum=0, target=0, interval=0):
""" """
if isinstance(flows, pf._struct.pf_queue_fqspec):
self._from_struct(flows)
else:
self.flows = flows
self.quantum = quantum
self.target = target * 1000000
self.interval = interval * 1000000
def _from_struct(self, fq):
""" """
self.flows = fq.flows
self.quantum = fq.quantum
self.target = fq.target
self.interval = fq.interval
def _to_struct(self):
""" """
fq = pf._struct.pf_queue_fqspec()
fq.flows = self.flows
fq.quantum = self.quantum
fq.target = self.target
fq.interval = self.interval
return fq
def _to_string(self):
""" """
s = "flows {.flows}".format(self)
if self.quantum:
s += " quantum {.quantum}".format(self)
if self.interval:
s += " interval {}ms".format(self.interval / 1000000)
if self.target:
s += " target {}ms".format(self.target / 1000000)
return s
class PFQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queuespec
def __init__(self, queue=None, **kw):
""" """
if isinstance(queue, basestring):
queue = pf._struct.pf_queuespec(qname=queue, qlimit=DEFAULT_QLIMIT)
elif queue is None:
queue = pf._struct.pf_queuespec()
super(PFQueue, self).__init__(queue, **kw)
self.stats = PFQueueStats()
def _from_struct(self, q):
""" """
self.qname = q.qname
self.parent = q.parent
self.ifname = q.ifname
self.flags = q.flags
self.qlimit = q.qlimit
self.qid = q.qid
self.parent_qid = q.parent_qid
self.realtime = ServiceCurve(q.realtime)
self.linkshare = ServiceCurve(q.linkshare)
self.upperlimit = ServiceCurve(q.upperlimit)
self.flowqueue = FlowQueue(q.flowqueue)
def _to_struct(self):
""" """
q = pf._struct.pf_queuespec()
q.qname = self.qname
q.parent = self.parent
q.ifname = self.ifname
q.flags = self.flags
q.qlimit = self.qlimit
q.qid = self.qid
q.parent_qid = self.parent_qid
q.realtime = self.realtime._to_struct()
q.linkshare = self.linkshare._to_struct()
q.upperlimit = self.upperlimit._to_struct()
q.flowqueue = self.flowqueue._to_struct()
return q
def _to_string(self):
""" """
s = "queue {.qname}".format(self)
if self.parent and not self.parent.startswith("_"):
s += " parent {.parent}".format(self)
elif self.ifname:
s += " on {.ifname}".format(self)
if self.flags & PFQS_FLOWQUEUE:
s += " {.flowqueue}".format(self)
if self.linkshare.bandwidth or self.linkshare.burst:
s += " bandwidth {}".format(self.linkshare)
if self.realtime.bandwidth:
s += ", min {}".format(self.realtime)
if self.upperlimit.bandwidth:
s += ", max {}".format(self.upperlimit)
if self.flags & PFQS_DEFAULT:
s += " default"
if self.qlimit:
s += " qlimit {.qlimit}".format(self)
return s
class PFQueueStats(PFObject):
""" """
_struct_type = pf._struct.hfsc_class_stats
def __init__(self, stats=None):
""" """
if stats is None:
stats = pf._struct.hfsc_class_stats()
super(PFQueueStats, self).__init__(stats)
def _from_struct(self, s):
""" """
self.qlength = s.qlength
self.qlimit = s.qlimit
self.packets = (s.xmit_cnt.packets, s.drop_cnt.packets)
self.bytes = (s.xmit_cnt.bytes, s.drop_cnt.bytes)
def _to_string(self):
""" """
s = " [ pkts: {0.packets[0]:10} bytes: {0.bytes[0]:10} " + \
"dropped pkts: {0.packets[1]:6} bytes: {0.bytes[1]:6} ]\n" + \
" [ qlength: {0.qlength:3}/{0.qlimit:3} ]"
return s.format(self)
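# Illustrative usage (a sketch: py-pf drives OpenBSD's pf(4), and the
# isinstance checks against `basestring` above assume a Python 2 runtime):
if __name__ == "__main__":
    sc = ServiceCurve(bandwidth="50%", burst="75%", time=2000)
    print(sc._to_string())   # -> 50% burst 75% for 2000ms
    fq = FlowQueue(flows=1024, quantum=300, target=7, interval=100)
    print(fq._to_string())   # -> flows 1024 quantum 300 interval 100ms target 7ms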
13d981ac5c1effe03cfb8f11663b3250b5130bd2 | 4,546 | py | Python | dumpcode/npzbdt.py | gkfthddk/keras | 46d96c65d69c39df298800336bbb4d867a2561fb | ["MIT"]
import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import pandas as pd
import argparse
from datetime import datetime
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="test_",help='save name')
parser.add_argument("--network",type=str,default="rnn",help='network name on symbols/')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--epochs",type=int,default=10,help='num epochs')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1,help='end ratio')
parser.add_argument("--unscale",type=int,default=0,help='end ratio')
args=parser.parse_args()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
params = {
'max_depth': sts.randint(1,6),
'learning_rate': sts.uniform(0.0010,0.500),
'n_estimators': sts.randint(10,101)
}
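# RandomizedSearchCV draws each trial's hyperparameters from these scipy
# distributions: randint(1, 6) samples max_depth uniformly from {1, ..., 5}
# (the upper bound is exclusive), uniform(0.0010, 0.500) samples learning_rate
# from [0.001, 0.501], and randint(10, 101) samples n_estimators from {10, ..., 100}.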
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_hist")
if(args.isz==1):
if(args.etabin==1):
loaded=np.load("zqmixed{}pteta.npz".format(args.pt))
print("zqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("zqmixed{}pt.npz".format(args.pt))
print("zqmixed{}pt.npz".format(args.pt))
elif(args.isz==-1):
if(args.etabin==1):
loaded=np.load("qqmixed{}pteta.npz".format(args.pt))
print("qqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("qqmixed{}pt.npz".format(args.pt))
print("qqmixed{}pt.npz".format(args.pt))
elif(args.isz==0):
if(args.etabin==1):
if(args.unscale==1):
loaded=np.load("unscalemixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("mixed{}pteta.npz".format(args.pt))
print("etabin 1")
else:
if(args.unscale==1):
loaded=np.load("unscalemixed{}pt.npz".format(args.pt))
else:
loaded=np.load("mixed{}pt.npz".format(args.pt))
print("etabin 2.4")
data=loaded["bdtset"][:,:5]
label=loaded["label"]
line=int(30000)
endline=int(40000)
if(len(label)<40000):
line=int(len(label)*3./4.)
endline=len(label)
X=data[0:line]
vx=data[line:endline]
Y=label[0:line]
vy=label[line:endline]
Y=np.array(Y)[:,0]
folds = 3
param_comb = 100
skf = KFold(n_splits=folds, shuffle = True, random_state = 173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb, scoring='neg_log_loss', n_jobs=6, cv=skf.split(X, Y), verbose=3, random_state=173)  # scikit-learn's scorer is 'neg_log_loss'; the 'log_loss' name was renamed in 0.18
# Here we go
start_time = timer(None) # timing starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
print('\n Best (negative) log-loss for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_)  # the "* 2 - 1" gini rescaling only makes sense for AUC-based scores
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.format(args.save,args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".format(args.pt))
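# The held-out split (vx, vy) built above is never evaluated; a natural
# follow-up using the roc_auc_score imported at the top (a sketch):
vy = np.array(vy)[:, 0]  # same label column as used for Y
print('\n Validation AUC: {}'.format(
    roc_auc_score(vy, random_search.best_estimator_.predict_proba(vx)[:, 1])))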
13dbe7e355868c585eddb2ca7609fa83a2860aed | 12,384 | py | Python | rlenv/StockTradingEnv0.py | watchsea/RL-Stock | 53bd13a1bd1760e082c6db2ad9b010adbc3a767b | ["MIT"]
import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_VOLUME = 1000e8
MAX_AMOUNT = 3e10
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 20000
MAX_DAY_CHANGE = 1
INITIAL_ACCOUNT_BALANCE = 10000
DATA_HIS_PERIOD = 5
# position constant
FLAT = 0 # no position
LONG = 1 # buy position
SHORT = 2 # sell position
# action constant
HOLD = 0
BUY = 1
SELL = 2
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self, df,show_trade=True):
super(StockTradingEnv, self).__init__()
# show the trade info
self.show_trade = show_trade
self.actions=["FLAT","LONG","SHORT"]
self.fee = 0.0005  # brokerage commission
self.df = df
self.closeprices = self.df['close'].values
self.reward_range = (0, MAX_ACCOUNT_BALANCE)
# Actions of the format Buy x%, Sell x%, Hold, etc.
self.action_space = spaces.Discrete(len(self.actions))
# self.action_space = spaces.Box(
# low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)
# Prices contains the OHCL values for the last five prices
self.observation_space = spaces.Box(
low=0, high=1, shape=(DATA_HIS_PERIOD+1,6), dtype=np.float16)
self.history = []
def _next_observation(self):
obs = np.array([
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'open'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'low'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'volume'].values / MAX_NUM_SHARES,
])
# Append additional data and scale each value to between 0-1
obs = np.append(obs,[[self.balance / MAX_ACCOUNT_BALANCE,
self.max_net_worth / MAX_ACCOUNT_BALANCE,
self.shares_held / MAX_NUM_SHARES,
self.cost_basis / MAX_SHARE_PRICE,
self.total_shares_sold / MAX_NUM_SHARES,
self.total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]],axis=0)
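# obs now has shape (DATA_HIS_PERIOD + 1, 6): five rows of normalized OHLCV
# history (6 ticks each, current step inclusive) plus one row of account
# features, matching the observation_space declared in __init__.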
return obs
def _take_action(self, action):
# Set the current price to a random price within the time step
# current_price = random.uniform(
# self.df.loc[self.current_step, "open"], self.df.loc[self.current_step, "close"])
# Set the current price to the last close price
self.close_price = self.df.loc[self.current_step,"close"]
amount = 0.5  # retained from the old version: the fraction of balance committed per trade
# action comes from the agent
# 1 buy, 2 sell, 0 hold
# single position can be opened per trade
# valid action sequence would be
# LONG : buy - hold - hold - sell
# SHORT : sell - hold - hold - buy
# invalid action sequence is just considered hold
# (e.g.) "buy - buy" would be considered "buy - hold"
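# e.g. from FLAT, a BUY opens a LONG position and records the entry price;
# a second BUY while already LONG matches neither branch below, so it
# degrades to HOLD; a SELL while LONG closes back to FLAT and pays out.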
self.action = HOLD #hold
if action == BUY: #buy
if self.position == FLAT: # if previous position was flat
self.position = LONG #update position to long
self.action = BUY # record action as buy
self.entry_price = self.close_price
# Buy amount % of balance in shares
total_possible = int(self.balance / self.close_price)
shares_bought = int(total_possible * amount)//100 *100
self.krw_balance = shares_bought * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.shares_held = shares_bought
self.balance -= self.krw_balance-commission
#self.cost_basis = (prev_cost + additional_cost) / (self.shares_held + shares_bought)
elif self.position == SHORT: # if previous position was short
self.position = FLAT # update position to flat
self.action = BUY # record action as buy
self.exit_price = self.close_price
self.reward += ((self.entry_price - self.exit_price) / self.exit_price + 1) * (
1 - self.fee) ** 2 - 1 # calculate reward
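# e.g. entry 110, exit 100 with fee=0.0005: (10/100 + 1) * 0.9995**2 - 1
# ≈ 0.0989, i.e. the 10% short-side gain less commission on both legs.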
#self.krw_balance = self.krw_balance * (1.0 + self.reward) # evaluate cumulative return in krw-won
self.balance += round(self.krw_balance * (1.0 + self.reward),2) # calculate the total balance
self.n_short += 1 # record number of short
self.total_shares_sold += self.shares_held
self.total_sales_value += self.shares_held * self.close_price
self.entry_price = 0 # clear entry price
self.shares_held = 0 # clear the shares_
elif action == SELL:
if self.position == FLAT:
self.position = SHORT
self.action = SELL
self.entry_price = self.close_price
# Sell amount % of shares held
total_possible = int(self.balance / self.close_price)
self.shares_held = int(total_possible * amount)//100 *100
self.krw_balance = self.shares_held * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.balance -= self.krw_balance-commission
elif self.position == LONG:
self.position = FLAT
self.action = SELL
self.exit_price = self.close_price
self.reward += ((self.exit_price - self.entry_price) / self.entry_price + 1) * (1 - self.fee) ** 2 - 1
#self.krw_balance = self.krw_balance * (1.0 + self.reward)
self.balance += round(self.krw_balance*(1.0+self.reward),2)
self.n_long += 1
self.total_shares_buy += self.shares_held
self.total_buys_value += self.shares_held * self.close_price
self.shares_held = 0
self.entry_price = 0
# [coin + krw_won] total value evaluated in krw won
if (self.position == LONG):
temp_reward = ((self.close_price - self.entry_price) / self.entry_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
elif (self.position == SHORT):
temp_reward = ((self.entry_price - self.close_price) / self.close_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
else:
temp_reward = 0
new_portfolio = 0
self.net_worth = self.balance + new_portfolio
if self.net_worth > self.max_net_worth:
self.max_net_worth = self.net_worth
if self.shares_held == 0:
self.cost_basis = 0
self.portfolio = round(new_portfolio,2)
def step(self, action):
# Execute one time step within the environment
self._take_action(action)
done = False
self.current_step += 1
delay_modifier = (self.current_step / MAX_STEPS)
# profits
#reward = self.net_worth - INITIAL_ACCOUNT_BALANCE
#reward = 1 if reward > 0 else -100
if self.net_worth <= 0:
done = True
if self.current_step > len(self.df.loc[:, 'open'].values) - 1:
self.current_step = DATA_HIS_PERIOD # loop training
# when loop training, then clear the history
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward = 0
self.portfolio = 0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy = 0
self.total_buys_value = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long = 0
self.n_short = 0
self.history = []
# done = True
if self.show_trade and self.current_step % 1 == 0: # "% 1 == 0" is always true, so this logs every tick
print("Tick: {0}/ Portfolio (krw-won): {1}, net worth: {2}".format(self.current_step, self.portfolio, self.net_worth))
print("Long: {0}/ Short: {1}".format(self.n_long, self.n_short))
# save the history data
self.history.append([
self.action,
self.position,
self.current_step,
self.close_price,
self.krw_balance,
self.balance,
self.max_net_worth,
self.shares_held,
self.portfolio,
self.total_shares_buy,
self.total_buys_value,
self.total_shares_sold,
self.total_sales_value])
#self.history.append((self.action, self.current_step, self.closingPrice, self.portfolio, self.reward))
obs = self._next_observation()
if self.current_step > self.df.shape[0] - 1:
done = True # fix: set the local flag that is actually returned (self.done was never read)
self.reward = self.get_profit() # return reward at end of the game
return obs, self.net_worth, done, {'portfolio': np.array([self.portfolio]),
"history": self.history,
"n_trades": {'long': self.n_long, 'short': self.n_short}}
#return obs, reward, done, {}
def get_profit(self):
if(self.position == LONG):
profit = ((self.close_price - self.entry_price)/self.entry_price + 1)*(1-self.fee)**2 - 1
elif(self.position == SHORT):
profit = ((self.entry_price - self.close_price)/self.close_price + 1)*(1-self.fee)**2 - 1
else:
profit = 0
return profit
def reset(self, new_df=None):
# Reset the state of the environment to an initial state
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward = 0
self.portfolio = 0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy = 0
self.total_buys_value = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long = 0
self.n_short = 0
self.history = []
# pass test dataset to environment
if new_df is not None: # truth-testing a whole DataFrame raises ValueError
self.df = new_df
# Set the current step to a random point within the data frame
# self.current_step = random.randint(
# 0, len(self.df.loc[:, 'open'].values) - 6)
# the observation include the given period history data
self.current_step = DATA_HIS_PERIOD #random.randint(DATA_HIS_PERIOD,len(self.df.loc[:,'open'].values)-1)
# for i in range(DATA_HIS_PERIOD):
# self.history.append([0.0,0.0,0.0,0.0,0.0,0.0])
return self._next_observation()
def render(self, mode='human', close=False):
# Render the environment to the screen
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
print('-'*30)
print(f'Step: {self.current_step}')
print(f'Balance: {self.balance}')
print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
print(f'Profit: {profit}')
return profit
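# Hedged usage sketch (added): a minimal loop driving the environment above.
# `StockTradingEnv` and "prices.csv" are hypothetical names -- only
# reset()/step()/render() appear in this excerpt, so adapt to the real constructor.
import pandas as pd

df = pd.read_csv("prices.csv")  # hypothetical OHLCV data with open/high/low/close/volume columns
env = StockTradingEnv(df)       # hypothetical constructor taking the DataFrame
obs = env.reset()
done = False
while not done:
    action = env.action_space.sample()  # stand-in for a trained agent's policy
    obs, reward, done, info = env.step(action)
profit = env.render()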
| 41.69697
| 127
| 0.581476
| 1,571
| 12,384
| 4.385742
| 0.144494
| 0.039187
| 0.054427
| 0.028737
| 0.437881
| 0.38389
| 0.324238
| 0.296226
| 0.264296
| 0.235414
| 0
| 0.02345
| 0.321625
| 12,384
| 296
| 128
| 41.837838
| 0.796691
| 0.202196
| 0
| 0.368932
| 0
| 0.004854
| 0.047221
| 0.007241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033981
| false
| 0
| 0.029126
| 0
| 0.097087
| 0.043689
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13dd331f73864377a1d0952e862e552f50ab90bf
| 2,053
|
py
|
Python
|
processing/manager.py
|
mrfleap/us-population-heatmap
|
e3f1c5d8294716ff491c7b8b40adb77929f9aeee
|
[
"MIT"
] | null | null | null |
processing/manager.py
|
mrfleap/us-population-heatmap
|
e3f1c5d8294716ff491c7b8b40adb77929f9aeee
|
[
"MIT"
] | null | null | null |
processing/manager.py
|
mrfleap/us-population-heatmap
|
e3f1c5d8294716ff491c7b8b40adb77929f9aeee
|
[
"MIT"
] | null | null | null |
import json
import os
import pathlib
import time
from tqdm import tqdm
from aggregator import aggregate
from download import DOWNLOAD_PATH, download_files, unzip_files
from tqdm.contrib.concurrent import process_map
def main():
start = time.time()
# print("Downloading files...")
# download_files()
# print("Unzipping shapefiles...")
# unzip_files()
state_ids = []
for file in os.listdir(DOWNLOAD_PATH):
file_path = os.path.join(DOWNLOAD_PATH, file)
if os.path.isfile(file_path) and pathlib.Path(file_path).suffix == ".txt":
state_ids.append(file[file.index("BG") + 2 : file.index(".")])
# print("Computing population JSON heatmaps...")
# compute_json_heatmaps(state_ids)
print("Aggregating JSON files into one...")
aggregate_json_files(state_ids)
end = time.time()
print(f"Done in {(end - start):0.2f}s")
def compute_json_heatmaps(state_ids):
data_files = []
for state_id in state_ids:
data_files.append(
(
state_id,
os.path.join(DOWNLOAD_PATH, f"CenPop2020_Mean_BG{state_id}.txt"),
os.path.join(DOWNLOAD_PATH, f"tl_2020_{state_id}_bg", f"tl_2020_{state_id}_bg.shp"),
)
)
process_map(create_json_for_state, data_files, max_workers=4)
def aggregate_json_files(state_ids):
with open("public/data/pop.json", "w") as f:
f.write("""{"type": "FeatureCollection", "features": [""")
# state_ids = state_ids[:2]
features = []
for state_id in tqdm(state_ids):
geojson = None
with open(os.path.join(DOWNLOAD_PATH, f"{state_id}.json")) as f:
geojson = json.load(f)
with open("public/data/pop.json", "a") as f:
f.write(json.dumps(geojson["features"])[1:-1] + ("," if state_id != state_ids[-1] else ""))
with open("public/data/pop.json", "a") as f:
f.write("]}")
def create_json_for_state(args):
return aggregate(*args, hide_output=True)
if __name__ == "__main__":
main()
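# Hedged alternative (added): the append-based stitching above builds the
# FeatureCollection string by hand; when the data fits in memory, collecting
# the features first and dumping once is simpler. Paths mirror the ones above.
def aggregate_json_files_in_memory(state_ids):
    features = []
    for state_id in state_ids:
        with open(os.path.join(DOWNLOAD_PATH, f"{state_id}.json")) as f:
            features.extend(json.load(f)["features"])
    with open("public/data/pop.json", "w") as f:
        json.dump({"type": "FeatureCollection", "features": features}, f)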
| 27.013158
| 103
| 0.632245
| 283
| 2,053
| 4.342756
| 0.310954
| 0.071603
| 0.032547
| 0.058584
| 0.262815
| 0.158666
| 0.056957
| 0.056957
| 0.056957
| 0.056957
| 0
| 0.012579
| 0.225524
| 2,053
| 75
| 104
| 27.373333
| 0.760377
| 0.096931
| 0
| 0.043478
| 0
| 0
| 0.156013
| 0.042254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.173913
| 0.021739
| 0.282609
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13ddb1c02b1ed46330b061450969494a2a48af52
| 794
|
py
|
Python
|
gen_data.py
|
kshoji6011/vehicleai
|
135de71cce65f4a61b42c49493ed356f2d512d6c
|
[
"MIT"
] | null | null | null |
gen_data.py
|
kshoji6011/vehicleai
|
135de71cce65f4a61b42c49493ed356f2d512d6c
|
[
"MIT"
] | null | null | null |
gen_data.py
|
kshoji6011/vehicleai
|
135de71cce65f4a61b42c49493ed356f2d512d6c
|
[
"MIT"
] | null | null | null |
from PIL import Image
import os, glob
import numpy as np
from sklearn import model_selection
classes = ["car", "bycycle", "motorcycle", "pedestrian"]
num_class = len(classes)
image_size = 50
# Load the images
X = []
Y = []
for index, classlabel in enumerate(classes):
photos_dir = "./" + classlabel
files = glob.glob(photos_dir + "/*.jpg")
for i, file in enumerate(files):
if i >=237: break
image = Image.open(file)
image = image.convert("RGB")
image = image.resize((image_size, image_size))
data = np.asarray(image) / 255
X.append(data)
Y.append(index)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y)
xy = (X_train, X_test, y_train, y_test)
np.save("./vehicle.npy", xy)
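# Hedged follow-up (added): `xy` is a tuple of arrays with different shapes, so
# NumPy pickles it as an object array (newer NumPy may require building it with
# dtype=object explicitly). Loading it back therefore needs allow_pickle=True.
X_train, X_test, y_train, y_test = np.load("./vehicle.npy", allow_pickle=True)
print(X_train.shape, y_train.shape)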
| 24.8125
| 73
| 0.644836
| 119
| 794
| 4.151261
| 0.462185
| 0.054656
| 0.02834
| 0.044534
| 0.089069
| 0.089069
| 0.089069
| 0.089069
| 0
| 0
| 0
| 0.012821
| 0.214106
| 794
| 32
| 74
| 24.8125
| 0.778846
| 0.008816
| 0
| 0
| 0
| 0
| 0.068702
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13de3e4f691cb4b36cd9750b30e5106c02f72fb9
| 724
|
py
|
Python
|
app/main.py
|
immortel32/Sword_Sorcery_Story_Generator
|
7978dfc335813362b2d94c455b970f58421123c8
|
[
"MIT"
] | 2
|
2021-04-01T00:50:22.000Z
|
2021-04-01T02:18:45.000Z
|
app/main.py
|
immortel32/Sword_Sorcery_Story_Generator
|
7978dfc335813362b2d94c455b970f58421123c8
|
[
"MIT"
] | 1
|
2021-04-01T21:39:44.000Z
|
2021-04-01T21:39:44.000Z
|
app/main.py
|
immortel32/Sword_Sorcery_Story_Generator
|
7978dfc335813362b2d94c455b970f58421123c8
|
[
"MIT"
] | 1
|
2021-04-01T01:03:33.000Z
|
2021-04-01T01:03:33.000Z
|
from services import waypoint_scenarios, quest_scenarios
from services.build_campaign import Campaign
from log_setup import log
if __name__ == "__main__":
number_waypoint_scenario = waypoint_scenarios.get_number_of_waypoint_scenarios()
log.info(f"We have {number_waypoint_scenario} waypoint available")
number_quests_available = quest_scenarios.get_number_of_quest_scenarios()
log.info(f"We have {number_quests_available} quests available")
random_waypoint_scenario = waypoint_scenarios.get_random_scenario(10)
random_quest = quest_scenarios.get_random_scenario(1)
campaign = Campaign()
campaign.build_campaign(
waypoint_list=random_waypoint_scenario, quest_list=random_quest
)
| 42.588235
| 84
| 0.809392
| 92
| 724
| 5.880435
| 0.293478
| 0.125693
| 0.133087
| 0.110906
| 0.240296
| 0.107209
| 0.107209
| 0
| 0
| 0
| 0
| 0.004747
| 0.127072
| 724
| 16
| 85
| 45.25
| 0.851266
| 0
| 0
| 0
| 0
| 0
| 0.153315
| 0.070442
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13defa0932ab990d6739afd72d573b29bcd8a6e3
| 2,473
|
py
|
Python
|
distill.py
|
Lukeming-tsinghua/Interpretable-NN-for-IBD-diagnosis
|
5fb0fae774e010cdd6b63ff487a4528f0397647d
|
[
"MIT"
] | null | null | null |
distill.py
|
Lukeming-tsinghua/Interpretable-NN-for-IBD-diagnosis
|
5fb0fae774e010cdd6b63ff487a4528f0397647d
|
[
"MIT"
] | null | null | null |
distill.py
|
Lukeming-tsinghua/Interpretable-NN-for-IBD-diagnosis
|
5fb0fae774e010cdd6b63ff487a4528f0397647d
|
[
"MIT"
] | null | null | null |
import os
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report
from torch.optim import Adam
from tqdm import tqdm
from data import DataIteratorDistill
from loss import FocalLoss
from model import CNN
from torchtext import data, vocab
from args import get_args, print_args
from config import ConfigBinaryClassification
from config import ConfigBinaryClassificationDistill
from config import ConfigTripleClassification
if __name__ == "__main__":
args = get_args()
print_args(args)
if args.class_num == 2:
cfg = ConfigBinaryClassificationDistill()
elif args.class_num == 3:
cfg = ConfigTripleClassification()
else:
raise ValueError("wrong class num")
device = torch.device("cuda:%d" % args.cuda)
Data = DataIteratorDistill(config=cfg, train_batchsize=args.batch_size)
model = torch.load("checkpoints/CNN-29", map_location=device)
optimizer = Adam(model.parameters(), lr=args.lr)
criterion = FocalLoss(classes=args.class_num, device=device).to(device)
criterion_kv = nn.KLDivLoss().to(device)
alpha = 0.2
T = 2
for epoch in range(args.epoch_num):
print(epoch)
for sample in Data.train_iter:
model.train()
optimizer.zero_grad()
output = model(sample.text.permute(1, 0).to(device))
loss_f = criterion(output, sample.label.to(device))
output = F.log_softmax(output/T, 1)
score = torch.cat((sample.pred0.unsqueeze(1).to(device),
sample.pred1.unsqueeze(1).to(device)), dim=1)
score = F.softmax(score/T,1)
loss_kv = criterion_kv(output, score.to(device)) * T * T
loss = alpha * loss_f + (1 - alpha) * loss_kv
#print(loss_f.item(), loss_kv.item())
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
preds = []
labels = []
for sample in Data.valid_iter:
output = model(sample.text.permute(1, 0).to(device))
p = output.argmax(1).cpu().tolist()
l = sample.label.tolist()
preds += p
labels += l
report = classification_report(preds, labels)
print(report)
torch.save(model, os.path.join(args.save_dir, args.save_config + str(epoch)))
| 34.347222
| 89
| 0.632835
| 306
| 2,473
| 4.996732
| 0.352941
| 0.041857
| 0.031393
| 0.020929
| 0.049706
| 0.049706
| 0.049706
| 0.049706
| 0.049706
| 0
| 0
| 0.010977
| 0.263243
| 2,473
| 71
| 90
| 34.830986
| 0.828211
| 0.014557
| 0
| 0.032787
| 0
| 0
| 0.019704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.262295
| 0
| 0.262295
| 0.065574
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13df4c0e986f7c76ecd11c6a6721e985d305104d
| 7,597
|
py
|
Python
|
tests/TALTests/HTMLTests/TALAttributesTestCases.py
|
janbrohl/SimpleTAL
|
f5a3ddd9a74cf9af7356bb431513e3534717802d
|
[
"BSD-3-Clause"
] | 5
|
2015-11-20T12:17:04.000Z
|
2021-03-19T13:49:33.000Z
|
tests/TALTests/HTMLTests/TALAttributesTestCases.py
|
mar10/SimpleTAL
|
f5a3ddd9a74cf9af7356bb431513e3534717802d
|
[
"BSD-3-Clause"
] | 5
|
2015-09-20T12:55:23.000Z
|
2018-05-12T10:34:20.000Z
|
tests/TALTests/HTMLTests/TALAttributesTestCases.py
|
mar10/SimpleTAL
|
f5a3ddd9a74cf9af7356bb431513e3534717802d
|
[
"BSD-3-Clause"
] | 1
|
2022-01-24T13:37:38.000Z
|
2022-01-24T13:37:38.000Z
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2016, Jan Brohl <janbrohl@t-online.de>
# All rights reserved.
# See LICENSE.txt
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# If you make any bug fixes or feature enhancements please let me know!
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
class TALAttributesTestCases(unittest.TestCase):
def setUp(self):
self.context = simpleTALES.Context()
self.context.addGlobal('test', 'testing')
self.context.addGlobal('link', 'www.owlfish.com')
self.context.addGlobal('needsQuoting', """Does "this" work?""")
self.context.addGlobal('number', 5)
self.context.addGlobal('uniQuote', 'Does "this" work?')
self.context.addGlobal('anotherdefault', {
'inhere': simpleTALES.DEFAULTVALUE
})
def _runTest_(self, txt, result, errMsg="Error"):
template = simpleTAL.compileHTMLTemplate(txt)
file = io.StringIO()
template.expand(self.context, file)
realResult = file.getvalue()
self.assertEqual(
realResult, result,
"%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
% (errMsg, txt, realResult, result, template))
def testAddingAnAttribute(self):
self._runTest_(
'<html tal:attributes="link link" href="owlfish.com">Hello</html>',
'<html link="www.owlfish.com" href="owlfish.com">Hello</html>',
"Addition of attribute 'link' failed.")
def testRemovingAnAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href nothing" href="owlfish.com">Hello</html>',
'<html class="test">Hello</html>',
"Removal of attribute 'href' failed.")
def testDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href default" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testAnotherDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href anotherdefault/inhere" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testMultipleAttributes(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default;class nothing;new test" href="owlfish.com">Hello</html>',
'<html new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once failed.")
def testMultipleAttributesSpace(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default ; class string:Hello there; new test" href="owlfish.com">Hello</html>',
'<html class="Hello there" new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testMultipleAttributesEscaped(self):
self._runTest_(
'<html old="still " here" class="test" tal:attributes="href default ; class string: Semi-colon;;test;new test " href="owlfish.com">Hello</html>',
'''<html class="Semi-colon;test" new="testing" old='still " here' href="owlfish.com">Hello</html>''',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href needsQuoting">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testNumberAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href number">Hello</html>',
"""<html href="5" existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testUniQuoteAttributeEscaping(self):  # renamed: a duplicate testNumberAttributeEscaping would shadow the first definition
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href uniQuote">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testOriginalAttributes(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt='"Testing"' existingatt='"Testing"'>"Testing"</html>""",
"Accessing existing attributes failed.")
def testMultipleOriginalAttributes(self):
self._runTest_(
'<html one="Value One" two="Value two" three="Value three" tal:attributes="four attrs/three" tal:content="attrs/one">Hello</html>',
"""<html four="Value three" one="Value One" two="Value two" three="Value three">Value One</html>""",
"Accessing multiple existing attributes failed.")
def testAmpersandEscapeInAttributes(self):
self._runTest_(
'<html existingAtt="&Testing&" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt="&Testing&" existingatt="&Testing&">&Testing&</html>""",
"Accessing existing attributes failed.")
#~ def testAttributeCase (self):
#~ self._runTest_ ('<html HREF="Testing" tal:attributes="HREF test">Hello</html>'
#~ ,"""<html href="testing">Hello</html>"""
#~ ,"HTML Attributes not treated as case insensitive.")
if __name__ == '__main__':
unittest.main()
| 46.042424
| 162
| 0.65618
| 881
| 7,597
| 5.609535
| 0.30193
| 0.045528
| 0.039458
| 0.053824
| 0.441319
| 0.416026
| 0.374545
| 0.374545
| 0.36038
| 0.29138
| 0
| 0.003002
| 0.210741
| 7,597
| 164
| 163
| 46.323171
| 0.821214
| 0.252863
| 0
| 0.26
| 0
| 0.15
| 0.488528
| 0.162184
| 0
| 0
| 0
| 0
| 0.01
| 1
| 0.15
| false
| 0.01
| 0.07
| 0
| 0.23
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13dfb252487b62555c998999e6abd56dcb22612c
| 562
|
py
|
Python
|
iseq_prof/fasta.py
|
EBI-Metagenomics/iseq-prof
|
ca41a0f3aa1e70e59648bdc08b36da1ec76220ad
|
[
"MIT"
] | null | null | null |
iseq_prof/fasta.py
|
EBI-Metagenomics/iseq-prof
|
ca41a0f3aa1e70e59648bdc08b36da1ec76220ad
|
[
"MIT"
] | null | null | null |
iseq_prof/fasta.py
|
EBI-Metagenomics/iseq-prof
|
ca41a0f3aa1e70e59648bdc08b36da1ec76220ad
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import List
from fasta_reader import FASTAItem, FASTAWriter, read_fasta
__all__ = ["downsample"]
def downsample(infile: Path, outfile: Path, size: int, random):
targets: List[FASTAItem] = list(read_fasta(infile))
if size > len(targets):
raise ValueError("Size is greater than the number of targets.")
targets = random.choice(targets, size, replace=False).tolist()
with FASTAWriter(outfile) as writer:
for target in targets:
writer.write_item(target.defline, target.sequence)
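# Hedged usage sketch (added): `random.choice(..., replace=False).tolist()`
# matches the NumPy Generator API, so a plausible caller passes a seeded RNG.
# The file names below are hypothetical.
import numpy.random

downsample(
    infile=Path("targets.fasta"),
    outfile=Path("targets.100.fasta"),
    size=100,
    random=numpy.random.default_rng(seed=7),
)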
| 29.578947
| 71
| 0.715302
| 72
| 562
| 5.472222
| 0.611111
| 0.045685
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190391
| 562
| 18
| 72
| 31.222222
| 0.865934
| 0
| 0
| 0
| 0
| 0
| 0.094306
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e04df35258103610f99481901f9649956a3c76
| 209
|
py
|
Python
|
src/data_settings.py
|
DhruvSrikanth/TSLA-React
|
2ce4edb6b21ec1a301047124cfda5bb30deb3a90
|
[
"MIT"
] | null | null | null |
src/data_settings.py
|
DhruvSrikanth/TSLA-React
|
2ce4edb6b21ec1a301047124cfda5bb30deb3a90
|
[
"MIT"
] | null | null | null |
src/data_settings.py
|
DhruvSrikanth/TSLA-React
|
2ce4edb6b21ec1a301047124cfda5bb30deb3a90
|
[
"MIT"
] | null | null | null |
# API keys
# YF_API_KEY = "YRVHVLiFAt3ANYZf00BXr2LHNfZcgKzdWVmsZ9Xi" # yahoo finance api key
TICKER = "TSLA"
INTERVAL = "1m"
PERIOD = "1d"
LOOK_BACK = 30 # hard limit to not reach rate limit of 100 per day
| 20.9
| 81
| 0.732057
| 30
| 209
| 5
| 0.866667
| 0.08
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070588
| 0.186603
| 209
| 10
| 82
| 20.9
| 0.811765
| 0.655502
| 0
| 0
| 0
| 0
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e0ca25df4bf90f8ba82de1f47aa08f14078d33
| 5,304
|
py
|
Python
|
numba/roc/tests/hsapy/test_gufuncbuilding.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
numba/roc/tests/hsapy/test_gufuncbuilding.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 108
|
2020-08-17T22:38:26.000Z
|
2021-12-06T09:44:14.000Z
|
numba/roc/tests/hsapy/test_gufuncbuilding.py
|
luk-f-a/numba
|
3a682bd827e416335e3574bc7b10f0ec69adb701
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import numpy as np
from numba.roc.vectorizers import HsaGUFuncVectorize
from numba.roc.dispatch import HSAGenerializedUFunc
from numba import guvectorize
import unittest
def ufunc_add_core(a, b, c):
for i in range(c.size):
c[i] = a[i] + b[i]
class TestGUFuncBuilding(unittest.TestCase):
def test_gufunc_building(self):
ufbldr = HsaGUFuncVectorize(ufunc_add_core, "(x),(x)->(x)")
ufbldr.add("(float32[:], float32[:], float32[:])")
ufbldr.add("(intp[:], intp[:], intp[:])")
ufunc = ufbldr.build_ufunc()
self.assertIsInstance(ufunc, HSAGenerializedUFunc)
# Test integer version
A = np.arange(100, dtype=np.intp)
B = np.arange(100, dtype=np.intp) + 1
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test integer version with 2D inputs
A = A.reshape(50, 2)
B = B.reshape(50, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test integer version with 3D inputs
A = A.reshape(5, 10, 2)
B = B.reshape(5, 10, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test real version
A = np.arange(100, dtype=np.float32)
B = np.arange(100, dtype=np.float32) + 1
expected = A + B
got = ufunc(A, B)
np.testing.assert_allclose(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.float32), got.dtype)
# Test real version with 2D inputs
A = A.reshape(50, 2)
B = B.reshape(50, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_allclose(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.float32), got.dtype)
def test_gufunc_building_scalar_output(self):
def sum_row(inp, out):
tmp = 0.
for i in range(inp.shape[0]):
tmp += inp[i]
out[0] = tmp
ufbldr = HsaGUFuncVectorize(sum_row, "(n)->()")
ufbldr.add("void(int32[:], int32[:])")
ufunc = ufbldr.build_ufunc()
inp = np.arange(300, dtype=np.int32).reshape(100, 3)
out = ufunc(inp)
for i in range(inp.shape[0]):
np.testing.assert_equal(inp[i].sum(), out[i])
def test_gufunc_scalar_input_saxpy(self):
def axpy(a, x, y, out):
for i in range(out.shape[0]):
out[i] = a * x[i] + y[i]
ufbldr = HsaGUFuncVectorize(axpy, '(),(t),(t)->(t)')
ufbldr.add("void(float32, float32[:], float32[:], float32[:])")
saxpy = ufbldr.build_ufunc()
A = np.float32(2)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i])
X = np.arange(10, dtype=np.float32)
Y = np.arange(10, dtype=np.float32)
out = saxpy(A, X, Y)
for j in range(10):
exp = A * X[j] + Y[j]
self.assertTrue(exp == out[j], (exp, out[j]))
A = np.arange(5, dtype=np.float32)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A[j] * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i], (exp, out[j, i]))
class TestGUFuncDecor(unittest.TestCase):
def test_gufunc_decorator(self):
@guvectorize(["void(float32, float32[:], float32[:], float32[:])"],
'(),(t),(t)->(t)', target='roc')
def saxpy(a, x, y, out):
for i in range(out.shape[0]):
out[i] = a * x[i] + y[i]
A = np.float32(2)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i])
X = np.arange(10, dtype=np.float32)
Y = np.arange(10, dtype=np.float32)
out = saxpy(A, X, Y)
for j in range(10):
exp = A * X[j] + Y[j]
self.assertTrue(exp == out[j], (exp, out[j]))
A = np.arange(5, dtype=np.float32)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A[j] * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i], (exp, out[j, i]))
if __name__ == '__main__':
unittest.main()
| 31.951807
| 75
| 0.532805
| 760
| 5,304
| 3.672368
| 0.117105
| 0.060193
| 0.09029
| 0.064493
| 0.701182
| 0.653171
| 0.631673
| 0.59871
| 0.59871
| 0.59871
| 0
| 0.047554
| 0.310143
| 5,304
| 165
| 76
| 32.145455
| 0.715223
| 0.026961
| 0
| 0.658537
| 0
| 0
| 0.047527
| 0
| 0
| 0
| 0
| 0
| 0.186992
| 1
| 0.065041
| false
| 0
| 0.04065
| 0
| 0.121951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e1722808236b6aeebbdaf4b408b6e5d0b9cadb
| 738
|
py
|
Python
|
control-flow/solution/file_hosts.py
|
giserh/book-python
|
ebd4e70cea1dd56986aa8efbae3629ba3f1ba087
|
[
"MIT"
] | 1
|
2019-01-02T15:04:08.000Z
|
2019-01-02T15:04:08.000Z
|
control-flow/solution/file_hosts.py
|
giserh/book-python
|
ebd4e70cea1dd56986aa8efbae3629ba3f1ba087
|
[
"MIT"
] | null | null | null |
control-flow/solution/file_hosts.py
|
giserh/book-python
|
ebd4e70cea1dd56986aa8efbae3629ba3f1ba087
|
[
"MIT"
] | null | null | null |
FILE = r'../src/etc-hosts.txt'
hostnames = []
content = []  # default so the loop below does not fail if the file was unreadable
try:
with open(FILE, encoding='utf-8') as file:
content = file.readlines()
except FileNotFoundError:
print('File does not exist')
except PermissionError:
print('Permission denied')
for line in content:
if line.startswith('#'):
continue
if line.isspace():
continue
line = line.strip().split()
ip = line[0]
hosts = line[1:]
for record in hostnames:
if record['ip'] == ip:
record['hostnames'].update(hosts)
break
else:
hostnames.append({
'hostnames': set(hosts),
'protocol': 'IPv4' if '.' in ip else 'IPv6',
'ip': ip,
})
print(hostnames)
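# Illustrative note (added): for an input line such as
# "127.0.0.1  localhost local", the loop above accumulates one record per IP:
example_record = {
    'hostnames': {'localhost', 'local'},
    'protocol': 'IPv4',
    'ip': '127.0.0.1',
}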
| 19.421053
| 56
| 0.550136
| 83
| 738
| 4.891566
| 0.554217
| 0.029557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009785
| 0.307588
| 738
| 37
| 57
| 19.945946
| 0.784736
| 0
| 0
| 0.071429
| 0
| 0
| 0.136856
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e274ea10a3039f67d424845564d23d8affb74d
| 2,875
|
py
|
Python
|
algo/test/test_maximum_cut.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 24
|
2016-03-21T07:53:54.000Z
|
2020-06-29T12:16:36.000Z
|
algo/test/test_maximum_cut.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 5
|
2015-09-29T17:12:36.000Z
|
2020-03-26T20:51:56.000Z
|
algo/test/test_maximum_cut.py
|
ssavinash1/Algorithm_stanford
|
f2588b6bcac2b0858e78b819e6e8402109e80ee2
|
[
"MIT"
] | 12
|
2016-05-24T16:48:32.000Z
|
2020-10-02T12:22:09.000Z
|
# -*- coding: utf-8 -*-
import unittest
from src.graph import Graph
from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph
class MaximumCut(unittest.TestCase):
def test_maximum_cut_for_bipartite_graphs(self):
""" Given the following bipartite graph.
(a)-----(b)
\
\----(c)
(d)-----(e)
/
(f)----/
\
\----(g)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'c'),
('d', 'e'), ('f', 'e'), ('f', 'g')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(len(left), [3,4], 'either 3 or 4')
self.assertIn(len(right), [3,4], 'either 3 or 4')
self.assertEqual(7, len(left)+len(right), 'no vertex counted twice')
def test_maximum_cut_for_larger_bipartite_graphs(self):
""" A sligthly larger graph:
(a) (c)
| \ /|
| x |
| / \ |
(b) (d)
| \ /|
| x |
| / \ |
(e) (f)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'd'), ('c', 'b'), ('c', 'd'),
('b', 'e'), ('b', 'f'), ('d', 'e'), ('d', 'f')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(set(left), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertIn(set(right), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertNotEqual(left, right, 'not the same subsets')
def test_maximum_cut(self):
""" Given a graph:
(u)----(v)
| \ / |
| \/ |
| /\ |
| / \ |
(w)---(x)
"""
g = Graph.build(edges=[
('u', 'v'), ('u', 'w'), ('u', 'x'), ('v', 'x'),('w', 'x')],
directed=False)
(left, right) = maximum_cut(g)
expected = [{'u', 'v'}, {'w', 'x'}, {'x', 'u'}, {'w', 'v'}]
self.assertNotEqual(left, right, 'no common vertices between cuts')
self.assertIn(set(left), expected, 'should correctly split the graph')
self.assertIn(set(right), expected, 'should correctly split the graph')
def test_weighted_maximum_cut(self):
""" Given the following weighted graph.
(u)-3-(v)
| \ / |
| 5\/1 4
2 /\ |
| / \ |
(w)-6-(x)
"""
g = Graph.build(edges=[
('u', 'v', 3), ('u', 'w', 2), ('u', 'x', 5),
('v', 'x', 4),('w', 'x', 6)],
directed=False)
(left, right) = maximum_cut(g)
self.assertEqual(2, len(left), 'left should contain 2 vertices')
self.assertEqual(2, len(right), 'right should contain 2 vertices')
| 33.823529
| 79
| 0.420522
| 322
| 2,875
| 3.65528
| 0.226708
| 0.093458
| 0.055225
| 0.074766
| 0.371283
| 0.314359
| 0.253186
| 0.13254
| 0.13254
| 0.105353
| 0
| 0.013684
| 0.364522
| 2,875
| 84
| 80
| 34.22619
| 0.630542
| 0.151304
| 0
| 0.27027
| 0
| 0
| 0.138389
| 0
| 0
| 0
| 0
| 0
| 0.297297
| 1
| 0.108108
| false
| 0
| 0.081081
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e34fac65209ea50100d76fe6090282f3d8e3b4
| 5,788
|
py
|
Python
|
gdb/print-avs-rbtree.py
|
kemonats/avs_commons
|
ecce4edf5376d132e3686af227c9adf22ce1090e
|
[
"Apache-2.0"
] | 4
|
2016-11-04T12:55:32.000Z
|
2019-03-21T15:07:58.000Z
|
gdb/print-avs-rbtree.py
|
kemonats/avs_commons
|
ecce4edf5376d132e3686af227c9adf22ce1090e
|
[
"Apache-2.0"
] | 5
|
2015-02-11T09:34:36.000Z
|
2021-04-19T08:51:50.000Z
|
gdb/print-avs-rbtree.py
|
kemonats/avs_commons
|
ecce4edf5376d132e3686af227c9adf22ce1090e
|
[
"Apache-2.0"
] | 17
|
2015-12-17T10:32:09.000Z
|
2022-02-14T10:58:39.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2021 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# installation: append "source PATH_TO_THIS_SCRIPT" to ~/.gdbinit
import gdb
class PrintAvsRbtreeBase(gdb.Command):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.intptr_type = gdb.lookup_type('unsigned long long')
self.int_type = gdb.lookup_type('int')
self.output_format = '%%s 0x%%0%dx = %%s' % (self.intptr_type.sizeof * 2,)
# TODO: the magic-field offsets (rb_magic_offset, tree_magic_offset) referenced
# by print-avs-rbtree-node below are never set here and still need to be defined
self.color_offset = -32
self.parent_offset = -24
self.left_offset = -16
self.right_offset = -8
def _print_tree(self, ptr, path='', depth=0, visited_addrs=set()):
left_ptr_value = ptr.cast(self.intptr_type) + self.left_offset
left_ptr = left_ptr_value.cast(ptr.type.pointer()).dereference()
right_ptr_value = ptr.cast(self.intptr_type) + self.right_offset
right_ptr = right_ptr_value.cast(ptr.type.pointer()).dereference()
prefix = ''.join(' |' if x == 'L' else ' ' for x in path)
if path:
if path[-1] == 'L':
prefix += '- '
elif path[-1] == 'R':
prefix = prefix[:-1] + "'- "
print(prefix + self.output_format % (path[-1] if path else ' ', int(ptr), str(ptr.dereference())))
if int(left_ptr) in visited_addrs or int(right_ptr) in visited_addrs:
print('circular tree detected, stopping')
return
visited_addrs.add(left_ptr)
visited_addrs.add(right_ptr)
if int(left_ptr) != 0:
self._print_tree(left_ptr, path + 'L', depth+1, visited_addrs)
if int(right_ptr) != 0:
self._print_tree(right_ptr, path + 'R', depth+1, visited_addrs)
class PrintAvsRbtreeSubtree(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree-subtree',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) != 1:
print('usage: print-avs-rbtree-subtree expr\n'
' expr - an expression that evaluates to a valid AVS_RBTREE_NODE pointer\n')
return
expr = args[0]
val = gdb.parse_and_eval(expr)
if val is None:
print('cannot evaluate expression: ' + expr)
return
if val == 0:
print('(null)')
else:
self._print_tree(val)
class PrintAvsRbtree(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) != 1:
print('usage: print-avs-rbtree expr\n'
' expr - an expression that evaluates to a valid AVS_RBTREE pointer\n')
return
expr = args[0]
val = gdb.parse_and_eval('*(' + expr + ')')
if val is None:
print('cannot evaluate expression: ' + expr)
return
if val == 0:
print('(null)')
else:
self._print_tree(val)
class PrintAvsRbtreeNode(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree-node',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) not in (1, 2):
print('usage: print-avs-rbtree-node expr [with_magic]\n'
' expr - an expression that evaluates to a valid AVS_RBTREE_NODE pointer\n'
' with_magic - if present, "magic" fields are displayed\n')
return
expr = args[0]
with_magic = len(args) > 1
ptr = gdb.parse_and_eval(expr)
if ptr is None:
print('cannot evaluate expression: ' + expr)
return
if ptr == 0:
print('(null)')
else:
intptr_ptr = ptr.cast(self.intptr_type)
if with_magic:
print((intptr_ptr + self.rb_magic_offset))
print((intptr_ptr + self.rb_magic_offset).cast(self.int_type.pointer()))
print('rb magic: %s' % ((intptr_ptr + self.rb_magic_offset).cast(self.int_type.pointer()).dereference()))
print('tree magic: %s' % ((intptr_ptr + self.tree_magic_offset).cast(self.int_type.pointer()).dereference()))
print('color: %s' % ((intptr_ptr + self.color_offset ).cast(self.int_type.pointer()).dereference()))
print('parent: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.parent_offset).cast(ptr.type.pointer()).dereference()))
print('left: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.left_offset ).cast(ptr.type.pointer()).dereference()))
print('right: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.right_offset ).cast(ptr.type.pointer()).dereference()))
PrintAvsRbtreeSubtree()
PrintAvsRbtree()
PrintAvsRbtreeNode()
| 37.102564
| 146
| 0.593988
| 729
| 5,788
| 4.506173
| 0.226337
| 0.030137
| 0.034094
| 0.027397
| 0.53516
| 0.499543
| 0.473973
| 0.416134
| 0.382648
| 0.301065
| 0
| 0.011541
| 0.281444
| 5,788
| 155
| 147
| 37.341935
| 0.778312
| 0.114029
| 0
| 0.396226
| 0
| 0
| 0.141487
| 0.013503
| 0
| 0
| 0
| 0.006452
| 0
| 1
| 0.075472
| false
| 0
| 0.009434
| 0
| 0.188679
| 0.254717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e3573ef0ab92fc261e9835b12be4ef8345103f
| 1,372
|
py
|
Python
|
hour17/PythonGroup.py
|
sampx/mongodb-practice
|
0698b21b7da57693ba4146384c8ad65530b0066b
|
[
"MIT"
] | null | null | null |
hour17/PythonGroup.py
|
sampx/mongodb-practice
|
0698b21b7da57693ba4146384c8ad65530b0066b
|
[
"MIT"
] | null | null | null |
hour17/PythonGroup.py
|
sampx/mongodb-practice
|
0698b21b7da57693ba4146384c8ad65530b0066b
|
[
"MIT"
] | null | null | null |
from pymongo import MongoClient
def displayGroup(results):
for result in results:
print (result)
def firstIsALastIsVowel(collection):
key = {'first' : True, "last" : True}
cond = {'first' : 'a', 'last' :
{'$in' : ["a","e","i","o","u"]}}
initial = {'count' : 0}
reduce = "function (obj, prev) { prev.count++; }"
results = collection.group(key, cond, initial, reduce)
print ("\n\n'A' words grouped by first and last" + \
" letter that end with a vowel:")
displayGroup(results)
def firstLetterTotals(collection):
key = {'first' : True}
cond = {}
initial = {'vowels' : 0, 'cons' : 0}
reduce = "function (obj, prev) { " + \
"prev.vowels += obj.stats.vowels; " + \
"prev.cons += obj.stats.consonants; " + \
"}"
finalize = "function (obj) { " + \
"obj.total = obj.vowels + obj.cons; " + \
"}"
results = collection.group(key, cond, initial, reduce, finalize)
print ("\n\nWords grouped by first letter " + \
"with totals:")
displayGroup(results)
if __name__=="__main__":
mongo = MongoClient('mongodb://localhost:27017/')
db = mongo['words']
collection = db['word_stats']
firstIsALastIsVowel(collection)
firstLetterTotals(collection)
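# Hedged note (added): Collection.group() wraps MongoDB's "group" command,
# which was deprecated in MongoDB 3.4, removed in 4.2, and dropped from
# PyMongo 4, so this script only runs against older stacks. A rough
# aggregation-pipeline equivalent of firstIsALastIsVowel would be:
def firstIsALastIsVowelAggregate(collection):
    pipeline = [
        {'$match': {'first': 'a', 'last': {'$in': ['a', 'e', 'i', 'o', 'u']}}},
        {'$group': {'_id': {'first': '$first', 'last': '$last'},
                    'count': {'$sum': 1}}},
    ]
    displayGroup(collection.aggregate(pipeline))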
| 39.2
| 69
| 0.54519
| 138
| 1,372
| 5.355072
| 0.427536
| 0.077131
| 0.048714
| 0.05954
| 0.184032
| 0.184032
| 0.113667
| 0
| 0
| 0
| 0
| 0.008282
| 0.295918
| 1,372
| 35
| 70
| 39.2
| 0.756729
| 0
| 0
| 0.114286
| 0
| 0
| 0.294249
| 0.035101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.028571
| 0
| 0.114286
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e4eede3d6e6a6be8776d50a9b969b677e1d046
| 5,115
|
py
|
Python
|
packnet_sfm/models/model_utils.py
|
pection/packnet-sfm
|
d5673567b649e6bfda292c894cacdeb06aa80913
|
[
"MIT"
] | 1
|
2022-02-22T06:19:02.000Z
|
2022-02-22T06:19:02.000Z
|
packnet_sfm/models/model_utils.py
|
pection/packnet-sfm
|
d5673567b649e6bfda292c894cacdeb06aa80913
|
[
"MIT"
] | null | null | null |
packnet_sfm/models/model_utils.py
|
pection/packnet-sfm
|
d5673567b649e6bfda292c894cacdeb06aa80913
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Toyota Research Institute. All rights reserved.
from packnet_sfm.utils.image import flip_lr, interpolate_scales
from packnet_sfm.utils.misc import filter_dict
from packnet_sfm.utils.types import is_tensor, is_list, is_numpy
def flip(tensor, flip_fn):
"""
Flip tensors or list of tensors based on a function
Parameters
----------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Tensor to be flipped
flip_fn : Function
Flip function
Returns
-------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Flipped tensor or list of tensors
"""
if not is_list(tensor):
return flip_fn(tensor)
else:
if not is_list(tensor[0]):
return [flip_fn(val) for val in tensor]
else:
return [[flip_fn(v) for v in val] for val in tensor]
def merge_outputs(*outputs):
"""
Merges model outputs for logging
Parameters
----------
outputs : tuple of dict
Outputs to be merged
Returns
-------
output : dict
Dictionary with a "metrics" key containing a dictionary with various metrics and
all other keys that are not "loss" (it is handled differently).
"""
ignore = ['loss'] # Keys to ignore
combine = ['metrics'] # Keys to combine
merge = {key: {} for key in combine}
for output in outputs:
# Iterate over all keys
for key, val in output.items():
# Combine these keys
if key in combine:
for sub_key, sub_val in output[key].items():
assert sub_key not in merge[key].keys(), \
'Combining duplicated key {} to {}'.format(sub_key, key)
merge[key][sub_key] = sub_val
# Ignore these keys
elif key not in ignore:
assert key not in merge.keys(), \
'Adding duplicated key {}'.format(key)
merge[key] = val
return merge
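# Hedged usage sketch (added): 'loss' is dropped, the 'metrics' dicts are
# merged, and any other key is carried through unchanged.
def _merge_outputs_example():
    merged = merge_outputs(
        {'loss': 1.0, 'metrics': {'abs_rel': 0.1}},
        {'metrics': {'rmse': 2.3}},
    )
    assert merged == {'metrics': {'abs_rel': 0.1, 'rmse': 2.3}}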
def stack_batch(batch):
"""
Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)
Parameters
----------
batch : dict
Batch
Returns
-------
batch : dict
Stacked batch
"""
# If there is multi-camera information
if len(batch['rgb'].shape) == 5:
assert batch['rgb'].shape[0] == 1, 'Only batch size 1 is supported for multi-cameras'
# Loop over all keys
for key in batch.keys():
# If list, stack every item
if is_list(batch[key]):
if is_tensor(batch[key][0]) or is_numpy(batch[key][0]):
batch[key] = [sample[0] for sample in batch[key]]
# Else, stack single item
else:
batch[key] = batch[key][0]
return batch
def flip_batch_input(batch):
"""
Flip batch input information (copies data first)
Parameters
----------
batch : dict
Batch information
Returns
-------
batch : dict
Flipped batch
"""
# Flip tensors
for key in filter_dict(batch, [
'rgb', 'rgb_context',
'input_depth', 'input_depth_context',
]):
batch[key] = flip(batch[key], flip_lr)
# Flip intrinsics
for key in filter_dict(batch, [
'intrinsics'
]):
batch[key] = batch[key].clone()
batch[key][:, 0, 2] = batch['rgb'].shape[3] - batch[key][:, 0, 2]
# Return flipped batch
return batch
def flip_output(output):
"""
Flip output information
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
Returns
-------
output : dict
Flipped output
"""
# Flip tensors
for key in filter_dict(output, [
'uncertainty', 'logits_semantic', 'ord_probability',
'inv_depths', 'inv_depths_context', 'inv_depths1', 'inv_depths2',
'pred_depth', 'pred_depth_context', 'pred_depth1', 'pred_depth2',
'pred_inv_depth', 'pred_inv_depth_context', 'pred_inv_depth1', 'pred_inv_depth2',
]):
output[key] = flip(output[key], flip_lr)
return output
def upsample_output(output, mode='nearest', align_corners=None):
"""
Upsample multi-scale outputs to full resolution.
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
mode : str
Which interpolation mode is used
align_corners: bool or None
Whether corners will be aligned during interpolation
Returns
-------
output : dict
Upsampled output
"""
for key in filter_dict(output, [
'inv_depths', 'uncertainty'
]):
output[key] = interpolate_scales(
output[key], mode=mode, align_corners=align_corners)
for key in filter_dict(output, [
'inv_depths_context'
]):
output[key] = [interpolate_scales(
val, mode=mode, align_corners=align_corners) for val in output[key]]
return output
| 28.259669
| 93
| 0.585728
| 634
| 5,115
| 4.605678
| 0.250789
| 0.035616
| 0.019178
| 0.023973
| 0.205137
| 0.170205
| 0.158562
| 0.115753
| 0.093151
| 0.093151
| 0
| 0.00671
| 0.300684
| 5,115
| 180
| 94
| 28.416667
| 0.809617
| 0.352884
| 0
| 0.242857
| 0
| 0
| 0.144675
| 0.007368
| 0
| 0
| 0
| 0
| 0.042857
| 1
| 0.085714
| false
| 0
| 0.042857
| 0
| 0.242857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e8cb3c3e9246762451e5cb22ca74f1dccc3db7
| 2,656
|
py
|
Python
|
predict_recognition.py
|
yeyupiaoling/Kersa-Speaker-Recognition
|
7ccf42c006f42ff6074ad3937e44a0dfa68c6d33
|
[
"Apache-2.0"
] | 42
|
2020-07-12T13:21:13.000Z
|
2021-07-01T01:06:12.000Z
|
predict_recognition.py
|
yeyupiaoling/VoiceprintRecognition-Keras
|
7ccf42c006f42ff6074ad3937e44a0dfa68c6d33
|
[
"Apache-2.0"
] | 3
|
2020-08-19T06:16:02.000Z
|
2020-11-02T02:16:56.000Z
|
predict_recognition.py
|
yeyupiaoling/Kersa-Speaker-Recognition
|
7ccf42c006f42ff6074ad3937e44a0dfa68c6d33
|
[
"Apache-2.0"
] | 12
|
2020-07-15T14:33:51.000Z
|
2021-05-24T03:55:04.000Z
|
import argparse
import os
import shutil
import time
import numpy as np
from utils import model, utils
from utils.record import RecordAudio
parser = argparse.ArgumentParser()
parser.add_argument('--audio_db', default='audio_db/', type=str, help='path to the audio library')
parser.add_argument('--threshold', default=0.7, type=float, help='similarity threshold for deciding that two voices are the same person')
parser.add_argument('--model_path', default=r'models/resnet34-56.h5', type=str, help='path to the model')
args = parser.parse_args()
person_feature = []
person_name = []
# Build the model
network_eval = model.vggvox_resnet2d_icassp(input_dim=(257, None, 1), mode='eval')
# Load the pretrained weights
network_eval.load_weights(os.path.join(args.model_path), by_name=True)
print('==> successfully loading model {}.'.format(args.model_path))
# Extract the voiceprint feature for one audio file
def predict(path):
specs = utils.load_data(path, mode='eval')
specs = np.expand_dims(np.expand_dims(specs, 0), -1)
feature = network_eval.predict(specs)[0]
return feature
# Load the audio library used for recognition
def load_audio_db(audio_db_path):
start = time.time()
audios = os.listdir(audio_db_path)
for audio in audios:
path = os.path.join(audio_db_path, audio)
name = audio[:-4]
feature = predict(path)
person_name.append(name)
person_feature.append(feature)
print("Loaded %s audio." % name)
end = time.time()
print('Audio library loaded, took %fms' % (round((end - start) * 1000)))
# Recognize a voiceprint
def recognition(path):
name = ''
pro = 0
feature = predict(path)
for i, person_f in enumerate(person_feature):
# compute the similarity
dist = np.dot(feature, person_f.T)
if dist > pro:
pro = dist
name = person_name[i]
return name, pro
# Register a voiceprint
def register(path, user_name):
save_path = os.path.join(args.audio_db, user_name + os.path.basename(path)[-4:])
shutil.move(path, save_path)
feature = predict(save_path)
person_name.append(user_name)
person_feature.append(feature)
if __name__ == '__main__':
load_audio_db(args.audio_db)
record_audio = RecordAudio()
while True:
select_fun = int(input("Select a function: 0 to register audio into the voiceprint library, 1 to run voiceprint recognition: "))
if select_fun == 0:
audio_path = record_audio.record()
name = input("Enter the user name for this audio: ")
if name == '': continue
register(audio_path, name)
elif select_fun == 1:
audio_path = record_audio.record()
name, p = recognition(audio_path)
if p > args.threshold:
print("识别说话的为:%s,相似度为:%f" % (name, p))
else:
print("音频库没有该用户的语音")
else:
print('Please select a valid function')
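# Hedged note (added): np.dot above behaves like cosine similarity only if the
# network emits L2-normalized embeddings (an assumption here). An explicitly
# normalized comparison removes that dependency:
def cosine_similarity(a, b):
    a = a / np.linalg.norm(a)
    b = b / np.linalg.norm(b)
    return float(np.dot(a, b))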
| 28.255319
| 103
| 0.628765
| 343
| 2,656
| 4.676385
| 0.35277
| 0.039277
| 0.031796
| 0.017456
| 0.074813
| 0.037406
| 0
| 0
| 0
| 0
| 0
| 0.012935
| 0.243223
| 2,656
| 93
| 104
| 28.55914
| 0.785075
| 0.017696
| 0
| 0.115942
| 0
| 0
| 0.093041
| 0.01807
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.101449
| 0
| 0.188406
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e90500b4879323474a6761d01dbd32906a8b6c
| 6,853
|
py
|
Python
|
cubedash/_product.py
|
vconrado/datacube-explorer
|
ccb9a9a42e5dd16e2b0325a1f881b080bb2806e6
|
[
"Apache-2.0"
] | null | null | null |
cubedash/_product.py
|
vconrado/datacube-explorer
|
ccb9a9a42e5dd16e2b0325a1f881b080bb2806e6
|
[
"Apache-2.0"
] | null | null | null |
cubedash/_product.py
|
vconrado/datacube-explorer
|
ccb9a9a42e5dd16e2b0325a1f881b080bb2806e6
|
[
"Apache-2.0"
] | null | null | null |
import logging
from datetime import timedelta
from flask import Blueprint, Response, abort, redirect, url_for
from cubedash import _model, _utils, _utils as utils
_LOG = logging.getLogger(__name__)
bp = Blueprint("product", __name__)
@bp.route("/about.csv")
def legacy_about_csv():
return redirect(".storage_csv")
@bp.route("/audit/storage.csv")
def storage_csv():
"""Get the product storage table as a CSV"""
product_locations = _model.STORE.products_location_samples_all()
return utils.as_csv(
filename_prefix="product-information",
headers=(
"name",
"count",
"locations",
"license",
"definition",
"summary_time",
"metadata_type",
),
rows=(
(
product.name,
summary.dataset_count,
[
location.common_prefix
for location in (product_locations.get(product.name) or [])
],
_utils.product_license(product),
url_for("product.raw_product_doc", name=product.name, _external=True),
summary.last_refresh_time,
product.metadata_type.name,
)
for product, summary in _model.get_products_with_summaries()
),
)
@bp.route("/products.txt")
def product_list_text():
# This is useful for bash scripts when we want to loop products :)
return Response(
"\n".join(t.name for t in _model.STORE.all_dataset_types()),
content_type="text/plain",
)
@bp.route("/metadata-types.txt")
def metadata_type_list_text():
# This is useful for bash scripts when we want to loop them :)
return Response(
"\n".join(t.name for t in _model.STORE.all_metadata_types()),
content_type="text/plain",
)
@bp.route("/audit/storage")
def storage_page():
product_locations = _model.STORE.products_location_samples_all()
return utils.render(
"storage.html",
product_summary_and_location=[
(product, summary, (product_locations.get(product.name) or []))
for product, summary in _model.get_products_with_summaries()
],
)
@bp.route("/product")
def product_redirect():
"""
If people remove the name from a "/product/<name>" url, take them somewhere useful
"""
return redirect(url_for(".products_page"))
@bp.route("/products")
def products_page():
return utils.render(
"products.html",
)
@bp.route("/metadata-types")
def metadata_types_page():
return utils.render(
"metadata-types.html",
)
@bp.route("/product/<name>.odc-product.yaml")
def legacy_raw_product_doc(name):
return redirect(url_for(".raw_product_doc", name=name))
@bp.route("/products/<name>.odc-product.yaml")
def raw_product_doc(name):
product = _model.STORE.index.products.get_by_name(name)
if not product:
abort(404, f"Unknown product {name!r}")
ordered_metadata = utils.prepare_document_formatting(
product.definition, "Product", include_source_url=True
)
return utils.as_yaml(ordered_metadata)
@bp.route("/metadata-type/<name>")
def legacy_metadata_type_page(name):
return redirect(url_for(".metadata_type_page", name=name))
@bp.route("/metadata-types/<name>")
def metadata_type_page(name):
metadata_type = _model.STORE.index.metadata_types.get_by_name(name)
if not metadata_type:
abort(404, f"Unknown metadata type {name!r}")
ordered_metadata = utils.prepare_document_formatting(metadata_type.definition)
products_using_it = sorted(
(
p
for p in _model.STORE.index.products.get_all()
if p.metadata_type.name == name
),
key=lambda p: p.name,
)
return utils.render(
"metadata-type.html",
metadata_type=metadata_type,
metadata_doc=ordered_metadata,
products_using_it=products_using_it,
)
@bp.route("/metadata-type/<name>.odc-type.yaml")
def legacy_metadata_type_doc(name):
return redirect(url_for(".raw_metadata_type_doc", name=name))
@bp.route("/metadata-types/<name>.odc-type.yaml")
def raw_metadata_type_doc(name):
metadata_type = _model.STORE.index.metadata_types.get_by_name(name)
if not metadata_type:
abort(404, f"Unknown metadata type {name!r}")
ordered_metadata = utils.prepare_document_formatting(
metadata_type.definition, "Metadata Type", include_source_url=True
)
return utils.as_yaml(ordered_metadata)
@bp.route("/products.odc-product.yaml")
def raw_all_products_doc():
resp = utils.as_yaml(
*(
utils.prepare_document_formatting(
product.definition,
f"Product {product.name}",
include_source_url=url_for(
".raw_product_doc", name=product.name, _external=True
),
)
for product in _model.STORE.all_dataset_types()
)
)
# Add Explorer ID to the download filename if they have one.
utils.suggest_download_filename(
resp,
prefix="products",
suffix=".odc-product.yaml",
)
return resp
@bp.route("/metadata-types.odc-type.yaml")
def raw_all_metadata_types_doc():
resp = utils.as_yaml(
*(
utils.prepare_document_formatting(
type_.definition,
f"Metadata Type {type_.name}",
include_source_url=url_for(
".raw_metadata_type_doc", name=type_.name, _external=True
),
)
for type_ in _model.STORE.all_metadata_types()
),
)
# Add Explorer ID to the download filename if they have one.
utils.suggest_download_filename(
resp,
prefix="metadata-types",
suffix=".odc-type.yaml",
)
return resp
def _iso8601_duration(tdelta: timedelta):
"""
Format a timedelta as an iso8601 duration
>>> _iso8601_duration(timedelta(seconds=0))
'PT0S'
>>> _iso8601_duration(timedelta(seconds=1))
'PT1S'
>>> _iso8601_duration(timedelta(seconds=23423))
'PT6H30M23S'
>>> _iso8601_duration(timedelta(seconds=4564564556))
'P52830DT14H35M56S'
"""
all_secs = tdelta.total_seconds()
    secs = all_secs % 60
    h_m_s = (
        int(all_secs // 3600 % 24),
        int(all_secs // 60 % 60),
        # Keep fractional seconds when present; otherwise render as an int.
        secs if secs % 1 != 0 else int(secs),
    )
parts = ["P"]
days = int(all_secs // 86400)
if days:
parts.append(f"{days}D")
if any(h_m_s):
parts.append("T")
if all_secs:
for val, name in zip(h_m_s, ["H", "M", "S"]):
if val:
parts.append(f"{val}{name}")
else:
parts.append("T0S")
return "".join(parts)
| 27.522088
| 86
| 0.620604
| 828
| 6,853
| 4.881643
| 0.188406
| 0.07719
| 0.025977
| 0.021029
| 0.502969
| 0.435675
| 0.388669
| 0.320139
| 0.287976
| 0.264226
| 0
| 0.017357
| 0.260178
| 6,853
| 248
| 87
| 27.633065
| 0.779882
| 0.094119
| 0
| 0.241758
| 0
| 0
| 0.144533
| 0.049047
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093407
| false
| 0
| 0.021978
| 0.043956
| 0.208791
| 0.010989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13e960260e528ba2d92aa2f22ba8b6f12cf5bbfe
| 1,923
|
py
|
Python
|
litex_boards/platforms/sipeed_tang_nano.py
|
ozbenh/litex-boards
|
f18b10d1edb4e162a77972e2e9c5bad54ca00788
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/platforms/sipeed_tang_nano.py
|
ozbenh/litex-boards
|
f18b10d1edb4e162a77972e2e9c5bad54ca00788
|
[
"BSD-2-Clause"
] | null | null | null |
litex_boards/platforms/sipeed_tang_nano.py
|
ozbenh/litex-boards
|
f18b10d1edb4e162a77972e2e9c5bad54ca00788
|
[
"BSD-2-Clause"
] | null | null | null |
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
# Board diagram/pinout:
# https://user-images.githubusercontent.com/1450143/133655492-532d5e9a-0635-4889-85c9-68683d06cae0.png
# http://dl.sipeed.com/TANG/Nano/HDK/Tang-NANO-2704(Schematic).pdf
from migen import *
from litex.build.generic_platform import *
from litex.build.gowin.platform import GowinPlatform
from litex.build.openfpgaloader import OpenFPGALoader
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk24", 0, Pins("35"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("16"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("17"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("18"), IOStandard("LVCMOS33")),
# Buttons.
("user_btn", 0, Pins("15"), IOStandard("LVCMOS33")),
("user_btn", 0, Pins("14"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("8")),
Subsignal("rx", Pins("9")),
IOStandard("LVCMOS33")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(GowinPlatform):
default_clk_name = "clk24"
default_clk_period = 1e9/24e6
def __init__(self):
GowinPlatform.__init__(self, "GW1N-LV1QN48C6/I5", _io, _connectors, toolchain="gowin", devicename="GW1N-1")
self.toolchain.options["use_done_as_gpio"] = 1
def create_programmer(self):
return OpenFPGALoader("tangnano")
def do_finalize(self, fragment):
GowinPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk24", loose=True), 1e9/24e6)
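# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal blink-style target; assumes the Gowin toolchain is installed
# (or pass run=False to stop after elaboration):
if __name__ == "__main__":
    platform = Platform()
    top = Module()
    led = platform.request("user_led", 0)
    top.comb += led.eq(1)  # tie LED 0 on
    platform.build(top, run=False)  # elaborate only; run=True invokes Gowin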
| 32.05
| 115
| 0.573583
| 200
| 1,923
| 5.36
| 0.57
| 0.117537
| 0.039179
| 0.037313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066463
| 0.147166
| 1,923
| 59
| 116
| 32.59322
| 0.587195
| 0.339054
| 0
| 0
| 0
| 0
| 0.149004
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.137931
| 0.034483
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13eaaea3944207924e8d3d8e8f4c1a6a0ee51732
| 10,525
|
py
|
Python
|
nm_cavia/rl/metalearner.py
|
anon-6994/nm-metarl
|
45c8798c2139d8c200cc7a398331c1b98a0dccec
|
[
"MIT"
] | null | null | null |
nm_cavia/rl/metalearner.py
|
anon-6994/nm-metarl
|
45c8798c2139d8c200cc7a398331c1b98a0dccec
|
[
"MIT"
] | null | null | null |
nm_cavia/rl/metalearner.py
|
anon-6994/nm-metarl
|
45c8798c2139d8c200cc7a398331c1b98a0dccec
|
[
"MIT"
] | null | null | null |
import torch
from torch.distributions.kl import kl_divergence
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from rl_utils.optimization import conjugate_gradient
from rl_utils.torch_utils import (weighted_mean, detach_distribution, weighted_normalize)
class MetaLearner(object):
"""Meta-learner
    The meta-learner is responsible for sampling the trajectories/episodes
    (before and after the one-step adaptation), computing the inner loss,
    computing the updated parameters based on the inner loss, and performing
    the meta-update.
[1] Chelsea Finn, Pieter Abbeel, Sergey Levine, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] Richard Sutton, Andrew Barto, "Reinforcement learning: An introduction",
2018 (http://incompleteideas.net/book/the-book-2nd.html)
[3] John Schulman, Philipp Moritz, Sergey Levine, Michael Jordan,
Pieter Abbeel, "High-Dimensional Continuous Control Using Generalized
Advantage Estimation", 2016 (https://arxiv.org/abs/1506.02438)
[4] John Schulman, Sergey Levine, Philipp Moritz, Michael I. Jordan,
Pieter Abbeel, "Trust Region Policy Optimization", 2015
(https://arxiv.org/abs/1502.05477)
"""
def __init__(self, sampler, policy, baseline, gamma=0.95,
fast_lr=0.5, tau=1.0, device='cpu'):
self.sampler = sampler
self.policy = policy
self.baseline = baseline
self.gamma = gamma
self.fast_lr = fast_lr
self.tau = tau
self.to(device)
def inner_loss(self, episodes, params=None):
"""Compute the inner loss for the one-step gradient update. The inner
loss is REINFORCE with baseline [2], computed on advantages estimated
with Generalized Advantage Estimation (GAE, [3]).
"""
values = self.baseline(episodes)
advantages = episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=episodes.mask)
pi = self.policy(episodes.observations, params=params)
log_probs = pi.log_prob(episodes.actions)
if log_probs.dim() > 2:
log_probs = torch.sum(log_probs, dim=2)
loss = -weighted_mean(log_probs * advantages, dim=0, weights=episodes.mask)
return loss
def adapt(self, episodes, first_order=False, params=None, lr=None):
"""Adapt the parameters of the policy network to a new task, from
sampled trajectories `episodes`, with a one-step gradient update [1].
"""
if lr is None:
lr = self.fast_lr
# Fit the baseline to the training episodes
self.baseline.fit(episodes)
# Get the loss on the training episodes
loss = self.inner_loss(episodes, params=params)
# Get the new parameters after a one-step gradient update
params = self.policy.update_params(loss, step_size=lr, first_order=first_order, params=params)
return params, loss
def sample(self, tasks, first_order=False):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
"""
episodes = []
losses = []
for task in tasks:
self.sampler.reset_task(task)
self.policy.reset_context()
train_episodes = self.sampler.sample(self.policy, gamma=self.gamma)
# inner loop (for CAVIA, this only updates the context parameters)
params, loss = self.adapt(train_episodes, first_order=first_order)
# rollouts after inner loop update
valid_episodes = self.sampler.sample(self.policy, params=params, gamma=self.gamma)
episodes.append((train_episodes, valid_episodes))
losses.append(loss.item())
return episodes, losses
def test(self, tasks, num_steps, batch_size, halve_lr):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.batchsize
"""
episodes_per_task = []
for task in tasks:
# reset context params (for cavia) and task
self.policy.reset_context()
self.sampler.reset_task(task)
# start with blank params
params = None
# gather some initial experience and log performance
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
# initialise list which will log all rollouts for the current task
curr_episodes = [test_episodes]
for i in range(1, num_steps + 1):
# lower learning rate after first update (for MAML, as described in their paper)
if i == 1 and halve_lr:
lr = self.fast_lr / 2
else:
lr = self.fast_lr
# inner-loop update
params, loss = self.adapt(test_episodes, first_order=True, params=params, lr=lr)
# get new rollouts
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
curr_episodes.append(test_episodes)
episodes_per_task.append(curr_episodes)
self.policy.reset_context()
return episodes_per_task
def kl_divergence(self, episodes, old_pis=None):
kls = []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# this is the inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
pi = self.policy(valid_episodes.observations, params=params)
if old_pi is None:
old_pi = detach_distribution(pi)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(kls, dim=0))
def hessian_vector_product(self, episodes, damping=1e-2):
"""Hessian-vector product, based on the Perlmutter method."""
def _product(vector):
kl = self.kl_divergence(episodes)
grads = torch.autograd.grad(kl, self.policy.parameters(), create_graph=True)
flat_grad_kl = parameters_to_vector(grads)
grad_kl_v = torch.dot(flat_grad_kl, vector)
grad2s = torch.autograd.grad(grad_kl_v, self.policy.parameters())
flat_grad2_kl = parameters_to_vector(grad2s)
return flat_grad2_kl + damping * vector
return _product
def surrogate_loss(self, episodes, old_pis=None):
losses, kls, pis = [], [], []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# do inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
with torch.set_grad_enabled(old_pi is None):
# get action values after inner-loop update
pi = self.policy(valid_episodes.observations, params=params)
pis.append(detach_distribution(pi))
if old_pi is None:
old_pi = detach_distribution(pi)
values = self.baseline(valid_episodes)
advantages = valid_episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=valid_episodes.mask)
log_ratio = (pi.log_prob(valid_episodes.actions)
- old_pi.log_prob(valid_episodes.actions))
if log_ratio.dim() > 2:
log_ratio = torch.sum(log_ratio, dim=2)
ratio = torch.exp(log_ratio)
loss = -weighted_mean(ratio * advantages, dim=0, weights=valid_episodes.mask)
losses.append(loss)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(losses, dim=0)), torch.mean(torch.stack(kls, dim=0)), pis
def step(self, episodes, max_kl=1e-3, cg_iters=10, cg_damping=1e-2,
ls_max_steps=10, ls_backtrack_ratio=0.5):
"""Meta-optimization step (ie. update of the initial parameters), based
on Trust Region Policy Optimization (TRPO, [4]).
"""
old_loss, _, old_pis = self.surrogate_loss(episodes)
# this part will take higher order gradients through the inner loop:
grads = torch.autograd.grad(old_loss, self.policy.parameters())
grads = parameters_to_vector(grads)
# Compute the step direction with Conjugate Gradient
hessian_vector_product = self.hessian_vector_product(episodes, damping=cg_damping)
stepdir = conjugate_gradient(hessian_vector_product, grads, cg_iters=cg_iters)
# Compute the Lagrange multiplier
shs = 0.5 * torch.dot(stepdir, hessian_vector_product(stepdir))
lagrange_multiplier = torch.sqrt(shs / max_kl)
step = stepdir / lagrange_multiplier
# Save the old parameters
old_params = parameters_to_vector(self.policy.parameters())
print()
# Line search
step_size = 1.0
for _ in range(ls_max_steps):
vector_to_parameters(old_params - step_size * step, self.policy.parameters())
loss, kl, _ = self.surrogate_loss(episodes, old_pis=old_pis)
improve = loss - old_loss
if (improve.item() < 0.0) and (kl.item() < max_kl):
break
step_size *= ls_backtrack_ratio
else:
print('no update?')
vector_to_parameters(old_params, self.policy.parameters())
print('improve:', improve.item())
print('kl:', kl.item())
print('step_size:', step_size)
return loss
def to(self, device, **kwargs):
self.policy.to(device, **kwargs)
self.baseline.to(device, **kwargs)
self.device = device
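# --- Usage sketch (illustrative; `BatchSampler`, `policy`, and `baseline`
# come from the surrounding repo and are assumptions here) ---
# A typical meta-training iteration alternates task sampling and a TRPO step:
#
#   sampler = BatchSampler('2DNavigation-v0', batch_size=20)
#   learner = MetaLearner(sampler, policy, baseline, gamma=0.95, fast_lr=0.5)
#   for _ in range(num_iterations):
#       tasks = sampler.sample_tasks(num_tasks=40)
#       episodes, inner_losses = learner.sample(tasks, first_order=False)
#       outer_loss = learner.step(episodes, max_kl=1e-2)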
| 40.171756
| 120
| 0.624323
| 1,296
| 10,525
| 4.907407
| 0.200617
| 0.033019
| 0.018868
| 0.017296
| 0.270912
| 0.221855
| 0.207233
| 0.202044
| 0.186635
| 0.178931
| 0
| 0.013286
| 0.284846
| 10,525
| 261
| 121
| 40.325671
| 0.831673
| 0.230974
| 0
| 0.275168
| 0
| 0
| 0.004312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073826
| false
| 0
| 0.033557
| 0
| 0.174497
| 0.033557
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13eac7b96baca5f54a52331a42d7e035d905943e
| 2,321
|
py
|
Python
|
request/management/commands/purgerequests.py
|
hramezani/django-request
|
4b9c7b22f26338d2c93110477aa44041b1c5ddb4
|
[
"BSD-2-Clause"
] | 373
|
2016-04-22T21:18:41.000Z
|
2022-03-31T23:13:31.000Z
|
request/management/commands/purgerequests.py
|
hramezani/django-request
|
4b9c7b22f26338d2c93110477aa44041b1c5ddb4
|
[
"BSD-2-Clause"
] | 128
|
2016-04-22T21:30:55.000Z
|
2022-03-08T20:24:44.000Z
|
request/management/commands/purgerequests.py
|
hramezani/django-request
|
4b9c7b22f26338d2c93110477aa44041b1c5ddb4
|
[
"BSD-2-Clause"
] | 79
|
2016-04-25T08:44:56.000Z
|
2022-03-17T01:41:27.000Z
|
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from ...models import Request
DURATION_OPTIONS = {
'hours': lambda amount: timezone.now() - timedelta(hours=amount),
'days': lambda amount: timezone.now() - timedelta(days=amount),
'weeks': lambda amount: timezone.now() - timedelta(weeks=amount),
'months': lambda amount: timezone.now() + relativedelta(months=-amount),
'years': lambda amount: timezone.now() + relativedelta(years=-amount),
}
try:
# to keep backward Python 2 compatibility
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Purge old requests.'
def add_arguments(self, parser):
parser.add_argument(
'amount',
type=int,
)
parser.add_argument('duration')
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
def handle(self, *args, **options):
amount = options['amount']
duration = options['duration']
# Check we have the correct values
        if duration[-1] != 's':  # If it's not plural, make it plural
            duration_plural = '{0}s'.format(duration)
        else:
            duration_plural = duration
        if duration_plural not in DURATION_OPTIONS:
            raise CommandError('Duration must be one of: {0}'.format(', '.join(DURATION_OPTIONS)))
qs = Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))
count = qs.count()
if count == 0:
print('There are no requests to delete.')
return
if options.get('interactive'):
confirm = input('''
You have requested a database reset.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount, duration, count))
else:
confirm = 'yes'
if confirm == 'yes':
qs.delete()
else:
print('Purge cancelled')
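# --- Usage sketch (illustrative, not part of the original file) ---
# Typical invocations, from the shell:
#   python manage.py purgerequests 30 days --noinput
# or programmatically, inside a configured Django project:
#
#   from django.core.management import call_command
#   call_command('purgerequests', 30, 'days', interactive=False)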
| 30.142857
| 88
| 0.623007
| 269
| 2,321
| 5.315985
| 0.475836
| 0.052448
| 0.06993
| 0.08042
| 0.117483
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004706
| 0.267557
| 2,321
| 76
| 89
| 30.539474
| 0.836471
| 0.04567
| 0
| 0.084746
| 0
| 0
| 0.212574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0.016949
| 0.084746
| 0
| 0.169492
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13ec7c22478f599c0c77b64f3478dbd3b142fa61
| 8,135
|
py
|
Python
|
cdci_data_analysis/analysis/plot_tools.py
|
andreatramacere/cdci_data_analysis
|
8ae34a7252d6baf011a3b99fbe4f6e624b63d7df
|
[
"MIT"
] | null | null | null |
cdci_data_analysis/analysis/plot_tools.py
|
andreatramacere/cdci_data_analysis
|
8ae34a7252d6baf011a3b99fbe4f6e624b63d7df
|
[
"MIT"
] | null | null | null |
cdci_data_analysis/analysis/plot_tools.py
|
andreatramacere/cdci_data_analysis
|
8ae34a7252d6baf011a3b99fbe4f6e624b63d7df
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "Andrea Tramacere"
import numpy as np
from astropy import wcs
from bokeh.layouts import row, widgetbox, gridplot
from bokeh.models import CustomJS, Slider, HoverTool, ColorBar, LinearColorMapper, LabelSet, ColumnDataSource
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.palettes import Plasma256
class Image(object):
    def __init__(self, data, header):
        self.data = data
        self.header = header
def change_image_contrast(self, attr, old, new):
# print attr,old,new
self.fig_im.glyph.color_mapper.update(low=self.graph_min_slider.value, high=self.graph_max_slider.value)
def get_html_draw(self,w=None,h=None, catalog=None, plot=False, vmin=None, vmax=None):
#import plotly
#import plotly.graph_objs as go
#from plotly.graph_objs import Layout
# print('vmin,vmax',vmin,vmax)
msk = ~np.isnan(self.data)
if vmin is None:
vmin = self.data[msk].min()
if vmax is None:
vmax = self.data[msk].max()
min_s = self.data.min()
max_s = self.data.max()
r = self.data.shape[0] * 2
c = self.data.shape[1] * 2
fig = figure(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * 0.5),
tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])
w = wcs.WCS(self.header)
color_mapper = LinearColorMapper(low=min_s, high=max_s, palette=Plasma256)
fig_im = fig.image(image=[self.data], x=[0], y=[0], dw=[c * 0.5], dh=[r * 0.5],
color_mapper=color_mapper)
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
renderers=[fig_im])
fig.add_tools(hover)
#fig, (ax) = plt.subplots(1, 1, figsize=(4, 3), subplot_kw={'projection': WCS(self.header)})
#im = ax.imshow(self.data,
# origin='lower',
# zorder=1,
# interpolation='none',
# aspect='equal',
# cmap=plt.get_cmap('jet'),
# vmin=vmin,
# vmax=vmax)
if catalog is not None:
lon = catalog.ra
lat = catalog.dec
            if len(lat) > 0:
pixcrd = w.wcs_world2pix(np.column_stack((lon, lat)), 0)
msk = ~np.isnan(pixcrd[:, 0])
#ax.plot(pixcrd[:, 0][msk], pixcrd[:, 1][msk], 'o', mfc='none')
source = ColumnDataSource(data=dict(lon=pixcrd[:, 0][msk]+0.5,
lat=pixcrd[:, 1][msk]+0.5,
names=catalog.name[msk]))
#for ID, (x, y) in enumerate(pixcrd):
# if msk[ID]:
# # print ('xy',(pixcrd[:, 0][ID], pixcrd[:, 1][ID]))
# ax.annotate('%s' % catalog.name[ID], xy=(x, y), color='white')
#print(pixcrd[:][msk])
fig.scatter(x='lon', y='lat', marker='circle', size=15,
line_color="white", fill_color=None, alpha=1.0, source=source)
labels = LabelSet(x='lon', y='lat', text='names', level='glyph',
x_offset=5, y_offset=5, render_mode='canvas', source=source, text_color='white')
fig.add_layout(labels)
#print'cat', catalog[msk]
color_bar = ColorBar(color_mapper=color_mapper,
label_standoff=12, border_line_color=None, location=(0, 0))
JS_code_slider = """
var vmin = low_slider.value;
var vmax = high_slider.value;
fig_im.glyph.color_mapper.high = vmax;
fig_im.glyph.color_mapper.low = vmin;
"""
callback = CustomJS(args=dict(fig_im=fig_im), code=JS_code_slider)
self.graph_min_slider = Slider(title="Sig. Min", start=min_s, end=max_s, step=1, value=min_s, callback=callback)
self.graph_max_slider = Slider(title="Sig. Max", start=min_s, end=max_s, step=1, value=max_s * 0.8,
callback=callback)
self.graph_min_slider.on_change('value', self.change_image_contrast)
self.graph_max_slider.on_change('value', self.change_image_contrast)
callback.args["low_slider"] = self.graph_min_slider
callback.args["high_slider"] = self.graph_max_slider
#ax.set_xlabel('RA')
#ax.set_ylabel('DEC')
#ax.grid(True, color='white')
#fig.colorbar(im, ax=ax)
#plugins.connect(fig, plugins.MousePosition(fontsize=14))
#if plot == True:
# print('plot', plot)
# mpld3.show()
fig.add_layout(color_bar, 'right')
layout = row(
fig, widgetbox(self.graph_min_slider, self.graph_max_slider),
)
#curdoc().add_root(layout)
#output_file("slider.html", title="slider.py example")
#from bokeh.io import show
#show(layout)
script, div = components(layout)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class ScatterPlot(object):
    def __init__(self, w, h, x_label=None, y_label=None, x_range=None, y_range=None,
                 title=None, y_axis_type='linear', x_axis_type='linear'):
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y")])
self.fig = figure(title=title, width=w, height=h,x_range=x_range,y_range=y_range,
y_axis_type=y_axis_type,
x_axis_type=x_axis_type,
tools=[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']
)
if x_label is not None:
self.fig.xaxis.axis_label = x_label
if y_label is not None:
self.fig.yaxis.axis_label = y_label
    def add_errorbar(self, x, y, xerr=None, yerr=None, color='red',
                     point_kwargs=None, error_kwargs=None):
        # Avoid mutable default arguments.
        point_kwargs = point_kwargs or {}
        error_kwargs = error_kwargs or {}
        self.fig.circle(x, y, color=color, **point_kwargs)
if xerr is not None:
x_err_x = []
x_err_y = []
for px, py, err in zip(x, y, xerr):
x_err_x.append((px - err, px + err))
x_err_y.append((py, py))
self.fig.multi_line(x_err_x, x_err_y, color=color, **error_kwargs)
if yerr is not None:
y_err_x = []
y_err_y = []
for px, py, err in zip(x, y, yerr):
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
self.fig.multi_line(y_err_x, y_err_y, color=color, **error_kwargs)
    def add_step_line(self, x, y, legend=None):
        self.fig.step(x, y, name=legend, mode="center")
    def add_line(self, x, y, legend=None, color=None):
        self.fig.line(x, y, legend=legend, line_color=color)
def get_html_draw(self):
layout = row(
self.fig
)
#curdoc().add_root(layout)
#show(layout)
script, div = components(layout)
#print ('script',script)
#print ('div',div)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class GridPlot(object):
    def __init__(self, f1, f2, w=None, h=None):
        self.f1 = f1
        self.f2 = f2
def get_html_draw(self,w=None,h=None):
#l = layout([self.f1.fig],[self.f2.fig])
grid = gridplot([self.f1.fig,self.f2.fig],ncols=1,plot_width=w, plot_height=h)
#curdoc().add_root(grid)
#show(grid)
#output_file("test.html")
script, div = components(grid)
        html_dict = {}
        html_dict['script'] = script
html_dict['div'] = div
return html_dict
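# --- Usage sketch (illustrative, not part of the original file) ---
# The classes return {'script': ..., 'div': ...} fragments meant to be
# inlined in an HTML template alongside the Bokeh JS/CSS resources:
if __name__ == "__main__":
    sp = ScatterPlot(600, 400, x_label='time', y_label='rate')
    sp.add_errorbar([1, 2, 3], [4.0, 5.0, 6.0], yerr=[0.5, 0.4, 0.3])
    html_dict = sp.get_html_draw()
    print(sorted(html_dict))  # ['div', 'script']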
| 31.288462
| 132
| 0.547019
| 1,071
| 8,135
| 3.966387
| 0.225957
| 0.006591
| 0.014124
| 0.021186
| 0.250235
| 0.202448
| 0.130885
| 0.118644
| 0.09887
| 0.073446
| 0
| 0.012397
| 0.315796
| 8,135
| 259
| 133
| 31.409266
| 0.750808
| 0.153534
| 0
| 0.125
| 0
| 0
| 0.076158
| 0.024704
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078125
| false
| 0
| 0.070313
| 0
| 0.195313
| 0.007813
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13edd551b75e71fd96ef5d443c0c54bbec028a56
| 844
|
py
|
Python
|
test/unit/test_testaid_unit_pathlist.py
|
RebelCodeBase/testaid
|
998c827b826fe4374ecf0a234fef61a975e2fcd7
|
[
"Apache-2.0"
] | 17
|
2019-08-04T09:29:19.000Z
|
2020-05-16T02:25:20.000Z
|
test/unit/test_testaid_unit_pathlist.py
|
RebelCodeBase/testaid
|
998c827b826fe4374ecf0a234fef61a975e2fcd7
|
[
"Apache-2.0"
] | 12
|
2019-07-19T22:20:42.000Z
|
2020-01-20T06:45:38.000Z
|
test/unit/test_testaid_unit_pathlist.py
|
RebelCodeBase/testaid
|
998c827b826fe4374ecf0a234fef61a975e2fcd7
|
[
"Apache-2.0"
] | 3
|
2019-08-08T18:18:13.000Z
|
2019-10-07T13:46:03.000Z
|
from pathlib import Path
from testaid.pathlist import PathList
def test_testaid_unit_pathlist_roles_blacklist(testvars_roles_blacklist):
assert testvars_roles_blacklist is not None
def test_testaid_unit_pathlist_roles_whitelist(testvars_roles_whitelist):
assert testvars_roles_whitelist is not None
def test_testaid_unit_pathlist_get(tmp_path):
msd = tmp_path / 'molecule_scenario_directory'
dir1 = msd / 'dir1'
dir1.mkdir(parents=True)
dir2 = tmp_path / 'dir2'
dir2.mkdir()
file1 = dir1 / 'file1.yml'
file1.touch()
file2 = dir1 / 'file2.yml'
file2.touch()
file3 = dir2 / 'file3.yml'
file3.touch()
my_pathlist = [Path(file3), Path(file1), Path(file2)]
my_pathstring = 'dir1:../dir2/file3.yml'
pathlist = PathList(my_pathstring, msd)
assert pathlist.get() == my_pathlist
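# --- Usage note (mirrors the assertion above; illustrative only) ---
# A PathList is built from a colon-separated pathstring resolved against the
# molecule scenario directory; directory entries expand to the YAML files
# they contain:
#
#   PathList('dir1:../dir2/file3.yml', msd).get()
#   # -> [Path(file3), Path(file1), Path(file2)]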
| 29.103448
| 73
| 0.726303
| 112
| 844
| 5.196429
| 0.3125
| 0.089347
| 0.072165
| 0.092784
| 0.182131
| 0.182131
| 0.120275
| 0.120275
| 0
| 0
| 0
| 0.034532
| 0.17654
| 844
| 28
| 74
| 30.142857
| 0.802878
| 0
| 0
| 0
| 0
| 0
| 0.099526
| 0.058057
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.136364
| false
| 0
| 0.090909
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13ee6c7b533dc2afb81ce7087fea00c547679671
| 22,147
|
py
|
Python
|
tests/unit/zhmcclient/test_hba.py
|
vkpro-forks/python-zhmcclient
|
eab2dca37cb417d03411450dabf72805214b5ca0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/zhmcclient/test_hba.py
|
vkpro-forks/python-zhmcclient
|
eab2dca37cb417d03411450dabf72805214b5ca0
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/zhmcclient/test_hba.py
|
vkpro-forks/python-zhmcclient
|
eab2dca37cb417d03411450dabf72805214b5ca0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _hba module.
"""
from __future__ import absolute_import, print_function
import pytest
import re
import copy
from zhmcclient import Client, Hba, HTTPError, NotFound
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
# Object IDs and names of our faked HBAs:
HBA1_OID = 'hba 1-oid'
HBA1_NAME = 'hba 1'
HBA2_OID = 'hba 2-oid'
HBA2_NAME = 'hba 2'
# URIs and Object IDs of elements referenced in HBA properties:
FCP1_OID = 'fake-fcp1-oid'
PORT11_OID = 'fake-port11-oid'
PORT11_URI = '/api/adapters/{}/storage-ports/{}'.format(FCP1_OID, PORT11_OID)
class TestHba(object):
"""All tests for Hba and HbaManager classes."""
def setup_method(self):
"""
Set up a faked session, and add a faked CPC in DPM mode with one
partition that has no HBAs.
Add one FCP adapter and port.
"""
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
# Add a CPC in DPM mode
self.faked_cpc = self.session.hmc.cpcs.add({
'element-id': 'fake-cpc1-oid',
# element-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': 'fake-cpc1-name',
'description': 'CPC #1 (DPM mode)',
'status': 'active',
'dpm-enabled': True,
'is-ensemble-member': False,
'iml-mode': 'dpm',
})
self.cpc = self.client.cpcs.find(name='fake-cpc1-name')
# Add a partition to the CPC
self.faked_partition = self.faked_cpc.partitions.add({
'element-id': 'fake-part1-oid',
# element-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'partition',
'name': 'fake-part1-name',
'description': 'Partition #1',
'status': 'active',
'initial-memory': 1024,
'maximum-memory': 2048,
})
self.partition = self.cpc.partitions.find(name='fake-part1-name')
# Add an FCP adapter and port to the CPC
self.faked_fcp1 = self.faked_cpc.adapters.add({
'object-id': FCP1_OID,
'parent': self.faked_cpc.uri,
'class': 'adapter',
'name': 'fcp1',
'description': 'FCP #1',
'status': 'active',
'type': 'fcp',
'adapter-id': '123',
'detected-card-type': '10gbe-roce-express',
'card-location': '1234-5678-J.01',
'port-count': 1,
'network-port-uris': [],
'state': 'online',
'configured-capacity': 80,
'used-capacity': 0,
'allowed-capacity': 80,
'maximum-total-capacity': 80,
'physical-channel-status': 'operating',
})
self.faked_port11 = self.faked_fcp1.ports.add({
'element-id': PORT11_OID,
'parent': self.faked_fcp1.uri,
'class': 'storage-port',
'index': 1,
'name': 'fake-port11-name',
'description': 'FCP #1 Port #1',
})
assert PORT11_URI == self.faked_port11.uri
def add_hba1(self):
"""Add a faked HBA 1 to the faked partition."""
faked_hba = self.faked_partition.hbas.add({
'element-id': HBA1_OID,
# element-uri will be automatically set
'parent': self.faked_partition.uri,
'class': 'hba',
'name': HBA1_NAME,
'description': 'HBA ' + HBA1_NAME,
'adapter-port-uri': PORT11_URI,
'wwpn': 'AABBCCDDEEFF0011',
'device-number': '1111',
})
return faked_hba
def add_hba2(self):
"""Add a faked HBA 2 to the faked partition."""
faked_hba = self.faked_partition.hbas.add({
'element-id': HBA2_OID,
# element-uri will be automatically set
'parent': self.faked_partition.uri,
'class': 'hba',
'name': HBA2_NAME,
'description': 'HBA ' + HBA2_NAME,
'adapter-port-uri': PORT11_URI,
'wwpn': 'AABBCCDDEEFF0012',
'device-number': '1112',
})
return faked_hba
def test_hbamanager_initial_attrs(self):
"""Test initial attributes of HbaManager."""
hba_mgr = self.partition.hbas
# Verify all public properties of the manager object
assert hba_mgr.resource_class == Hba
assert hba_mgr.session == self.session
assert hba_mgr.parent == self.partition
assert hba_mgr.partition == self.partition
# TODO: Test for HbaManager.__repr__()
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(),
['element-uri']),
(dict(full_properties=False),
['element-uri']),
(dict(full_properties=True),
None),
]
)
def test_hbamanager_list_full_properties(
self, full_properties_kwargs, prop_names):
"""Test HbaManager.list() with full_properties."""
# Add two faked HBAs
faked_hba1 = self.add_hba1()
faked_hba2 = self.add_hba2()
exp_faked_hbas = [faked_hba1, faked_hba2]
hba_mgr = self.partition.hbas
# Execute the code to be tested
hbas = hba_mgr.list(**full_properties_kwargs)
assert_resources(hbas, exp_faked_hbas, prop_names)
@pytest.mark.parametrize(
"filter_args, exp_oids", [
({'element-id': HBA1_OID},
[HBA1_OID]),
({'element-id': HBA2_OID},
[HBA2_OID]),
({'element-id': [HBA1_OID, HBA2_OID]},
[HBA1_OID, HBA2_OID]),
({'element-id': [HBA1_OID, HBA1_OID]},
[HBA1_OID]),
({'element-id': HBA1_OID + 'foo'},
[]),
({'element-id': [HBA1_OID, HBA2_OID + 'foo']},
[HBA1_OID]),
({'element-id': [HBA2_OID + 'foo', HBA1_OID]},
[HBA1_OID]),
({'name': HBA1_NAME},
[HBA1_OID]),
({'name': HBA2_NAME},
[HBA2_OID]),
({'name': [HBA1_NAME, HBA2_NAME]},
[HBA1_OID, HBA2_OID]),
({'name': HBA1_NAME + 'foo'},
[]),
({'name': [HBA1_NAME, HBA2_NAME + 'foo']},
[HBA1_OID]),
({'name': [HBA2_NAME + 'foo', HBA1_NAME]},
[HBA1_OID]),
({'name': [HBA1_NAME, HBA1_NAME]},
[HBA1_OID]),
({'name': '.*hba 1'},
[HBA1_OID]),
({'name': 'hba 1.*'},
[HBA1_OID]),
({'name': 'hba .'},
[HBA1_OID, HBA2_OID]),
({'name': '.ba 1'},
[HBA1_OID]),
({'name': '.+'},
[HBA1_OID, HBA2_OID]),
({'name': 'hba 1.+'},
[]),
({'name': '.+hba 1'},
[]),
({'name': HBA1_NAME,
'element-id': HBA1_OID},
[HBA1_OID]),
({'name': HBA1_NAME,
'element-id': HBA1_OID + 'foo'},
[]),
({'name': HBA1_NAME + 'foo',
'element-id': HBA1_OID},
[]),
({'name': HBA1_NAME + 'foo',
'element-id': HBA1_OID + 'foo'},
[]),
]
)
def test_hbamanager_list_filter_args(self, filter_args, exp_oids):
"""Test HbaManager.list() with filter_args."""
# Add two faked HBAs
self.add_hba1()
self.add_hba2()
hba_mgr = self.partition.hbas
# Execute the code to be tested
hbas = hba_mgr.list(filter_args=filter_args)
assert len(hbas) == len(exp_oids)
if exp_oids:
oids = [hba.properties['element-id'] for hba in hbas]
assert set(oids) == set(exp_oids)
@pytest.mark.parametrize(
"initial_partition_status, exp_status_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_prop_exc", [
({},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-hba-x'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'adapter-port-uri': PORT11_URI},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-hba-x',
'adapter-port-uri': PORT11_URI},
['element-uri', 'name', 'adapter-port-uri'],
None),
]
)
def test_hbamanager_create(
self, input_props, exp_prop_names, exp_prop_exc,
initial_partition_status, exp_status_exc):
"""Test HbaManager.create()."""
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
hba_mgr = self.partition.hbas
if exp_status_exc:
exp_exc = exp_status_exc
elif exp_prop_exc:
exp_exc = exp_prop_exc
else:
exp_exc = None
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba = hba_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
# Note: the Hba object returned by Hba.create() has
# the input properties plus 'element-uri' plus 'element-id'.
hba = hba_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(hba, Hba)
hba_name = hba.name
exp_hba_name = hba.properties['name']
assert hba_name == exp_hba_name
hba_uri = hba.uri
exp_hba_uri = hba.properties['element-uri']
assert hba_uri == exp_hba_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in hba.properties
if prop_name in input_props:
value = hba.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_hba_repr(self):
"""Test Hba.__repr__()."""
# Add a faked hba
faked_hba = self.add_hba1()
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
# Execute the code to be tested
repr_str = repr(hba)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=hba.__class__.__name__,
id=id(hba)),
repr_str)
@pytest.mark.parametrize(
"initial_partition_status, exp_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
def test_hba_delete(self, initial_partition_status, exp_exc):
"""Test Hba.delete()."""
# Add a faked HBA to be tested and another one
faked_hba = self.add_hba1()
self.add_hba2()
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the HBA still exists
hba_mgr.find(name=faked_hba.name)
else:
# Execute the code to be tested.
hba.delete()
# Check that the HBA no longer exists
with pytest.raises(NotFound) as exc_info:
hba_mgr.find(name=faked_hba.name)
def test_hba_delete_create_same_name(self):
"""Test Hba.delete() followed by Hba.create() with same name."""
# Add a faked HBA to be tested and another one
faked_hba = self.add_hba1()
hba_name = faked_hba.name
self.add_hba2()
# Construct the input properties for a third HBA with same name
part3_props = copy.deepcopy(faked_hba.properties)
part3_props['description'] = 'Third HBA'
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # deletable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=hba_name)
# Execute the deletion code to be tested.
hba.delete()
# Check that the HBA no longer exists
with pytest.raises(NotFound):
hba_mgr.find(name=hba_name)
# Execute the creation code to be tested.
hba_mgr.create(part3_props)
# Check that the HBA exists again under that name
hba3 = hba_mgr.find(name=hba_name)
description = hba3.get_property('description')
assert description == 'Third HBA'
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New HBA description'},
{'device-number': 'FEDC',
'description': 'New HBA description'},
]
)
def test_hba_update_properties(self, input_props):
"""Test Hba.update_properties()."""
# Add a faked HBA
faked_hba = self.add_hba1()
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # updatable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
hba.pull_full_properties()
saved_properties = copy.deepcopy(hba.properties)
# Execute the code to be tested
hba.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in hba.properties
prop_value = hba.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
hba.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in hba.properties
prop_value = hba.properties[prop_name]
assert prop_value == exp_prop_value
def test_hba_update_name(self):
"""Test Hba.update_properties() with 'name' property."""
# Add a faked HBA
faked_hba = self.add_hba1()
hba_name = faked_hba.name
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # updatable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=hba_name)
new_hba_name = "new-" + hba_name
# Execute the code to be tested
hba.update_properties(properties={'name': new_hba_name})
# Verify that the resource is no longer found by its old name, using
# list() (this does not use the name-to-URI cache).
hbas_list = hba_mgr.list(
filter_args=dict(name=hba_name))
assert len(hbas_list) == 0
# Verify that the resource is no longer found by its old name, using
# find() (this uses the name-to-URI cache).
with pytest.raises(NotFound):
hba_mgr.find(name=hba_name)
# Verify that the resource object already reflects the update, even
# though it has not been refreshed yet.
assert hba.properties['name'] == new_hba_name
# Refresh the resource object and verify that it still reflects the
# update.
hba.pull_full_properties()
assert hba.properties['name'] == new_hba_name
# Verify that the resource can be found by its new name, using find()
new_hba_find = hba_mgr.find(name=new_hba_name)
assert new_hba_find.properties['name'] == new_hba_name
# Verify that the resource can be found by its new name, using list()
new_hbas_list = hba_mgr.list(
filter_args=dict(name=new_hba_name))
assert len(new_hbas_list) == 1
new_hba_list = new_hbas_list[0]
assert new_hba_list.properties['name'] == new_hba_name
@pytest.mark.parametrize(
"initial_partition_status, exp_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
def test_hba_reassign_port(self, initial_partition_status, exp_exc):
"""Test Hba.reassign_port()."""
# Add a faked HBA to be tested.
# Its port points to a faked URI.
faked_hba = self.add_hba1()
# Add a faked FCP with one port that the HBA will be reassigned to
faked_adapter = self.faked_cpc.adapters.add({
'object-id': 'fake-fcp1-oid',
# object-uri is auto-set based upon object-id
'parent': self.faked_cpc.uri,
'class': 'adapter',
'name': 'fake-fcp1',
'description': 'FCP #1',
'status': 'active',
'type': 'fcp',
# adapter-family is auto-set based upon type
'adapter-id': '123',
'detected-card-type': 'ficon-express-16s',
'card-location': '1234-5678-J.01',
'port-count': 1,
'storage-port-uris': [],
'state': 'online',
'configured-capacity': 80,
'used-capacity': 0,
'allowed-capacity': 80,
'maximum-total-capacity': 80,
'channel-path-id': '1B',
'physical-channel-status': 'operating',
})
adapter = self.cpc.adapters.find(name='fake-fcp1')
faked_adapter.ports.add({
'element-id': 'fake-port1-oid',
# element-uri is auto-set based upon object-id
'parent': faked_adapter.uri,
'class': 'storage-port',
'name': 'fake-port1',
'description': 'FCP #1 Port 1',
'index': 0,
'fabric-id': None,
})
port = adapter.ports.find(name='fake-port1')
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
# The HBA object we will perform the test on
hba = self.partition.hbas.find(name=faked_hba.name)
# Save the HBA properties for later comparison
hba.pull_full_properties()
saved_properties = copy.deepcopy(hba.properties)
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba.reassign_port(port)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the port of the HBA is unchanged ...
prop_name = 'adapter-port-uri'
# ... in the resource object:
assert hba.properties[prop_name] == saved_properties[prop_name]
# ... and again when refreshed from the mock state:
hba.pull_full_properties()
assert hba.properties[prop_name] == saved_properties[prop_name]
else:
# Execute the code to be tested.
hba.reassign_port(port)
# Check that the port of the HBA has been set ...
# ... in the resource object:
prop_name = 'adapter-port-uri'
assert hba.properties[prop_name] == port.uri
# ... and again when refreshed from the mock state:
hba.pull_full_properties()
assert hba.properties[prop_name] == port.uri
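# --- Usage sketch (illustrative, not part of the original file) ---
# The suite runs entirely against zhmcclient_mock's faked HMC, so no real
# HMC is needed, e.g.:
#   pytest tests/unit/zhmcclient/test_hba.py -v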
| 34.658842
| 77
| 0.557457
| 2,608
| 22,147
| 4.547546
| 0.128451
| 0.018887
| 0.013491
| 0.015346
| 0.592074
| 0.52403
| 0.490051
| 0.426391
| 0.391062
| 0.350422
| 0
| 0.01895
| 0.325687
| 22,147
| 638
| 78
| 34.713166
| 0.775211
| 0.192441
| 0
| 0.532864
| 0
| 0
| 0.149692
| 0.01476
| 0
| 0
| 0
| 0.001567
| 0.084507
| 1
| 0.030516
| false
| 0
| 0.016432
| 0
| 0.053991
| 0.002347
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13ef6d428251649b315fbe8757c2d7336d7471a8
| 367
|
py
|
Python
|
compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 2,338
|
2018-06-19T17:34:51.000Z
|
2022-03-31T11:00:37.000Z
|
compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 3,740
|
2019-01-23T15:36:48.000Z
|
2022-03-31T22:01:13.000Z
|
compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py
|
medismailben/llvm-project
|
e334a839032fe500c3bba22bf976ab7af13ce1c1
|
[
"Apache-2.0"
] | 500
|
2019-01-23T07:49:22.000Z
|
2022-03-30T02:59:37.000Z
|
def getRoot(config):
if not config.parent:
return config
return getRoot(config.parent)
root = getRoot(config)
# We only run a small set of tests on Windows for now.
# Override the parent directory's "unsupported" decision until we can handle
# all of its tests.
if root.host_os in ['Windows']:
config.unsupported = False
else:
config.unsupported = True
| 24.466667
| 76
| 0.73842
| 56
| 367
| 4.821429
| 0.660714
| 0.144444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.179837
| 367
| 14
| 77
| 26.214286
| 0.89701
| 0.395095
| 0
| 0
| 0
| 0
| 0.03211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f22ca29da54a0f4486e1f8539ee236d259fc1e
| 5,960
|
py
|
Python
|
efetch_server/plugins/fa_sqlite/fa_sqlite_ajax.py
|
Syrkadian/efetch
|
120ac963507d54998beecfd8b8cd85ad123e6e54
|
[
"Apache-2.0"
] | 38
|
2015-08-18T00:29:16.000Z
|
2021-12-06T15:53:47.000Z
|
efetch_server/plugins/fa_sqlite/fa_sqlite_ajax.py
|
Syrkadian/efetch
|
120ac963507d54998beecfd8b8cd85ad123e6e54
|
[
"Apache-2.0"
] | 20
|
2016-03-18T02:20:27.000Z
|
2020-04-09T22:16:42.000Z
|
efetch_server/plugins/fa_sqlite/fa_sqlite_ajax.py
|
Syrkadian/efetch
|
120ac963507d54998beecfd8b8cd85ad123e6e54
|
[
"Apache-2.0"
] | 8
|
2016-08-23T14:59:15.000Z
|
2020-04-09T21:43:25.000Z
|
"""
AJAX for SQLite Viewer plugin
"""
from yapsy.IPlugin import IPlugin
from flask import Response, jsonify
import json
import logging
import sqlite3
class FaSqliteAjax(IPlugin):
def __init__(self):
self.display_name = 'SQLite Ajax'
self.popularity = 0
self.cache = True
self.fast = False
self.action = False
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def check(self, evidence, path_on_disk):
"""Checks if the file is compatible with this plugin"""
return True
def mimetype(self, mimetype):
"""Returns the mimetype of this plugins get command"""
return "application/json"
def get(self, evidence, helper, path_on_disk, request):
"""Returns the result of this plugin to be displayed in a browser"""
method = helper.get_request_value(request, 'method', raise_key_error=True)
if method == "base":
return self.base_tree(path_on_disk)
elif method == "children":
return self.get_children(request, helper, path_on_disk)
elif method == "values":
return self.values(request, helper, path_on_disk)
logging.error('Unknown method "' + method + '" provided')
raise ValueError('Method "' + method + '" is not valid')
def base_tree(self, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
base_tree = []
cursor.execute("SELECT * FROM sqlite_master WHERE type='table';")
cursor.fetchone()
# Master Table
base_tree.append({'title': u'Master Table (1)',
'key': u'master',
'folder': True,
'lazy': True
})
# Tables
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
base_tree.append({'title': u'Tables (' + unicode(len(tables)) + u')',
'key': u'table',
'folder': True,
'lazy': True
})
# Views
cursor.execute("SELECT name FROM sqlite_master WHERE type='view';")
views = cursor.fetchall()
base_tree.append({'title': u'Views (' + unicode(len(views)) + u')',
'key': u'view',
'folder': True,
'lazy': True
})
# Indexes
cursor.execute("SELECT name FROM sqlite_master WHERE type='index';")
indexes = cursor.fetchall()
base_tree.append({'title': u'Indexes (' + unicode(len(indexes)) + u')',
'key': u'index',
'folder': True,
'lazy': True
})
# Triggers
cursor.execute("SELECT name FROM sqlite_master WHERE type='trigger';")
triggers = cursor.fetchall()
base_tree.append({'title': u'Triggers (' + unicode(len(triggers)) + u')',
'key': u'trigger',
'folder': True,
'lazy': True
})
connection.close()
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(base_tree), mimetype='application/json')
def get_children(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
children = []
if key == u'master':
children.append({'title': u'Master Table (1)',
'key': u'sqlite_master',
'folder': False,
'lazy': False
})
else:
for child in self.get_tables(key, path_on_disk):
children.append({'title': child,
'key': child,
'folder': False,
'lazy': False
})
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(children), mimetype='application/json')
def get_tables(self, key, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
tables = []
table_list = cursor.execute("SELECT name FROM sqlite_master WHERE type='" + key + "';")
for table in table_list:
tables.append(unicode(table[0]))
connection.close()
return tables
def values(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
cursor.execute("pragma table_info('" + key + "')")
rows = cursor.fetchall()
table = [ u'<table id="sqlitet01" class="display">', u' <thead><tr>' ]
for row in rows:
table.append(u' <th>' + unicode(row[1]) + u'</th>')
table.append(u' </tr> </thead>')
cursor.execute('SELECT * FROM ' + key)
rows = cursor.fetchall()
for row in rows:
table.append(u' <tr>')
for item in row:
try:
table.append(u' <td>' + unicode(item) + u'</td>')
                except Exception:
table.append(u' <td>' + unicode(type(item)) + u'</td>')
table.append(u' </tr>')
table.append(u'</table>')
connection.close()
return jsonify({'table': '\n'.join(table)})
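# --- Usage sketch (illustrative; Efetch normally drives this through its
# plugin loader, so direct calls like these are assumptions) ---
# Under Python 2 (the module relies on `unicode`):
#
#   plugin = FaSqliteAjax()
#   plugin.mimetype(None)                      # -> 'application/json'
#   resp = plugin.base_tree('/tmp/sample.db')  # Flask Response, root tree JSON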
| 35.266272
| 133
| 0.51896
| 620
| 5,960
| 4.882258
| 0.217742
| 0.025768
| 0.042947
| 0.041625
| 0.428477
| 0.358441
| 0.346878
| 0.286092
| 0.267592
| 0.169805
| 0
| 0.007055
| 0.357886
| 5,960
| 169
| 134
| 35.266272
| 0.783904
| 0.08104
| 0
| 0.308943
| 0
| 0
| 0.158011
| 0
| 0
| 0
| 0
| 0.005917
| 0
| 1
| 0.081301
| false
| 0
| 0.04065
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f2af320410f86bedc3a6ddc8c44eb547f14053
| 252
|
py
|
Python
|
raspagem/random/lista_cidades.py
|
sslppractice/propython
|
fa470c3bf0dcfbb26037146d77c7491596cabb26
|
[
"MIT"
] | null | null | null |
raspagem/random/lista_cidades.py
|
sslppractice/propython
|
fa470c3bf0dcfbb26037146d77c7491596cabb26
|
[
"MIT"
] | null | null | null |
raspagem/random/lista_cidades.py
|
sslppractice/propython
|
fa470c3bf0dcfbb26037146d77c7491596cabb26
|
[
"MIT"
] | null | null | null |
import requests
import json
url = 'http://educacao.dadosabertosbr.com/api/cidades/ce'
cidades = requests.get(url).content
cidades = cidades.decode('utf-8')
cidades = json.loads(cidades)
for cidade in cidades:
codigo, nome = cidade.split(':')
print(nome)
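# --- Note (illustrative) ---
# Each entry from the API appears to come back as a single
# "<ibge_code>:<name>" string, e.g. "2304400:Fortaleza", which is why the
# split on ':' above discards the code and prints only the city name.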
| 22.909091
| 57
| 0.734127
| 35
| 252
| 5.285714
| 0.685714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004464
| 0.111111
| 252
| 10
| 58
| 25.2
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0.218254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f3a6d9012ba4c4473a1ffb1f1db1418326ee1f
| 7,566
|
py
|
Python
|
src/autonomous/purepursuit.py
|
Sloomey/DeepSpace2019
|
dda035c0ac100209b03a2ff04d86df09c6de9a85
|
[
"MIT"
] | null | null | null |
src/autonomous/purepursuit.py
|
Sloomey/DeepSpace2019
|
dda035c0ac100209b03a2ff04d86df09c6de9a85
|
[
"MIT"
] | null | null | null |
src/autonomous/purepursuit.py
|
Sloomey/DeepSpace2019
|
dda035c0ac100209b03a2ff04d86df09c6de9a85
|
[
"MIT"
] | null | null | null |
import math
from constants import Constants
from utils import vector2d
from wpilib import SmartDashboard as Dash
from autonomous import pursuitpoint
class PurePursuit():
"""An implementation of the Pure Pursuit path tracking algorithm."""
def __init__(self, path):
self.path = path
self.pursuit_points = [pursuitpoint.PursuitPoint(p, c) for p, c in zip(
self.path.getPoints(), self.path.getCurvatures())]
self.last_lookahead_index = 0
self.cur_curvature = 0
self.target_velocities = vector2d.Vector2D()
self.closest_point_index = 0
def computeVelocities(self):
"""Compute the velocities along the path."""
# Compute the velocities along the path using the curvature and Constants.CURVE_VELOCITY
for ppoint in self.pursuit_points:
if abs(ppoint.curvature) <= Constants.CURVATURE_THRESHOLD:
velocity = Constants.MAX_VELOCITY
else:
velocity = min(Constants.MAX_VELOCITY,
Constants.CURVE_VELOCITY/ppoint.curvature)
ppoint.velocity = velocity
# Limit the acceleration of the velocities
for i in reversed(range(0, len(self.pursuit_points)-1)):
distance = self.pursuit_points[i].point.getDistance(
self.pursuit_points[i+1].point)
new_velocity = math.sqrt(
self.pursuit_points[i+1].velocity**2 + (2 * Constants.MAX_ACCELERATION * distance))
new_velocity = min(self.pursuit_points[i].velocity, new_velocity)
self.pursuit_points[i].velocity = new_velocity
def updateLookaheadPointIndex2(self, state):
"""Update the lookahead point given the current robot state.
Uses the minimum distance point if the state is more than
Constants.LOOKAHEAD_DIST from all points, otherwise uses the
closes point to self.loohead_distance"""
# Compute point distances to state and differences from those distances to Constants.LOOKAHEAD_DIST
distances = [math.hypot(state.x - ppoint.point.x,
state.y - ppoint.point.y) for ppoint in self.pursuit_points]
differences = [abs(d-Constants.LOOKAHEAD_DIST) for d in distances]
min_distance = min(distances)
# Get new lookahead index
if min_distance <= Constants.LOOKAHEAD_DIST:
self.last_lookahead_index = differences.index(min(differences))
else:
self.last_lookahead_index = distances.index(min_distance)
def updateLookaheadPointIndex(self, state):
"""Loop over the points in the path to get the lookahead point given the current robot state."""
for i in range(self.last_lookahead_index, len(self.pursuit_points)-1):
lookahead = self.computeLookaheadPoint(
self.pursuit_points[i].point, self.pursuit_points[i+1].point, state)
if lookahead != None:
self.last_lookahead_index = i
def computeLookaheadPoint(self, start, end, state):
"""Compute the lookahead point given the current robot state.
Returns a point if the current state is Constants.LOOKAHEAD_DIST
from between start and end, otherwise returns None."""
# Algorithm for circle line segment intersection found here: https://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm/1084899#1084899
segment_direction = end - start
center_to_start = start - state
a = segment_direction * segment_direction
b = 2 * (center_to_start * segment_direction)
c = (center_to_start * center_to_start) - Constants.LOOKAHEAD_DIST ** 2
discriminant = b**2 - (4 * a * c)
if discriminant < 0:
return None
else:
discriminant = math.sqrt(discriminant)
t0 = (-b - discriminant) / (2 * a)
t1 = (-b + discriminant) / (2 * a)
if t0 >= 0 and t0 <= 1:
return start + t0 * segment_direction
if t1 >= 0 and t1 <= 1:
return start + t1 * segment_direction
return None
def updateCurvature(self, state):
"""Update the curvature from the current lookahead point to the current robot position."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
# Transform the lookahead and state.pos to get an aligned vector
transform = lookahead - state.pos
transform = transform.getRotated(-state.angle)
# Use the transformed vector to calculate the curvature (derived from https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf#page=12)
self.cur_curvature = (2 * transform.x) / Constants.LOOKAHEAD_DIST**2
def updateClosestPointIndex(self, state):
"""Update the index of the closest point to the current robot position."""
index = self.closest_point_index
smallest_distance = self.pursuit_points[index].point.getDistance(state)
for i in range(0, len(self.pursuit_points)):
distance = self.pursuit_points[i].point.getDistance(state)
if smallest_distance > distance:
smallest_distance = distance
index = i
self.closest_point_index = index
def updateTargetVelocities(self, state):
"""Update the target velocities of the left and right wheels."""
robot_velocity = self.pursuit_points[self.closest_point_index].velocity
# Use kinematics (http://robotsforroboticists.com/drive-kinematics/) and algebra to find wheel target velocties
l_velocity = robot_velocity * \
(2 + self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
r_velocity = robot_velocity * \
(2 - self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
scale = max(abs(l_velocity), abs(r_velocity))
if scale > 1:
l_velocity /= scale
r_velocity /= scale
self.target_velocities = vector2d.Vector2D(l_velocity, r_velocity)
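    # Worked example of the wheel-speed relation above (illustrative numbers,
    # ignoring the Constants.PURE_PURSUIT_KV scaling): for robot_velocity =
    # 2.0, curvature = 0.25 and TRACK_WIDTH = 0.6, the left wheel gets
    # 2.0 * (2 + 0.15) / 2 = 2.15 and the right 2.0 * (2 - 0.15) / 2 = 1.85,
    # steering the robot toward the lookahead point.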
def update(self, state):
"""Update the pure pursuit follower(runs all update functions)."""
# TODO which lookahead function to use
self.updateLookaheadPointIndex(state.pos)
# self.updateLookaheadPointIndex2(state.pos)
self.updateCurvature(state)
self.updateClosestPointIndex(state.pos)
self.updateTargetVelocities(state.pos)
def outputToSmartDashboard(self):
"""Output values to the smart dashboard."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
closest = self.pursuit_points[self.closest_point_index].point
Dash.putNumberArray("Lookahead Point", [lookahead.x, lookahead.y])
Dash.putNumber("Curvature", self.cur_curvature)
Dash.putNumberArray("Closes Point", [closest.x, closest.y])
Dash.putNumberArray("Target Velocities", [
self.target_velocities.x, self.target_velocities.y])
#print("Lookahead Point - {}".format(lookahead))
#print("Curvature - {}".format(self.cur_curvature))
#print("Closes Point - {}".format(closest))
#print("Target Velocities - {}".format(self.target_velocities))
# print("------------------------------")
def isDone(self):
"""Check if the path is done being followed."""
return (len(self.pursuit_points) - self.closest_point_index) <= 1
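# A commented usage sketch of the follower above (hypothetical robot
# interface; state is assumed to expose .pos, .x, .y and .angle as update()
# requires):
#
#     follower = PurePursuitFollower(path_points)   # hypothetical constructor
#     while not follower.isDone():
#         follower.update(robot.get_state())        # hypothetical state source
#         left = follower.target_velocities.x
#         right = follower.target_velocities.y
#         robot.set_wheel_outputs(left, right)      # hypothetical actuator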
| 50.10596
| 178
| 0.655432
| 886
| 7,566
| 5.455982
| 0.216704
| 0.045511
| 0.070335
| 0.029789
| 0.235416
| 0.195283
| 0.13736
| 0.081092
| 0.055027
| 0.033099
| 0
| 0.014137
| 0.252049
| 7,566
| 150
| 179
| 50.44
| 0.840078
| 0.266191
| 0
| 0.086538
| 0
| 0
| 0.00977
| 0
| 0
| 0
| 0
| 0.006667
| 0
| 1
| 0.105769
| false
| 0
| 0.048077
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f4c5d6b839fc74a59e3720afa044833541c6ea
| 8,661
|
py
|
Python
|
esphome/voluptuous_schema.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
esphome/voluptuous_schema.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
esphome/voluptuous_schema.py
|
TheEggi/esphomeyaml
|
98e8cc1edc7b29891e8100eb484922e5c2d4fc33
|
[
"MIT"
] | null | null | null |
import difflib
import itertools
import voluptuous as vol
from esphome.py_compat import string_types
class ExtraKeysInvalid(vol.Invalid):
def __init__(self, *arg, **kwargs):
self.candidates = kwargs.pop('candidates')
vol.Invalid.__init__(self, *arg, **kwargs)
def ensure_multiple_invalid(err):
if isinstance(err, vol.MultipleInvalid):
return err
return vol.MultipleInvalid(err)
# pylint: disable=protected-access, unidiomatic-typecheck
class _Schema(vol.Schema):
"""Custom cv.Schema that prints similar keys on error."""
def __init__(self, schema, extra=vol.PREVENT_EXTRA, extra_schemas=None):
super(_Schema, self).__init__(schema, extra=extra)
# List of extra schemas to apply after validation
        # Should be used sparingly, as it is not a very clean or
        # voluptuous-idiomatic way of doing things.
self._extra_schemas = extra_schemas or []
def __call__(self, data):
res = super(_Schema, self).__call__(data)
for extra in self._extra_schemas:
try:
res = extra(res)
except vol.Invalid as err:
raise ensure_multiple_invalid(err)
return res
def _compile_mapping(self, schema, invalid_msg=None):
invalid_msg = invalid_msg or 'mapping value'
# Check some things that ESPHome's schemas do not allow
# mostly to keep the logic in this method sane (so these may be re-added if needed).
for key in schema:
if key is vol.Extra:
raise ValueError("ESPHome does not allow vol.Extra")
if isinstance(key, vol.Remove):
raise ValueError("ESPHome does not allow vol.Remove")
if isinstance(key, vol.primitive_types):
raise ValueError("All schema keys must be wrapped in cv.Required or cv.Optional")
# Keys that may be required
all_required_keys = set(key for key in schema if isinstance(key, vol.Required))
# Keys that may have defaults
all_default_keys = set(key for key in schema if isinstance(key, vol.Optional))
# Recursively compile schema
_compiled_schema = {}
for skey, svalue in vol.iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
# Sort compiled schema (probably not necessary for esphome, but leave it here just in case)
candidates = list(vol.schema_builder._iterate_mapping_candidates(_compiled_schema))
        # After we have the list of candidates in the correct order, we want to
        # apply an optimization so that each key in the data being validated is
        # matched against the relevant schema keys only. There is no point in
        # matching against unrelated keys.
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in vol.primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, vol.Marker) and type(skey.schema) in vol.primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be
# applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
key_names = []
for skey in schema:
if isinstance(skey, string_types):
key_names.append(skey)
elif isinstance(skey, vol.Marker) and isinstance(skey.schema, string_types):
key_names.append(skey.schema)
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
            # The type(out) is used to retain ordering in case an ordered
            # map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, vol.Undefined) and key.schema not in key_value_map:
# A default value has been specified for this missing key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []),
additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except vol.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
try:
cval = cvalue(key_path, value)
out[new_key] = cval
except vol.MultipleInvalid as e:
exception_errors.extend(e.errors)
except vol.Invalid as e:
exception_errors.append(e)
if exception_errors:
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if self.extra == vol.ALLOW_EXTRA:
out[key] = value
elif self.extra != vol.REMOVE_EXTRA:
if isinstance(key, string_types) and key_names:
matches = difflib.get_close_matches(key, key_names)
errors.append(ExtraKeysInvalid('extra keys not allowed', key_path,
candidates=matches))
else:
errors.append(vol.Invalid('extra keys not allowed', key_path))
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = getattr(key, 'msg', None) or 'required key not provided'
errors.append(vol.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise vol.MultipleInvalid(errors)
return out
return validate_mapping
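    # A minimal standalone sketch of the difflib suggestion mechanism used in
    # validate_mapping above (made-up keys, illustrative only):
    #
    #     import difflib
    #     key_names = ['update_interval', 'name', 'pin']
    #     difflib.get_close_matches('update_intervall', key_names)
    #     # -> ['update_interval']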
def add_extra(self, validator):
validator = _Schema(validator)
self._extra_schemas.append(validator)
return self
# pylint: disable=arguments-differ
def extend(self, *schemas, **kwargs):
extra = kwargs.pop('extra', None)
if kwargs:
            raise ValueError("Unexpected keyword arguments: {}".format(kwargs))
if not schemas:
return self.extend({})
if len(schemas) != 1:
ret = self
for schema in schemas:
ret = ret.extend(schema)
return ret
schema = schemas[0]
if isinstance(schema, vol.Schema):
schema = schema.schema
ret = super(_Schema, self).extend(schema, extra=extra)
return _Schema(ret.schema, extra=ret.extra, extra_schemas=self._extra_schemas)
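# A commented usage sketch of _Schema (hedged; the vol.Required/vol.Optional
# markers stand in for what ESPHome's cv wrappers produce):
#
#     schema = _Schema({vol.Required('name'): str,
#                       vol.Optional('port', default=80): int})
#     schema({'name': 'node'})   # -> {'name': 'node', 'port': 80}
#     schema({'nam': 'node'})    # -> raises vol.MultipleInvalid containing an
#                                #    ExtraKeysInvalid with candidates=['name']
#                                #    plus a missing-'name' error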
| 43.305
| 99
| 0.564831
| 992
| 8,661
| 4.792339
| 0.240927
| 0.018511
| 0.008414
| 0.015145
| 0.12032
| 0.089188
| 0.053429
| 0.037863
| 0.037863
| 0.017669
| 0
| 0.000362
| 0.362776
| 8,661
| 199
| 100
| 43.522613
| 0.861026
| 0.205173
| 0
| 0.090226
| 0
| 0
| 0.033017
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06015
| false
| 0
| 0.030075
| 0
| 0.172932
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f5928fe05ccf64858c18af5eff2188153c32e0
| 20,738
|
py
|
Python
|
semisupervised/DensityPeaks.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | 3
|
2021-12-10T09:04:18.000Z
|
2022-01-22T15:03:19.000Z
|
semisupervised/DensityPeaks.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | 107
|
2021-12-02T07:43:11.000Z
|
2022-03-31T11:02:46.000Z
|
semisupervised/DensityPeaks.py
|
dpr1005/Semisupervised-learning-and-instance-selection-methods
|
646d9e729c85322e859928e71a3241f2aec6d93d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: Daniel Puente Ramírez
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
Li, J., Zhu, Q., & Wu, Q. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
Wu, D., Shang, M., Luo, X., Xu, J., Yan, H., Deng, W., & Wang, G. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
        Auto select the local density threshold so that the average number of
        neighbors is 1-2 percent of all nodes.
        :return: dc, the local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
def __select_dc(self):
"""
        Select the local density threshold; the default is the method used in
        the paper, 'auto' performs automatic selection.
        :return: dc, the local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector that index is the point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
            If the distance between two points is less than the cutoff
            distance, return 1, otherwise return 0
            :param dij: distance between points i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
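    # Worked comparison of the two kernels above (illustrative numbers): for
    # dij = 0.5 and dc = 1.0, gauss_func returns exp(-0.25) ≈ 0.779 (a smooth,
    # graded contribution), while cutoff_func returns 1 (a hard 0/1 vote).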
def __min_neighbor_and_distance(self):
"""
        Compute each point's minimum distance to a point of higher local
        density (which is its nearest such neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the nearest neighbor of the nearest neighbor
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
        Natural-neighbor search: for each point, grow the neighborhood size r
        until the number of points with no reverse neighbors stops changing,
        then take each point's natural neighbors as the intersection of its
        k-nearest neighbors and its reverse nearest neighbors.
        :return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
> The function takes in the dataframe, the list of indices of the
unlabeled data, the list of indices of the neighbors of the unlabeled
data, and the number of neighbors to use in the KNN classifier. It
then creates a new dataframe with the labeled data and the unlabeled
data, and uses the KNN classifier to predict the labels of the
unlabeled data. It then checks if the predicted label is the same as
the label of the majority of the neighbors of the unlabeled data. If
it is, then it adds the index of the unlabeled data to the list of
indices of the data to be labeled
:param fx: the indexes of the unlabeled data
:param nan: a list of lists, where each list contains the indices of the
neighbors of a sample
:param r: the number of neighbors to consider
:return: The indexes of the samples that are going to be labeled and the
labels that are going to be assigned to them.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
It takes in the lower and upper bounds of the data, and the data itself,
and then calculates the distances between the data points,
the maximum distance, the minimum distance, the dc value, the rho
value, the delta value, the number of neighbors, and the structure
of the data
:param low: lower bound of the data
:param u: upper bound of the data
:param y: the labels of the data
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
The function takes in a classifier, and then labels the next point,
and then labels the previous points, without filtering.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
self.structure_stdnpf.at[filtered_indexes,
"label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
> This function takes the results of the model and compares them to the
complete data set. If the result is not in the complete data set, it is
added to the structure data set.
:param complete: the complete dataset
:param result: the result of the clustering
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
for r in results_to_unlabeled:
self.structure_stdnpf.at[np.array(self.structure_stdnpf["sample"], r)][
"label"
] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = pd.DataFrame(self.y)
result, _ = self.filter.filter_original_complete(
original, original_y, complete, complete_y
)
else:
result, _ = self.filter.filter(complete, complete_y)
return result
def fit(self, samples, y):
"""Fit method."""
try:
l, u, y = split(samples, y)
except IndexError:
raise ValueError("Dimensions do not match.")
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
self.__init_values(l, u, y)
if self.filtering:
self._fit_stdpnf()
else:
self._fit_without()
def predict(self, src):
"""
        Predict based on the trained classifier.
        :param src: the samples to predict
        :return: the predicted labels
"""
if self.classifier is None:
raise AssertionError("The model needs to be fitted first.")
return self.classifier.predict(src)
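# A commented usage sketch (hedged): split() above is expected to separate the
# samples into labeled (l) and unlabeled (u) parts based on y, and the code
# treats label -1 as unlabeled, so a typical semi-supervised call looks like:
#
#     model = STDPNF(k=3, filtering=False)
#     model.fit(X, y)            # y marks unlabeled rows
#     predictions = model.predict(X_new)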
| 35.268707
| 86
| 0.580384
| 2,615
| 20,738
| 4.456597
| 0.142639
| 0.014158
| 0.011412
| 0.013214
| 0.252703
| 0.20671
| 0.165866
| 0.136005
| 0.12485
| 0.120817
| 0
| 0.009857
| 0.3347
| 20,738
| 587
| 87
| 35.32879
| 0.834819
| 0.264876
| 0
| 0.165192
| 0
| 0
| 0.020728
| 0
| 0
| 0
| 0
| 0
| 0.00295
| 1
| 0.064897
| false
| 0
| 0.032448
| 0
| 0.138643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f713d62e74a1cd787ec98b134812d16f5287ea
| 933
|
py
|
Python
|
N-aryTreeLevelOrderTraversal429.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
N-aryTreeLevelOrderTraversal429.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
N-aryTreeLevelOrderTraversal429.py
|
Bit64L/LeetCode-Python-
|
64847cbb1adcaca4561b949e8acc52e8e031a6cb
|
[
"MIT"
] | null | null | null |
"""
# Definition for a Node.
"""
class TreeNode(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
if root is None:
return []
        try:
            from queue import Queue  # Python 3
        except ImportError:
            from Queue import Queue  # Python 2
que = Queue()
que.put(root)
ans, tmp, k = [], [], 1
while que.qsize() != 0:
node = que.get()
tmp.append(node.val)
k -= 1
for child in node.children:
que.put(child)
if k == 0:
k = que.qsize()
ans.append(list(tmp))
tmp = []
return ans
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.levelOrder(node1))
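
# An equivalent level-order traversal using collections.deque, sketched for
# comparison (same output as the Queue-based version above):
from collections import deque


def level_order(root):
    """Return node values grouped by depth, one list per level."""
    if root is None:
        return []
    ans, level = [], deque([root])
    while level:
        ans.append([node.val for node in level])
        level = deque(child for node in level for child in node.children)
    return ans


print(level_order(node1))  # [[1], [2, 3]]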
| 20.733333
| 39
| 0.485531
| 103
| 933
| 4.359223
| 0.436893
| 0.040089
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022569
| 0.382637
| 933
| 44
| 40
| 21.204545
| 0.756944
| 0.069668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.241379
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f7593938a4204f0e27844ca0c493ca0b47ec5f
| 16,444
|
py
|
Python
|
plugin.video.team.milhanos/websocket/_core.py
|
akuala/REPO.KUALA
|
ea9a157025530d2ce8fa0d88431c46c5352e89d4
|
[
"Apache-2.0"
] | 2
|
2018-11-02T19:55:30.000Z
|
2020-08-14T02:22:20.000Z
|
venv/lib/python3.5/site-packages/websocket/_core.py
|
dukakisxyz/wifiportal21-map
|
1f1917c2f3c2987f7a88cc537d7c50449d144ea0
|
[
"MIT"
] | null | null | null |
venv/lib/python3.5/site-packages/websocket/_core.py
|
dukakisxyz/wifiportal21-map
|
1f1917c2f3c2987f7a88cc537d7c50449d144ea0
|
[
"MIT"
] | 3
|
2019-12-17T20:47:00.000Z
|
2021-02-11T19:03:59.000Z
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import six
import socket
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import struct
import threading
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._socket import *
from ._utils import *
from ._url import *
from ._logging import *
from ._http import *
from ._handshake import *
from ._ssl_compat import *
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False, **options):
"""
Initialize WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
        # These buffers accumulate the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
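    # Commented iteration sketch (hypothetical URL; each step blocks on
    # recv()):
    #
    #     ws = WebSocket()
    #     ws.connect("ws://echo.example.org")
    #     for message in ws:
    #         print(message)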
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
        Set the function used to create mask keys. You can customize the mask
        key generator. Mainly, this is for testing purposes.
        func: callable object. The func takes one integer argument, the
        length of the mask key, and must return a string (byte array) of
        that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
        Get the websocket timeout (seconds).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
        timeout: timeout time (seconds).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
headers = property(getheaders)
def connect(self, url, **options):
"""
        Connect to url. url must use the websocket url scheme,
        i.e. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
        >>> ws.send_frame(cont_frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
        >>> ws.send_frame(cont_frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
                sent = self._send(data)
                data = data[sent:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
receive data as frame from server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
        reason: the reason to close. This must be a string.
        timeout: timeout until a close frame is received.
            If None, it will wait forever for a close frame.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
try:
frame = self.recv_frame()
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
def create_connection(url, timeout=None, class_=WebSocket, **options):
"""
    Connect to url and return a WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
    the global default timeout setting returned by getdefaulttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
class_: class to instantiate when creating the connection. It has to implement
    settimeout and connect. Its __init__ should be compatible with
    WebSocket.__init__, i.e. accept all of its kwargs.
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"enable_multithread" -> enable lock for multithread.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols.
default is None.
"skip_utf8_validation" - skip utf8 validation.
"socket" - pre-initialized stream socket.
"""
sockopt = options.pop("sockopt", [])
sslopt = options.pop("sslopt", {})
fire_cont_frame = options.pop("fire_cont_frame", False)
enable_multithread = options.pop("enable_multithread", False)
skip_utf8_validation = options.pop("skip_utf8_validation", False)
websock = class_(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=fire_cont_frame,
enable_multithread=enable_multithread,
skip_utf8_validation=skip_utf8_validation, **options)
websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
websock.connect(url, **options)
return websock
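# A commented usage sketch of create_connection, mirroring the echo example
# from the docstrings above (requires a reachable echo server):
#
#     ws = create_connection("ws://echo.websocket.org/")
#     ws.send("Hello, Server")
#     print(ws.recv())   # 'Hello, Server'
#     ws.close()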
| 33.490835
| 90
| 0.589698
| 1,909
| 16,444
| 4.964379
| 0.199057
| 0.021948
| 0.020893
| 0.007386
| 0.385037
| 0.357497
| 0.305476
| 0.281629
| 0.257993
| 0.224755
| 0
| 0.006302
| 0.324495
| 16,444
| 490
| 91
| 33.559184
| 0.846867
| 0.43189
| 0
| 0.325
| 0
| 0
| 0.027961
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14
| false
| 0.01
| 0.08
| 0.025
| 0.37
| 0.005
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13f9663c3671ee791e1374fc1c550b7438edff48
| 1,033
|
py
|
Python
|
tests/base_tests/polygon_tests/test_contains.py
|
lycantropos/gon
|
b3f811ece5989d1623b17d633a84071fbff6dd69
|
[
"MIT"
] | 10
|
2020-07-18T12:55:52.000Z
|
2022-03-20T07:09:10.000Z
|
tests/base_tests/polygon_tests/test_contains.py
|
lycantropos/gon
|
b3f811ece5989d1623b17d633a84071fbff6dd69
|
[
"MIT"
] | 52
|
2019-07-11T16:59:01.000Z
|
2022-03-29T19:41:59.000Z
|
tests/base_tests/polygon_tests/test_contains.py
|
lycantropos/gon
|
b3f811ece5989d1623b17d633a84071fbff6dd69
|
[
"MIT"
] | 1
|
2020-03-22T12:56:07.000Z
|
2020-03-22T12:56:07.000Z
|
from typing import Tuple
from hypothesis import given
from gon.base import (Point,
Polygon)
from tests.utils import (equivalence,
implication)
from . import strategies
@given(strategies.polygons)
def test_vertices(polygon: Polygon) -> None:
assert all(vertex in polygon
for vertex in polygon.border.vertices)
assert all(vertex in polygon
for hole in polygon.holes
for vertex in hole.vertices)
@given(strategies.polygons_with_points)
def test_convex_hull(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
assert implication(point in polygon, point in polygon.convex_hull)
@given(strategies.polygons_with_points)
def test_indexing(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
before_indexing = point in polygon
polygon.index()
after_indexing = point in polygon
assert equivalence(before_indexing, after_indexing)
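# For reference, the equivalence/implication helpers imported from tests.utils
# are assumed to be the usual boolean combinators; a hypothetical sketch:
#
#     def implication(antecedent, consequent):
#         return not antecedent or consequent
#
#     def equivalence(left, right):
#         return implication(left, right) and implication(right, left)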
| 26.487179
| 72
| 0.708616
| 127
| 1,033
| 5.598425
| 0.283465
| 0.101266
| 0.090014
| 0.04782
| 0.371308
| 0.371308
| 0.295359
| 0.182841
| 0.182841
| 0.182841
| 0
| 0
| 0.224589
| 1,033
| 38
| 73
| 27.184211
| 0.88764
| 0
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.12
| false
| 0
| 0.2
| 0
| 0.32
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b9141fcf42d65abf107a484255f641db4d6e639b
| 3,249
|
py
|
Python
|
examples/canvas/bezier.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | 2
|
2015-10-26T12:35:37.000Z
|
2020-11-26T12:06:09.000Z
|
examples/canvas/bezier.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | null | null | null |
examples/canvas/bezier.py
|
sirpercival/kivy
|
29ef854a200e6764aae60ea29324379c69d271a3
|
[
"MIT"
] | 3
|
2015-07-18T11:03:59.000Z
|
2018-03-17T01:32:42.000Z
|
#!/usr/bin/env python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line
class BezierTest(FloatLayout):
    def __init__(self, points=None, loop=False, *args, **kwargs):
super(BezierTest, self).__init__(*args, **kwargs)
self.d = 10
        self.points = points if points is not None else []
self.loop = loop
self.current_point = None
with self.canvas:
Color(1.0, 0.0, 0.0)
self.bezier = Bezier(
points=self.points,
segments=150,
loop=self.loop,
dash_length=100,
dash_offset=10)
Color(1.0, 0.0, 1.0)
self.line = Line(
points=self.points+self.points[:2],
dash_offset=10,
dash_length=100)
s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_bezier_dash_offset)
self.add_widget(s)
s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_line_dash_offset)
self.add_widget(s)
def _set_bezier_dash_offset(self, instance, value):
        # effect: reduce dash length while increasing the offset
self.bezier.dash_length = 100 - value
self.bezier.dash_offset = value
def _set_line_dash_offset(self, instance, value):
        # effect: reduce dash length while increasing the offset
self.line.dash_length = 100 - value
self.line.dash_offset = value
def on_touch_down(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
for i, p in enumerate(list(zip(self.points[::2], self.points[1::2]))):
if (
abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
self.current_point = i + 1
return True
return super(BezierTest, self).on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
if self.current_point:
self.current_point = None
return True
return super(BezierTest, self).on_touch_up(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
c = self.current_point
if c:
self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
self.bezier.points = self.points
self.line.points = self.points + self.points[:2]
return True
return super(BezierTest, self).on_touch_move(touch)
class Main(App):
def build(self):
from math import cos, sin, radians
x = y = 150
        radius = 100
# Pacman !
points = [x, y]
for i in range(45, 360, 45):
i = radians(i)
            points.extend([x + cos(i) * radius, y + sin(i) * radius])
return BezierTest(points=points, loop=True)
if __name__ == '__main__':
Main().run()
| 33.84375
| 82
| 0.544783
| 434
| 3,249
| 3.926267
| 0.221198
| 0.070423
| 0.056338
| 0.035211
| 0.461854
| 0.394953
| 0.326878
| 0.290493
| 0.216549
| 0.216549
| 0
| 0.038763
| 0.333026
| 3,249
| 95
| 83
| 34.2
| 0.747577
| 0.037242
| 0
| 0.136986
| 0
| 0
| 0.003202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09589
| false
| 0
| 0.068493
| 0
| 0.287671
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b918984647c67e09bce945847905654d35530277
| 15,886
|
py
|
Python
|
tests/test_pyclipper.py
|
odidev/pyclipper
|
3de54fa4c4d5b8efeede364fbe69336f935f88f2
|
[
"MIT"
] | null | null | null |
tests/test_pyclipper.py
|
odidev/pyclipper
|
3de54fa4c4d5b8efeede364fbe69336f935f88f2
|
[
"MIT"
] | null | null | null |
tests/test_pyclipper.py
|
odidev/pyclipper
|
3de54fa4c4d5b8efeede364fbe69336f935f88f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
Tests for Pyclipper wrapper library.
"""
from __future__ import print_function
from unittest2 import TestCase, main
import sys
if sys.version_info < (3,):
integer_types = (int, long)
else:
integer_types = (int,)
import pyclipper
# Example polygons from http://www.angusj.com/delphi/clipper.php
PATH_SUBJ_1 = [[180, 200], [260, 200], [260, 150], [180, 150]] # square, orientation is False
PATH_SUBJ_2 = [[215, 160], [230, 190], [200, 190]] # triangle
PATH_CLIP_1 = [[190, 210], [240, 210], [240, 130], [190, 130]] # square
PATH_SIGMA = [[300, 400], [100, 400], [200, 300], [100, 200], [300, 200]] # greek letter sigma
PATTERN = [[4, -6], [6, -6], [-4, 6], [-6, 6]]
INVALID_PATH = [[1, 1], ] # less than 2 vertices
class TestPyclipperModule(TestCase):
def test_has_classes(self):
self.assertTrue(hasattr(pyclipper, 'Pyclipper'))
self.assertTrue(hasattr(pyclipper, 'PyclipperOffset'))
def test_has_namespace_methods(self):
for method in ('Orientation', 'Area', 'PointInPolygon', 'SimplifyPolygon', 'SimplifyPolygons',
'CleanPolygon', 'CleanPolygons', 'MinkowskiSum', 'MinkowskiSum2', 'MinkowskiDiff',
'PolyTreeToPaths', 'ClosedPathsFromPolyTree', 'OpenPathsFromPolyTree',
'ReversePath', 'ReversePaths'):
self.assertTrue(hasattr(pyclipper, method))
class TestNamespaceMethods(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
def test_orientation(self):
self.assertFalse(pyclipper.Orientation(PATH_SUBJ_1))
self.assertTrue(pyclipper.Orientation(PATH_SUBJ_1[::-1]))
def test_area(self):
# area less than 0 because orientation is False
area_neg = pyclipper.Area(PATH_SUBJ_1)
area_pos = pyclipper.Area(PATH_SUBJ_1[::-1])
self.assertLess(area_neg, 0)
self.assertGreater(area_pos, 0)
self.assertEqual(abs(area_neg), area_pos)
def test_point_in_polygon(self):
# on polygon
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
# in polygon
self.assertEqual(pyclipper.PointInPolygon((200, 180), PATH_SUBJ_1), 1)
# outside of polygon
self.assertEqual(pyclipper.PointInPolygon((500, 500), PATH_SUBJ_1), 0)
def test_minkowski_sum(self):
solution = pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
self.assertGreater(len(solution), 0)
def test_minkowski_sum2(self):
solution = pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
self.assertGreater(len(solution), 0)
def test_minkowski_diff(self):
solution = pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
self.assertGreater(len(solution), 0)
def test_reverse_path(self):
solution = pyclipper.ReversePath(PATH_SUBJ_1)
        manually_reversed = PATH_SUBJ_1[::-1]
        self.check_reversed_path(solution, manually_reversed)
def test_reverse_paths(self):
solution = pyclipper.ReversePaths([PATH_SUBJ_1])
        manually_reversed = [PATH_SUBJ_1[::-1]]
        self.check_reversed_path(solution[0], manually_reversed[0])
def check_reversed_path(self, path_1, path_2):
        if len(path_1) != len(path_2):
return False
for i in range(len(path_1)):
self.assertEqual(path_1[i][0], path_2[i][0])
self.assertEqual(path_1[i][1], path_2[i][1])
def test_simplify_polygon(self):
solution = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
def test_simplify_polygons(self):
solution = pyclipper.SimplifyPolygons([PATH_SUBJ_1])
solution_single = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution), len(solution_single))
_do_solutions_match(solution, solution_single)
def test_clean_polygon(self):
solution = pyclipper.CleanPolygon(PATH_CLIP_1)
self.assertEqual(len(solution), len(PATH_CLIP_1))
def test_clean_polygons(self):
solution = pyclipper.CleanPolygons([PATH_CLIP_1])
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution[0]), len(PATH_CLIP_1))
class TestFilterPyPolyNode(TestCase):
def setUp(self):
tree = pyclipper.PyPolyNode()
tree.Contour.append(PATH_CLIP_1)
tree.IsOpen = True
child = pyclipper.PyPolyNode()
child.IsOpen = False
child.Parent = tree
child.Contour = PATH_SUBJ_1
tree.Childs.append(child)
child = pyclipper.PyPolyNode()
child.IsOpen = True
child.Parent = tree
child.Contour = PATH_SUBJ_2
tree.Childs.append(child)
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = PATTERN
child.Childs.append(child2)
# empty contour should not
# be included in filtered results
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = []
child.Childs.append(child2)
self.tree = tree
def test_polytree_to_paths(self):
paths = pyclipper.PolyTreeToPaths(self.tree)
self.check_paths(paths, 4)
def test_closed_paths_from_polytree(self):
paths = pyclipper.ClosedPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def test_open_paths_from_polytree(self):
paths = pyclipper.OpenPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def check_paths(self, paths, expected_nr):
self.assertEqual(len(paths), expected_nr)
self.assertTrue(all((len(path) > 0 for path in paths)))
class TestPyclipperAddPaths(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
def test_add_path(self):
# should not raise an exception
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
# should not raise an exception
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
def test_add_path_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPath, INVALID_PATH, pyclipper.PT_CLIP, True)
def test_add_paths_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPaths, [INVALID_PATH, INVALID_PATH],
pyclipper.PT_CLIP, True)
try:
self.pc.AddPaths([INVALID_PATH, PATH_CLIP_1], pyclipper.PT_CLIP)
self.pc.AddPaths([PATH_CLIP_1, INVALID_PATH], pyclipper.PT_CLIP)
except pyclipper.ClipperException:
self.fail("add_paths raised ClipperException when not all paths were invalid")
class TestClassProperties(TestCase):
def check_property_assignment(self, pc, prop_name, values):
for val in values:
setattr(pc, prop_name, val)
self.assertEqual(getattr(pc, prop_name), val)
def test_pyclipper_properties(self):
pc = pyclipper.Pyclipper()
for prop_name in ('ReverseSolution', 'PreserveCollinear', 'StrictlySimple'):
self.check_property_assignment(pc, prop_name, [True, False])
def test_pyclipperoffset_properties(self):
for factor in range(6):
pyclipper.SCALING_FACTOR = 10 ** factor
pc = pyclipper.PyclipperOffset()
for prop_name in ('MiterLimit', 'ArcTolerance'):
self.check_property_assignment(pc, prop_name, [2.912, 132.12, 12, -123])
class TestPyclipperExecute(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
self.add_default_paths(self.pc)
self.default_args = [pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD]
@staticmethod
def add_default_paths(pc):
pc.AddPath(PATH_CLIP_1, pyclipper.PT_CLIP)
pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], pyclipper.PT_SUBJECT)
@staticmethod
def add_paths(pc, clip_path, subj_paths, addend=None, multiplier=None):
pc.AddPath(_modify_vertices(clip_path, addend=addend, multiplier=multiplier), pyclipper.PT_CLIP)
for subj_path in subj_paths:
pc.AddPath(_modify_vertices(subj_path, addend=addend, multiplier=multiplier), pyclipper.PT_SUBJECT)
def test_get_bounds(self):
bounds = self.pc.GetBounds()
self.assertIsInstance(bounds, pyclipper.PyIntRect)
self.assertEqual(bounds.left, 180)
self.assertEqual(bounds.right, 260)
self.assertEqual(bounds.top, 130)
self.assertEqual(bounds.bottom, 210)
def test_execute(self):
solution = self.pc.Execute(*self.default_args)
self.assertEqual(len(solution), 2)
def test_execute2(self):
solution = self.pc.Execute2(*self.default_args)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.check_pypolynode(solution)
def test_execute_empty(self):
pc = pyclipper.Pyclipper()
with self.assertRaises(pyclipper.ClipperException):
pc.Execute(pyclipper.CT_UNION,
pyclipper.PFT_NONZERO,
pyclipper.PFT_NONZERO)
def test_clear(self):
self.pc.Clear()
with self.assertRaises(pyclipper.ClipperException):
self.pc.Execute(*self.default_args)
def test_exact_results(self):
"""
Test whether coordinates passed into the library are returned exactly, if they are not affected by the
operation.
"""
pc = pyclipper.Pyclipper()
# Some large triangle.
path = [[[0, 1], [0, 0], [15 ** 15, 0]]]
pc.AddPaths(path, pyclipper.PT_SUBJECT, True)
        result = pc.Execute(pyclipper.CT_UNION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
assert result == path
def check_pypolynode(self, node):
self.assertTrue(len(node.Contour) == 0 or len(node.Contour) > 2)
        # check a vertex coordinate; it should not be an iterable (that would
        # mean node.Contour is a list of paths rather than a single path)
if node.Contour:
self.assertFalse(hasattr(node.Contour[0][0], '__iter__'))
for child in node.Childs:
self.check_pypolynode(child)
class TestPyclipperOffset(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
@staticmethod
def add_path(pc, path):
pc.AddPath(path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
def test_execute(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 1)
def test_execute2(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute2(2.0)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.assertEqual(len(pyclipper.OpenPathsFromPolyTree(solution)), 0)
self.assertEqual(len(pyclipper.ClosedPathsFromPolyTree(solution)), 1)
def test_clear(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
pc.Clear()
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 0)
class TestScalingFactorWarning(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 2.
self.pc = pyclipper.Pyclipper()
def test_orientation(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Orientation(PATH_SUBJ_1)
def test_area(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Area(PATH_SUBJ_1)
def test_point_in_polygon(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
def test_minkowski_sum(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
def test_minkowski_sum2(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
def test_minkowski_diff(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
def test_add_path(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
class TestScalingFunctions(TestCase):
scale = 2 ** 31
path = [(0, 0), (1, 1)]
paths = [path] * 3
def test_value_scale_to(self):
value = 0.5
res = pyclipper.scale_to_clipper(value, self.scale)
assert isinstance(res, integer_types)
assert res == int(value * self.scale)
def test_value_scale_from(self):
value = 1000000000000
res = pyclipper.scale_from_clipper(value, self.scale)
assert isinstance(res, float)
# Convert to float to get "normal" division in Python < 3.
assert res == float(value) / self.scale
def test_path_scale_to(self):
res = pyclipper.scale_to_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, integer_types) for i in res for j in i)
def test_path_scale_from(self):
res = pyclipper.scale_from_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, float) for i in res for j in i)
def test_paths_scale_to(self):
res = pyclipper.scale_to_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, integer_types) for i in res for j in i for k in j)
def test_paths_scale_from(self):
res = pyclipper.scale_from_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, float) for i in res for j in i for k in j)
class TestNonStandardNumbers(TestCase):
def test_sympyzero(self):
try:
from sympy import Point2D
from sympy.core.numbers import Zero
except ImportError:
self.skipTest("Skipping, sympy not available")
path = [Point2D(v) for v in [(0, 0), (0, 1)]]
assert type(path[0].x) == Zero
path = pyclipper.scale_to_clipper(path)
assert path == [[0, 0], [0, 2147483648]]
def _do_solutions_match(paths_1, paths_2, factor=None):
if len(paths_1) != len(paths_2):
return False
paths_1 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_1]
paths_2 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_2]
return all(((p_1 in paths_2) for p_1 in paths_1))
def _modify_vertices(path, addend=0.0, multiplier=1.0, converter=None):
path = path[:]
def convert_coordinate(c):
if multiplier is not None:
c *= multiplier
if addend is not None:
c += addend
if converter:
c = converter(c)
return c
return [[convert_coordinate(c) for c in v] for v in path]
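# A minimal sketch of how _modify_vertices transforms a path (the values here
# are illustrative, not taken from the test fixtures above):
#
#     >>> _modify_vertices([[1, 2], [3, 4]], addend=0.5, multiplier=10, converter=int)
#     [[10, 20], [30, 40]]
#
# Each coordinate is multiplied first, then offset, then passed through the
# optional converter (so 1 * 10 + 0.5 = 10.5 -> int -> 10).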
def run_tests():
main()
if __name__ == '__main__':
run_tests()
| 34.914286
| 111
| 0.660078
| 1,990
| 15,886
| 5.080905
| 0.149246
| 0.032539
| 0.021363
| 0.008901
| 0.485808
| 0.419147
| 0.325784
| 0.265849
| 0.235189
| 0.209277
| 0
| 0.029281
| 0.234672
| 15,886
| 454
| 112
| 34.991189
| 0.802352
| 0.045701
| 0
| 0.330247
| 0
| 0
| 0.026948
| 0.002913
| 0
| 0
| 0
| 0
| 0.231481
| 1
| 0.197531
| false
| 0
| 0.021605
| 0
| 0.274691
| 0.003086
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b919ab13ac46e733a617fc950c062280033c20b8
| 439
|
py
|
Python
|
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 5
|
2020-05-25T03:08:09.000Z
|
2022-02-27T05:57:28.000Z
|
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 1
|
2020-12-22T01:35:36.000Z
|
2022-01-28T01:51:06.000Z
|
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
|
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
|
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
|
[
"MIT"
] | 1
|
2020-05-06T01:56:55.000Z
|
2020-05-06T01:56:55.000Z
|
from env_SingleCatchPigs import EnvSingleCatchPigs
import random
env = EnvSingleCatchPigs(7)
max_iter = 10000
env.set_agent_at([2, 2], 0)
env.set_pig_at([4, 4], 0)
for i in range(max_iter):
print("iter= ", i)
env.render()
action = random.randint(0, 4)
print('action is', action)
reward, done = env.step(action)
print('reward', reward, 'done', done)
if reward > 0:
print('catch the pig', reward, done)
| 24.388889
| 50
| 0.658314
| 66
| 439
| 4.272727
| 0.484848
| 0.106383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042735
| 0.200456
| 439
| 17
| 51
| 25.823529
| 0.760684
| 0
| 0
| 0
| 0
| 0
| 0.086758
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.266667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91c1523d70c0416c1afa5a4c6a25a3d2f1e426b
| 3,417
|
py
|
Python
|
eust/tables/data.py
|
rasmuse/eust
|
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
|
[
"MIT"
] | 1
|
2021-03-14T04:06:02.000Z
|
2021-03-14T04:06:02.000Z
|
eust/tables/data.py
|
rasmuse/eust
|
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
|
[
"MIT"
] | 9
|
2019-04-29T09:01:39.000Z
|
2021-11-15T17:48:36.000Z
|
eust/tables/data.py
|
rasmuse/eust
|
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
|
[
"MIT"
] | 1
|
2019-10-23T08:56:33.000Z
|
2019-10-23T08:56:33.000Z
|
# -*- coding: utf-8 -*-
import re
import gzip
import pandas as pd
import numpy as np
from eust.core import _download_file, conf
_DIMENSION_NAME_RE = re.compile(r"^[a-z_0-9]+$")
_YEAR_RE = re.compile(r"^(1|2)[0-9]{3}$")
def _is_valid_dimension_name(s: str) -> bool:
return bool(_DIMENSION_NAME_RE.match(s))
def _split_values_flags(series: pd.Series) -> pd.DataFrame:
split = series.str.split(" ")
df = pd.DataFrame(
{
"value": split.apply(lambda l: l[0] if l else None),
"flag": split.apply(lambda l: l[1] if l and len(l) > 1 else None),
}
)
return df
def _set_multiindex_dtype(index, level, type_):
index_df = index.to_frame()
index_df[level] = index_df[level].astype(type_)
new_index = index_df.set_index(index.names).index
return new_index
def _read_tsv(path_or_buffer) -> pd.DataFrame:
d = pd.read_csv(path_or_buffer, sep="\t", header=0, dtype=str)
top_left_cell = d.columns[0]
row_dimension_names, header_dimension_name = top_left_cell.split("\\")
row_dimension_names = row_dimension_names.split(",")
index_data = d[top_left_cell]
del d[top_left_cell]
assert len(set(index_data)) == len(index_data) # no duplicates
assert len(row_dimension_names) >= 1
d.columns.name = header_dimension_name
index_data = index_data.apply(lambda s: s.split(","))
d.index = pd.MultiIndex.from_arrays(
list(zip(*index_data)), names=row_dimension_names,
)
# cannot handle multidimensional column labels
d = d.stack()
assert set(d.apply(type)) == {str}
assert isinstance(d, pd.Series), d.columns
assert all(map(_is_valid_dimension_name, d.index.names))
d.index.set_levels(
[level.str.strip() for level in d.index.levels], inplace=True
)
d = _split_values_flags(d)
d.loc[d["value"] == ":", "value"] = np.nan
d["value"] = d["value"].astype(float)
if "time" in d.index.names:
time_strings = d.index.unique("time")
matches_year = (_YEAR_RE.match(s) for s in time_strings)
if all(matches_year):
d.index = _set_multiindex_dtype(d.index, "time", int)
d = d.sort_index()
return d
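# A minimal sketch of the Eurostat TSV layout that _read_tsv expects (the
# sample values are made up). The top-left cell names the row dimensions and
# the header dimension, e.g. "unit,geo\time":
#
#     import io
#     buf = io.StringIO(
#         "unit,geo\\time\t2018\t2019\n"
#         "EUR,SE\t1.0\t2.0 p\n"
#     )
#     d = _read_tsv(buf)
#     # -> DataFrame indexed by (unit, geo, time) with "value" and "flag"
#     #    columns; d.loc[("EUR", "SE", 2019)] has value 2.0 and flag "p".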
_TSV_GZ_FILENAME = "data.tsv.gz"
_HDF_FILENAME = "data.h5"
_HDF_TABLE_PATH = "eurostat_table"
def _read_tsv_gz(path_or_buffer) -> pd.DataFrame:
with gzip.open(path_or_buffer, "rb") as f:
return _read_tsv(f)
def _download_tsv_gz(url, dst_dir):
path = dst_dir / _TSV_GZ_FILENAME
_download_file(url, path)
def _read(the_dir):
hdf_path = the_dir / _HDF_FILENAME
tsv_gz_path = the_dir / _TSV_GZ_FILENAME
try:
data = pd.read_hdf(hdf_path, _HDF_TABLE_PATH)
except FileNotFoundError:
data = _read_tsv_gz(tsv_gz_path)
data.to_hdf(
hdf_path,
_HDF_TABLE_PATH,
complevel=conf["hdf_complevel"],
complib=conf["hdf_complib"],
)
# Replace empty flags by None (issue #3)
#
# Doing it at this point so that the null flag is saved in the HDF
# file as a string, for performance reasons.
# This is a pandas PerformanceWarning:
# "your performance may suffer as PyTables will pickle object types
# that it cannot map directly to c-types
# [inferred_type->mixed,key->block0_values] [items->['flag']]"
data["flag"] = data["flag"].replace({"": None})
return data
| 26.695313
| 78
| 0.654375
| 514
| 3,417
| 4.070039
| 0.315175
| 0.021511
| 0.040631
| 0.011472
| 0.060229
| 0.021033
| 0
| 0
| 0
| 0
| 0
| 0.006379
| 0.220076
| 3,417
| 127
| 79
| 26.905512
| 0.778612
| 0.125549
| 0
| 0
| 0
| 0
| 0.047731
| 0
| 0
| 0
| 0
| 0
| 0.064103
| 1
| 0.089744
| false
| 0
| 0.064103
| 0.012821
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91ce0003a23729f5cf4b45b783933c9e0cd6696
| 22,196
|
py
|
Python
|
utils.py
|
fatemehtd/Echo-SyncNet
|
ebb280e83a67b31436c4cfa420f9c06a92ac8c12
|
[
"MIT"
] | 6
|
2021-03-19T16:55:30.000Z
|
2022-03-15T08:41:56.000Z
|
utils.py
|
matiasmolinas/Echo-SyncNet
|
f7f81ead7a24d7574c0668df3765ef58fd71d54d
|
[
"MIT"
] | 3
|
2021-10-01T22:15:44.000Z
|
2022-03-25T03:12:47.000Z
|
utils.py
|
matiasmolinas/Echo-SyncNet
|
f7f81ead7a24d7574c0668df3765ef58fd71d54d
|
[
"MIT"
] | 3
|
2021-03-19T16:55:35.000Z
|
2022-02-03T10:40:48.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import CONFIG
import json
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import io
import math
import os
import time
from absl import flags
from absl import logging
from easydict import EasyDict
import matplotlib
matplotlib.use('Agg')
FLAGS = flags.FLAGS
def visualize_batch(data, global_step, batch_size, num_steps):
"""Visualizes a batch."""
frames = data['frames']
frames_list = tf.unstack(frames, num=num_steps, axis=1)
frames_summaries = tf.concat(frames_list, axis=2)
batch_list = tf.split(frames_summaries, batch_size, axis=0)
batch_summaries = tf.concat(batch_list, axis=1)
tf.summary.image('train_batch', batch_summaries, step=global_step)
def visualize_nearest_neighbours(model, data, global_step, batch_size,
num_steps, num_frames_per_step, split):
"""Visualize nearest neighbours in embedding space."""
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
cnn = model['cnn']
emb = model['emb']
if 'tcn' in CONFIG.TRAINING_ALGO:
cnn_feats = get_cnn_feats(
cnn, data, training=False, num_steps=2 * num_steps)
emb_feats = emb(cnn_feats, 2 * num_steps)
emb_feats = tf.stack(
tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1)
else:
cnn_feats = get_cnn_feats(cnn, data, training=False)
emb_feats = emb(cnn_feats, num_steps)
emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)
query_feats = emb_feats[0]
if CONFIG.OPTICALFLOW:
frames = data['video_frames']
else:
frames = data['frames']
image_list = tf.unstack(frames, num=batch_size, axis=0)
if 'tcn' in CONFIG.TRAINING_ALGO:
im_list = [image_list[0]
[num_frames_per_step - 1::num_frames_per_step][::2]]
else:
im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]]
sim_matrix = np.zeros(
(batch_size-1, num_steps, num_steps), dtype=np.float32)
for i in range(1, batch_size):
candidate_feats = emb_feats[i]
if 'tcn' in CONFIG.TRAINING_ALGO:
img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step][::2]
else:
img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step]
nn_img_list = []
for j in range(num_steps):
curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])
mean_squared_distance = tf.reduce_mean(
tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)
sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)
nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])
nn_img = tf.stack(nn_img_list, axis=0)
im_list.append(nn_img)
def vstack(im):
return tf.concat(tf.unstack(im, num=num_steps), axis=1)
summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],
axis=0), axis=0)
tf.summary.image('%s/nn' % split, summary_im, step=global_step)
# Convert sim_matrix to float32 as summary_image doesn't take float64
sim_matrix = sim_matrix.astype(np.float32)
tf.summary.image('%s/similarity_matrix' % split,
np.expand_dims(sim_matrix, axis=3), step=global_step)
def softmax(w, t=1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def random_choice_noreplace(m, n, axis=-1):
# Generate m random permutations of range(0, n)
# NumPy version: np.random.rand(m,n).argsort(axis=axis)
return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)
def gen_cycles(num_cycles, batch_size, cycle_len):
"""Generate cycles for alignment."""
random_cycles = random_choice_noreplace(
num_cycles, batch_size)[:, :cycle_len]
return random_cycles
def get_warmup_lr(lr, global_step, lr_params):
"""Returns learning rate during warm up phase."""
if lr_params.NUM_WARMUP_STEPS > 0:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(
lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_lr = lr_params.INITIAL_LR * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr
return lr
# Minimally adapted from Tensorflow object_detection code.
def manual_stepping(global_step, boundaries, rates):
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(
tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)), [0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))
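# A small worked example (illustrative numbers): with boundaries=[1000, 2000]
# and rates=[1e-3, 1e-4, 1e-5], boundaries becomes [0, 1000, 2000], so a
# global_step of 1500 satisfies step >= 0 and step >= 1000 but not
# step >= 2000; rate_index is therefore 1 and the one-hot reduce_sum
# returns 1e-4.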
def get_lr_fn(optimizer_config):
"""Returns function that provides current learning rate based on config.
NOTE: This returns a function because in Eager mode we need to call assign
to update the learning rate.
Args:
optimizer_config: EasyDict, contains params required to initialize the
learning rate and the learning rate decay function.
Returns:
lr_fn: function, this can be called to return the current learning rate
based on the provided config.
Raises:
ValueError: in case invalid params have been passed in the config.
"""
lr_params = optimizer_config.LR
# pylint: disable=g-long-lambda
if lr_params.DECAY_TYPE == 'exp_decay':
def lr_fn(lr, global_step): return tf.train.exponential_decay(
lr,
global_step,
lr_params.EXP_DECAY_STEPS,
lr_params.EXP_DECAY_RATE,
staircase=True)()
elif lr_params.DECAY_TYPE == 'manual':
lr_step_boundaries = [int(x)
for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]
f = lr_params.MANUAL_LR_DECAY_RATE
learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p
for p in range(len(lr_step_boundaries) + 1)]
def lr_fn(lr, global_step): return manual_stepping(
global_step, lr_step_boundaries, learning_rate_sequence)
elif lr_params.DECAY_TYPE == 'fixed':
def lr_fn(lr, global_step): return lr_params.INITIAL_LR
elif lr_params.DECAY_TYPE == 'poly':
def lr_fn(lr, global_step): return tf.train.polynomial_decay(
lr,
global_step,
CONFIG.TRAIN.MAX_ITERS,
end_learning_rate=0.0,
power=1.0,
cycle=False)
else:
raise ValueError('Learning rate decay type %s not supported. Only the '
'following decay types are supported: fixed, exp_decay, '
'manual, and poly.' % lr_params.DECAY_TYPE)
return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),
global_step, lr_params))
def get_optimizer(optimizer_config, learning_rate):
"""Returns optimizer based on config and learning rate."""
if optimizer_config.TYPE == 'AdamOptimizer':
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif optimizer_config.TYPE == 'MomentumOptimizer':
opt = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9)
else:
raise ValueError('Optimizer %s not supported. Only the following '
'optimizers are supported: AdamOptimizer, MomentumOptimizer.'
% optimizer_config.TYPE)
return opt
def get_lr_opt_global_step():
"""Intializes learning rate, optimizer and global step."""
optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)
global_step = optimizer.iterations
learning_rate = optimizer.learning_rate
return learning_rate, optimizer, global_step
def create_ckpt(logdir, restore=False, **ckpt_objects):
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(
ckpt_manager.latest_checkpoint) if restore else -1
return ckpt_manager, status, checkpoint
def restore_ckpt(logdir, **ckpt_objects):
"""Create and restore checkpoint (if one exists on the path)."""
# Instantiate checkpoint and restore from any pre-existing checkpoint.
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(ckpt_manager.latest_checkpoint)
return ckpt_manager, status, checkpoint
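# A minimal usage sketch (the model/optimizer names are illustrative):
# restore the latest checkpoint, then verify it matched the checkpointed
# objects once variables have been built.
#
#     ckpt_manager, status, checkpoint = restore_ckpt(
#         logdir, model=model, optimizer=optimizer)
#     status.assert_existing_objects_matched()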
def to_dict(config):
if isinstance(config, list):
return [to_dict(c) for c in config]
elif isinstance(config, EasyDict):
return dict([(k, to_dict(v)) for k, v in config.items()])
else:
return config
def setup_train_dir(logdir, overwrite=False, force_train=True):
"""Setups directory for training."""
tf.io.gfile.makedirs(logdir)
config_path = os.path.join(logdir, 'config.json')
if not os.path.exists(config_path) or overwrite:
logging.info(
'Using the passed-in config, as no config.json file exists in '
'%s (or overwrite was requested).', logdir)
with tf.io.gfile.GFile(config_path, 'w') as config_file:
config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])
json.dump(config, config_file, sort_keys=True, indent=4)
else:
logging.info(
'Using config from config.json that exists in %s.', logdir)
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
CONFIG.update(config_dict)
train_logs_dir = os.path.join(logdir, 'train.logs')
if os.path.exists(train_logs_dir) and not force_train:
raise ValueError('You might be overwriting a directory that already '
'has train_logs. Please provide a new logdir name in '
'config or pass --force_train while launching script.')
tf.io.gfile.makedirs(train_logs_dir)
def setup_eval_dir(logdir, config_timeout_seconds=1):
"""Setups directory for evaluation."""
tf.io.gfile.makedirs(logdir)
tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))
config_path = os.path.join(logdir, 'config.json')
while not tf.io.gfile.exists(config_path):
logging.info('Waiting for config to exist. Going to sleep for '
'%s secs.', config_timeout_seconds)
time.sleep(config_timeout_seconds)
while True:
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
if config_dict is None:
time.sleep(config_timeout_seconds)
else:
break
CONFIG.update(config_dict)
def get_data(iterator):
"""Return a data dict which contains all the requested sequences."""
data = iterator.get_next()
return data, data['chosen_steps'], data['seq_lens']
@tf.function
def get_cnn_feats(cnn, data, training, num_steps=None):
"""Passes data through base CNN."""
if num_steps is None:
if training:
num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
else:
num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
cnn.num_steps = num_steps
cnn_feats = cnn(data['frames'])
return cnn_feats
def get_context_steps(step):
num_steps = CONFIG.DATA.NUM_STEPS
stride = CONFIG.DATA.FRAME_STRIDE
# We don't want to see the future.
steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)
return steps
def get_indices(curr_idx, num_steps, seq_len):
steps = range(curr_idx, curr_idx + num_steps)
single_steps = np.concatenate([get_context_steps(step) for step in steps])
single_steps = np.maximum(0, single_steps)
single_steps = np.minimum(seq_len, single_steps)
return single_steps
def get_embeddings_dataset(model, iterator, frames_per_batch,
keep_data=False, optical_flow=False, keep_labels=True,
max_embs=None, callbacks=()):
"""Get embeddings from a one epoch iterator."""
keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS
num_frames_per_step = CONFIG.DATA.NUM_STEPS
cnn = model['cnn']
emb = model['emb']
embs_list = []
labels_list = []
steps_list = []
seq_lens_list = []
names_list = []
seq_labels_list = []
if keep_data:
frames_list = []
if optical_flow:
frame_original_list = []
n = 0
def cond(n):
if max_embs is None:
return True
else:
return n < max_embs
# Make Recurrent Layers stateful, set batch size.
# We do this as we are embedding the whole sequence and that can take
# more than one batch to be passed and we don't want to automatically
# reset hidden states after each batch.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = True
gru_layer.input_spec[0].shape = [1, ]
while cond(n):
try:
print(n)
embs = []
labels = []
steps = []
seq_lens = []
names = []
seq_labels = []
if keep_data:
frames = []
if optical_flow:
frame_original = []
# Reset GRU states for each video.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.reset_states()
data, chosen_steps, seq_len = get_data(iterator)
seq_len = seq_len.numpy()[0]
num_batches = int(math.ceil(float(seq_len)/frames_per_batch))
for i in range(num_batches):
if (i + 1) * frames_per_batch > seq_len:
num_steps = seq_len - i * frames_per_batch
else:
num_steps = frames_per_batch
curr_idx = i * frames_per_batch
curr_data = {}
for k, v in data.items():
# Need to do this as some modalities might not exist.
if len(v.shape) > 1 and v.shape[1] != 0:
idxes = get_indices(curr_idx, num_steps, seq_len)
curr_data[k] = tf.gather(v, idxes, axis=1)
else:
curr_data[k] = v
cnn_feats = get_cnn_feats(cnn, curr_data,
num_steps=num_frames_per_step * num_steps,
training=False)
emb_feats = emb(cnn_feats, num_steps)
logging.debug('On sequence number %d, frames embedded %d', n,
curr_idx + num_steps)
embs.append(emb_feats.numpy())
for f in callbacks:
f(np.concatenate(embs), data, chosen_steps, seq_len)
steps.append(chosen_steps.numpy()[0])
seq_lens.append(seq_len * [seq_len])
all_labels = data['frame_labels'].numpy()[0]
name = data['name'].numpy()[0]
names.append(seq_len * [name])
seq_label = data['seq_labels'].numpy()[0]
seq_labels.append(seq_len * [seq_label])
labels.append(all_labels)
embs = np.concatenate(embs, axis=0)
labels = np.concatenate(labels, axis=0)
steps = np.concatenate(steps, axis=0)
seq_lens = np.concatenate(seq_lens, axis=0)
names = np.concatenate(names, axis=0)
seq_labels = np.concatenate(seq_labels, axis=0)
if keep_data:
frames.append(data['frames'].numpy()[0])
frames = np.concatenate(frames, axis=0)
if optical_flow:
frame_original.append(data['video_frames'].numpy()[0])
frame_original = np.concatenate(frame_original, axis=0)
if keep_labels:
labels = labels[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(labels)
seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]
names = names[~np.isnan(embs).any(axis=1)]
seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]
steps = steps[~np.isnan(embs).any(axis=1)]
if keep_data:
frames = frames[~np.isnan(embs).any(axis=1)]
if optical_flow:
frame_original = frame_original[~np.isnan(embs).any(axis=1)]
embs = embs[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(seq_lens)
assert len(embs) == len(steps)
assert len(names) == len(steps)
embs_list.append(embs)
if keep_labels:
labels_list.append(labels)
seq_labels_list.append(seq_labels)
steps_list.append(steps)
seq_lens_list.append(seq_lens)
names_list.append(names)
if keep_data:
frames_list.append(frames)
if optical_flow:
frame_original_list.append(frame_original)
n += 1
except tf.errors.OutOfRangeError:
logging.info('Finished embedding the dataset.')
break
dataset = {'embs': embs_list,
'seq_lens': seq_lens_list,
'steps': steps_list,
'names': names_list,
'seq_labels': seq_labels_list}
if keep_data:
dataset['frames'] = frames_list
if optical_flow:
dataset['frames_original'] = frame_original_list
if keep_labels:
dataset['labels'] = labels_list
# Reset statefulness to recurrent layers for other evaluation tasks.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = False
return dataset
def gen_plot(x, y):
"""Create a pyplot, save to buffer and return TB compatible image."""
plt.figure()
plt.plot(x, y)
plt.title('Val Accuracy')
plt.ylim(0, 1)
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
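# A possible usage sketch (the variable names are illustrative): log a
# validation-accuracy curve to TensorBoard alongside the other summaries.
#
#     val_image = gen_plot(eval_steps, eval_accuracies)
#     tf.summary.image('val_accuracy_curve', val_image, step=global_step)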
class Stopwatch(object):
"""Simple timer for measuring elapsed time."""
def __init__(self):
self.reset()
def elapsed(self):
return time.time() - self.time
def done(self, target_interval):
return self.elapsed() >= target_interval
def reset(self):
self.time = time.time()
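# A minimal usage sketch: flush summaries roughly every 60 seconds
# (write_summaries is a hypothetical helper).
#
#     timer = Stopwatch()
#     while training:
#         ...
#         if timer.done(60):
#             write_summaries()
#             timer.reset()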
def set_learning_phase(f):
"""Sets the correct learning phase before calling function f."""
def wrapper(*args, **kwargs):
"""Calls the function f after setting proper learning phase."""
if 'training' not in kwargs:
raise ValueError('Function called with set_learning_phase decorator which'
' does not have training argument.')
training = kwargs['training']
if training:
# Set learning_phase to True to use models in training mode.
tf.keras.backend.set_learning_phase(1)
else:
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
return f(*args, **kwargs)
return wrapper
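# A sketch of the intended decorator usage (train_one_step is hypothetical);
# the call must pass training as a keyword argument, or the wrapper raises
# ValueError.
#
#     @set_learning_phase
#     def train_one_step(model, data, training):
#         ...
#
#     train_one_step(model, data, training=True)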
def load_config(config_path):
config = None
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
assert config is not None, "config file is not provided or is corrupted"
return config
def prepare_gpu(ind=-1):
ind = int(ind)
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
if ind > -1:
tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU')
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus),
"Logical GPUs"])
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
| 36.748344
| 113
| 0.621193
| 2,947
| 22,196
| 4.45979
| 0.163217
| 0.02313
| 0.011869
| 0.015826
| 0.297877
| 0.227574
| 0.182987
| 0.160846
| 0.146238
| 0.125618
| 0
| 0.007998
| 0.278969
| 22,196
| 603
| 114
| 36.809287
| 0.813234
| 0.128807
| 0
| 0.202765
| 0
| 0
| 0.060903
| 0
| 0
| 0
| 0
| 0
| 0.011521
| 1
| 0.080645
| false
| 0.004608
| 0.036866
| 0.018433
| 0.182028
| 0.004608
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91d0a28a2d3c169f55ef3fbe14306db5438a499
| 8,468
|
py
|
Python
|
UnityPy/classes/Sprite.py
|
dblack2056/UnityPy
|
303291e46ddfbf266131237e59e6b1b5c46a9ca4
|
[
"MIT"
] | null | null | null |
UnityPy/classes/Sprite.py
|
dblack2056/UnityPy
|
303291e46ddfbf266131237e59e6b1b5c46a9ca4
|
[
"MIT"
] | null | null | null |
UnityPy/classes/Sprite.py
|
dblack2056/UnityPy
|
303291e46ddfbf266131237e59e6b1b5c46a9ca4
|
[
"MIT"
] | null | null | null |
from enum import IntEnum
from .Mesh import BoneWeights4, SubMesh, VertexData
from .NamedObject import NamedObject
from .PPtr import PPtr, save_ptr
from ..export import SpriteHelper
from ..enums import SpriteMeshType
from ..streams import EndianBinaryWriter
class Sprite(NamedObject):
@property
def image(self):
return SpriteHelper.get_image_from_sprite(self)
def __init__(self, reader):
super().__init__(reader=reader)
version = self.version
self.m_Rect = reader.read_rectangle_f()
self.m_Offset = reader.read_vector2()
if version >= (4, 5): # 4.5 and up
self.m_Border = reader.read_vector4()
self.m_PixelsToUnits = reader.read_float()
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
self.m_Pivot = reader.read_vector2()
self.m_Extrude = reader.read_u_int()
if version >= (5, 3): # 5.3 and up
self.m_IsPolygon = reader.read_boolean()
reader.align_stream()
if version >= (2017,): # 2017 and up
first = reader.read_bytes(16) # GUID
second = reader.read_long()
self.m_RenderDataKey = (first, second)
self.m_AtlasTags = reader.read_string_array()
self.m_SpriteAtlas = PPtr(reader) # SpriteAtlas
self.m_RD = SpriteRenderData(reader)
if version >= (2017,): # 2017 and up
m_PhysicsShapeSize = reader.read_int()
self.m_PhysicsShape = [
reader.read_vector2_array() for _ in range(m_PhysicsShapeSize)
]
if version >= (2018,): # 2018 and up
m_BonesSize = reader.read_int()
self.m_Bones = [
reader.read_vector2_array() for _ in range(m_BonesSize)
]
def save(self, writer: EndianBinaryWriter = None):
if writer is None:
writer = EndianBinaryWriter(endian=self.reader.endian)
version = self.version
super().save(writer)
writer.write_rectangle_f(self.m_Rect)
writer.write_vector2(self.m_Offset)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.m_Border)
writer.write_float(self.m_PixelsToUnits)
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
writer.write_vector2(self.m_Pivot)
writer.write_u_int(self.m_Extrude)
if version >= (5, 3): # 5.3 and up
writer.write_boolean(self.m_IsPolygon)
writer.align_stream()
if version >= (2017,): # 2017 and up
writer.write_bytes(self.m_RenderDataKey[0]) # GUID
writer.write_long(self.m_RenderDataKey[1])
writer.write_string_array(self.m_AtlasTags)
self.m_SpriteAtlas.save(writer) # SpriteAtlas
self.m_RD.save(writer, version)
if version >= (2017,): # 2017 and up
writer.write_int(len(self.m_PhysicsShape))
for phys in self.m_PhysicsShape:
writer.write_vector2_array(phys)
if version >= (2018,): # 2018 and up
writer.write_int(len(self.m_Bones))
for bone in self.m_Bones:
writer.write_vector2_array(bone)
self.set_raw_data(writer.bytes)
class SecondarySpriteTexture:
def __init__(self, reader):
self.texture = PPtr(reader) # Texture2D
self.name = reader.read_string_to_null()
def save(self, writer):
self.texture.save(writer)
writer.write_string_to_null(self.name)
class SpritePackingRotation(IntEnum):
kSPRNone = 0
kSPRFlipHorizontal = 1
kSPRFlipVertical = 2
kSPRRotate180 = 3
kSPRRotate90 = 4
class SpritePackingMode(IntEnum):
kSPMTight = 0
kSPMRectangle = 1
class SpriteSettings:
def __init__(self, reader):
self.value = reader.read_u_int()
@property
def value(self):
return self.m_settingsRaw
@value.setter
def value(self, _value):
self.m_settingsRaw = _value
self.packed = self.m_settingsRaw & 1 # 1
self.packingMode = SpritePackingMode((self.m_settingsRaw >> 1) & 1) # 1
self.packingRotation = SpritePackingRotation((self.m_settingsRaw >> 2) & 0xF) # 4
self.meshType = SpriteMeshType((self.m_settingsRaw >> 6) & 1) # 1
# rest of the bits are reserved
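# Worked example (illustrative raw value): m_settingsRaw = 0b1000111 decodes
# to packed = 1 (bit 0), packingMode = kSPMRectangle ((raw >> 1) & 1 == 1),
# packingRotation = kSPRFlipHorizontal ((raw >> 2) & 0xF == 1), and
# meshType = 1 (bit 6).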
def save(self, writer):
writer.write_u_int(self.m_settingsRaw)
class SpriteVertex:
def __init__(self, reader):
version = reader.version
self.pos = reader.read_vector3()
if version[:2] <= (4, 3): # 4.3 and down
self.uv = reader.read_vector2()
def save(self, writer, version):
writer.write_vector3(self.pos)
if version[:2] <= (4, 3): # 4.3 and down
writer.write_vector2(self.uv)
class SpriteRenderData:
def __init__(self, reader):
version = reader.version
self.texture = PPtr(reader) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture = PPtr(reader) # Texture2D
if version >= (2019,): # 2019 and up
secondaryTexturesSize = reader.read_int()
self.secondaryTextures = [
SecondarySpriteTexture(reader) for _ in range(secondaryTexturesSize)
]
if version >= (5, 6): # 5.6 and up
SubMeshesSize = reader.read_int()
self.m_SubMeshes = [SubMesh(reader) for _ in range(SubMeshesSize)]
IndexBufferSize = reader.read_int()
self.m_IndexBuffer = reader.read_bytes(IndexBufferSize)
reader.align_stream()
self.m_VertexData = VertexData(reader)
else:
verticesSize = reader.read_int()
self.vertices = [SpriteVertex(reader) for _ in range(verticesSize)]
self.indices = reader.read_u_short_array()
reader.align_stream()
if version >= (2018,): # 2018 and up
self.m_Bindpose = reader.read_matrix_array()
if version < (2018, 2): # 2018.2 down
self.m_SourceSkinSize = reader.read_int()
self.m_SourceSkin = [BoneWeights4(reader)]
self.textureRect = reader.read_rectangle_f()
self.textureRectOffset = reader.read_vector2()
if version >= (5, 6): # 5.6 and up
self.atlasRectOffset = reader.read_vector2()
self.settingsRaw = SpriteSettings(reader)
if version >= (4, 5): # 4.5 and up
self.uvTransform = reader.read_vector4()
if version >= (2017,): # 2017 and up
self.downscaleMultiplier = reader.read_float()
def save(self, writer, version):
self.texture.save(writer) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture.save(writer) # Texture2D
if version >= (2019,): # 2019 and up
writer.write_int(len(self.secondaryTextures))
for tex in self.secondaryTextures:
tex.save(writer)
if version >= (5, 6): # 5.6 and up
writer.write_int(len(self.m_SubMeshes))
for mesh in self.m_SubMeshes:
mesh.save(writer, version)
writer.write_int(len(self.m_IndexBuffer))
writer.write_bytes(self.m_IndexBuffer)
writer.align_stream()
self.m_VertexData.save(writer, version)
else:
writer.write_int(len(self.vertices))
for vertex in self.vertices:
vertex.save(writer, version)
writer.write_u_short_array(self.indices)
writer.align_stream()
if version >= (2018,): # 2018 and up
writer.write_matrix_array(self.m_Bindpose)
if version < (2018, 2): # 2018.2 down
writer.write_int(self.m_SourceSkinSize)
self.m_SourceSkin[0].save(writer)
writer.write_rectangle_f(self.textureRect)
writer.write_vector2(self.textureRectOffset)
if version >= (5, 6): # 5.6 and up
writer.write_vector2(self.atlasRectOffset)
self.settingsRaw.save(writer)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.uvTransform)
if version >= (2017,): # 2017 and up
writer.write_float(self.downscaleMultiplier)
| 34.563265
| 90
| 0.599906
| 1,003
| 8,468
| 4.879362
| 0.155533
| 0.051083
| 0.026972
| 0.039232
| 0.380057
| 0.263997
| 0.231099
| 0.190437
| 0.087045
| 0.069473
| 0
| 0.042114
| 0.296174
| 8,468
| 244
| 91
| 34.704918
| 0.779027
| 0.05633
| 0
| 0.319372
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000378
| 0
| 0
| 1
| 0.068063
| false
| 0
| 0.036649
| 0.010471
| 0.188482
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91e27e8ce2a32cb1f2fa0c55d35f35399d00f99
| 11,123
|
py
|
Python
|
eazy/filters.py
|
albertfxwang/eazy-py
|
bcfd8a1e49f077adc794202871345542ab29800b
|
[
"MIT"
] | null | null | null |
eazy/filters.py
|
albertfxwang/eazy-py
|
bcfd8a1e49f077adc794202871345542ab29800b
|
[
"MIT"
] | null | null | null |
eazy/filters.py
|
albertfxwang/eazy-py
|
bcfd8a1e49f077adc794202871345542ab29800b
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
from astropy.table import Table
from . import utils
__all__ = ["FilterDefinition", "FilterFile", "ParamFilter"]
VEGA_FILE = os.path.join(utils.path_to_eazy_data(),
'alpha_lyr_stis_008.fits')
VEGA = Table.read(VEGA_FILE)
for c in VEGA.colnames:
VEGA[c] = VEGA[c].astype(float)
class FilterDefinition:
def __init__(self, name=None, wave=None, throughput=None, bp=None):
"""
Bandpass object
Parameters
----------
name : str
Label name
wave : array
Wavelength array, in `astropy.units.Angstrom`.
throughput : array
Throughput, arbitrary normalization
bp : optional, `pysynphot.obsbandpass` object
`pysynphot` filter bandpass
"""
self.name = name
self.wave = wave
self.throughput = throughput
self.Aflux = 1.
# pysynphot Bandpass
if bp is not None:
self.wave = np.cast[np.double](bp.wave)
self.throughput = np.cast[np.double](bp.throughput)
self.name = bp.name
self.norm = 1.
if self.throughput is not None:
self.norm = np.trapz(self.throughput/self.wave, self.wave)
def __repr__(self):
return self.name.__repr__()
def __str__(self):
return self.name.__str__()
def get_extinction(self, EBV=0, Rv=3.1):
"""
Extinction factor
"""
import astropy.units as u
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
self.Alambda = f99(self.wave)
self.Aflux = 10**(-0.4*self.Alambda)
def extinction_correction(self, EBV, Rv=3.1, mag=True, source_lam=None, source_flux=None):
"""
Get the MW extinction correction within the filter.
Optionally supply a source spectrum.
"""
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
if self.wave is None:
print('Filter not defined.')
return False
if source_flux is None:
source_flux = self.throughput*0.+1
else:
source_flux = interp(self.wave, source_lam, source_flux, left=0, right=0)
if (self.wave.min() < 910) | (self.wave.max() > 6.e4):
Alambda = 0.
else:
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
Alambda = f99(self.wave)
delta = np.trapz(self.throughput*source_flux*10**(-0.4*Alambda), self.wave) / np.trapz(self.throughput*source_flux, self.wave)
if mag:
return 2.5*np.log10(delta)
else:
return 1./delta
@property
def ABVega(self):
"""
Compute AB-Vega conversion
"""
from astropy.constants import c
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
# Union of throughput and Vega spectrum arrays
full_x = np.hstack([self.wave, VEGA['WAVELENGTH']])
full_x = full_x[np.argsort(full_x)]
# Vega spectrum, units of f-lambda flux density, cgs
# Interpolate to wavelength grid, no extrapolation
vega_full = interp(full_x, VEGA['WAVELENGTH'], VEGA['FLUX'],
left=0, right=0)
thru_full = interp(full_x, self.wave, self.throughput,
left=0, right=0)
# AB = 0, same units
absp = 3631*1e-23*c.to(u.m/u.s).value*1.e10/full_x**2
# Integrate over the bandpass, flam dlam
num = np.trapz(vega_full*thru_full, full_x)
den = np.trapz(absp*thru_full, full_x)
return -2.5*np.log10(num/den)
@property
def pivot(self):
"""
Pivot wavelength
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
integrator = np.trapz
# Note the argument order: np.trapz(y, x). Integrating y=lambda against
# x=lambda*T (and y=lambda against x=T/lambda below) equals
# -integral(lambda*T dlambda) and -integral(T/lambda dlambda) respectively,
# by parts, provided the throughput falls to zero at the band edges; the
# signs cancel in the ratio, giving the standard pivot wavelength.
num = integrator(self.wave, self.wave*self.throughput)
den = integrator(self.wave, self.throughput/self.wave)
pivot = np.sqrt(num/den)
return pivot
@property
def equivwidth(self):
"""
Filter equivalent width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
return np.trapz(self.throughput, self.wave)
@property
def rectwidth(self):
"""
Filter rectangular width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
rect = self.equivwidth / self.throughput.max()
return rect
@property
def ctw95(self):
"""
95% cumulative throughput width
http://www.stsci.edu/hst/acs/analysis/bandwidths/#keywords
"""
dl = np.diff(self.wave)
filt = np.cumsum((self.wave*self.throughput)[1:]*dl)
ctw95 = np.interp([0.025, 0.975], filt/filt.max(), self.wave[1:])
return np.diff(ctw95)[0]
def for_filter_file(self, row_str='{i:6} {wave:.5e} {thru:.5e}'):
"""
Return a string that can be put in the EAZY filter file
"""
header = '{0} {1} lambda_c= {2:.4e} AB-Vega= {3:.3f} w95={4:.1f}'
N = len(self.wave)
lines = [header.format(N, self.name.split('lambda_c')[0],
self.pivot, self.ABVega, self.ctw95)]
lines += [row_str.format(i=i+1, wave=w, thru=t)
for i, (w, t) in enumerate(zip(self.wave, self.throughput))]
return '\n'.join(lines)
class FilterFile:
def __init__(self, file='FILTER.RES.latest', path='./'):
"""
Read a EAZY filter file.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from eazy.filters import FilterFile
res = FilterFile(path=None)
print(len(res.filters))
bp = res[205]
print(bp)
fig, ax = plt.subplots(1,1,figsize=(6,4))
ax.plot(bp.wave, bp.throughput, label=bp.name.split()[0])
ax.set_xlabel('wavelength, Angstroms')
ax.set_ylabel('throughput')
ax.legend()
ax.grid()
fig.tight_layout(pad=0.5)
"""
if path is None:
file_path = os.path.join(os.getenv('EAZYCODE'), 'filters', file)
else:
file_path = os.path.join(path, file)
with open(file_path, 'r') as fp:
lines = fp.readlines()
self.filename = file_path
filters = []
wave = []
trans = []
header = ''
for line in lines:
if 'lambda_c' in line:
if len(wave) > 0:
# Make filter from lines already read in
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
filters.append(new_filter)
# Initialize filter
header = ' '.join(line.split()[1:])
wave = []
trans = []
else:
lspl = np.cast[float](line.split())
wave.append(lspl[1])
trans.append(lspl[2])
# last one
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
filters.append(new_filter)
self.filters = filters
@property
def NFILT(self):
"""
Number of filters in the list
"""
return len(self.filters)
def __getitem__(self, i1):
"""
Return unit-indexed filter, e.g., 161 = 2mass-j
"""
return self.filters[i1-1]
def names(self, verbose=True):
"""
Print the filter names.
"""
if verbose:
for i in range(len(self.filters)):
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
else:
string_list = ['{0:5d} {1}\n'.format(i+1, self.filters[i].name) for i in range(len(self.filters))]
return string_list
def write(self, file='xxx.res', verbose=True):
"""
Dump the filter information to a filter file.
"""
fp = open(file,'w')
for filter in self.filters:
fp.write('{0:6d} {1}\n'.format(len(filter.wave), filter.name))
for i in range(len(filter.wave)):
fp.write('{0:6d} {1:.5e} {2:.5e}\n'.format(i+1, filter.wave[i], filter.throughput[i]))
fp.close()
string_list = self.names(verbose=False)
fp = open(file+'.info', 'w')
fp.writelines(string_list)
fp.close()
if verbose:
print('Wrote <{0}[.info]>'.format(file))
def search(self, search_string, case=False, verbose=True):
"""
Search filter names for ``search_string``. If ``case`` is True, then
match case.
"""
import re
if not case:
search_string = search_string.upper()
matched = []
for i in range(len(self.filters)):
filt_name = self.filters[i].name
if not case:
filt_name = filt_name.upper()
if re.search(search_string, filt_name) is not None:
if verbose:
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
matched.append(i)
return np.array(matched)
class ParamFilter(FilterDefinition):
def __init__(self, line='# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'):
self.lambda_c = float(line.split('lambda_c=')[1])
self.name = line.split()[4]
self.fnumber = int(line.split('RES#')[1].split(':')[0])
self.cnumber = int(line.split('Filter #')[1].split(',')[0])
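# Worked example using the default header line: parsing
# '# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'
# yields lambda_c=4458.276253, name='COSMOS/SUBARU_filter_B.txt',
# fnumber=78, and cnumber=20.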
| 30.225543
| 134
| 0.507687
| 1,269
| 11,123
| 4.353822
| 0.22695
| 0.034751
| 0.017919
| 0.01991
| 0.237828
| 0.206878
| 0.1819
| 0.15095
| 0.142262
| 0.115113
| 0
| 0.023102
| 0.37346
| 11,123
| 367
| 135
| 30.307902
| 0.769694
| 0.186281
| 0
| 0.288889
| 0
| 0.005556
| 0.052257
| 0.005914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.072222
| 0.011111
| 0.272222
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91e9c056c9dab4c7981c513788ac7b746223cf5
| 672
|
py
|
Python
|
LeetCode/106.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | 1
|
2020-12-08T02:01:18.000Z
|
2020-12-08T02:01:18.000Z
|
LeetCode/106.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
LeetCode/106.py
|
KevinTMtz/CompetitiveProgramming
|
0bf8a297c404073df707b6d7b06965b055ccd872
|
[
"MIT"
] | null | null | null |
#
# LeetCode
#
# Problem - 106
# URL - https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
if not inorder:
return None
r = postorder.pop()
root = TreeNode(r)
index = inorder.index(r)
root.right = self.buildTree(inorder[index+1:], postorder)
root.left = self.buildTree(inorder[:index], postorder)
return root
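# Worked example (values from LeetCode's sample): inorder = [9, 3, 15, 20, 7],
# postorder = [9, 15, 7, 20, 3]. Popping 3 (the root) splits inorder into
# left [9] and right [15, 20, 7]; the right subtree must be built first
# because the remaining tail of postorder ([9, 15, 7, 20]) ends with the
# right subtree's nodes. The result is the tree 3 -> (9, 20 -> (15, 7)).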
| 24
| 97
| 0.651786
| 86
| 672
| 5.046512
| 0.476744
| 0.082949
| 0.092166
| 0.115207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00947
| 0.214286
| 672
| 27
| 98
| 24.888889
| 0.8125
| 0.443452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b91f064ec51160dd5a168a0ea9d44e81a3af31b7
| 44,880
|
py
|
Python
|
evalml/automl/automl_search.py
|
skvorekn/evalml
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/automl/automl_search.py
|
skvorekn/evalml
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
[
"BSD-3-Clause"
] | null | null | null |
evalml/automl/automl_search.py
|
skvorekn/evalml
|
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.callbacks import log_error_callback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_all_pipeline_names_unique,
get_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
get_core_objectives,
get_non_core_objectives,
get_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
get_logger,
log_subtitle,
log_title,
time_elapsed,
update_pipeline
)
logger = get_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
max_iterations=None,
max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
allowed_pipelines=None,
allowed_model_families=None,
start_iteration_callback=None,
add_result_callback=None,
error_callback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
max_iterations (int): Maximum number of iterations to search. If neither max_iterations
nor max_time is set, max_iterations defaults to 5.
max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
allowed_model_families to be ignored.
allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_callback (callable): Function called before each pipeline training iteration.
Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_callback (callable): Function called after each pipeline training iteration.
Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_callback (callable): Function called when `search()` errors and raises an Exception.
Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will call `log_error_callback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and
max_iterations have precedence over stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time series problems, values should be passed in for the gap and max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
pipeline_parameters (dict): A dict of the parameters used to initialize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2.
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_callback = start_iteration_callback
self.add_result_callback = add_result_callback
self.error_callback = error_callback or log_error_callback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = get_default_primary_search_objective(self.problem_type.value)
objective = get_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = get_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [get_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not isinstance(max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
if isinstance(max_time, (int, float)) and max_time < 0:
raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
if max_batches is not None and max_batches < 0:
raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
if max_iterations is not None and max_iterations < 0:
raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
self.max_iterations = max_iterations
self.max_batches = max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.max_iterations and not self.max_time and not self.max_batches:
self.max_batches = 1
logger.info("Using default limit of max_batches=1.\n")
if patience and (not isinstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.allowed_pipelines = allowed_pipelines
self.allowed_model_families = allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]
if self.allowed_pipelines == []:
raise ValueError("No allowed pipelines to search")
check_all_pipeline_names_unique(self.allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and len(self.allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
if self.max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.max_batches and self.max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = len(self.allowed_pipelines) + 1
num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.max_iterations = (1 + len(self.allowed_pipelines) +
self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_callback=self._should_continue,
pre_evaluation_callback=self._pre_evaluation_callback,
post_evaluation_callback=self._post_evaluation_callback)
self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))
logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
if len(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
max_iterations=self.max_iterations,
allowed_pipelines=self.allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
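# Illustrative usage sketch (not part of the original module): assuming this
# __init__ belongs to evalml's AutoMLSearch and X, y are training data already
# loaded, a minimal search could look like:
#
#     automl = AutoMLSearch(X_train=X, y_train=y, problem_type='binary',
#                           max_batches=1)
#     automl.search()
#     best = automl.best_pipeline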
def _pre_evaluation_callback(self, pipeline):
if self.start_iteration_callback:
self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self)
desc = f"{pipeline.name}"
if len(desc) > AutoMLSearch._MAX_NAME_LEN:
desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
batch_number = 1
if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
batch_number = self._automl_algorithm.batch_number
update_pipeline(logger,
desc,
len(self._results['pipeline_results']) + 1,
self.max_iterations,
self._start,
batch_number,
self.show_batch_output)
def _validate_objective(self, objective):
non_core_objectives = get_non_core_objectives()
if isinstance(objective, type):
if objective in non_core_objectives:
raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! "
"Use evalml.objectives.utils.get_core_objective_names() "
"to get all objective names allowed in automl.")
return objective()
return objective
def __str__(self):
def _print_list(obj_list):
lines = sorted(['\t{}'.format(o.name) for o in obj_list])
return '\n'.join(lines)
def _get_funct_name(function):
if callable(function):
return function.__name__
else:
return None
search_desc = (
f"{handle_problem_types(self.problem_type).name} Search\n\n"
f"Parameters: \n{'='*20}\n"
f"Objective: {get_objective(self.objective).name}\n"
f"Max Time: {self.max_time}\n"
f"Max Iterations: {self.max_iterations}\n"
f"Max Batches: {self.max_batches}\n"
f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
f"Patience: {self.patience}\n"
f"Tolerance: {self.tolerance}\n"
f"Data Splitting: {self.data_splitter}\n"
f"Tuner: {self.tuner_class.__name__}\n"
f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
f"Random Seed: {self.random_seed}\n"
f"n_jobs: {self.n_jobs}\n"
f"Optimize Thresholds: {self.optimize_thresholds}\n"
)
rankings_desc = ""
if not self.rankings.empty:
rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string()
rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
required_parameters = {'gap', 'max_delay'}
if not problem_configuration or not all(p in problem_configuration for p in required_parameters):
raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay "
f"parameters. Received {problem_configuration}.")
return problem_configuration or {}
def _handle_keyboard_interrupt(self):
"""Presents a prompt to the user asking if they want to stop the search.
Returns:
bool: If True, search should terminate early
"""
leading_char = "\n"
start_of_loop = time.time()
while True:
choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower()
if choice == "y":
logger.info("Exiting AutoMLSearch.")
return True
elif choice == "n":
# So that the time in this loop does not count towards the time budget (if set)
time_in_loop = time.time() - start_of_loop
self._start += time_in_loop
return False
else:
leading_char = ""
def search(self, show_iteration_plot=True):
"""Find the best pipeline for the data set.
Arguments:
show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
Disabled by default in non-Jupyter environments.
"""
if self._searched:
logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
return
# don't show iteration plot outside of a jupyter notebook
if show_iteration_plot:
try:
get_ipython
except NameError:
show_iteration_plot = False
log_title(logger, "Beginning pipeline search")
logger.info("Optimizing for %s. " % self.objective.name)
logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower'))
logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
if self.max_batches is not None:
logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ")
elif self.max_iterations is not None:
logger.info("Searching up to %s pipelines. " % self.max_iterations)
if self.max_time is not None:
logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time)
logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families]))
self.search_iteration_plot = None
if self.plot:
self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
self._start = time.time()
try:
self._add_baseline_pipelines()
except KeyboardInterrupt:
if self._handle_keyboard_interrupt():
self._interrupted = True
current_batch_pipelines = []
current_batch_pipeline_scores = []
new_pipeline_ids = []
loop_interrupted = False
while self._should_continue():
try:
if not loop_interrupted:
current_batch_pipelines = self._automl_algorithm.next_batch()
except StopIteration:
logger.info('AutoML Algorithm out of recommendations, ending')
break
try:
new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
loop_interrupted = False
except KeyboardInterrupt:
loop_interrupted = True
if self._handle_keyboard_interrupt():
break
full_rankings = self.full_rankings
current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all():
raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
self.search_duration = time.time() - self._start
elapsed_time = time_elapsed(self._start)
desc = f"\nSearch finished after {elapsed_time}"
desc = desc.ljust(self._MAX_NAME_LEN)
logger.info(desc)
self._find_best_pipeline()
if self._best_pipeline is not None:
best_pipeline = self.rankings.iloc[0]
best_pipeline_name = best_pipeline["pipeline_name"]
logger.info(f"Best pipeline: {best_pipeline_name}")
logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
self._searched = True
def _find_best_pipeline(self):
"""Finds the best pipeline in the rankings
If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
if len(self.rankings) == 0:
return
best_pipeline = self.rankings.iloc[0]
if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])):
best_pipeline = self.get_pipeline(best_pipeline['id'])
if self._train_best_pipeline:
if best_pipeline.model_family == ModelFamily.ENSEMBLE:
X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
else:
X_train = self.X_train
y_train = self.y_train
if hasattr(self.data_splitter, "transform_sample"):
train_indices = self.data_splitter.transform_sample(X_train, y_train)
X_train = X_train.iloc[train_indices]
y_train = y_train.iloc[train_indices]
best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
self.optimize_thresholds, self.objective)
self._best_pipeline = best_pipeline
def _num_pipelines(self):
"""Return the number of pipeline evaluations which have been made
Returns:
int: the number of pipeline evaluations made in the search
"""
return len(self._results['pipeline_results'])
def _should_continue(self):
"""Given the original stopping criterion and current state, should the search continue?
Returns:
bool: True if yes, False if no.
"""
if self._interrupted:
return False
# for add_to_rankings
if self._searched:
return True
# Run at least one pipeline for every search
num_pipelines = self._num_pipelines()
if num_pipelines == 0:
return True
# check max_time and max_iterations
elapsed = time.time() - self._start
if self.max_time and elapsed >= self.max_time:
return False
elif self.max_iterations and num_pipelines >= self.max_iterations:
return False
# check for early stopping
if self.patience is None or self.tolerance is None:
return True
first_id = self._results['search_order'][0]
best_score = self._results['pipeline_results'][first_id]['score']
num_without_improvement = 0
for id in self._results['search_order'][1:]:
curr_score = self._results['pipeline_results'][id]['score']
significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
if score_improved and significant_change:
best_score = curr_score
num_without_improvement = 0
else:
num_without_improvement += 1
if num_without_improvement >= self.patience:
logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience))
return False
return True
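# Worked example (sketch) of the early-stopping check above: with
# tolerance=0.05 and best_score=0.80 on a greater-is-better objective, a new
# score of 0.83 improves the metric, but abs((0.83 - 0.80) / 0.80) = 0.0375
# is not above the tolerance, so it does not count as a significant change
# and num_without_improvement still increments.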
def _validate_problem_type(self):
for obj in self.additional_objectives:
if not obj.is_defined_for_problem_type(self.problem_type):
raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value))
for pipeline in self.allowed_pipelines or []:
if pipeline.problem_type != self.problem_type:
raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value))
def _add_baseline_pipelines(self):
"""Fits a baseline pipeline to the data.
This is the first pipeline fit during search.
"""
if self.problem_type == ProblemTypes.BINARY:
baseline = ModeBaselineBinaryPipeline(parameters={})
elif self.problem_type == ProblemTypes.MULTICLASS:
baseline = ModeBaselineMulticlassPipeline(parameters={})
elif self.problem_type == ProblemTypes.REGRESSION:
baseline = MeanBaselineRegressionPipeline(parameters={})
else:
pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline,
ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline,
ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type]
gap = self.problem_configuration['gap']
max_delay = self.problem_configuration['max_delay']
baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay},
"Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}})
self._engine.evaluate_batch([baseline])
@staticmethod
def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class):
scores = defaultdict(int)
n_folds = len(cv_data)
for fold_data in cv_data:
for field, value in fold_data['all_objective_scores'].items():
# The 'all_objective_scores' field contains scores for all objectives
# but also fields like "# Training" and "# Testing", so we want to exclude them since
# they are not scores
if field in objective_name_to_class:
scores[field] += value
return {objective: float(score) / n_folds for objective, score in scores.items()}
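# Worked example (sketch) for the averaging above: with two folds whose
# 'all_objective_scores' are {'F1': 0.6, '# Training': 100} and
# {'F1': 0.8, '# Training': 100}, only 'F1' passes the objective_name_to_class
# filter, so the result is {'F1': (0.6 + 0.8) / 2} == {'F1': 0.7}.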
def _post_evaluation_callback(self, pipeline, evaluation_results):
training_time = evaluation_results['training_time']
cv_data = evaluation_results['cv_data']
cv_scores = evaluation_results['cv_scores']
is_baseline = pipeline.model_family == ModelFamily.BASELINE
cv_score = cv_scores.mean()
percent_better_than_baseline = {}
mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class)
if is_baseline:
self._baseline_cv_scores = mean_cv_all_objectives
for obj_name in mean_cv_all_objectives:
objective_class = self.objective_name_to_class[obj_name]
# In the event add_to_rankings is called before search _baseline_cv_scores will be empty so we will return
# nan for the base score.
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name],
self._baseline_cv_scores.get(obj_name, np.nan))
percent_better_than_baseline[obj_name] = percent_better
high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
pipeline_id = len(self._results['pipeline_results'])
self._results['pipeline_results'][pipeline_id] = {
"id": pipeline_id,
"pipeline_name": pipeline.name,
"pipeline_class": type(pipeline),
"pipeline_summary": pipeline.summary,
"parameters": pipeline.parameters,
"score": cv_score,
"high_variance_cv": high_variance_cv,
"training_time": training_time,
"cv_data": cv_data,
"percent_better_than_baseline_all_objectives": percent_better_than_baseline,
"percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
"validation_score": cv_scores[0]
}
if pipeline.model_family == ModelFamily.ENSEMBLE:
input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
self._results['search_order'].append(pipeline_id)
if not is_baseline:
score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
try:
self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
except PipelineNotFoundError:
pass
if self.search_iteration_plot:
self.search_iteration_plot.update()
if self.add_result_callback:
self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self)
return pipeline_id
def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
"""Checks cross-validation scores and logs a warning if variance is higher than specified threshhold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
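# Worked example (sketch): cv_scores of pd.Series([0.60, 0.90, 0.75]) have
# mean 0.75 and sample std 0.15, so the coefficient of variation is
# 0.15 / 0.75 = 0.20, which is not strictly greater than the default 0.2
# threshold; a slightly wider spread would trigger the warning.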
def get_pipeline(self, pipeline_id):
"""Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
initialized with the parameters used to train that pipeline during automl search.
Arguments:
pipeline_id (int): pipeline to retrieve
Returns:
PipelineBase: untrained pipeline instance associated with the provided ID
"""
pipeline_results = self.results['pipeline_results'].get(pipeline_id)
if pipeline_results is None:
raise PipelineNotFoundError("Pipeline not found in automl results")
pipeline_class = pipeline_results.get('pipeline_class')
parameters = pipeline_results.get('parameters')
if pipeline_class is None or parameters is None:
raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
return pipeline_class(parameters, random_seed=self.random_seed)
def describe_pipeline(self, pipeline_id, return_dict=False):
"""Describe a pipeline
Arguments:
pipeline_id (int): pipeline to describe
return_dict (bool): If True, return dictionary of information
about pipeline. Defaults to False.
Returns:
Description of specified pipeline. Includes information such as
type of pipeline components, problem, training time, cross validation, etc.
"""
if pipeline_id not in self._results['pipeline_results']:
raise PipelineNotFoundError("Pipeline not found")
pipeline = self.get_pipeline(pipeline_id)
pipeline_results = self._results['pipeline_results'][pipeline_id]
pipeline.describe()
if pipeline.model_family == ModelFamily.ENSEMBLE:
logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
log_subtitle(logger, "Training")
logger.info("Training for {} problems.".format(pipeline.problem_type))
if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective))
logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
log_subtitle(logger, "Cross Validation", underline="-")
all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]]
all_objective_scores = pd.DataFrame(all_objective_scores)
for c in all_objective_scores:
if c in ["# Training", "# Validation"]:
all_objective_scores[c] = all_objective_scores[c].astype("object")
continue
mean = all_objective_scores[c].mean(axis=0)
std = all_objective_scores[c].std(axis=0)
all_objective_scores.loc["mean", c] = mean
all_objective_scores.loc["std", c] = std
all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf
all_objective_scores = all_objective_scores.fillna("-")
with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False):
logger.info(all_objective_scores)
if return_dict:
return pipeline_results
def add_to_rankings(self, pipeline):
"""Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
Arguments:
pipeline (PipelineBase): pipeline to train and evaluate.
"""
pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
for parameter in pipeline_rows['parameters']:
if pipeline.parameters == parameter:
return
self._engine.evaluate_batch([pipeline])
self._find_best_pipeline()
@property
def results(self):
"""Class that allows access to a copy of the results from `automl_search`.
Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
and `search_order`: a list describing the order the pipelines were searched.
"""
return copy.deepcopy(self._results)
@property
def rankings(self):
"""Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first")
@property
def full_rankings(self):
"""Returns a pandas.DataFrame with scoring results from all pipelines searched"""
ascending = True
if self.objective.greater_is_better:
ascending = False
full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
"percent_better_than_baseline", "high_variance_cv", "parameters"]
if not self._results['pipeline_results']:
return pd.DataFrame(columns=full_rankings_cols)
rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
rankings_df = rankings_df[full_rankings_cols]
rankings_df.sort_values("score", ascending=ascending, inplace=True)
rankings_df.reset_index(drop=True, inplace=True)
return rankings_df
@property
def best_pipeline(self):
"""Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
Returns:
PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
"""
if not self._best_pipeline:
raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
return self._best_pipeline
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Saves AutoML object at file path
Arguments:
file_path (str): location to save file
pickle_protocol (int): the pickle data stream format.
Returns:
None
"""
with open(file_path, 'wb') as f:
cloudpickle.dump(self, f, protocol=pickle_protocol)
@staticmethod
def load(file_path):
"""Loads AutoML object at file path
Arguments:
file_path (str): location to find file to load
Returns:
AutoMLSearch object
"""
with open(file_path, 'rb') as f:
return cloudpickle.load(f)
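# Usage sketch (file name is illustrative): persist a finished search and
# restore it in a new session:
#
#     automl.save('automl.cloudpickle')
#     restored = AutoMLSearch.load('automl.cloudpickle')
#     restored.rankings  # same rankings as the original object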
def train_pipelines(self, pipelines):
"""Train a list of pipelines on the training data.
This can be helpful for training pipelines once the search is complete.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
Returns:
Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
Note that any pipelines that error out during training will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.train_batch(pipelines)
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
"""Score a list of pipelines on the given holdout data.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to score.
X_holdout (ww.DataTable, pd.DataFrame): Holdout features.
y_holdout (ww.DataTable, pd.DataFrame): Holdout targets for scoring.
objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
Returns:
Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
Note that any pipelines that error out during scoring will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
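# Usage sketch (X_holdout, y_holdout are hypothetical holdout data): retrain
# the top three searched pipelines and score them on held-out data:
#
#     top = [automl.get_pipeline(i) for i in automl.rankings['id'][:3]]
#     fitted = automl.train_pipelines(top)
#     scores = automl.score_pipelines(list(fitted.values()),
#                                     X_holdout, y_holdout, ['F1'])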
[per-file numeric quality-signal columns omitted]

hexsha: b92225fd1fc48f3b53478df0ef2d1501b1d04475 | size: 1,625 | ext: py | lang: Python
path: yellowbrick/regressor/base.py | repo: Juan0001/yellowbrick-docs-zh @ 36275d9704fc2a946c5bec5f802106bb5281efd1 | licenses: ["Apache-2.0"]
stars: 20 (2018-03-24T02:29:20.000Z to 2022-03-03T05:01:40.000Z) | issues: 4 (2018-03-20T12:01:17.000Z to 2019-04-07T16:02:19.000Z) | forks: 5 (2018-03-17T08:18:57.000Z to 2019-11-15T02:20:20.000Z)
# yellowbrick.regressor.base
# Base classes for regressor Visualizers.
#
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Fri Jun 03 10:30:36 2016 -0700
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: base.py [7d3f5e6] benjamin@bengfort.com $
"""
Base classes for regressor Visualizers.
"""
##########################################################################
## Imports
##########################################################################
from ..utils import isregressor
from ..base import ScoreVisualizer
from ..exceptions import YellowbrickTypeError
## Packages for export
__all__ = [
"RegressionScoreVisualizer",
]
##########################################################################
## Regression Visualization Base Object
##########################################################################
class RegressionScoreVisualizer(ScoreVisualizer):
"""
Base class for all ScoreVisualizers that evaluate a regression estimator.
The primary functionality of this class is to perform a check to ensure
the passed in estimator is a regressor, otherwise it raises a
``YellowbrickTypeError``.
"""
def __init__(self, model, ax=None, **kwargs):
if not isregressor(model):
raise YellowbrickTypeError(
"This estimator is not a regressor; try a classifier or "
"clustering score visualizer instead!"
)
super(RegressionScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
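# Usage sketch (illustrative, not part of the original module): the type check
# accepts any scikit-learn regressor and rejects everything else:
#
#     from sklearn.linear_model import Lasso
#     RegressionScoreVisualizer(Lasso())  # OK: isregressor() is True
#
#     from sklearn.svm import SVC
#     RegressionScoreVisualizer(SVC())    # raises YellowbrickTypeError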
[per-file numeric quality-signal columns omitted]

hexsha: b92247a49fd2631992a5eddee925c5305320a529 | size: 2,941 | ext: py | lang: Python
path: contrib/stack/stripmapStack/crossmul.py | repo: falkamelung/isce2 @ edea69d4b6216f4ac729eba78f12547807a2751a | licenses: ["ECL-2.0", "Apache-2.0"]
stars: null | issues: null | forks: 1 (2021-06-05T16:39:25.000Z to 2021-06-05T16:39:25.000Z)
#!/usr/bin/env python3
import os
import argparse
import logging
import isce
import isceobj
from components.stdproc.stdproc import crossmul
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
def createParser():
'''
Command Line Parser.
'''
parser = argparse.ArgumentParser( description='Generate interferogram and amplitude images from two coregistered SLCs')
parser.add_argument('-m', '--master', type=str, dest='master', required=True,
help='Master image')
parser.add_argument('-s', '--slave', type=str, dest='slave', required=True,
help='Slave image')
parser.add_argument('-o', '--outdir', type=str, dest='prefix', default='crossmul',
help='Prefix of output int and amp files')
parser.add_argument('-a', '--alks', type=int, dest='azlooks', default=1,
help='Azimuth looks')
parser.add_argument('-r', '--rlks', type=int, dest='rglooks', default=1,
help='Range looks')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
return parser.parse_args(args=iargs)
def run(imageSlc1, imageSlc2, resampName, azLooks, rgLooks):
objSlc1 = isceobj.createSlcImage()
#right now imageSlc1 and 2 are just text files, need to open them as image
IU.copyAttributes(imageSlc1, objSlc1)
objSlc1.setAccessMode('read')
objSlc1.createImage()
objSlc2 = isceobj.createSlcImage()
IU.copyAttributes(imageSlc2, objSlc2)
objSlc2.setAccessMode('read')
objSlc2.createImage()
slcWidth = imageSlc1.getWidth()
intWidth = int(slcWidth / rgLooks)
lines = min(imageSlc1.getLength(), imageSlc2.getLength())
resampAmp = resampName + '.amp'
resampInt = resampName + '.int'
objInt = isceobj.createIntImage()
objInt.setFilename(resampInt)
objInt.setWidth(intWidth)
imageInt = isceobj.createIntImage()
IU.copyAttributes(objInt, imageInt)
objInt.setAccessMode('write')
objInt.createImage()
objAmp = isceobj.createAmpImage()
objAmp.setFilename(resampAmp)
objAmp.setWidth(intWidth)
imageAmp = isceobj.createAmpImage()
IU.copyAttributes(objAmp, imageAmp)
objAmp.setAccessMode('write')
objAmp.createImage()
objCrossmul = crossmul.createcrossmul()
objCrossmul.width = slcWidth
objCrossmul.length = lines
objCrossmul.LooksDown = azLooks
objCrossmul.LooksAcross = rgLooks
objCrossmul.crossmul(objSlc1, objSlc2, objInt, objAmp)
for obj in [objInt, objAmp, objSlc1, objSlc2]:
obj.finalizeImage()
return imageInt, imageAmp
def main(iargs=None):
    '''
    Main driver.
    '''
    inps = cmdLineParse(iargs)
img1 = isceobj.createImage()
img1.load(inps.master + '.xml')
img2 = isceobj.createImage()
img2.load(inps.slave + '.xml')
os.makedirs(os.path.dirname(inps.prefix), exist_ok=True)
run(img1, img2, inps.prefix, inps.azlooks, inps.rglooks)
if __name__ == '__main__':
    main()
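# Command-line usage sketch (file names are illustrative):
#     crossmul.py -m master.slc -s slave.slc -o ifg/crossmul -a 4 -r 8
# writes the multilooked interferogram ifg/crossmul.int and the amplitude
# file ifg/crossmul.amp.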
[per-file numeric quality-signal columns omitted]

hexsha: b92338655b37aa1b9646d78826676f4639eac7d3 | size: 550 | ext: py | lang: Python
path: 27. Remove Element/solution2.py | repo: sunshot/LeetCode @ 8f6503201831055f1d49ed3abb25be44a13ec317 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
if not nums:
return 0
curr = 0
n = len(nums)
while curr < n:
if nums[curr] == val:
nums[curr] = nums[n-1]
n -= 1
else:
curr += 1
return n
if __name__ == '__main__':
solution = Solution()
nums = [3,2,2,3]
val = 3
ans = solution.removeElement(nums, val)
# print(ans)
print(nums[:ans])
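# For the example above the relative order of survivors is not preserved:
# nums = [3,2,2,3] with val = 3 leaves n = 2, so this prints [2, 2].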
[per-file numeric quality-signal columns omitted]

hexsha: b9270600c4aae588202efc6c296f0228f4d2527a | size: 21,441 | ext: py | lang: Python
path: tensorboard/backend/event_processing/data_provider_test.py | repo: hongxu-jia/tensorboard @ 98d4dadc61fd5a0580bed808653c59fb37748893 | licenses: ["Apache-2.0"]
stars: 1 (2021-01-07T14:58:47.000Z to 2021-01-07T14:58:47.000Z) | issues: null | forks: null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for `tensorboard.backend.event_processing.data_provider`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.compat.proto import summary_pb2
from tensorboard.data import provider as base_provider
from tensorboard.plugins.graph import metadata as graph_metadata
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.histogram import summary_v2 as histogram_summary
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.plugins.scalar import summary_v2 as scalar_summary
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.image import summary_v2 as image_summary
from tensorboard.util import tensor_util
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
tf1.enable_eager_execution()
class MultiplexerDataProviderTest(tf.test.TestCase):
def setUp(self):
super(MultiplexerDataProviderTest, self).setUp()
self.logdir = self.get_temp_dir()
self.ctx = context.RequestContext()
logdir = os.path.join(self.logdir, "polynomials")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar(
"square", i ** 2, step=2 * i, description="boxen"
)
scalar_summary.scalar("cube", i ** 3, step=3 * i)
logdir = os.path.join(self.logdir, "waves")
with tf.summary.create_file_writer(logdir).as_default():
for i in xrange(10):
scalar_summary.scalar("sine", tf.sin(float(i)), step=i)
scalar_summary.scalar(
"square", tf.sign(tf.sin(float(i))), step=i
)
# Summary with rank-0 data but not owned by the scalars plugin.
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "marigraphs"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"high_tide", tensor=i, step=i, metadata=metadata
)
# Summary with rank-1 data of scalar data class (bad!).
metadata = summary_pb2.SummaryMetadata()
metadata.plugin_data.plugin_name = "greetings"
metadata.data_class = summary_pb2.DATA_CLASS_SCALAR
tf.summary.write(
"bad", tensor=[i, i], step=i, metadata=metadata
)
logdir = os.path.join(self.logdir, "lebesgue")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"),
("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"),
]
for (description, distribution, name) in data:
tensor = tf.constant([distribution], dtype=tf.float64)
for i in xrange(1, 11):
histogram_summary.histogram(
name, tensor * i, step=i, description=description
)
logdir = os.path.join(self.logdir, "mondrian")
with tf.summary.create_file_writer(logdir).as_default():
data = [
("red", (221, 28, 38), "top-right"),
("blue", (1, 91, 158), "bottom-left"),
("yellow", (239, 220, 111), "bottom-right"),
]
for (name, color, description) in data:
image_1x1 = tf.constant([[[color]]], dtype=tf.uint8)
for i in xrange(1, 11):
# Use a non-monotonic sequence of sample sizes to
# test `max_length` calculation.
k = 6 - abs(6 - i) # 1, .., 6, .., 2
# a `k`-sample image summary of `i`-by-`i` images
image = tf.tile(image_1x1, [k, i, i, 1])
image_summary.image(
name,
image,
step=i,
description=description,
max_outputs=99,
)
def create_multiplexer(self):
multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory(self.logdir)
multiplexer.Reload()
return multiplexer
def create_provider(self):
multiplexer = self.create_multiplexer()
return data_provider.MultiplexerDataProvider(multiplexer, self.logdir)
def test_data_location(self):
provider = self.create_provider()
result = provider.data_location(self.ctx, experiment_id="unused")
self.assertEqual(result, self.logdir)
def test_list_plugins_with_no_graph(self):
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_plugins_with_graph(self):
with tf.compat.v1.Graph().as_default() as graph:
writer = tf.compat.v1.summary.FileWriter(self.logdir)
writer.add_graph(graph)
writer.flush()
provider = self.create_provider()
result = provider.list_plugins(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
"greetings",
"marigraphs",
graph_metadata.PLUGIN_NAME,
histogram_metadata.PLUGIN_NAME,
image_metadata.PLUGIN_NAME,
scalar_metadata.PLUGIN_NAME,
],
)
def test_list_runs(self):
# We can't control the timestamps of events written to disk (without
# manually reading the tfrecords, modifying the data, and writing
# them back out), so we provide a fake multiplexer instead.
start_times = {
"second_2": 2.0,
"first": 1.5,
"no_time": None,
"second_1": 2.0,
}
class FakeMultiplexer(object):
def Runs(multiplexer):
result = ["second_2", "first", "no_time", "second_1"]
self.assertItemsEqual(result, start_times)
return result
def FirstEventTimestamp(multiplexer, run):
self.assertIn(run, start_times)
result = start_times[run]
if result is None:
raise ValueError("No event timestep could be found")
else:
return result
multiplexer = FakeMultiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, "fake_logdir"
)
result = provider.list_runs(self.ctx, experiment_id="unused")
self.assertItemsEqual(
result,
[
base_provider.Run(
run_id=run, run_name=run, start_time=start_time
)
for (run, start_time) in six.iteritems(start_times)
],
)
def test_list_scalars_all(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
sample = result["polynomials"]["square"]
self.assertIsInstance(sample, base_provider.ScalarTimeSeries)
self.assertEqual(sample.max_step, 18)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "boxen")
def test_list_scalars_filters(self):
provider = self.create_provider()
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["waves"], ["square"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
tags=["square", "quartic"]
),
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square"])
self.assertItemsEqual(result["waves"].keys(), ["square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(runs=["waves", "hugs"]),
)
self.assertItemsEqual(result.keys(), ["waves"])
self.assertItemsEqual(result["waves"].keys(), ["sine", "square"])
result = provider.list_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(["un"], ["likely"]),
)
self.assertEqual(result, {})
def test_read_scalars(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["waves", "polynomials", "unicorns"],
tags=["sine", "square", "cube", "iridescence"],
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["polynomials", "waves"])
self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"])
self.assertItemsEqual(result["waves"].keys(), ["square", "sine"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
self.assertEqual(
datum.value,
tensor_util.make_ndarray(event.tensor_proto).item(),
)
def test_read_scalars_downsamples(self):
# TODO(@wchargin): Verify that this always includes the most
# recent datum, as specified by the interface.
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name=scalar_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["waves"]["sine"], 3)
def test_read_scalars_but_not_rank_0(self):
provider = self.create_provider()
run_tag_filter = base_provider.RunTagFilter(["waves"], ["bad"])
# No explicit checks yet.
with six.assertRaisesRegex(
self,
ValueError,
"can only convert an array of size 1 to a Python scalar",
):
provider.read_scalars(
self.ctx,
experiment_id="unused",
plugin_name="greetings",
run_tag_filter=run_tag_filter,
downsample=100,
)
def test_list_tensors_all(self):
provider = self.create_provider()
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=None,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
sample = result["lebesgue"]["uniform"]
self.assertIsInstance(sample, base_provider.TensorTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(
sample.display_name, ""
) # not written by V2 summary ops
self.assertEqual(sample.description, "very smooth")
def test_list_tensors_filters(self):
provider = self.create_provider()
# Quick check only, as scalars and tensors use the same underlying
# filtering implementation.
result = provider.list_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
["lebesgue"], ["uniform"]
),
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform"])
def test_read_tensors(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
run_tag_filter = base_provider.RunTagFilter(
runs=["lebesgue"],
tags=["uniform", "bimodal"],
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
run_tag_filter=run_tag_filter,
downsample=100,
)
self.assertItemsEqual(result.keys(), ["lebesgue"])
self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"])
for run in result:
for tag in result[run]:
tensor_events = multiplexer.Tensors(run, tag)
self.assertLen(result[run][tag], len(tensor_events))
for (datum, event) in zip(result[run][tag], tensor_events):
self.assertEqual(datum.step, event.step)
self.assertEqual(datum.wall_time, event.wall_time)
np.testing.assert_equal(
datum.numpy,
tensor_util.make_ndarray(event.tensor_proto),
)
def test_read_tensors_downsamples(self):
multiplexer = self.create_multiplexer()
provider = data_provider.MultiplexerDataProvider(
multiplexer, self.logdir
)
result = provider.read_tensors(
self.ctx,
experiment_id="unused",
plugin_name=histogram_metadata.PLUGIN_NAME,
downsample=3,
)
self.assertLen(result["lebesgue"]["uniform"], 3)
def test_list_blob_sequences(self):
provider = self.create_provider()
with self.subTest("finds all time series for a plugin"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertIsInstance(sample, base_provider.BlobSequenceTimeSeries)
self.assertEqual(sample.max_step, 10)
# nothing to test for wall time, as it can't be mocked out
self.assertEqual(sample.plugin_content, b"")
self.assertEqual(sample.max_length, 6 + 2)
self.assertEqual(sample.description, "bottom-left")
self.assertEqual(sample.display_name, "")
with self.subTest("filters by run/tag"):
result = provider.list_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"],
base_provider.BlobSequenceTimeSeries,
)
def test_read_blob_sequences_and_read_blob(self):
provider = self.create_provider()
with self.subTest("reads all time series for a plugin"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
downsample=4,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(
result["mondrian"].keys(), ["red", "blue", "yellow"]
)
sample = result["mondrian"]["blue"]
self.assertLen(sample, 4) # downsampled from 10
last = sample[-1]
self.assertIsInstance(last, base_provider.BlobSequenceDatum)
self.assertEqual(last.step, 10)
self.assertLen(last.values, 2 + 2)
blobs = [
provider.read_blob(self.ctx, blob_key=v.blob_key)
for v in last.values
]
self.assertEqual(blobs[0], b"10")
self.assertEqual(blobs[1], b"10")
self.assertStartsWith(blobs[2], b"\x89PNG")
self.assertStartsWith(blobs[3], b"\x89PNG")
blue1 = blobs[2]
blue2 = blobs[3]
red1 = provider.read_blob(
self.ctx,
blob_key=result["mondrian"]["red"][-1].values[2].blob_key,
)
self.assertEqual(blue1, blue2)
self.assertNotEqual(blue1, red1)
with self.subTest("filters by run/tag"):
result = provider.read_blob_sequences(
self.ctx,
experiment_id="unused",
plugin_name=image_metadata.PLUGIN_NAME,
run_tag_filter=base_provider.RunTagFilter(
runs=["mondrian", "picasso"], tags=["yellow", "green't"]
),
downsample=1,
)
self.assertItemsEqual(result.keys(), ["mondrian"])
self.assertItemsEqual(result["mondrian"].keys(), ["yellow"])
self.assertIsInstance(
result["mondrian"]["yellow"][0],
base_provider.BlobSequenceDatum,
)
class DownsampleTest(tf.test.TestCase):
"""Tests for the `_downsample` private helper function."""
def test_deterministic(self):
xs = "abcdefg"
expected = data_provider._downsample(xs, k=4)
for _ in range(100):
actual = data_provider._downsample(xs, k=4)
self.assertEqual(actual, expected)
def test_underlong_ok(self):
xs = list("abcdefg")
actual = data_provider._downsample(xs, k=10)
expected = list("abcdefg")
self.assertIsNot(actual, xs)
self.assertEqual(actual, expected)
def test_inorder(self):
xs = list(range(10000))
actual = data_provider._downsample(xs, k=100)
self.assertEqual(actual, sorted(actual))
def test_zero(self):
xs = "abcdefg"
actual = data_provider._downsample(xs, k=0)
self.assertEqual(actual, [])
if __name__ == "__main__":
tf.test.main()
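# Note (sketch): because of the tf.test.main() hook above, these tests can be
# run directly from a TensorBoard source checkout, e.g.:
#     python -m tensorboard.backend.event_processing.data_provider_test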
[per-file numeric quality-signal columns omitted]

hexsha: b9299565a87f9a052852f5ae8225680eeeb2de61 | size: 1,923 | ext: py | lang: Python
path: tests/test_serialize.py | stars repo: aferrall/redner @ be52e4105140f575f153d640ba889eb6e6015616 | issues/forks repo: Awcrr/redner @ b4f57037af26b720d916bbaf26103a3499101a9f | licenses: ["MIT"]
stars: 1,146 (2018-11-11T01:47:18.000Z to 2022-03-31T14:11:03.000Z) | issues: 177 (2018-11-13T22:48:25.000Z to 2022-03-30T07:19:29.000Z) | forks: 127 (2018-11-11T02:32:17.000Z to 2022-03-31T07:24:03.000Z)
import pyredner
import numpy as np
import torch
cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
look_at = torch.tensor([0.0, 0.0, 0.0]),
up = torch.tensor([0.0, 1.0, 0.0]),
fov = torch.tensor([45.0]), # in degree
clip_near = 1e-2, # needs to be > 0
resolution = (256, 256),
fisheye = False)
mat_grey = pyredner.Material(\
diffuse_reflectance = \
torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]
shape_triangle = pyredner.Shape(\
vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shape_light = pyredner.Shape(\
vertices = torch.tensor([[-1.0, -1.0, -7.0],
[ 1.0, -1.0, -7.0],
[-1.0, 1.0, -7.0],
[ 1.0, 1.0, -7.0]], device = pyredner.get_device()),
indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
dtype = torch.int32, device = pyredner.get_device()),
uvs = None,
normals = None,
material_id = 0)
shapes = [shape_triangle, shape_light]
light = pyredner.AreaLight(shape_id = 1,
intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]
scene = pyredner.Scene(cam, shapes, materials, area_lights)
scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)
scene_args = pyredner.RenderFunction.serialize_scene(\
scene = scene,
num_samples = 16,
max_bounces = 1)
render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
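# Note (sketch): the render happens after a state_dict() round trip, so the
# saved image should match a render of the original scene; it could be read
# back (assuming pyredner.imread is available) with:
#     img = pyredner.imread('results/test_serialize/img.exr')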
[per-file numeric quality-signal columns omitted]

hexsha: b92a551001bac345f595f68ea0440f1231ad8e57 | size: 2,302 | ext: py | lang: Python
path: src/zope/publisher/tests/test_requestdataproperty.py | repo: Shoobx/zope.publisher @ 790e82045d7ae06146bd8c5e27139555b9ec1641 | licenses: ["ZPL-2.1"]
stars: 3 (2016-11-18T08:58:09.000Z to 2021-02-01T06:13:45.000Z) | issues: 42 (2015-06-02T19:26:10.000Z to 2022-03-15T07:24:03.000Z) | forks: 7 (2015-04-03T09:29:31.000Z to 2021-06-07T14:47:45.000Z)
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Request Data-Property Tests
"""
from unittest import TestCase, makeSuite
from zope.interface.common.tests.basemapping \
import testIEnumerableMapping, testIReadMapping
from zope.publisher.base \
import RequestDataProperty, RequestDataGetter, RequestDataMapper
class TestDataGettr(RequestDataGetter):
_gettrname = 'getSomething'
class TestDataMapper(RequestDataMapper):
_mapname = '_data'
_marker = object()
class Data(object):
def getSomething(self, name, default=_marker):
if name.startswith('Z'):
return "something %s" % name
if default is not _marker:
return default
raise KeyError(name)
something = RequestDataProperty(TestDataGettr)
somedata = RequestDataProperty(TestDataMapper)
class Test(TestCase):
def testRequestDataGettr(self):
testIReadMapping(self, Data().something,
{"Zope": "something Zope"}, ["spam"])
def testRequestDataMapper(self):
data = Data()
sample = {'foo': 'Foo', 'bar': 'Bar'}
data._data = sample
inst = data.somedata
testIReadMapping(self, inst, sample, ["spam"])
testIEnumerableMapping(self, inst, sample)
def testNoAssign(self):
data = Data()
try:
data.something = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
try:
data.somedata = {}
except AttributeError:
pass
else:
raise AssertionError("Shouldn't be able to assign")
def test_suite():
return makeSuite(Test)
| 27.404762
| 78
| 0.614683
| 222
| 2,302
| 6.337838
| 0.504505
| 0.017058
| 0.019901
| 0.039801
| 0.098081
| 0.098081
| 0.098081
| 0.098081
| 0.098081
| 0.098081
| 0
| 0.005679
| 0.235013
| 2,302
| 83
| 79
| 27.73494
| 0.793299
| 0.212858
| 0
| 0.26087
| 0
| 0
| 0.074436
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.108696
| false
| 0.043478
| 0.065217
| 0.021739
| 0.413043
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b92b002b9d57e933962f9291a749b365792c1b9a
| 1,444
|
py
|
Python
|
src/thornfield/caches/cache_compression_decorator.py
|
drorvinkler/thornfield
|
3c5bb8afaa96097bc71cccb119394a0f351d828f
|
[
"MIT"
] | 2
|
2020-11-24T13:27:14.000Z
|
2020-11-24T13:29:40.000Z
|
src/thornfield/caches/cache_compression_decorator.py
|
drorvinkler/thornfield
|
3c5bb8afaa96097bc71cccb119394a0f351d828f
|
[
"MIT"
] | 1
|
2020-11-24T13:33:45.000Z
|
2020-11-24T15:10:41.000Z
|
src/thornfield/caches/cache_compression_decorator.py
|
drorvinkler/thornfield
|
3c5bb8afaa96097bc71cccb119394a0f351d828f
|
[
"MIT"
] | null | null | null |
from typing import Callable, AnyStr, Optional
from zlib import compress as default_compress, decompress as default_decompress
from .cache import Cache
from ..constants import NOT_FOUND
class CacheCompressionDecorator(Cache):
def __init__(
self,
cache: Cache,
compress: Optional[Callable[[str], AnyStr]] = ...,
decompress: Optional[Callable[[AnyStr], str]] = ...,
) -> None:
super().__init__()
self._cache = cache
if compress is None:
self._compress = self._noop
elif compress is ...:
self._compress = self._default_compress
else:
self._compress = compress
if decompress is None:
self._decompress = self._noop
elif decompress is ...:
self._decompress = self._default_decompress
else:
self._decompress = decompress
def get(self, key):
value = self._cache.get(key)
return value if value is NOT_FOUND else self._decompress(value)
def set(self, key, value, expiration: int) -> None:
self._cache.set(key, self._compress(value), expiration)
@staticmethod
def _noop(x):
return x
@staticmethod
def _default_compress(obj: str) -> bytes:
return default_compress(obj.encode("UTF-8"))
@staticmethod
def _default_decompress(data: bytes) -> str:
return default_decompress(data).decode("UTF-8")
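A minimal usage sketch of the decorator follows; the InMemoryCache below is a hypothetical stand-in for a concrete Cache implementation and is not part of the module:
class InMemoryCache(Cache):
    def __init__(self):
        super().__init__()
        self._store = {}
    def get(self, key):
        # mirror the module's NOT_FOUND sentinel for missing keys
        return self._store.get(key, NOT_FOUND)
    def set(self, key, value, expiration: int) -> None:
        self._store[key] = value  # expiration is ignored in this sketch
cache = CacheCompressionDecorator(InMemoryCache())  # zlib round-trip by default
cache.set("greeting", "hello " * 1000, expiration=60)
assert cache.get("greeting") == "hello " * 1000  # stored as compressed bytes, read back as str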
| 29.469388
| 79
| 0.628809
| 160
| 1,444
| 5.45
| 0.275
| 0.068807
| 0.029817
| 0.041284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001905
| 0.272853
| 1,444
| 48
| 80
| 30.083333
| 0.828571
| 0
| 0
| 0.128205
| 0
| 0
| 0.006925
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.102564
| 0.076923
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b92be50c97841e71ffe31a7d7baa405cc9ba5537
| 38,846
|
py
|
Python
|
manim/mobject/vector_field.py
|
kdkasad/manim
|
249b1dcab0f18a43e953b5fda517734084c0a941
|
[
"MIT"
] | 2
|
2021-12-07T14:25:07.000Z
|
2021-12-09T14:16:10.000Z
|
manim/mobject/vector_field.py
|
kdkasad/manim
|
249b1dcab0f18a43e953b5fda517734084c0a941
|
[
"MIT"
] | 3
|
2021-09-15T08:11:29.000Z
|
2021-10-06T02:00:03.000Z
|
manim/mobject/vector_field.py
|
kdkasad/manim
|
249b1dcab0f18a43e953b5fda517734084c0a941
|
[
"MIT"
] | 3
|
2020-04-10T20:38:06.000Z
|
2020-09-30T03:03:45.000Z
|
"""Mobjects representing vector fields."""
__all__ = [
"VectorField",
"ArrowVectorField",
"StreamLines",
]
import itertools as it
import random
from math import ceil, floor
from typing import Callable, Iterable, Optional, Sequence, Tuple, Type
import numpy as np
from colour import Color
from PIL import Image
from .. import config
from ..animation.composition import AnimationGroup, Succession
from ..animation.creation import Create
from ..animation.indication import ShowPassingFlash
from ..animation.update import UpdateFromAlphaFunc
from ..constants import OUT, RIGHT, UP
from ..mobject.geometry import Vector
from ..mobject.mobject import Mobject
from ..mobject.types.vectorized_mobject import VGroup, VMobject
from ..utils.bezier import interpolate, inverse_interpolate
from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color
from ..utils.deprecation import deprecated_params
from ..utils.rate_functions import ease_out_sine, linear
from ..utils.simple_functions import sigmoid
from .types.opengl_vectorized_mobject import OpenGLVMobject
DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED]
class VectorField(VGroup):
"""A vector field.
Vector fields are based on a function defining a vector at every position.
By default, this class does not include any visible elements, but it provides
methods to move other :class:`~.Mobject` s along the vector field.
Parameters
----------
func
The function defining the rate of change at every position of the `VectorField`.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
**kwargs
):
super().__init__(**kwargs)
self.func = func
if color is None:
self.single_color = False
if color_scheme is None:
def color_scheme(p):
return np.linalg.norm(p)
self.color_scheme = color_scheme # TODO maybe other default for direction?
self.rgbs = np.array(list(map(color_to_rgb, colors)))
def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]:
vec = self.func(pos)
color_value = np.clip(
self.color_scheme(vec),
min_color_scheme_value,
max_color_scheme_value,
)
alpha = inverse_interpolate(
min_color_scheme_value,
max_color_scheme_value,
color_value,
)
alpha *= len(self.rgbs) - 1
c1 = self.rgbs[int(alpha)]
c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)]
alpha %= 1
return interpolate(c1, c2, alpha)
self.pos_to_rgb = pos_to_rgb
self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos))
else:
self.single_color = True
self.color = color
self.submob_movement_updater = None
@staticmethod
def shift_func(
func: Callable[[np.ndarray], np.ndarray],
shift_vector: np.ndarray,
) -> Callable[[np.ndarray], np.ndarray]:
"""Shift a vector field function.
Parameters
----------
func
The function defining a vector field.
shift_vector
The shift to be applied to the vector field.
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The shifted vector field function.
"""
return lambda p: func(p - shift_vector)
@staticmethod
def scale_func(
func: Callable[[np.ndarray], np.ndarray],
scalar: float,
) -> Callable[[np.ndarray], np.ndarray]:
"""Scale a vector field function.
Parameters
----------
func
The function defining a vector field.
        scalar
            The scalar to be applied to the vector field.
Examples
--------
.. manim:: ScaleVectorFieldFunction
class ScaleVectorFieldFunction(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP
vector_field = ArrowVectorField(func)
self.add(vector_field)
self.wait()
func = VectorField.scale_func(func, 0.5)
self.play(vector_field.animate.become(ArrowVectorField(func)))
self.wait()
Returns
-------
`Callable[[np.ndarray], np.ndarray]`
The scaled vector field function.
"""
return lambda p: func(p * scalar)
def nudge(
self,
mob: Mobject,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Nudge a :class:`~.Mobject` along the vector field.
Parameters
----------
mob
The mobject to move along the vector field
dt
            A scalar scaling how far the mobject is moved along the vector field.
            The actual distance is based on the magnitude of the vector field.
        substeps
            The number of steps the whole nudge is divided into. Higher values
            give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. If `False` the
vector field takes effect on the center of the given
            :class:`~.Mobject`. If `True` the vector field takes effect on the
            individual points of the :class:`~.Mobject`,
            potentially distorting it.
Returns
-------
VectorField
This vector field.
Examples
--------
.. manim:: Nudging
class Nudging(Scene):
def construct(self):
func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP
vector_field = ArrowVectorField(
func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2
)
self.add(vector_field)
circle = Circle(radius=2).shift(LEFT)
self.add(circle.copy().set_color(GRAY))
dot = Dot().move_to(circle)
vector_field.nudge(circle, -2, 60, True)
vector_field.nudge(dot, -2, 60)
circle.add_updater(vector_field.get_nudge_updater(pointwise=True))
dot.add_updater(vector_field.get_nudge_updater())
self.add(circle, dot)
self.wait(6)
"""
def runge_kutta(self, p: Sequence[float], step_size: float) -> float:
"""Returns the change in position of a point along a vector field.
Parameters
----------
p
The position of each point being moved along the vector field.
step_size
A scalar that is used to determine how much a point is shifted in a single step.
Returns
-------
float
How much the point is shifted.
"""
k_1 = self.func(p)
k_2 = self.func(p + step_size * (k_1 * 0.5))
k_3 = self.func(p + step_size * (k_2 * 0.5))
k_4 = self.func(p + step_size * k_3)
return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)
step_size = dt / substeps
for _ in range(substeps):
if pointwise:
mob.apply_function(lambda p: p + runge_kutta(self, p, step_size))
else:
mob.shift(runge_kutta(self, mob.get_center(), step_size))
return self
def nudge_submobjects(
self,
dt: float = 1,
substeps: int = 1,
pointwise: bool = False,
) -> "VectorField":
"""Apply a nudge along the vector field to all submobjects.
Parameters
----------
dt
            A scalar scaling how far the mobject is moved along the vector field.
            The actual distance is based on the magnitude of the vector field.
        substeps
            The number of steps the whole nudge is divided into. Higher values
            give more accurate approximations.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
for mob in self.submobjects:
self.nudge(mob, dt, substeps, pointwise)
return self
def get_nudge_updater(
self,
speed: float = 1,
pointwise: bool = False,
) -> Callable[[Mobject, float], Mobject]:
"""Get an update function to move a :class:`~.Mobject` along the vector field.
When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field.
Parameters
----------
speed
At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
Callable[[Mobject, float], Mobject]
The update function.
"""
return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise)
def start_submobject_movement(
self,
speed: float = 1,
pointwise: bool = False,
) -> "VectorField":
"""Start continuously moving all submobjects along the vector field.
Calling this method multiple times will result in removing the previous updater created by this method.
Parameters
----------
speed
The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details.
pointwise
Whether to move the mobject along the vector field. See :meth:`nudge` for details.
Returns
-------
VectorField
This vector field.
"""
self.stop_submobject_movement()
self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects(
dt * speed,
pointwise=pointwise,
)
self.add_updater(self.submob_movement_updater)
return self
def stop_submobject_movement(self) -> "VectorField":
"""Stops the continuous movement started using :meth:`start_submobject_movement`.
Returns
-------
VectorField
This vector field.
"""
self.remove_updater(self.submob_movement_updater)
self.submob_movement_updater = None
return self
def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image:
"""Generate an image that displays the vector field.
        The color at each position is calculated by passing the position through a
series of steps:
Calculate the vector field function at that position, map that vector to a
single value using `self.color_scheme` and finally generate a color from
that value using the color gradient.
Parameters
----------
sampling_rate
            The step size at which pixels get included in the image. Lower values give
more accurate results, but may take a long time to compute.
Returns
-------
        Image.Image
The vector field image.
"""
if self.single_color:
raise ValueError(
"There is no point in generating an image if the vector field uses a single color.",
)
ph = int(config["pixel_height"] / sampling_rate)
pw = int(config["pixel_width"] / sampling_rate)
fw = config["frame_width"]
fh = config["frame_height"]
points_array = np.zeros((ph, pw, 3))
x_array = np.linspace(-fw / 2, fw / 2, pw)
y_array = np.linspace(fh / 2, -fh / 2, ph)
x_array = x_array.reshape((1, len(x_array)))
y_array = y_array.reshape((len(y_array), 1))
x_array = x_array.repeat(ph, axis=0)
y_array.repeat(pw, axis=1) # TODO why not y_array = y_array.repeat(...)?
points_array[:, :, 0] = x_array
points_array[:, :, 1] = y_array
rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array)
return Image.fromarray((rgbs * 255).astype("uint8"))
def get_vectorized_rgba_gradient_function(
self,
start: float,
end: float,
colors: Iterable,
):
"""
Generates a gradient of rgbas as a numpy array
Parameters
----------
start
start value used for inverse interpolation at :func:`~.inverse_interpolate`
end
end value used for inverse interpolation at :func:`~.inverse_interpolate`
colors
list of colors to generate the gradient
Returns
-------
function to generate the gradients as numpy arrays representing rgba values
"""
rgbs = np.array([color_to_rgb(c) for c in colors])
def func(values, opacity=1):
alphas = inverse_interpolate(start, end, np.array(values))
alphas = np.clip(alphas, 0, 1)
scaled_alphas = alphas * (len(rgbs) - 1)
indices = scaled_alphas.astype(int)
next_indices = np.clip(indices + 1, 0, len(rgbs) - 1)
inter_alphas = scaled_alphas % 1
inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3))
result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas)
result = np.concatenate(
(result, np.full([len(result), 1], opacity)),
axis=1,
)
return result
return func
class ArrowVectorField(VectorField):
"""A :class:`VectorField` represented by a set of change vectors.
Vector fields are always based on a function defining the :class:`~.Vector` at every position.
    The values of this function are displayed as a grid of vectors.
    By default the color of each vector is determined by its magnitude.
Other color schemes can be used however.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
        Enables three-dimensional fields. Defaults to False; automatically set to True if
        z_range is not None.
length_func
        The function determining the displayed size of the vectors. The actual size
        of the vector is passed in; the returned value is used as the display size for the
        vector. By default this caps the displayed size of vectors to reduce clutter.
opacity
The opacity of the arrows.
vector_config
Additional arguments to be passed to the :class:`~.Vector` constructor
kwargs : Any
Additional arguments to be passed to the :class:`~.VGroup` constructor
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(ArrowVectorField(func))
.. manim:: SizingAndSpacing
class SizingAndSpacing(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
vf = ArrowVectorField(func, x_range=[-7, 7, 1])
self.add(vf)
self.wait()
length_func = lambda x: x / 3
vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func)
self.play(vf.animate.become(vf2))
self.wait()
.. manim:: Coloring
:save_last_frame:
class Coloring(Scene):
def construct(self):
func = lambda pos: pos - LEFT * 5
colors = [RED, YELLOW, BLUE, DARK_GRAY]
min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5)
max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5)
vf = ArrowVectorField(
func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors
)
self.add(vf, min_radius, max_radius)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining Vector positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False, # Automatically True if z_range is set
# Takes in actual norm, spits out displayed norm
length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm),
opacity: float = 1.0,
vector_config: Optional[dict] = None,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.length_func = length_func
self.opacity = opacity
if vector_config is None:
vector_config = {}
self.vector_config = vector_config
self.func = func
x_range = np.arange(*self.x_range)
y_range = np.arange(*self.y_range)
z_range = np.arange(*self.z_range)
for x, y, z in it.product(x_range, y_range, z_range):
self.add(self.get_vector(x * RIGHT + y * UP + z * OUT))
self.set_opacity(self.opacity)
def get_vector(self, point: np.ndarray):
"""Creates a vector in the vector field.
The created vector is based on the function of the vector field and is
rooted in the given point. Color and length fit the specifications of
this vector field.
Parameters
----------
point
The root point of the vector.
kwargs : Any
Additional arguments to be passed to the :class:`~.Vector` constructor
"""
output = np.array(self.func(point))
norm = np.linalg.norm(output)
if norm != 0:
output *= self.length_func(norm) / norm
vect = Vector(output, **self.vector_config)
vect.shift(point)
if self.single_color:
vect.set_color(self.color)
else:
vect.set_color(self.pos_to_color(point))
return vect
class StreamLines(VectorField):
"""StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents.
Vector fields are always based on a function defining the vector at every position.
    The values of this function are displayed by moving many agents along the vector field
and showing their trace.
Parameters
----------
func
The function defining the rate of change at every position of the vector field.
color
The color of the vector field. If set, position-specific coloring is disabled.
color_scheme
A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`.
min_color_scheme_value
The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient.
max_color_scheme_value
The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient.
colors
The colors defining the color gradient of the vector field.
x_range
A sequence of x_min, x_max, delta_x
y_range
A sequence of y_min, y_max, delta_y
z_range
A sequence of z_min, z_max, delta_z
three_dimensions
        Enables three-dimensional fields. Defaults to False; automatically set to True if
        z_range is not None.
noise_factor
The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined.
n_repeats
The number of agents generated at each starting point.
dt
The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field.
virtual_time
The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation.
max_anchors_per_line
The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length.
padding
The distance agents can move out of the generation area before being terminated.
stroke_width
        The stroke width of the stream lines.
opacity
The opacity of the stream lines.
Examples
--------
.. manim:: BasicUsage
:save_last_frame:
class BasicUsage(Scene):
def construct(self):
func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3
self.add(StreamLines(func))
.. manim:: SpawningAndFlowingArea
:save_last_frame:
class SpawningAndFlowingArea(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5
stream_lines = StreamLines(
func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1
)
spawning_area = Rectangle(width=6, height=4)
flowing_area = Rectangle(width=8, height=6)
labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)]
for lbl in labels:
lbl.add_background_rectangle(opacity=0.6, buff=0.05)
self.add(stream_lines, spawning_area, flowing_area, *labels)
"""
def __init__(
self,
func: Callable[[np.ndarray], np.ndarray],
color: Optional[Color] = None,
color_scheme: Optional[Callable[[np.ndarray], float]] = None,
min_color_scheme_value: float = 0,
max_color_scheme_value: float = 2,
colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS,
# Determining stream line starting positions:
x_range: Sequence[float] = None,
y_range: Sequence[float] = None,
z_range: Sequence[float] = None,
three_dimensions: bool = False,
noise_factor: Optional[float] = None,
n_repeats=1,
# Determining how lines are drawn
dt=0.05,
virtual_time=3,
max_anchors_per_line=100,
padding=3,
# Determining stream line appearance:
stroke_width=1,
opacity=1,
**kwargs
):
self.x_range = x_range or [
floor(-config["frame_width"] / 2),
ceil(config["frame_width"] / 2),
]
self.y_range = y_range or [
floor(-config["frame_height"] / 2),
ceil(config["frame_height"] / 2),
]
self.ranges = [self.x_range, self.y_range]
if three_dimensions or z_range:
self.z_range = z_range or self.y_range.copy()
self.ranges += [self.z_range]
else:
self.ranges += [[0, 0]]
for i in range(len(self.ranges)):
if len(self.ranges[i]) == 2:
self.ranges[i] += [0.5]
self.ranges[i][1] += self.ranges[i][2]
self.x_range, self.y_range, self.z_range = self.ranges
super().__init__(
func,
color,
color_scheme,
min_color_scheme_value,
max_color_scheme_value,
colors,
**kwargs,
)
self.noise_factor = (
noise_factor if noise_factor is not None else self.y_range[2] / 2
)
self.n_repeats = n_repeats
self.virtual_time = virtual_time
self.max_anchors_per_line = max_anchors_per_line
self.padding = padding
self.stroke_width = stroke_width
half_noise = self.noise_factor / 2
np.random.seed(0)
start_points = np.array(
[
(x - half_noise) * RIGHT
+ (y - half_noise) * UP
+ (z - half_noise) * OUT
+ self.noise_factor * np.random.random(3)
for n in range(self.n_repeats)
for x in np.arange(*self.x_range)
for y in np.arange(*self.y_range)
for z in np.arange(*self.z_range)
],
)
def outside_box(p):
return (
p[0] < self.x_range[0] - self.padding
or p[0] > self.x_range[1] + self.padding - self.x_range[2]
or p[1] < self.y_range[0] - self.padding
or p[1] > self.y_range[1] + self.padding - self.y_range[2]
or p[2] < self.z_range[0] - self.padding
or p[2] > self.z_range[1] + self.padding - self.z_range[2]
)
max_steps = ceil(virtual_time / dt) + 1
if not self.single_color:
self.background_img = self.get_colored_background_image()
if config["renderer"] == "opengl":
self.values_to_rgbas = self.get_vectorized_rgba_gradient_function(
min_color_scheme_value,
max_color_scheme_value,
colors,
)
for point in start_points:
points = [point]
for _ in range(max_steps):
last_point = points[-1]
new_point = last_point + dt * func(last_point)
if outside_box(new_point):
break
points.append(new_point)
step = max_steps
if not step:
continue
if config["renderer"] == "opengl":
line = OpenGLVMobject()
else:
line = VMobject()
line.duration = step * dt
step = max(1, int(len(points) / self.max_anchors_per_line))
line.set_points_smoothly(points[::step])
if self.single_color:
line.set_stroke(self.color)
else:
if config["renderer"] == "opengl":
# scaled for compatibility with cairo
line.set_stroke(width=self.stroke_width / 4.0)
norms = np.array(
[np.linalg.norm(self.func(point)) for point in line.points],
)
line.set_rgba_array_direct(
self.values_to_rgbas(norms, opacity),
name="stroke_rgba",
)
else:
if np.any(self.z_range != np.array([0, 0.5, 0.5])):
line.set_stroke(
[self.pos_to_color(p) for p in line.get_anchors()],
)
else:
line.color_using_background_image(self.background_img)
line.set_stroke(width=self.stroke_width, opacity=opacity)
self.add(line)
self.stream_lines = [*self.submobjects]
def create(
self,
lag_ratio: Optional[float] = None,
run_time: Optional[Callable[[float], float]] = None,
**kwargs
) -> AnimationGroup:
"""The creation animation of the stream lines.
The stream lines appear in random order.
Parameters
----------
lag_ratio
The lag ratio of the animation.
If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation.
run_time
The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`.
If undefined, the virtual time of the stream lines is used as run time.
Returns
-------
:class:`~.AnimationGroup`
The creation animation of the stream lines.
Examples
--------
.. manim:: StreamLineCreation
class StreamLineCreation(Scene):
def construct(self):
func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos
stream_lines = StreamLines(
func,
color=YELLOW,
x_range=[-7, 7, 1],
y_range=[-4, 4, 1],
stroke_width=3,
virtual_time=1, # use shorter lines
max_anchors_per_line=5, # better performance with fewer anchors
)
self.play(stream_lines.create()) # uses virtual_time as run_time
self.wait()
"""
if run_time is None:
run_time = self.virtual_time
if lag_ratio is None:
lag_ratio = run_time / 2 / len(self.submobjects)
animations = [
Create(line, run_time=run_time, **kwargs) for line in self.stream_lines
]
random.shuffle(animations)
return AnimationGroup(*animations, lag_ratio=lag_ratio)
def start_animation(
self,
warm_up=True,
flow_speed: float = 1,
time_width: float = 0.3,
rate_func: Callable[[float], float] = linear,
line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash,
**kwargs
) -> None:
"""Animates the stream lines using an updater.
        The stream lines will continuously flow.
Parameters
----------
warm_up : bool, optional
If `True` the animation is initialized line by line. Otherwise it starts with all lines shown.
flow_speed
At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow.
time_width
The proportion of the stream line shown while being animated
rate_func
The rate function of each stream line flashing
line_animation_class
The animation class being used
Examples
--------
.. manim:: ContinuousMotion
class ContinuousMotion(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5)
self.wait(stream_lines.virtual_time / stream_lines.flow_speed)
"""
for line in self.stream_lines:
run_time = line.duration / flow_speed
line.anim = line_animation_class(
line,
run_time=run_time,
rate_func=rate_func,
time_width=time_width,
**kwargs,
)
line.anim.begin()
line.time = random.random() * self.virtual_time
if warm_up:
line.time *= -1
self.add(line.anim.mobject)
def updater(mob, dt):
for line in mob.stream_lines:
line.time += dt * flow_speed
if line.time >= self.virtual_time:
line.time -= self.virtual_time
line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1))
self.add_updater(updater)
self.flow_animation = updater
self.flow_speed = flow_speed
self.time_width = time_width
def end_animation(self) -> AnimationGroup:
"""End the stream line animation smoothly.
Returns an animation resulting in fully displayed stream lines without a noticeable cut.
Returns
-------
:class:`~.AnimationGroup`
The animation fading out the running stream animation.
Raises
------
ValueError
if no stream line animation is running
Examples
--------
.. manim:: EndAnimation
class EndAnimation(Scene):
def construct(self):
func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT
stream_lines = StreamLines(
func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE
)
self.add(stream_lines)
stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5)
self.wait(1)
self.play(stream_lines.end_animation())
"""
if self.flow_animation is None:
raise ValueError("You have to start the animation before fading it out.")
def hide_and_wait(mob, alpha):
if alpha == 0:
mob.set_stroke(opacity=0)
elif alpha == 1:
mob.set_stroke(opacity=1)
def finish_updater_cycle(line, alpha):
line.time += dt * self.flow_speed
line.anim.interpolate(min(line.time / line.anim.run_time, 1))
if alpha == 1:
self.remove(line.anim.mobject)
line.anim.finish()
max_run_time = self.virtual_time / self.flow_speed
creation_rate_func = ease_out_sine
        creation_starting_speed = creation_rate_func(0.001) * 1000
        creation_run_time = (
            max_run_time / (1 + self.time_width) * creation_starting_speed
        )
# creation_run_time is calculated so that the creation animation starts at the same speed
# as the regular line flash animation but eases out.
dt = 1 / config["frame_rate"]
animations = []
self.remove_updater(self.flow_animation)
self.flow_animation = None
for line in self.stream_lines:
create = Create(
line,
run_time=creation_run_time,
rate_func=creation_rate_func,
)
if line.time <= 0:
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
hide_and_wait,
run_time=-line.time / self.flow_speed,
),
create,
),
)
self.remove(line.anim.mobject)
line.anim.finish()
else:
remaining_time = max_run_time - line.time / self.flow_speed
animations.append(
Succession(
UpdateFromAlphaFunc(
line,
finish_updater_cycle,
run_time=remaining_time,
),
create,
),
)
return AnimationGroup(*animations)
# TODO: Variant of StreamLines that is able to respond to changes in the vector field function
| 36.96099
| 185
| 0.57298
| 4,720
| 38,846
| 4.567161
| 0.1125
| 0.032147
| 0.026627
| 0.013221
| 0.425291
| 0.375006
| 0.353853
| 0.323422
| 0.30649
| 0.299253
| 0
| 0.010795
| 0.341837
| 38,846
| 1,050
| 186
| 36.99619
| 0.832362
| 0.438604
| 0
| 0.351351
| 0
| 0
| 0.022145
| 0
| 0
| 0
| 0
| 0.002857
| 0
| 1
| 0.049896
| false
| 0.004158
| 0.045738
| 0.004158
| 0.137214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b92e1fb5ed102dbd1d7dc2d4b0ef720e265a976f
| 1,045
|
py
|
Python
|
electrum_trc/scripts/txradar.py
|
TheSin-/electrum-trc
|
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
|
[
"MIT"
] | 1
|
2019-08-20T18:05:32.000Z
|
2019-08-20T18:05:32.000Z
|
electrum_trc/scripts/txradar.py
|
TheSin-/electrum-trc
|
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
|
[
"MIT"
] | 1
|
2022-03-14T19:45:31.000Z
|
2022-03-14T19:45:31.000Z
|
electrum_trc/scripts/txradar.py
|
TheSin-/electrum-trc
|
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import asyncio
from electrum_trc.network import filter_protocol, Network
from electrum_trc.util import create_and_start_event_loop, log_exceptions
try:
txid = sys.argv[1]
except IndexError:
print("usage: txradar txid")
sys.exit(1)
loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()
@log_exceptions
async def f():
try:
peers = await network.get_peers()
peers = filter_protocol(peers, 's')
results = await network.send_multiple_requests(peers, 'blockchain.transaction.get', [txid])
r1, r2 = [], []
for k, v in results.items():
(r1 if not isinstance(v, Exception) else r2).append(k)
print(f"Received {len(results)} answers")
try: propagation = len(r1) * 100. / (len(r1) + len(r2))
except ZeroDivisionError: propagation = 0
print(f"Propagation rate: {propagation:.1f} percent")
finally:
stopping_fut.set_result(1)
asyncio.run_coroutine_threadsafe(f(), loop)
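Invocation sketch (requires network access to Electrum servers; <txid> is a placeholder for a real transaction hash, and the concrete numbers below are illustrative, not recorded output):
$ python3 txradar.py <txid>
Received 31 answers
Propagation rate: 87.1 percent
The two output lines correspond to the print calls in f() above: every SSL-capable server is queried with blockchain.transaction.get, and the propagation rate is the fraction of servers that return the transaction rather than an error.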
| 28.243243
| 99
| 0.675598
| 138
| 1,045
| 4.949275
| 0.543478
| 0.035139
| 0.043924
| 0.055637
| 0.06735
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019277
| 0.205742
| 1,045
| 36
| 100
| 29.027778
| 0.803614
| 0.020096
| 0
| 0.071429
| 0
| 0
| 0.117302
| 0.025415
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.107143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b92ef9143bb84fe6d37501129ff559d015cf231e
| 1,091
|
py
|
Python
|
jp.atcoder/dp/dp_g/24586988.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-09T03:06:25.000Z
|
2022-02-09T03:06:25.000Z
|
jp.atcoder/dp/dp_g/24586988.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | 1
|
2022-02-05T22:53:18.000Z
|
2022-02-09T01:29:30.000Z
|
jp.atcoder/dp/dp_g/24586988.py
|
kagemeka/atcoder-submissions
|
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
|
[
"MIT"
] | null | null | null |
import sys
import typing
import numpy as np
def solve(
n: int,
g: np.array,
) -> typing.NoReturn:
indeg = np.zeros(
n,
dtype=np.int64,
)
for v in g[:, 1]:
indeg[v] += 1
g = g[g[:, 0].argsort()]
i = np.searchsorted(
g[:, 0],
np.arange(n + 1)
)
q = [
v for v in range(n)
if not indeg[v]
]
dist = np.zeros(
n,
dtype=np.int64,
)
for u in q:
for j in range(
i[u], i[u + 1],
):
v = g[j, 1]
indeg[v] -= 1
dist[v] = max(
dist[v],
dist[u] + 1,
)
if indeg[v]: continue
q.append(v)
print(dist.max())
def main() -> typing.NoReturn:
n, m = map(
int, input().split(),
)
g = np.array(
sys.stdin.read().split(),
dtype=np.int64,
).reshape(m, 2) - 1
solve(n, g)
OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
from numba import i8, njit
from numba.pycc import CC
cc = CC('my_module')
fn = solve
signature = (i8, i8[:, :])
cc.export(
fn.__name__,
signature,
)(fn)
cc.compile()
exit(0)
from my_module import solve
main()
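The submission relies on numba's ahead-of-time (pycc) compilation trick common on AtCoder: when the script is first run with ONLINE_JUDGE as its last argument, it compiles solve into a native my_module extension and exits; the judged run then imports the compiled function. A stripped-down sketch of the same mechanism, with a hypothetical function in place of solve:
import sys
def square(x: int) -> int:
    return x * x
if sys.argv[-1] == 'ONLINE_JUDGE':
    from numba import i8
    from numba.pycc import CC
    cc = CC('fast_module')                       # writes fast_module.*.so next to the script
    cc.export(square.__name__, i8(i8))(square)   # register the exported signature
    cc.compile()
    exit(0)
from fast_module import square                   # import the AOT-compiled version
print(square(12))                                # 144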
| 13.810127
| 30
| 0.505041
| 172
| 1,091
| 3.162791
| 0.372093
| 0.044118
| 0.066176
| 0.047794
| 0.084559
| 0.084559
| 0.084559
| 0
| 0
| 0
| 0
| 0.02969
| 0.320807
| 1,091
| 78
| 31
| 13.987179
| 0.704453
| 0
| 0
| 0.079365
| 0
| 0
| 0.019248
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031746
| false
| 0
| 0.095238
| 0
| 0.126984
| 0.015873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b930187de467bdc99d38231d4b217f6589a62613
| 2,039
|
py
|
Python
|
starteMessung.py
|
jkerpe/TroubleBubble
|
813ad797398b9f338f136bcb96c6c92186d92ebf
|
[
"MIT"
] | null | null | null |
starteMessung.py
|
jkerpe/TroubleBubble
|
813ad797398b9f338f136bcb96c6c92186d92ebf
|
[
"MIT"
] | null | null | null |
starteMessung.py
|
jkerpe/TroubleBubble
|
813ad797398b9f338f136bcb96c6c92186d92ebf
|
[
"MIT"
] | 1
|
2021-08-09T14:57:57.000Z
|
2021-08-09T14:57:57.000Z
|
from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time
programmstart = time.time()
# Parse arguments (e.g. run 'starteMessung.py -n 100' in a terminal)
ap = argparse.ArgumentParser(description="""Script for capturing images of the test section
 and determining the volume of air bubbles""")
ap.add_argument("-n", "--number", default=400, type=int, help="Number of frames to capture. Default: 400 images")
ap.add_argument("-fr", "--framerate", default=100, type=int, help="Framerate in fps. Guide values: flow <3 ml/s: 50 fps, 3-6 ml/s: 100 fps, >6 ml/s: 200 fps; Default: 100 fps")
args = vars(ap.parse_args())
# Extract the parsed arguments
numberOfImagesToGrab = args['number']
framerate = args['framerate']
if __name__ == '__main__':
startzeit = time.time()
    # Check whether a camera is connected
devices = pylon.TlFactory.GetInstance().EnumerateDevices()
if len(devices) == 0:
print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
return False
    # Check whether the pressure sensor is connected
try:
bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2)  # receive 2 bytes
except OSError:
print("Kein Drucksensor angeschlossen")
exit()
    # Generate a unique directory name from the current time and the parameters
dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname)  # create the directory
    print(f"Directory name: {dirname}")
beginn = time.time()-programmstart
    # Start the capture and processing threads
t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
t_aufnahme.start()
t_tracke.start()
t_aufnahme.join()
t_tracke.join()
| 34.559322
| 169
| 0.703776
| 254
| 2,039
| 5.570866
| 0.570866
| 0.016961
| 0.018375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022975
| 0.188818
| 2,039
| 58
| 170
| 35.155172
| 0.832527
| 0.172143
| 0
| 0
| 0
| 0.025641
| 0.299166
| 0.027414
| 0
| 0
| 0.002384
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.25641
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b93050ad4c3c78860eb79accbddb8566a673cb7e
| 3,211
|
py
|
Python
|
application/services/decart.py
|
Sapfir0/web-premier-eye
|
f060b01e98a923374ea60360ba133caaa654b6c7
|
[
"MIT"
] | null | null | null |
application/services/decart.py
|
Sapfir0/web-premier-eye
|
f060b01e98a923374ea60360ba133caaa654b6c7
|
[
"MIT"
] | null | null | null |
application/services/decart.py
|
Sapfir0/web-premier-eye
|
f060b01e98a923374ea60360ba133caaa654b6c7
|
[
"MIT"
] | 1
|
2020-01-06T18:27:45.000Z
|
2020-01-06T18:27:45.000Z
|
import os
import tempfile
def hasOnePointInside(bigRect, minRect):  # at least one point lies inside
minY, minX, maxY, maxX = bigRect
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
return a or b or c or d
def isCompletelyInside(bigRect, minRect):  # the object is completely inside the rectangle
y1, x1, y2, x2 = bigRect
minX = x1
    minY = y1  # seems right
maxX = x2
maxY = y2
y1, x1, y2, x2 = minRect
a = (minY <= y1 <= maxY)
b = (minX <= x1 <= maxX)
c = (minY <= y2 <= maxY)
d = (minX <= x2 <= maxX)
    return a and b and c and d  # if True, the object is completely inside the big rectangle
def isPartiallyInside(bigRect, minRect, innerPercent=0.5):  # the object is partially inside the rectangle
bigLUy, bigLUx, bigRDy, bigRDx = bigRect
minLUy, minLUx, minRDy, minRDx = minRect
    fullSquare = (minLUy - minRDy) * (minRDx - minLUx)  # not sure this is correct
    # Not sure about the ifs
if bigLUy < minLUy:
minLUy = bigLUy
if bigRDy < minRDy:
minRDy = bigRDy
if bigLUx > minLUx:
minLUx = bigLUx
if bigRDx > minRDx:
minRDx = bigRDx
inObjSquare = (minLUy - minRDy) * (minRDx - minLUx)
return inObjSquare / fullSquare >= innerPercent
def createGraphic(imagePath: str, searchRect: list, objectsListRect: list):
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import matplotlib.patches as patches
im = np.array(Image.open(imagePath), dtype=np.uint8)
fig, ax = plt.subplots(1)
ax.imshow(im)
bigRect = Rectangle(searchRect)
minRects = [Rectangle(i) for i in objectsListRect]
rect = patches.Rectangle(*bigRect.getMTparam(), linewidth=1, edgecolor='g', facecolor='None')
ax.add_patch(rect)
for i in minRects:
rect = patches.Rectangle(*i.getMTparam(), linewidth=1, edgecolor='r', facecolor='none')
ax.add_patch(rect)
temp = tempfile.NamedTemporaryFile()
path = os.path.join(os.getcwd(), temp.name)
plt.savefig(path)
return os.path.split(temp.name + ".png")
class Rectangle:
LDx = 0
LDy = 0
RUx = 0
RUy = 0
def __init__(self, coordinates: list):
if len(coordinates) != 4:
            raise ValueError("You must pass the (x, y) coordinates of two opposite vertices")
if coordinates[0] >= coordinates[2] or coordinates[1] >= coordinates[3]:
raise ValueError(
"Неверно заданы вершины, сначала подаются 2 координаты нижнего левого угла, потом верхнего правого")
self.LDx, self.LDy, self.RUx, self.RUy = coordinates
def getWidth(self):
return self.RUx - self.LDx
def getHeight(self):
return self.RUy - self.LDy
def getLUx(self):
return self.LDx
def getLUy(self):
return self.RUy
def getMTparam(self):
        return ((self.getLUy(), self.getLUx()),  # why the minus sign? I don't know
                -self.getHeight(), self.getWidth())  # everything is in a completely different order than it should be?
def getCenterOfDown(self):
return [(self.LDx + self.RUx) / 2, self.LDy]
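A quick sanity check of the containment helpers; the coordinates are illustrative and use the (minY, minX, maxY, maxX) order the functions unpack:
big = (0, 0, 10, 10)
inner = (2, 2, 5, 5)
assert hasOnePointInside(big, inner)                 # corner (2, 2) lies inside
assert isCompletelyInside(big, inner)                # both corners lie inside
assert not isCompletelyInside(big, (2, 2, 12, 5))    # y2 = 12 falls outside the big rectangle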
| 28.927928
| 117
| 0.62753
| 408
| 3,211
| 4.92402
| 0.392157
| 0.029866
| 0.041812
| 0.011946
| 0.092583
| 0.092583
| 0.065704
| 0.065704
| 0.065704
| 0.065704
| 0
| 0.017307
| 0.262224
| 3,211
| 110
| 118
| 29.190909
| 0.83073
| 0.094986
| 0
| 0.15
| 0
| 0
| 0.057686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1375
| false
| 0
| 0.075
| 0.075
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b931a37de7e1f1ed0fc213effed503351b163f01
| 9,946
|
py
|
Python
|
goopylib/objects/_BBox.py
|
BhavyeMathur/goopylib
|
f9eb1458e9218a8dd4add6693ce70b804624bf91
|
[
"MIT"
] | 25
|
2020-07-09T10:57:16.000Z
|
2022-02-06T10:31:34.000Z
|
goopylib/objects/_BBox.py
|
BhavyeMathur/goopy
|
f9eb1458e9218a8dd4add6693ce70b804624bf91
|
[
"MIT"
] | 48
|
2020-07-02T20:08:40.000Z
|
2020-07-06T16:09:25.000Z
|
goopylib/objects/_BBox.py
|
BhavyeMathur/goopy
|
f9eb1458e9218a8dd4add6693ce70b804624bf91
|
[
"MIT"
] | 1
|
2020-12-01T13:45:53.000Z
|
2020-12-01T13:45:53.000Z
|
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
class BBox(GraphicsObject):
# Internal base class for objects represented by bounding box
# (opposite corners) Line segment is a degenerate case.
resizing_objects = []
def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0,
tag=None):
self.p1 = p1
self.p2 = p2
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag)
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
self.min_width = None
self.min_height = None
self.max_width = None
self.max_height = None
self.resizing_bounds = {}
self.is_resizing = {}
self.bounds_thickness = 0
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object, not {fill}")
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object, not {outline}")
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
def __repr__(self):
return "_BBox"
def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None,
thickness=10):
"""Override in subclasses"""
pass
def _move(self, dx, dy):
self.p1[0] += dx
self.p1[1] += dy
self.p2[0] += dx
self.p2[1] += dy
self.anchor[0] += dx
self.anchor[1] += dy
def is_clicked(self, mouse_pos):
if self.bounds is None:
if mouse_pos is None:
return False
else:
if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \
(self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]):
return True
else:
return False
else:
return self.bounds.is_clicked(mouse_pos)
def get_p1(self):
return self.p1.copy()
def get_p2(self):
return self.p2.copy()
def get_top_right(self):
return self.p1.copy()
def get_top_left(self):
return [self.p2[0], self.p1[1]]
def get_bottom_left(self):
return [self.p1[0], self.p2[1]]
def get_bottom_right(self):
return self.p2.copy()
def get_top(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]]
def get_bottom(self):
return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]]
def get_left(self):
return [self.p1[0], (self.p1[1] + self.p2[1]) / 2]
def get_right(self):
return [self.p2[0], (self.p1[1] + self.p2[1]) / 2]
def get_width(self):
return self.width
def get_height(self):
return self.height
def get_fill(self):
return self.fill
def get_outline(self):
return self.outline
def get_outline_width(self):
return self.outline_width
def get_anchor(self):
return self.anchor
def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"):
self.set_width(width, horizontal_align)
self.set_height(height, vertical_align)
return self
def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40,
bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None):
if min_width < 1 or min_height < 1:
raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than "
f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}")
self.min_width = min_width
self.min_height = min_height
self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right}
self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds,
left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width)
if top is False and bottom is False and left is False and right is False:
if self in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.remove(self)
elif self not in GraphicsObject.resizing_objects:
GraphicsObject.resizing_objects.add(self)
self.bounds_thickness = bounds_width
return self
def set_coords(self, p1, p2):
self.p1 = p1.copy()
self.p2 = p2.copy()
# These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1
if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values
self.p1[0], self.p2[0] = self.p2[0], self.p1[0]
if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values
self.p1[1], self.p2[1] = self.p2[1], self.p1[1]
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = self.p2[0] - self.p1[0]
self.height = self.p2[1] - self.p1[1]
width_scale = (p2[0] - p1[0]) / self.width
height_scale = (p2[1] - p1[1]) / self.height
# abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value
self.width = p2[0] - p1[0]
self.height = p2[1] - p1[1]
self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2]
self._update_layer()
return self
def set_width(self, width, center="center"):
if center not in {"center", "right", "left"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_outline_width) needs to be one of "
f'{["center", "right", "left"]}')
if center == "left":
self.set_coords(self.p1, self.p2.add_x(width - self.width))
elif center == "right":
self.set_coords(self.p1.add_x(-(width - self.width)), self.p2)
else:
self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width))
return self
def set_height(self, height, center="center"):
if center not in {"center", "top", "bottom"}:
raise GraphicsError(
"\n\nThe center argument for resizing the object (set_height) needs to be one of "
f'{["center", "top", "bottom"]}')
if center == "top":
self.set_coords(self.p1, self.p2.add_y(height - self.height))
elif center == "bottom":
self.set_coords(self.p1.add_y(-(height - self.height)), self.p2)
else:
self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height))
return self
def set_fill(self, fill):
if fill is None:
self.fill = STYLES["default"]["fill"]
elif isinstance(fill, Colour): # Checking if the option is a colour
self.fill = fill
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object, not {fill}")
self._update_layer()
return self
def set_outline(self, outline):
if outline is None:
self.outline = STYLES["default"]["outline"]
elif isinstance(outline, Colour): # Checking if the option is a colour
self.outline = outline
else: # If not, raise an error
            raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object, not {outline}")
self._update_layer()
return self
def set_outline_width(self, outline_width):
if outline_width is None:
self.outline_width = STYLES["default"]["width"]
elif isinstance(outline_width, int): # Checking if the option is an integer
self.outline_width = outline_width
else: # If not, raise an error
raise GraphicsError(
f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}")
self._update_layer()
return self
| 37.81749
| 122
| 0.594008
| 1,426
| 9,946
| 4.037868
| 0.095372
| 0.047933
| 0.020667
| 0.016672
| 0.659083
| 0.613234
| 0.597951
| 0.536124
| 0.484022
| 0.456929
| 0
| 0.034951
| 0.289463
| 9,946
| 262
| 123
| 37.961832
| 0.779822
| 0.129801
| 0
| 0.397849
| 0
| 0
| 0.120204
| 0.0051
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155914
| false
| 0.005376
| 0.010753
| 0.091398
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b931c0b51c15ef9d8f1fe028562964e4cc16bd70
| 670
|
py
|
Python
|
Graph/DFS&BFS.py
|
Mayner0220/Programmers
|
42e4783a526506fb7d8208841a76201909ed5c5c
|
[
"Apache-2.0"
] | 1
|
2021-04-01T06:19:02.000Z
|
2021-04-01T06:19:02.000Z
|
Graph/DFS&BFS.py
|
Mayner0220/Programmers
|
42e4783a526506fb7d8208841a76201909ed5c5c
|
[
"Apache-2.0"
] | null | null | null |
Graph/DFS&BFS.py
|
Mayner0220/Programmers
|
42e4783a526506fb7d8208841a76201909ed5c5c
|
[
"Apache-2.0"
] | null | null | null |
# https://www.acmicpc.net/problem/1260
n, m, v = map(int, input().split())
graph = [[0] * (n+1) for _ in range(n+1)]
visit = [False] * (n+1)
for _ in range(m):
R, C = map(int, input().split())
graph[R][C] = 1
graph[C][R] = 1
def dfs(v):
visit[v] = True
print(v, end=" ")
for i in range(1, n+1):
if not visit[i] and graph[v][i]==1:
dfs(i)
def bfs(v):
queue = [v]
visit[v] = False
while queue:
v = queue.pop(0)
print(v, end=" ")
for i in range(1, n+1):
if visit[i] and graph[v][i]==1:
queue.append(i)
visit[i] = False
dfs(v)
print()
bfs(v)
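Sample run with the example from the linked problem (acmicpc.net/problem/1260); the input and expected output are my reading of that page, so treat them as illustrative:
input:
4 5 1
1 2
1 3
1 4
2 4
3 4
output:
1 2 4 3
1 2 3 4
The first line is the DFS order (ascending neighbors first), the second the BFS order; note that bfs reuses the visit array left over from dfs by flipping entries back to False.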
| 19.142857
| 43
| 0.470149
| 114
| 670
| 2.745614
| 0.324561
| 0.031949
| 0.070288
| 0.102236
| 0.479233
| 0.268371
| 0.268371
| 0.159744
| 0.159744
| 0.159744
| 0
| 0.037778
| 0.328358
| 670
| 35
| 44
| 19.142857
| 0.657778
| 0.053731
| 0
| 0.153846
| 0
| 0
| 0.00316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0.115385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b932e9aa7c1cc0da8573d5baaf3b16b4549529cd
| 347
|
py
|
Python
|
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
|
Jahidul007/Python-Bootcamp
|
3c870587465ff66c2c1871c8d3c4eea72463abda
|
[
"MIT"
] | 2
|
2020-12-07T16:07:07.000Z
|
2020-12-07T16:08:53.000Z
|
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
|
purusharthmalik/Python-Bootcamp
|
2ed1cf886d1081de200b0fdd4cb4e28008c7e3d1
|
[
"MIT"
] | null | null | null |
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
|
purusharthmalik/Python-Bootcamp
|
2ed1cf886d1081de200b0fdd4cb4e28008c7e3d1
|
[
"MIT"
] | 1
|
2020-10-03T16:38:02.000Z
|
2020-10-03T16:38:02.000Z
|
class Solution:
def modifyString(self, s: str) -> str:
s = list(s)
for i in range(len(s)):
if s[i] == "?":
for c in "abc":
if (i == 0 or s[i-1] != c) and (i+1 == len(s) or s[i+1] != c):
s[i] = c
break
return "".join(s)
| 31.545455
| 83
| 0.337176
| 49
| 347
| 2.387755
| 0.469388
| 0.068376
| 0.068376
| 0.08547
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022989
| 0.498559
| 347
| 10
| 84
| 34.7
| 0.649425
| 0
| 0
| 0
| 0
| 0
| 0.011527
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|